Radon FB + sliding improvements

author     Tuomo Valkonen <tuomov@iki.fi>
date       Thu, 29 Aug 2024 00:00:00 -0500
branch     dev
changeset  34:efa60bc4f743
parent     33:aec67cdd6b14
child      35:b087e3eab191

files:
Cargo.lock
src/dataterm.rs
src/experiments.rs
src/fb.rs
src/forward_model.rs
src/frank_wolfe.rs
src/kernels/gaussian.rs
src/kernels/hat_convolution.rs
src/main.rs
src/measures/discrete.rs
src/measures/merging.rs
src/pdps.rs
src/plot.rs
src/radon_fb.rs
src/regularisation.rs
src/run.rs
src/sliding_fb.rs
src/subproblem.rs
src/subproblem/l1squared_nonneg.rs
src/subproblem/l1squared_unconstrained.rs
src/subproblem/nonneg.rs
src/subproblem/unconstrained.rs
src/transport.rs
src/types.rs
--- a/Cargo.lock	Tue Aug 01 10:32:12 2023 +0300
+++ b/Cargo.lock	Thu Aug 29 00:00:00 2024 -0500
@@ -56,6 +56,12 @@
 ]
 
 [[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
 name = "android_system_properties"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -74,17 +80,6 @@
 ]
 
 [[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi 0.1.19",
- "libc",
- "winapi",
-]
-
-[[package]]
 name = "autocfg"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -92,9 +87,9 @@
 
 [[package]]
 name = "bit_field"
-version = "0.10.1"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4"
+checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61"
 
 [[package]]
 name = "bitflags"
@@ -103,6 +98,12 @@
 checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
+name = "bitflags"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
+
+[[package]]
 name = "bstr"
 version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -116,27 +117,30 @@
 
 [[package]]
 name = "bumpalo"
-version = "3.11.1"
+version = "3.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
+checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
 
 [[package]]
 name = "bytemuck"
-version = "1.12.3"
+version = "1.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aaa3a8d9a1ca92e282c96a32d6511b695d7d994d1d102ba85d279f9b2756947f"
+checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6"
 
 [[package]]
 name = "byteorder"
-version = "1.4.3"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
 
 [[package]]
 name = "cc"
-version = "1.0.77"
+version = "1.0.83"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4"
+checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
+dependencies = [
+ "libc",
+]
 
 [[package]]
 name = "cfg-if"
@@ -146,27 +150,26 @@
 
 [[package]]
 name = "chrono"
-version = "0.4.23"
+version = "0.4.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
+checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38"
 dependencies = [
+ "android-tzdata",
  "iana-time-zone",
  "js-sys",
- "num-integer",
  "num-traits",
  "serde",
- "time",
  "wasm-bindgen",
- "winapi",
+ "windows-targets",
 ]
 
 [[package]]
 name = "clap"
-version = "4.0.27"
+version = "4.0.32"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0acbd8d28a0a60d7108d7ae850af6ba34cf2d1257fc646980e5f97ce14275966"
+checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39"
 dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
  "clap_derive",
  "clap_lex",
  "is-terminal",
@@ -188,29 +191,19 @@
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "clap_lex"
-version = "0.3.0"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8"
+checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646"
 dependencies = [
  "os_str_bytes",
 ]
 
 [[package]]
-name = "codespan-reporting"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
-dependencies = [
- "termcolor",
- "unicode-width",
-]
-
-[[package]]
 name = "color_quant"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -227,20 +220,20 @@
 
 [[package]]
 name = "colored"
-version = "2.0.0"
+version = "2.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd"
+checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6"
 dependencies = [
- "atty",
+ "is-terminal",
  "lazy_static",
- "winapi",
+ "windows-sys",
 ]
 
 [[package]]
 name = "core-foundation-sys"
-version = "0.8.3"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
+checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
 
 [[package]]
 name = "cpu-time"
@@ -262,20 +255,10 @@
 ]
 
 [[package]]
-name = "crossbeam-channel"
-version = "0.5.6"
+name = "crossbeam-deque"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
-dependencies = [
- "cfg-if",
- "crossbeam-utils",
-]
-
-[[package]]
-name = "crossbeam-deque"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
 dependencies = [
  "cfg-if",
  "crossbeam-epoch",
@@ -284,9 +267,9 @@
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.13"
+version = "0.9.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
+checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
 dependencies = [
  "autocfg",
  "cfg-if",
@@ -297,9 +280,9 @@
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.14"
+version = "0.8.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
+checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
 dependencies = [
  "cfg-if",
 ]
@@ -325,104 +308,59 @@
 
 [[package]]
 name = "csv-core"
-version = "0.1.10"
+version = "0.1.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
+checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
-name = "cxx"
-version = "1.0.82"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4a41a86530d0fe7f5d9ea779916b7cadd2d4f9add748b99c2c029cbbdfaf453"
-dependencies = [
- "cc",
- "cxxbridge-flags",
- "cxxbridge-macro",
- "link-cplusplus",
-]
-
-[[package]]
-name = "cxx-build"
-version = "1.0.82"
+name = "either"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06416d667ff3e3ad2df1cd8cd8afae5da26cf9cec4d0825040f88b5ca659a2f0"
-dependencies = [
- "cc",
- "codespan-reporting",
- "once_cell",
- "proc-macro2",
- "quote",
- "scratch",
- "syn",
-]
-
-[[package]]
-name = "cxxbridge-flags"
-version = "1.0.82"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "820a9a2af1669deeef27cb271f476ffd196a2c4b6731336011e0ba63e2c7cf71"
-
-[[package]]
-name = "cxxbridge-macro"
-version = "1.0.82"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "either"
-version = "1.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
+checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
 
 [[package]]
 name = "errno"
-version = "0.2.8"
+version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
+checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860"
 dependencies = [
- "errno-dragonfly",
  "libc",
- "winapi",
-]
-
-[[package]]
-name = "errno-dragonfly"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
-dependencies = [
- "cc",
- "libc",
+ "windows-sys",
 ]
 
 [[package]]
 name = "exr"
-version = "1.5.2"
+version = "1.71.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8eb5f255b5980bb0c8cf676b675d1a99be40f316881444f44e0462eaf5df5ded"
+checksum = "832a761f35ab3e6664babfbdc6cef35a4860e816ec3916dcfd0882954e98a8a8"
 dependencies = [
  "bit_field",
  "flume",
  "half",
  "lebe",
  "miniz_oxide",
+ "rayon-core",
  "smallvec",
- "threadpool",
+ "zune-inflate",
+]
+
+[[package]]
+name = "fdeflate"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10"
+dependencies = [
+ "simd-adler32",
 ]
 
 [[package]]
 name = "flate2"
-version = "1.0.25"
+version = "1.0.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
+checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e"
 dependencies = [
  "crc32fast",
  "miniz_oxide",
@@ -439,47 +377,29 @@
 
 [[package]]
 name = "flume"
-version = "0.10.14"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577"
+checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181"
 dependencies = [
- "futures-core",
- "futures-sink",
- "nanorand",
- "pin-project",
  "spin",
 ]
 
 [[package]]
-name = "futures-core"
-version = "0.3.25"
+name = "getrandom"
+version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac"
-
-[[package]]
-name = "futures-sink"
-version = "0.3.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9"
-
-[[package]]
-name = "getrandom"
-version = "0.2.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
+checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
 dependencies = [
  "cfg-if",
- "js-sys",
  "libc",
- "wasi 0.11.0+wasi-snapshot-preview1",
- "wasm-bindgen",
+ "wasi",
 ]
 
 [[package]]
 name = "gif"
-version = "0.11.4"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06"
+checksum = "80792593675e051cf94a4b111980da2ba60d4a83e43e0048c5693baab3977045"
 dependencies = [
  "color_quant",
  "weezl",
@@ -487,66 +407,53 @@
 
 [[package]]
 name = "half"
-version = "2.1.0"
+version = "2.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad6a9459c9c30b177b925162351f97e7d967c7ea8bab3b8352805327daf45554"
+checksum = "02b4af3693f1b705df946e9fe5631932443781d0aabb423b62fcd4d73f6d2fd0"
 dependencies = [
  "crunchy",
 ]
 
 [[package]]
 name = "heck"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
 
 [[package]]
 name = "hermit-abi"
-version = "0.1.19"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
-dependencies = [
- "libc",
-]
+checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7"
 
 [[package]]
 name = "iana-time-zone"
-version = "0.1.53"
+version = "0.1.57"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
+checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613"
 dependencies = [
  "android_system_properties",
  "core-foundation-sys",
  "iana-time-zone-haiku",
  "js-sys",
  "wasm-bindgen",
- "winapi",
+ "windows",
 ]
 
 [[package]]
 name = "iana-time-zone-haiku"
-version = "0.1.1"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
 dependencies = [
- "cxx",
- "cxx-build",
+ "cc",
 ]
 
 [[package]]
 name = "image"
-version = "0.24.5"
+version = "0.24.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69b7ea949b537b0fd0af141fff8c77690f2ce96f4f41f042ccb6c69c6c965945"
+checksum = "6f3dfdbdd72063086ff443e297b61695500514b1e41095b6fb9a5ab48a70a711"
 dependencies = [
  "bytemuck",
  "byteorder",
@@ -557,35 +464,29 @@
  "num-rational",
  "num-traits",
  "png",
- "scoped_threadpool",
+ "qoi",
  "tiff",
 ]
 
 [[package]]
 name = "io-lifetimes"
-version = "0.7.5"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074"
-
-[[package]]
-name = "io-lifetimes"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e394faa0efb47f9f227f1cd89978f854542b318a6f64fa695489c9c993056656"
+checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
 dependencies = [
+ "hermit-abi",
  "libc",
  "windows-sys",
 ]
 
 [[package]]
 name = "is-terminal"
-version = "0.4.0"
+version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aae5bc6e2eb41c9def29a3e0f1306382807764b9b53112030eff57435667352d"
+checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
 dependencies = [
- "hermit-abi 0.2.6",
- "io-lifetimes 1.0.2",
- "rustix 0.36.3",
+ "hermit-abi",
+ "rustix 0.38.19",
  "windows-sys",
 ]
 
@@ -606,9 +507,9 @@
 
 [[package]]
 name = "itoa"
-version = "1.0.4"
+version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
+checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
 
 [[package]]
 name = "jpeg-decoder"
@@ -621,9 +522,9 @@
 
 [[package]]
 name = "js-sys"
-version = "0.3.60"
+version = "0.3.64"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
+checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
 dependencies = [
  "wasm-bindgen",
 ]
@@ -642,42 +543,33 @@
 
 [[package]]
 name = "libc"
-version = "0.2.137"
+version = "0.2.149"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
+checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b"
 
 [[package]]
 name = "libm"
-version = "0.2.6"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
-
-[[package]]
-name = "link-cplusplus"
-version = "1.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
-dependencies = [
- "cc",
-]
+checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.0.46"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d"
+checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.1.3"
+version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f9f08d8963a6c613f4b1a78f4f4a4dbfadf8e6545b2d72861731e4858b8b47f"
+checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f"
 
 [[package]]
 name = "lock_api"
-version = "0.4.9"
+version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
+checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
 dependencies = [
  "autocfg",
  "scopeguard",
@@ -685,44 +577,43 @@
 
 [[package]]
 name = "log"
-version = "0.4.17"
+version = "0.4.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
-dependencies = [
- "cfg-if",
-]
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
 
 [[package]]
 name = "matrixmultiply"
-version = "0.3.2"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84"
+checksum = "7574c1cf36da4798ab73da5b215bbf444f50718207754cb522201d78d1cd0ff2"
 dependencies = [
+ "autocfg",
  "rawpointer",
 ]
 
 [[package]]
 name = "memchr"
-version = "2.5.0"
+version = "2.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
 
 [[package]]
 name = "memoffset"
-version = "0.7.1"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
+checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
 dependencies = [
  "autocfg",
 ]
 
 [[package]]
 name = "miniz_oxide"
-version = "0.6.2"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
+checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
 dependencies = [
  "adler",
+ "simd-adler32",
 ]
 
 [[package]]
@@ -750,23 +641,14 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
-]
-
-[[package]]
-name = "nanorand"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3"
-dependencies = [
- "getrandom",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "num"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606"
+checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af"
 dependencies = [
  "num-bigint",
  "num-complex",
@@ -778,9 +660,9 @@
 
 [[package]]
 name = "num-bigint"
-version = "0.4.3"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
+checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0"
 dependencies = [
  "autocfg",
  "num-integer",
@@ -789,9 +671,9 @@
 
 [[package]]
 name = "num-complex"
-version = "0.4.2"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19"
+checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214"
 dependencies = [
  "num-traits",
 ]
@@ -831,86 +713,57 @@
 
 [[package]]
 name = "num-traits"
-version = "0.2.15"
+version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
+checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
 dependencies = [
  "autocfg",
  "libm",
 ]
 
 [[package]]
-name = "num_cpus"
-version = "1.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
-dependencies = [
- "hermit-abi 0.1.19",
- "libc",
-]
-
-[[package]]
 name = "numeric_literals"
 version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "095aa67b0b9f2081746998f4f17106bdb51d56dc8c211afca5531b92b83bf98a"
 dependencies = [
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "once_cell"
-version = "1.16.0"
+version = "1.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
+checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
 
 [[package]]
 name = "os_str_bytes"
-version = "6.4.1"
+version = "6.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
+checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac"
 
 [[package]]
 name = "paste"
-version = "1.0.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
-
-[[package]]
-name = "pin-project"
-version = "1.0.12"
+version = "1.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc"
-dependencies = [
- "pin-project-internal",
-]
-
-[[package]]
-name = "pin-project-internal"
-version = "1.0.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
+checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
 
 [[package]]
 name = "pkg-config"
-version = "0.3.26"
+version = "0.3.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
+checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
 
 [[package]]
 name = "png"
-version = "0.17.7"
+version = "0.17.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d708eaf860a19b19ce538740d2b4bdeeb8337fa53f7738455e706623ad5c638"
+checksum = "dd75bf2d8dd3702b9707cdbc56a5b9ef42cec752eb8b3bafc01234558442aa64"
 dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
  "crc32fast",
+ "fdeflate",
  "flate2",
  "miniz_oxide",
 ]
@@ -965,7 +818,7 @@
  "proc-macro-error-attr",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
  "version_check",
 ]
 
@@ -982,18 +835,27 @@
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.47"
+version = "1.0.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
+checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
+name = "qoi"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
 name = "quote"
-version = "1.0.21"
+version = "1.0.33"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
 dependencies = [
  "proc-macro2",
 ]
@@ -1046,32 +908,29 @@
 
 [[package]]
 name = "rayon"
-version = "1.6.0"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e060280438193c554f654141c9ea9417886713b7acd75974c85b18a69a88e0b"
+checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1"
 dependencies = [
- "crossbeam-deque",
  "either",
  "rayon-core",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.10.1"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3"
+checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed"
 dependencies = [
- "crossbeam-channel",
  "crossbeam-deque",
  "crossbeam-utils",
- "num_cpus",
 ]
 
 [[package]]
 name = "regex"
-version = "1.7.0"
+version = "1.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
+checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -1086,107 +945,94 @@
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.28"
+version = "0.6.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
 
 [[package]]
 name = "rgb"
-version = "0.8.34"
+version = "0.8.36"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3603b7d71ca82644f79b5a06d1220e9a58ede60bd32255f698cb1af8838b8db3"
+checksum = "20ec2d3e3fc7a92ced357df9cebd5a10b6fb2aa1ee797bf7e9ce2f17dffc8f59"
 dependencies = [
  "bytemuck",
 ]
 
 [[package]]
 name = "rustix"
-version = "0.35.13"
+version = "0.37.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "727a1a6d65f786ec22df8a81ca3121107f235970dc1705ed681d3e6e8b9cd5f9"
+checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035"
 dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
  "errno",
- "io-lifetimes 0.7.5",
+ "io-lifetimes",
  "libc",
- "linux-raw-sys 0.0.46",
+ "linux-raw-sys 0.3.8",
  "windows-sys",
 ]
 
 [[package]]
 name = "rustix"
-version = "0.36.3"
+version = "0.38.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b1fbb4dfc4eb1d390c02df47760bb19a84bb80b301ecc947ab5406394d8223e"
+checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed"
 dependencies = [
- "bitflags",
+ "bitflags 2.4.0",
  "errno",
- "io-lifetimes 1.0.2",
  "libc",
- "linux-raw-sys 0.1.3",
+ "linux-raw-sys 0.4.10",
  "windows-sys",
 ]
 
 [[package]]
 name = "ryu"
-version = "1.0.11"
+version = "1.0.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
+checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
 
 [[package]]
 name = "safe_arch"
-version = "0.6.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "794821e4ccb0d9f979512f9c1973480123f9bd62a90d74ab0f9426fcf8f4a529"
+checksum = "f398075ce1e6a179b46f51bd88d0598b92b00d3551f1a2d4ac49e771b56ac354"
 dependencies = [
  "bytemuck",
 ]
 
 [[package]]
-name = "scoped_threadpool"
-version = "0.1.9"
+name = "scopeguard"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8"
-
-[[package]]
-name = "scopeguard"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-
-[[package]]
-name = "scratch"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 [[package]]
 name = "serde"
-version = "1.0.148"
+version = "1.0.189"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
+checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.148"
+version = "1.0.189"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
+checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.38",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.89"
+version = "1.0.107"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
+checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
 dependencies = [
- "itoa 1.0.4",
+ "itoa 1.0.9",
  "ryu",
  "serde",
 ]
@@ -1205,16 +1051,22 @@
 ]
 
 [[package]]
+name = "simd-adler32"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
+
+[[package]]
 name = "smallvec"
-version = "1.10.0"
+version = "1.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
+checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a"
 
 [[package]]
 name = "spin"
-version = "0.9.4"
+version = "0.9.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
+checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
 dependencies = [
  "lock_api",
 ]
@@ -1227,9 +1079,20 @@
 
 [[package]]
 name = "syn"
-version = "1.0.104"
+version = "1.0.109"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1238,43 +1101,34 @@
 
 [[package]]
 name = "tagger"
-version = "4.3.4"
+version = "4.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6aaa6f5d645d1dae4cd0286e9f8bf15b75a31656348e5e106eb1a940abd34b63"
+checksum = "094c9f64d6de9a8506b1e49b63a29333b37ed9e821ee04be694d431b3264c3c5"
 
 [[package]]
 name = "termcolor"
-version = "1.1.3"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
+checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64"
 dependencies = [
  "winapi-util",
 ]
 
 [[package]]
 name = "terminal_size"
-version = "0.2.2"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40ca90c434fd12083d1a6bdcbe9f92a14f96c8a1ba600ba451734ac334521f7a"
+checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
 dependencies = [
- "rustix 0.35.13",
+ "rustix 0.37.25",
  "windows-sys",
 ]
 
 [[package]]
-name = "threadpool"
-version = "1.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
-dependencies = [
- "num_cpus",
-]
-
-[[package]]
 name = "tiff"
-version = "0.8.0"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f17def29300a156c19ae30814710d9c63cd50288a49c6fd3a10ccfbe4cf886fd"
+checksum = "6d172b0f4d3fba17ba89811858b9d3d97f928aece846475bbda076ca46736211"
 dependencies = [
  "flate2",
  "jpeg-decoder",
@@ -1282,17 +1136,6 @@
 ]
 
 [[package]]
-name = "time"
-version = "0.1.45"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a"
-dependencies = [
- "libc",
- "wasi 0.10.0+wasi-snapshot-preview1",
- "winapi",
-]
-
-[[package]]
 name = "trait-set"
 version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1300,35 +1143,35 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.109",
 ]
 
 [[package]]
 name = "typenum"
-version = "1.15.0"
+version = "1.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
 
 [[package]]
 name = "unicase"
-version = "2.6.0"
+version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6"
+checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89"
 dependencies = [
  "version_check",
 ]
 
 [[package]]
 name = "unicode-ident"
-version = "1.0.5"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
 
 [[package]]
 name = "unicode-width"
-version = "0.1.10"
+version = "0.1.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
 
 [[package]]
 name = "version_check"
@@ -1338,21 +1181,15 @@
 
 [[package]]
 name = "wasi"
-version = "0.10.0+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
-
-[[package]]
-name = "wasi"
 version = "0.11.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.83"
+version = "0.2.87"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
+checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
 dependencies = [
  "cfg-if",
  "wasm-bindgen-macro",
@@ -1360,24 +1197,24 @@
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.83"
+version = "0.2.87"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
+checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
 dependencies = [
  "bumpalo",
  "log",
  "once_cell",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.38",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.83"
+version = "0.2.87"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
+checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -1385,22 +1222,22 @@
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.83"
+version = "0.2.87"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
+checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.38",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.83"
+version = "0.2.87"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
+checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
 
 [[package]]
 name = "weezl"
@@ -1410,9 +1247,9 @@
 
 [[package]]
 name = "wide"
-version = "0.7.5"
+version = "0.7.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae41ecad2489a1655c8ef8489444b0b113c0a0c795944a3572a0931cf7d2525c"
+checksum = "ebecebefc38ff1860b4bc47550bbfa63af5746061cf0d29fcd7fa63171602598"
 dependencies = [
  "bytemuck",
  "safe_arch",
@@ -1436,9 +1273,9 @@
 
 [[package]]
 name = "winapi-util"
-version = "0.1.5"
+version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
 dependencies = [
  "winapi",
 ]
@@ -1450,10 +1287,28 @@
 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
+name = "windows"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
 name = "windows-sys"
-version = "0.42.0"
+version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
 dependencies = [
  "windows_aarch64_gnullvm",
  "windows_aarch64_msvc",
@@ -1466,42 +1321,51 @@
 
 [[package]]
 name = "windows_aarch64_gnullvm"
-version = "0.42.0"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e"
+checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
 
 [[package]]
 name = "windows_aarch64_msvc"
-version = "0.42.0"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4"
+checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
 
 [[package]]
 name = "windows_i686_gnu"
-version = "0.42.0"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7"
+checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
 
 [[package]]
 name = "windows_i686_msvc"
-version = "0.42.0"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246"
+checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
 
 [[package]]
 name = "windows_x86_64_gnu"
-version = "0.42.0"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed"
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
 
 [[package]]
 name = "windows_x86_64_gnullvm"
-version = "0.42.0"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028"
+checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
 
 [[package]]
 name = "windows_x86_64_msvc"
-version = "0.42.0"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5"
+checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
+
+[[package]]
+name = "zune-inflate"
+version = "0.2.54"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02"
+dependencies = [
+ "simd-adler32",
+]
--- a/src/dataterm.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/dataterm.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -46,7 +46,7 @@
 ) -> V {
     let mut r = b.clone();
     opA.gemv(&mut r, 1.0, μ, -1.0);
-    opA.gemv(&mut r, 1.0, μ_delta, -1.0);
+    opA.gemv(&mut r, 1.0, μ_delta, 1.0);
     r
 }
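
Note on the sign fix above: assuming the usual gemv convention r ← α·A(x) + β·r (β scaling the current contents of r), the first call forms r = A(μ) − b, and with the corrected β = 1.0 the second call yields r = A(μ + μ_delta) − b, i.e. the residual at the shifted measure. With the old β = −1.0 the accumulated A(μ) − b was flipped back, giving A(μ_delta) − A(μ) + b instead.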
 
--- a/src/experiments.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/experiments.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -15,7 +15,7 @@
 
 use crate::ExperimentOverrides;
 use crate::kernels::*;
-use crate::kernels::{SupportProductFirst as Prod};
+use crate::kernels::SupportProductFirst as Prod;
 use crate::pdps::PDPSConfig;
 use crate::types::*;
 use crate::run::{
--- a/src/fb.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/fb.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -136,17 +136,6 @@
     DataTerm,
 };
 
-/// Method for constructing $μ$ on each iteration
-#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
-#[allow(dead_code)]
-pub enum InsertionStyle {
-    /// Resuse previous $μ$ from previous iteration, optimising weights
-    /// before inserting new spikes.
-    Reuse,
-    /// Start each iteration with $μ=0$.
-    Zero,
-}
-
 /// Settings for [`pointsource_fb_reg`].
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
@@ -154,7 +143,7 @@
     /// Step length scaling
     pub τ0 : F,
     /// Generic parameters
-    pub insertion : FBGenericConfig<F>,
+    pub generic : FBGenericConfig<F>,
 }
 
 /// Settings for the solution of the stepwise optimality condition in algorithms based on
@@ -162,29 +151,43 @@
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[serde(default)]
 pub struct FBGenericConfig<F : Float> {
-    /// Method for constructing $μ$ on each iteration; see [`InsertionStyle`].
-    pub insertion_style : InsertionStyle,
     /// Tolerance for point insertion.
     pub tolerance : Tolerance<F>,
+
     /// Stop looking for predual maximum (where to insert a new point) below
     /// `tolerance` multiplied by this factor.
+    ///
+    /// Not used by [`super::radon_fb`].
     pub insertion_cutoff_factor : F,
+
     /// Settings for branch and bound refinement when looking for predual maxima
     pub refinement : RefinementSettings<F>,
+
     /// Maximum insertions within each outer iteration
+    ///
+    /// Not used by [`super::radon_fb`].
     pub max_insertions : usize,
+
     /// Pair `(n, m)` for maximum insertions `m` on first `n` iterations.
+    ///
+    /// Not used by [`super::radon_fb`].
     pub bootstrap_insertions : Option<(usize, usize)>,
+
     /// Inner method settings
     pub inner : InnerSettings<F>,
+
     /// Spike merging method
     pub merging : SpikeMergingMethod<F>,
+
     /// Tolerance multiplier for merges
     pub merge_tolerance_mult : F,
+
     /// Spike merging method after the last step
     pub final_merging : SpikeMergingMethod<F>,
+
     /// Iterations between merging heuristic tries
     pub merge_every : usize,
+
     /// Save $μ$ for postprocessing optimisation
     pub postprocessing : bool
 }
@@ -194,7 +197,7 @@
     fn default() -> Self {
         FBConfig {
             τ0 : 0.99,
-            insertion : Default::default()
+            generic : Default::default(),
         }
     }
 }
@@ -203,7 +206,6 @@
 impl<F : Float> Default for FBGenericConfig<F> {
     fn default() -> Self {
         FBGenericConfig {
-            insertion_style : InsertionStyle::Reuse,
             tolerance : Default::default(),
             insertion_cutoff_factor : 1.0,
             refinement : Default::default(),
@@ -211,7 +213,7 @@
             //bootstrap_insertions : None,
             bootstrap_insertions : Some((10, 1)),
             inner : InnerSettings {
-                method : InnerMethod::SSN,
+                method : InnerMethod::Default,
                 .. Default::default()
             },
             merging : SpikeMergingMethod::None,
@@ -224,35 +226,9 @@
     }
 }
 
-#[replace_float_literals(F::cast_from(literal))]
-pub(crate) fn μ_diff<F : Float, const N : usize>(
-    μ_new : &DiscreteMeasure<Loc<F, N>, F>,
-    μ_base : &DiscreteMeasure<Loc<F, N>, F>,
-    ν_delta : Option<&DiscreteMeasure<Loc<F, N>, F>>,
-    config : &FBGenericConfig<F>
-) -> DiscreteMeasure<Loc<F, N>, F> {
-    let mut ν : DiscreteMeasure<Loc<F, N>, F> = match config.insertion_style {
-        InsertionStyle::Reuse => {
-            μ_new.iter_spikes()
-                 .zip(μ_base.iter_masses().chain(std::iter::repeat(0.0)))
-                 .map(|(δ, α_base)| (δ.x, α_base - δ.α))
-                 .collect()
-        },
-        InsertionStyle::Zero => {
-            μ_new.iter_spikes()
-                 .map(|δ| -δ)
-                 .chain(μ_base.iter_spikes().copied())
-                 .collect()
-        }
-    };
-    ν.prune(); // Potential small performance improvement
-    // Add ν_delta if given
-    match ν_delta {
-        None => ν,
-        Some(ν_d) => ν + ν_d,
-    }
-}
-
+/// TODO: document.
+/// `μ_base + ν_delta` is the base point, where `μ` and `μ_base` are assumed to have the same spike
+/// locations, while `ν_delta` may have different locations.
 #[replace_float_literals(F::cast_from(literal))]
 pub(crate) fn insert_and_reweigh<
     'a, F, GA, 𝒟, BTA, G𝒟, S, K, Reg, State, const N : usize
@@ -284,24 +260,15 @@
       State : AlgIteratorState {
 
     // Maximum insertion count and measure difference calculation depend on insertion style.
-    let (m, warn_insertions) = match (state.iteration(), config.bootstrap_insertions) {
+    let (max_insertions, warn_insertions) = match (state.iteration(), config.bootstrap_insertions) {
         (i, Some((l, k))) if i <= l => (k, false),
         _ => (config.max_insertions, !state.is_quiet()),
     };
-    let max_insertions = match config.insertion_style {
-        InsertionStyle::Zero => {
-            todo!("InsertionStyle::Zero does not currently work with FISTA, so diabled.");
-            // let n = μ.len();
-            // μ = DiscreteMeasure::new();
-            // n + m
-        },
-        InsertionStyle::Reuse => m,
-    };
 
-    // TODO: should avoid a second copy of μ here; μ_base already stores a copy.
+    // TODO: should avoid a copy of μ_base here.
     let ω0 = op𝒟.apply(match ν_delta {
-        None => μ.clone(),
-        Some(ν_d) => &*μ + ν_d,
+        None => μ_base.clone(),
+        Some(ν_d) => &*μ_base + ν_d,
     });
 
     // Add points to support until within error tolerance or maximum insertion count reached.
@@ -333,7 +300,10 @@
 
         // Form d = ω0 - τv - 𝒟μ = -𝒟(μ - μ^k) - τv for checking the proximate optimality
         // conditions in the predual space, and finding new points for insertion, if necessary.
-        let mut d = minus_τv + op𝒟.preapply(μ_diff(μ, μ_base, ν_delta, config));
+        let mut d = minus_τv + op𝒟.preapply(match ν_delta {
+            None => μ_base.sub_matching(μ),
+            Some(ν) =>  μ_base.sub_matching(μ) + ν
+        });
 
         // If no merging heuristic is used, let's be more conservative about spike insertion,
         // and skip it after first round. If merging is done, being more greedy about spike
@@ -404,16 +374,10 @@
       Reg : RegTerm<F, N>,
       State : AlgIteratorState {
     if state.iteration() % config.merge_every == 0 {
-        let n_before_merge = μ.len();
-        μ.merge_spikes(config.merging, |μ_candidate| {
-            let μd = μ_diff(&μ_candidate, &μ_base, None, config);
-            let mut d = minus_τv + op𝒟.preapply(μd);
-
+        stats.merged += μ.merge_spikes(config.merging, |μ_candidate| {
+            let mut d = minus_τv + op𝒟.preapply(μ_base.sub_matching(&μ_candidate));
             reg.verify_merge_candidate(&mut d, μ_candidate, τ, ε, &config)
-                .then_some(())
         });
-        debug_assert!(μ.len() >= n_before_merge);
-        stats.merged += μ.len() - n_before_merge;
     }
 
     let n_before_prune = μ.len();
@@ -495,7 +459,7 @@
       Reg : RegTerm<F, N> {
 
     // Set up parameters
-    let config = &fbconfig.insertion;
+    let config = &fbconfig.generic;
     let op𝒟norm = op𝒟.opnorm_bound();
     let τ = fbconfig.τ0/opA.lipschitz_factor(&op𝒟).unwrap();
     // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
@@ -621,7 +585,7 @@
       Reg : RegTerm<F, N> {
 
     // Set up parameters
-    let config = &fbconfig.insertion;
+    let config = &fbconfig.generic;
     let op𝒟norm = op𝒟.opnorm_bound();
     let τ = fbconfig.τ0/opA.lipschitz_factor(&op𝒟).unwrap();
     let mut λ = 1.0;
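
In the reworked insert_and_reweigh and merge step above, the base point is μ_base + ν_delta: ω₀ = 𝒟(μ_base + ν_delta), so the optimality-check quantity becomes d = ω₀ − τv − 𝒟μ = 𝒟(μ_base + ν_delta − μ) − τv, replacing the removed μ_diff/InsertionStyle machinery. The call μ_base.sub_matching(μ) presumably builds the difference μ_base − μ by pairing spikes at matching locations, consistent with the new doc comment stating that μ and μ_base share spike locations while ν_delta may not.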
--- a/src/forward_model.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/forward_model.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -23,6 +23,7 @@
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 use alg_tools::tabledump::write_csv;
 use alg_tools::error::DynError;
+use alg_tools::maputil::map2;
 
 use crate::types::*;
 use crate::measures::*;
@@ -594,15 +595,25 @@
 #[replace_float_literals(F::cast_from(literal))]
 impl<F, BT, S, P, const N : usize> TransportLipschitz<L2Squared>
 for SensorGrid<F, S, P, BT, N>
-where F : Float + nalgebra::RealField + ToNalgebraRealField,
+where F : Float + ToNalgebraRealField,
       BT : SensorGridBT<F, S, P, N>,
       S : Sensor<F, N>,
       P : Spread<F, N>,
-      Convolution<S, P> : Spread<F, N> + Lipschitz<L2> {
+      Convolution<S, P> : Spread<F, N> + Lipschitz<L2, FloatType = F> {
     type FloatType = F;
 
     fn transport_lipschitz_factor(&self, L2Squared : L2Squared) -> Self::FloatType {
-        todo!("Unimplemented")
+        // We estimate the factor by N_ψL^2, where L is the 2-norm Lipschitz factor of
+        // the base sensor (sensor * base_spread), and N_ψ the maximum overlap.
+        let l = self.base_sensor.lipschitz_factor(L2).unwrap();
+        let w = self.base_sensor.support_hint().width();
+        let d = map2(self.domain.width(), &self.sensor_count, |wi, &i| wi/F::cast_from(i));
+        let n = w.iter()
+                 .zip(d.iter())
+                 .map(|(&wi, &di)| (wi/di).ceil())
+                 .reduce(F::mul)
+                 .unwrap();
+        2.0 * n * l.powi(2)
     }
 }
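
The overlap count in the new implementation above can be read off in isolation. A minimal sketch of the same computation, with hypothetical names and plain f64 slices standing in for the generic array types used in the actual code:

fn overlap_estimate(support_width: &[f64], grid_spacing: &[f64]) -> f64 {
    // N_ψ = ∏_i ceil(w_i / d_i): along each axis, how many grid cells the
    // base sensor's support can overlap, multiplied over the axes.
    support_width.iter()
        .zip(grid_spacing.iter())
        .map(|(&w, &d)| (w / d).ceil())
        .product()
}

fn transport_lipschitz_estimate(l: f64, support_width: &[f64], grid_spacing: &[f64]) -> f64 {
    // The quantity returned by transport_lipschitz_factor above: 2 · N_ψ · L²,
    // where L is the 2-norm Lipschitz factor of the base sensor.
    2.0 * overlap_estimate(support_width, grid_spacing) * l.powi(2)
}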
 
--- a/src/frank_wolfe.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/frank_wolfe.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -232,9 +232,9 @@
         //                 = C sup_{‖x‖_1 ≤ 1} ‖Ax‖_2 = C ‖A‖_{1,2},
         // where C = √m satisfies ‖x‖_1 ≤ C ‖x‖_2. Since we are interested in ‖A_*A‖, no
         // square root is needed when we scale:
-        let inner_τ = inner.τ0 / (findim_data.opAnorm_squared * F::cast_from(μ.len()));
-        let iters = quadratic_unconstrained(inner.method, &Ã, &g̃, self.α(),
-                                            &mut x, inner_τ, iterator);
+        let normest = findim_data.opAnorm_squared * F::cast_from(μ.len());
+        let iters = quadratic_unconstrained(&Ã, &g̃, self.α(), &mut x,
+                                            normest, inner, iterator);
         // Update masses of μ based on solution of finite-dimensional subproblem.
         μ.set_masses_dvector(&x);
 
@@ -329,9 +329,9 @@
         //                 = C sup_{‖x‖_1 ≤ 1} ‖Ax‖_2 = C ‖A‖_{1,2},
         // where C = √m satisfies ‖x‖_1 ≤ C ‖x‖_2. Since we are interested in ‖A_*A‖, no
         // square root is needed when we scale:
-        let inner_τ = inner.τ0 / (findim_data.opAnorm_squared * F::cast_from(μ.len()));
-        let iters = quadratic_nonneg(inner.method, &Ã, &g̃, self.α(),
-                                     &mut x, inner_τ, iterator);
+        let normest = findim_data.opAnorm_squared * F::cast_from(μ.len());
+        let iters = quadratic_nonneg(&Ã, &g̃, self.α(), &mut x,
+                                     normest, inner, iterator);
         // Update masses of μ based on solution of finite-dimensional subproblem.
         μ.set_masses_dvector(&x);
 
@@ -480,12 +480,11 @@
         inner_iters += reg.optimise_weights(&mut μ, opA, b, &findim_data, &config.inner, inner_it);
    
         // Merge spikes and update residual for next step and `if_verbose` below.
-        let n_before_merge = μ.len();
-        residual = μ.merge_spikes_fitness(config.merging,
-                                         |μ̃| opA.apply(μ̃) - b,
-                                          A::Observable::norm2_squared);
-        assert!(μ.len() >= n_before_merge);
-        merged += μ.len() - n_before_merge;
+        let (r, count) = μ.merge_spikes_fitness(config.merging,
+                                                |μ̃| opA.apply(μ̃) - b,
+                                                A::Observable::norm2_squared);
+        residual = r;
+        merged += count;
 
 
         // Prune points with zero mass
@@ -512,6 +511,8 @@
                 pruned,
                 ε : ε_prev,
                 postprocessing : None,
+                untransported_fraction : None,
+                transport_error : None,
             };
             inner_iters = 0;
             this_iters = 0;
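
Two API changes are visible in this hunk: quadratic_unconstrained and quadratic_nonneg now receive the norm estimate ‖A‖²·m and the full inner settings instead of a precomputed step length τ0/normest, so the step length scaling is presumably derived inside the subproblem solvers; and merge_spikes_fitness now returns the merge count alongside the new residual, replacing the old length-difference bookkeeping. The per-iteration statistics record also gains untransported_fraction and transport_error fields, left as None here since Frank–Wolfe performs no transport.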
--- a/src/kernels/gaussian.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/kernels/gaussian.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -298,8 +298,8 @@
             if c1 >= c2 {
                 0.0
             } else {
-                // erf'(z) = (2/√π)*exp(-z^2), and we get extra factor -1/√(2*σ) = -1/t
-                // from the chain rule
+                // erf'(z) = (2/√π)*exp(-z^2), and we get extra factor -1/(√2*σ) = -1/t
+                // from the chain rule (the minus comes from inside c_1 or c_2).
                 let de1 = (-(c1/t).powi(2)).exp();
                 let de2 = (-(c2/t).powi(2)).exp();
                 c_div_t * (de1 - de2)
@@ -323,15 +323,70 @@
 }
 
 #[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, S, const N : usize> Lipschitz<L1>
+for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+    type FloatType = F;
+
+    fn lipschitz_factor(&self, L1 : L1) -> Option<F> {
+        // To get the product Lipschitz factor, we note that for any ψ_i, we have
+        // ∏_{i=1}^N φ_i(x_i) - ∏_{i=1}^N φ_i(y_i)
+        // = [φ_1(x_1)-φ_1(y_1)] ∏_{i=2}^N φ_i(x_i)
+        //   + φ_1(y_1)[ ∏_{i=2}^N φ_i(x_i) - ∏_{i=2}^N φ_i(y_i)]
+        // = ∑_{j=1}^N [φ_j(x_j)-φ_j(y_j)]∏_{i > j} φ_i(x_i) ∏_{i < j} φ_i(y_i)
+        // Thus
+        // |∏_{i=1}^N φ_i(x_i) - ∏_{i=1}^N φ_i(y_i)|
+        // ≤ ∑_{j=1}^N |φ_j(x_j)-φ_j(y_j)| ∏_{j ≠ i} \max_i |φ_i|
+        //
+        // Thus we need 1D Lipschitz factors, and the maximum for φ = θ * ψ.
+        //
+        // We have
+        // θ * ψ(x) = 0 if c_1(x) ≥ c_2(x)
+        //          = (1/2)[erf(c_2(x)/(√2σ)) - erf(c_1(x)/(√2σ))] if c_1(x) < c_2(x),
+        // where c_1(x) = max{-x-b,-a} = -min{b+x,a} and c_2(x)=min{b-x,a}, C is the Gaussian
+        // normalisation factor, and erf(s) = (2/√π) ∫_0^s e^{-t^2} dt.
+        // Thus, if c_1(x) < c_2(x) and c_1(y) < c_2(y), we have
+        // θ * ψ(x) - θ * ψ(y) = (1/√π)[∫_{c_1(x)/(√2σ)}^{c_1(y)/(√2σ)} e^{-t^2} dt
+        //                       - ∫_{c_2(x)/(√2σ)}^{c_2(y)/(√2σ)} e^{-t^2} dt]
+        // Thus
+        // |θ * ψ(x) - θ * ψ(y)| ≤ (1/√π)/(√2σ)(|c_1(x)-c_1(y)|+|c_2(x)-c_2(y)|)
+        //                       ≤ 2(1/√π)/(√2σ)|x-y|
+        //                       ≤ √2/(√πσ)|x-y|.
+        //
+        // For the product we also need the value θ * ψ(0), which is
+        // (1/2)[erf(min{a,b}/(√2σ)) - erf(max{-b,-a}/(√2σ))]
+        //  = (1/2)[erf(min{a,b}/(√2σ))-erf(-min{a,b}/(√2σ))]
+        //  = erf(min{a,b}/(√2σ))
+        //
+        // If c_1(x) ≥ c_2(x), then x ∉ [-(a+b), a+b]. If also y is outside that range,
+        // θ * ψ(x) = θ * ψ(y). If only y is in the range [-(a+b), a+b], we can replace
+        // x by -(a+b) or (a+b), whichever is closer to y, and still have θ * ψ(x) = 0.
+        // Thus the same calculations as above yield the Lipschitz factor.
+        let Convolution(ref ind,
+                        SupportProductFirst(ref cut,
+                                            ref gaussian)) = self;
+        let a = cut.r.value();
+        let b = ind.r.value();
+        let σ = gaussian.variance.value().sqrt();
+        let π = F::PI;
+        let t = F::SQRT_2 * σ;
+        let l1d = F::SQRT_2 / (π.sqrt() * σ);
+        let e0 = F::cast_from(erf((a.min(b) / t).as_()));
+        Some(l1d * e0.powi(N as i32-1))
+    }
+}
+
 impl<'a, F : Float, R, C, S, const N : usize> Lipschitz<L2>
 for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
 where R : Constant<Type=F>,
       C : Constant<Type=F>,
       S : Constant<Type=F> {
     type FloatType = F;
-
-    fn lipschitz_factor(&self, L2 : L2) -> Option<F> {
-        todo!("This requirement some error function work.")
+    #[inline]
+    fn lipschitz_factor(&self, L2 : L2) -> Option<Self::FloatType> {
+        self.lipschitz_factor(L1).map(|l1| l1 * <S::Type>::cast_from(N).sqrt())
     }
 }
 
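The comment above reduces the Lipschitz factor of the product x ↦ ∏ᵢ φ(xᵢ) to the 1D Lipschitz factor times the (N−1)-th power of the 1D maximum. A standalone sketch, not part of this changeset, that brute-force checks this bound with a toy 1-Lipschitz hat factor standing in for the cut Gaussian:

// Product-Lipschitz sanity check: if each 1D factor φ has Lipschitz constant
// l1d and maximum m1d, then x ↦ ∏_i φ(x_i) is Lipschitz with factor
// l1d * m1d^(N-1) with respect to the ℓ¹ norm. Toy φ, not the crate's kernel.
fn phi(t: f64) -> f64 { (1.0 - t.abs()).max(0.0) } // l1d = 1, m1d = 1

fn prod2(x: [f64; 2]) -> f64 { phi(x[0]) * phi(x[1]) }

fn main() {
    let (l1d, m1d): (f64, f64) = (1.0, 1.0);
    let lip = l1d * m1d.powi(2 - 1); // N = 2
    let grid: Vec<f64> = (-10..=10).map(|i| i as f64 / 5.0).collect();
    for &x0 in &grid { for &x1 in &grid { for &y0 in &grid { for &y1 in &grid {
        let lhs = (prod2([x0, x1]) - prod2([y0, y1])).abs();
        let rhs = lip * ((x0 - y0).abs() + (x1 - y1).abs());
        assert!(lhs <= rhs + 1e-12);
    }}}}
    println!("bound |∏φ(x) - ∏φ(y)| ≤ {}·‖x-y‖₁ verified on the grid", lip);
}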
--- a/src/kernels/hat_convolution.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/kernels/hat_convolution.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -97,7 +97,9 @@
         // |∏_{i=1}^N ψ_i(x_i) - ∏_{i=1}^N ψ_i(y_i)|
         // ≤ ∑_{j=1}^N |ψ_j(x_j)-ψ_j(y_j)| ∏_{j ≠ i} \max_i |ψ_i|
         let σ = self.radius();
-        Some((self.lipschitz_1d_σ1() / (σ*σ)) * (self.value_1d_σ1(0.0) / σ))
+        let l1d = self.lipschitz_1d_σ1() / (σ*σ);
+        let m1d = self.value_1d_σ1(0.0) / σ;
+        Some(l1d * m1d.powi(N as i32 - 1))
     }
 }
 
@@ -398,7 +400,7 @@
                 |y| (2.0/3.0) * (y + 1.0).powi(3),
                 || i(a, b, -0.5, 0.0,
                     // -2 y^3 - 2 y^2 + 1/3  on  -1/2 < y ≤ 0
-                    |y| -2.0*(y - 1.0) * y * y + (1.0/3.0),
+                    |y| -2.0*(y + 1.0) * y * y + (1.0/3.0),
                     || i(a, b, 0.0, 0.5,
                             // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
                             |y| 2.0*(y - 1.0) * y * y + (1.0/3.0),
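The corrected middle polynomial can be sanity-checked via continuity: the three antiderivative pieces visible in this hunk should agree at the breakpoints y = −1/2 and y = 0. A standalone check, not part of this changeset:

// Continuity check of the piecewise antiderivative pieces shown above.
fn main() {
    let p1 = |y: f64| (2.0 / 3.0) * (y + 1.0).powi(3);       // -1 < y ≤ -1/2
    let p2 = |y: f64| -2.0 * (y + 1.0) * y * y + 1.0 / 3.0;   // -1/2 < y ≤ 0 (corrected)
    let p3 = |y: f64| 2.0 * (y - 1.0) * y * y + 1.0 / 3.0;    // 0 < y < 1/2
    assert!((p1(-0.5) - p2(-0.5)).abs() < 1e-12);
    assert!((p2(0.0) - p3(0.0)).abs() < 1e-12);
    println!("pieces agree at y = -1/2 and y = 0");
}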
--- a/src/main.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/main.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -10,7 +10,8 @@
 // Linear operators may be written e.g. as `opA`, to keep the capital letters of mathematical
 // convention while referring to the type (trait) of the operator as `A`.
 #![allow(non_snake_case)]
-#![feature(drain_filter)]
+// Need to create parse errors
+#![feature(dec2flt)]
 
 use clap::Parser;
 use serde::{Serialize, Deserialize};
@@ -36,6 +37,7 @@
 pub mod regularisation;
 pub mod dataterm;
 pub mod fb;
+pub mod radon_fb;
 pub mod sliding_fb;
 pub mod frank_wolfe;
 pub mod pdps;
@@ -170,6 +172,18 @@
     /// regularisation parameters. Only affects PDPS.
     sigma0 : Option<F>,
 
+    #[arg(long)]
+    /// Normalised transport step length for sliding_fb.
+    theta0 : Option<F>,
+
+    #[arg(long)]
+    /// Transport tolerance wrt. ω
+    transport_tolerance_omega : Option<F>,
+
+    #[arg(long)]
+    /// Transport tolerance wrt. ∇v
+    transport_tolerance_dv : Option<F>,
+
     #[arg(value_enum, long)]
     /// PDPS acceleration, when available.
     acceleration : Option<pdps::Acceleration>,
--- a/src/measures/discrete.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/measures/discrete.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -121,7 +121,13 @@
     /// Prune all spikes with zero mass.
     #[inline]
     pub fn prune(&mut self) {
-        self.spikes.retain(|δ| δ.α != F::ZERO);
+        self.prune_by(|δ| δ.α != F::ZERO);
+    }
+
+    /// Prune spikes by the predicate `g`.
+    #[inline]
+    pub fn prune_by<G : FnMut(&DeltaMeasure<Domain, F>) -> bool>(&mut self, g : G) {
+        self.spikes.retain(g);
     }
 
     /// Add the spikes produced by `iter` to this measure.
@@ -138,6 +144,48 @@
     pub fn push(&mut self, δ : DeltaMeasure<Domain, F>) {
         self.spikes.push(δ);
     }
+
+    /// Iterate over triples of masses and locations of two discrete measures, which are assumed
+    /// to have equal locations for the same spike indices.
+    pub fn both_matching<'a>(&'a self, other : &'a DiscreteMeasure<Domain, F>) ->
+      impl Iterator<Item=(F, F, &'a Domain)> {
+        let m = self.len().max(other.len());
+        self.iter_spikes().map(Some).chain(std::iter::repeat(None))
+            .zip(other.iter_spikes().map(Some).chain(std::iter::repeat(None)))
+            .take(m)
+            .map(|(oδ, orδ)| {
+                match (oδ, orδ) {
+                    (Some(δ), Some(rδ)) => (δ.α, rδ.α, &δ.x), // Assumed δ.x=rδ.x
+                    (Some(δ), None)     => (δ.α, F::ZERO,  &δ.x),
+                    (None, Some(rδ))    => (F::ZERO, rδ.α, &rδ.x),
+                    (None, None)        => panic!("This cannot happen!"),
+                }
+            })
+    }
+
+    /// Subtract `other` from `self`, assuming equal locations for the same spike indices
+    pub fn sub_matching(&self, other : &DiscreteMeasure<Domain, F>) -> DiscreteMeasure<Domain, F>
+    where Domain : Clone {
+        self.both_matching(other)
+            .map(|(α, β, x)| (x.clone(), α - β))
+            .collect()
+    }
+
+    /// Add `other` to `self`, assuming equal locations for the same spike indices
+    pub fn add_matching(&self, other : &DiscreteMeasure<Domain, F>) -> DiscreteMeasure<Domain, F>
+    where Domain : Clone {
+        self.both_matching(other)
+            .map(|(α, β, x)| (x.clone(), α + β))
+            .collect()
+    }
+
+    /// Calculate the Radon-norm distance of `self` to `other`,
+    /// assuming equal locations for the same spike indices.
+    pub fn dist_matching(&self, other : &DiscreteMeasure<Domain, F>) -> F where F : Float {
+        self.both_matching(other)
+            .map(|(α, β, _)| (α-β).abs())
+            .sum()
+    }
 }
 
 impl<Domain, F : Num> IntoIterator for DiscreteMeasure<Domain, F> {
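`both_matching` pairs the spikes of two measures of possibly different lengths by padding the shorter one with zero-mass entries; `dist_matching` then sums |αᵢ − βᵢ|. A standalone sketch of the same padding pattern on plain slices, not part of this changeset:

// Zero-padded pairing of two mass vectors, mirroring both_matching/dist_matching.
fn dist_matching(a: &[f64], b: &[f64]) -> f64 {
    let m = a.len().max(b.len());
    a.iter().copied().map(Some).chain(std::iter::repeat(None))
        .zip(b.iter().copied().map(Some).chain(std::iter::repeat(None)))
        .take(m)
        .map(|(oa, ob)| (oa.unwrap_or(0.0) - ob.unwrap_or(0.0)).abs())
        .sum()
}

fn main() {
    // The second measure has one extra spike; it is compared against mass 0.
    let d = dist_matching(&[1.0, 2.0], &[1.5, 2.0, 0.25]);
    assert!((d - 0.75).abs() < 1e-12);
    println!("Radon-norm distance = {}", d);
}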
--- a/src/measures/merging.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/measures/merging.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -20,8 +20,10 @@
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
 #[allow(dead_code)]
 pub enum SpikeMergingMethod<F> {
-    /// Try to merge spikes within a given radius of eachother
+    /// Try to merge spikes within a given radius of each other, averaging the location
     HeuristicRadius(F),
+    /// Try to merge spikes within a given radius of each other, attempting original locations
+    HeuristicRadiusNoInterp(F),
     /// No merging
     None,
 }
@@ -40,7 +42,8 @@
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
         match self {
             Self::None => write!(f, "none"),
-            Self::HeuristicRadius(r) => std::fmt::Display::fmt(r, f),
+            Self::HeuristicRadius(r) => write!(f, "i:{}", r),
+            Self::HeuristicRadiusNoInterp(r) => write!(f, "n:{}", r),
         }
     }
 }
@@ -52,7 +55,19 @@
         if s == "none" {
             Ok(Self::None)
         } else {
-            Ok(Self::HeuristicRadius(F::from_str(s)?))
+            let mut subs = s.split(':');
+            match subs.next() {
+                None =>  Ok(Self::HeuristicRadius(F::from_str(s)?)),
+                Some(t) if t == "n" => match subs.next() {
+                    None => Err(core::num::dec2flt::pfe_invalid()),
+                    Some(v) => Ok(Self::HeuristicRadiusNoInterp(F::from_str(v)?))
+                },
+                Some(t) if t == "i" => match subs.next() {
+                    None => Err(core::num::dec2flt::pfe_invalid()),
+                    Some(v) => Ok(Self::HeuristicRadius(F::from_str(v)?))
+                },
+                Some(v) => Ok(Self::HeuristicRadius(F::from_str(v)?))
+            }
         }
     }
 }
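The parser above accepts "none", "i:<radius>", "n:<radius>", and a bare "<radius>" (which defaults to the interpolating variant). A standalone sketch of the same grammar for f64, not part of this changeset, using stable library calls instead of the unstable `dec2flt` error constructor:

// Illustrative parser for the merging-method syntax; names are placeholders.
#[derive(Debug, PartialEq)]
enum Merging { None, Radius(f64), RadiusNoInterp(f64) }

fn parse(s: &str) -> Option<Merging> {
    if s == "none" { return Some(Merging::None); }
    match s.split_once(':') {
        Some(("i", v)) => v.parse().ok().map(Merging::Radius),
        Some(("n", v)) => v.parse().ok().map(Merging::RadiusNoInterp),
        Some(_) => None,
        None => s.parse().ok().map(Merging::Radius),
    }
}

fn main() {
    assert_eq!(parse("none"), Some(Merging::None));
    assert_eq!(parse("i:0.01"), Some(Merging::Radius(0.01)));
    assert_eq!(parse("n:0.01"), Some(Merging::RadiusNoInterp(0.01)));
    assert_eq!(parse("0.02"), Some(Merging::Radius(0.02)));
}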
@@ -77,11 +92,14 @@
     ///
     /// This method is stable with respect to spike locations:  on merge, the weight of existing
     /// spikes is set to zero, and a new one inserted at the end of the spike vector.
-    fn merge_spikes<G, V>(&mut self, method : SpikeMergingMethod<F>, accept : G) -> Option<V>
-    where G : Fn(&'_ Self) -> Option<V> {
+    fn merge_spikes<G>(&mut self, method : SpikeMergingMethod<F>, accept : G) -> usize
+    where G : FnMut(&'_ Self) -> bool {
         match method {
-            SpikeMergingMethod::HeuristicRadius(ρ) => self.do_merge_spikes_radius(ρ, accept),
-            SpikeMergingMethod::None => None,
+            SpikeMergingMethod::HeuristicRadius(ρ) =>
+                self.do_merge_spikes_radius(ρ, true, accept),
+            SpikeMergingMethod::HeuristicRadiusNoInterp(ρ) =>
+                self.do_merge_spikes_radius(ρ, false, accept),
+            SpikeMergingMethod::None => 0,
         }
     }
 
@@ -90,22 +108,23 @@
     /// Calls [`SpikeMerging::merge_spikes`] with `accept` constructed from the composition of
     /// `value` and `fitness`, compared to initial fitness. Returns the last return value of `value`
     /// for a merge accepted by `fitness`. If no merge was accepted, `value` applied to the initial
-    /// `self` is returned.
+    /// `self` is returned. The number of accepted merges is also returned.
     fn merge_spikes_fitness<G, H, V, O>(
         &mut self,
         method : SpikeMergingMethod<F>,
         value : G,
         fitness : H
-    ) -> V
+    ) -> (V, usize)
     where G : Fn(&'_ Self) -> V,
           H : Fn(&'_ V) -> O,
           O : PartialOrd {
-        let initial_res = value(self);
-        let initial_fitness = fitness(&initial_res);
-        self.merge_spikes(method, |μ| {
-            let res = value(μ);
-            (fitness(&res) <= initial_fitness).then_some(res)
-        }).unwrap_or(initial_res)
+        let mut res = value(self);
+        let initial_fitness = fitness(&res);
+        let count = self.merge_spikes(method, |μ| {
+            res = value(μ);
+            fitness(&res) <= initial_fitness
+        });
+        (res, count)
     }
 
     /// Attempt to merge spikes that are within radius $ρ$ of each other (unspecified norm).
@@ -113,8 +132,8 @@
     /// This method implements [`SpikeMerging::merge_spikes`] for
     /// [`SpikeMergingMethod::HeuristicRadius`]. The closure `accept` and the return value are
     /// as for that method.
-    fn do_merge_spikes_radius<G, V>(&mut self, ρ : F, accept : G) -> Option<V>
-    where G : Fn(&'_ Self) -> Option<V>;
+    fn do_merge_spikes_radius<G>(&mut self, ρ : F, interp : bool, accept : G) -> usize
+    where G : FnMut(&'_ Self) -> bool;
 }
 
 #[replace_float_literals(F::cast_from(literal))]
@@ -126,70 +145,58 @@
     /// The merge is attempted in place. If it is accepted by `accept` returning `true`, it is
     /// kept; otherwise the original weights of the spikes `i` and `j` are restored.
-    fn attempt_merge<G, V>(
+    ///
+    /// Returns the index of `self.spikes` storing the new spike.
+    fn attempt_merge<G>(
         &mut self,
-        res : &mut Option<V>,
         i : usize,
         j : usize,
-        accept : &G
-    ) -> bool
-    where G : Fn(&'_ Self) -> Option<V> {
+        interp : bool,
+        accept : &mut G
+    ) -> Option<usize>
+    where G : FnMut(&'_ Self) -> bool {
         let &DeltaMeasure{ x : xi, α : αi } = &self.spikes[i];
         let &DeltaMeasure{ x : xj, α : αj } = &self.spikes[j];
 
-        // Merge inplace
-        self.spikes[i].α = 0.0;
-        self.spikes[j].α = 0.0;
-        //self.spikes.push(DeltaMeasure{ α : αi + αj, x : (xi + xj)/2.0 });
-        self.spikes.push(DeltaMeasure{ α : αi + αj, x : (xi * αi + xj * αj) / (αi + αj) });
-        match accept(self) {
-            some@Some(..) => {
-                // Merge accepted, update our return value
-                *res = some;
-                // On next iteration process the newly merged spike.
-                //indices[k+1] = self.spikes.len() - 1;
-                true
-            },
-            None => {
+        if interp {
+            // Merge inplace
+            self.spikes[i].α = 0.0;
+            self.spikes[j].α = 0.0;
+            let αia = αi.abs();
+            let αja = αj.abs();
+            self.spikes.push(DeltaMeasure{ α : αi + αj, x : (xi * αia + xj * αja) / (αia + αja) });
+            if accept(self) {
+                Some(self.spikes.len()-1)
+            } else {
                 // Merge not accepted, restore modification
                 self.spikes[i].α = αi;
                 self.spikes[j].α = αj;
                 self.spikes.pop();
-                false
+                None
+            }
+        } else {
+            // Attempt merge inplace, first combination
+            self.spikes[i].α = αi + αj;
+            self.spikes[j].α = 0.0;
+            if accept(self) {
+                // Merge accepted
+                Some(i)
+            } else {
+                // Attempt merge inplace, second combination
+                self.spikes[i].α = 0.0;
+                self.spikes[j].α = αi + αj;
+                if accept(self) {
+                    // Merge accepted
+                    Some(j)
+                } else {
+                    // Merge not accepted, restore modification
+                    self.spikes[i].α = αi;
+                    self.spikes[j].α = αj;
+                    None
+                }
             }
         }
     }
-
-    /*
-    /// Attempts to merge spikes with indices i and j, acceptance through a delta.
-    fn attempt_merge_change<G, V>(
-        &mut self,
-        res : &mut Option<V>,
-        i : usize,
-        j : usize,
-        accept_change : &G
-    ) -> bool
-    where G : Fn(&'_ Self) -> Option<V> {
-        let &DeltaMeasure{ x : xi, α : αi } = &self.spikes[i];
-        let &DeltaMeasure{ x : xj, α : αj } = &self.spikes[j];
-        let δ = DeltaMeasure{ α : αi + αj, x : (xi + xj)/2.0 };
-        let λ = [-self.spikes[i], -self.spikes[j], δ.clone()].into();
-
-        match accept_change(&λ) {
-            some@Some(..) => {
-                // Merge accepted, update our return value
-                *res = some;
-                self.spikes[i].α = 0.0;
-                self.spikes[j].α = 0.0;
-                self.spikes.push(δ);
-                true
-            },
-            None => {
-                false
-            }
-        }
-    }*/
-
 }
 
 /// Sorts a vector of indices into `slice` by `compare`.
@@ -207,12 +214,13 @@
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 1>, F> {
 
-    fn do_merge_spikes_radius<G, V>(
+    fn do_merge_spikes_radius<G>(
         &mut self,
         ρ : F,
-        accept : G
-    ) -> Option<V>
-    where G : Fn(&'_ Self) -> Option<V> {
+        interp : bool,
+        mut accept : G
+    ) -> usize
+    where G : FnMut(&'_ Self) -> bool {
         // Sort by coordinate into an indexing array.
         let mut indices = sort_indices_by(&self.spikes, |&δ1, &δ2| {
             let &Loc([x1]) = &δ1.x;
@@ -222,11 +230,11 @@
         });
 
         // Initialise result
-        let mut res = None;
+        let mut count = 0;
 
         // Scan consecutive pairs and merge if close enough and accepted by `accept`.
         if indices.len() == 0 {
-            return res
+            return count
         }
         for k in 0..(indices.len()-1) {
             let i = indices[k];
@@ -236,13 +244,16 @@
             debug_assert!(xi <= xj);
             // If close enough, attempt merging
             if αi != 0.0 && αj != 0.0 && xj <= xi + ρ {
-                if self.attempt_merge(&mut res, i, j, &accept) {
-                    indices[k+1] = self.spikes.len() - 1;
+                if let Some(l) = self.attempt_merge(i, j, interp, &mut accept) {
+                    // For this to work (i.e. for the debug_assert! above not to trigger), the new
+                    // coordinate produced by attempt_merge has to be at most xj.
+                    indices[k+1] = l;
+                    count += 1
                 }
             }
         }
 
-        res
+        count
     }
 }
 
@@ -260,18 +271,18 @@
 #[replace_float_literals(F::cast_from(literal))]
 impl<F : Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 2>, F> {
 
-    fn do_merge_spikes_radius<G, V>(&mut self, ρ : F, accept : G) -> Option<V>
-    where G : Fn(&'_ Self) -> Option<V> {
+    fn do_merge_spikes_radius<G>(&mut self, ρ : F, interp : bool, mut accept : G) -> usize
+    where G : FnMut(&'_ Self) -> bool {
         // Sort by first coordinate into an indexing array.
         let mut indices = sort_indices_by(&self.spikes, compare_first_coordinate);
 
         // Initialise result
-        let mut res = None;
+        let mut count = 0;
         let mut start_scan_2nd = 0;
 
         // Scan in order
         if indices.len() == 0 {
-            return res
+            return count
         }
         for k in 0..indices.len()-1 {
             let i = indices[k];
@@ -324,9 +335,10 @@
 
             // Attempt merging closest close-enough spike
             if let Some((l, j, _)) = closest {
-                if self.attempt_merge(&mut res, i, j, &accept) {
+                if let Some(n) = self.attempt_merge(i, j, interp, &mut accept) {
                     // If the merge was successful, make the new spike a candidate for merging.
-                    indices[l] = self.spikes.len() - 1;
+                    indices[l] = n;
+                    count += 1;
                     let compare = |i, j| compare_first_coordinate(&self.spikes[i],
                                                                   &self.spikes[j]);
                     // Re-sort relevant range of indices
@@ -339,7 +351,7 @@
             }
         }
 
-        res
+        count
     }
 }
 
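The reworked `merge_spikes_fitness` takes an `FnMut` acceptance closure, so the fitness test can keep updating a result it has captured, and it now returns that last value together with the number of accepted merges. A standalone sketch of that contract, not part of this changeset; `try_merges` is only a stand-in for `do_merge_spikes_radius`:

// Sketch of the (value, merge count) contract with an FnMut acceptance test.
fn try_merges<M>(model: &mut M, candidates: usize, mut accept: impl FnMut(&M) -> bool) -> usize {
    let mut count = 0;
    for _ in 0..candidates {
        // A real implementation would modify `model` here and roll back on rejection.
        if accept(&*model) { count += 1; }
    }
    count
}

fn merge_fitness<M, V, O: PartialOrd>(
    model: &mut M,
    candidates: usize,
    value: impl Fn(&M) -> V,
    fitness: impl Fn(&V) -> O,
) -> (V, usize) {
    let mut res = value(&*model);
    let initial = fitness(&res);
    let count = try_merges(model, candidates, |m| {
        res = value(m);              // FnMut: the closure mutates the captured result
        fitness(&res) <= initial     // accept only non-worsening merges
    });
    (res, count)
}

fn main() {
    let mut spikes = vec![1.0_f64, 2.0, 3.0];
    let (fit, merged) = merge_fitness(&mut spikes, 2, |s| s.iter().sum::<f64>(), |v| *v);
    println!("fitness = {}, accepted merges = {}", fit, merged);
}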
--- a/src/pdps.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/pdps.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -121,7 +121,7 @@
     /// Accelerate if available
     pub acceleration : Acceleration,
     /// Generic parameters
-    pub insertion : FBGenericConfig<F>,
+    pub generic : FBGenericConfig<F>,
 }
 
 #[replace_float_literals(F::cast_from(literal))]
@@ -132,7 +132,7 @@
             τ0,
             σ0 : 0.99/τ0,
             acceleration : Acceleration::Partial,
-            insertion : Default::default()
+            generic : Default::default(),
         }
     }
 }
@@ -233,7 +233,7 @@
       Reg : RegTerm<F, N> {
 
     // Set up parameters
-    let config = &pdpsconfig.insertion;
+    let config = &pdpsconfig.generic;
     let op𝒟norm = op𝒟.opnorm_bound();
     let l = opA.lipschitz_factor(&op𝒟).unwrap().sqrt();
     let mut τ = pdpsconfig.τ0 / l;
--- a/src/plot.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/plot.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -12,7 +12,11 @@
 
 use alg_tools::types::*;
 use alg_tools::lingrid::LinGrid;
-use alg_tools::mapping::RealMapping;
+use alg_tools::mapping::{
+    RealMapping,
+    DifferentiableRealMapping,
+    SliceCodomain
+};
 use alg_tools::loc::Loc;
 use alg_tools::bisection_tree::Bounds;
 use alg_tools::maputil::map4;
@@ -52,6 +56,26 @@
         filename : String,
         explanation : String
     );
+
+    /// Plot a differentiable mapping into several files, sampling values on a given grid.
+    fn plot_into_file_diff<
+        F : Float,
+        T1 : DifferentiableRealMapping<F, N>,
+    > (
+        g : &T1,
+        grid : LinGrid<F, N>,
+        base_filename : String,
+        explanation : String
+    ) {
+        Self::plot_into_file(g, grid, base_filename.clone(), explanation.clone());
+        let d = g.diff_ref();
+        for i in 0..N {
+            Self::plot_into_file(&d.slice_codomain_ref(i),
+                                 grid,
+                                 format!("{base_filename}_diff{i}"),
+                                 format!("{explanation} differential, dimension {i}"));
+        }
+    }
 }
 
 /// Helper type for looking up a [`Plotting`] based on dimension.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/radon_fb.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -0,0 +1,455 @@
+/*!
+Solver for the point source localisation problem using a simplified forward-backward splitting method.
+
+Instead of the $𝒟$-norm of `fb.rs`, this uses a standard Radon norm for the proximal map.
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Serialize, Deserialize};
+use colored::Colorize;
+use nalgebra::DVector;
+
+use alg_tools::iterate::{
+    AlgIteratorFactory,
+    AlgIteratorState,
+};
+use alg_tools::euclidean::Euclidean;
+use alg_tools::linops::Apply;
+use alg_tools::sets::Cube;
+use alg_tools::loc::Loc;
+use alg_tools::bisection_tree::{
+    BTFN,
+    Bounds,
+    BTNodeLookup,
+    BTNode,
+    BTSearch,
+    P2Minimise,
+    SupportGenerator,
+    LocalAnalysis,
+};
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+
+use crate::types::*;
+use crate::measures::{
+    DiscreteMeasure,
+    DeltaMeasure,
+};
+use crate::measures::merging::{
+    SpikeMergingMethod,
+    SpikeMerging,
+};
+use crate::forward_model::ForwardModel;
+use crate::plot::{
+    SeqPlotter,
+    Plotting,
+    PlotLookup
+};
+use crate::regularisation::RegTerm;
+use crate::dataterm::{
+    calculate_residual,
+    L2Squared,
+    DataTerm,
+};
+
+use crate::fb::{
+    FBGenericConfig,
+    postprocess
+};
+
+/// Settings for [`pointsource_radon_fb_reg`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct RadonFBConfig<F : Float> {
+    /// Step length scaling
+    pub τ0 : F,
+    /// Generic parameters
+    pub insertion : FBGenericConfig<F>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for RadonFBConfig<F> {
+    fn default() -> Self {
+        RadonFBConfig {
+            τ0 : 0.99,
+            insertion : Default::default()
+        }
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+pub(crate) fn insert_and_reweigh<
+    'a, F, GA, BTA, S, Reg, State, const N : usize
+>(
+    μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+    minus_τv : &mut BTFN<F, GA, BTA, N>,
+    μ_base : &mut DiscreteMeasure<Loc<F, N>, F>,
+    _ν_delta: Option<&DiscreteMeasure<Loc<F, N>, F>>,
+    τ : F,
+    ε : F,
+    config : &FBGenericConfig<F>,
+    reg : &Reg,
+    _state : &State,
+    stats : &mut IterInfo<F, N>,
+)
+where F : Float + ToNalgebraRealField,
+      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      Reg : RegTerm<F, N>,
+      State : AlgIteratorState {
+
+    'i_and_w: for i in 0..=1 {
+        // Optimise weights
+        if μ.len() > 0 {
+            // Form the finite-dimensional subproblem. The subproblem's references to the original
+            // μ^k from the beginning of the iteration are all contained in the immutable g̃ and y.
+            let g̃ = DVector::from_iterator(μ.len(),
+                                           μ.iter_locations()
+                                            .map(|ζ| F::to_nalgebra_mixed(minus_τv.apply(ζ))));
+            let mut x = μ.masses_dvector();
+            let y = μ_base.masses_dvector();
+
+            // Solve finite-dimensional subproblem.
+            stats.inner_iters += reg.solve_findim_l1squared(&y, &g̃, τ, &mut x, ε, config);
+
+            // Update masses of μ based on solution of finite-dimensional subproblem.
+            μ.set_masses_dvector(&x);
+        }
+
+        if i>0 {
+            // Simple debugging test to see if more inserts would be needed. Doesn't seem so.
+            //let n = μ.dist_matching(μ_base);
+            //println!("{:?}", reg.find_tolerance_violation_slack(minus_τv, τ, ε, false, config, n));
+            break 'i_and_w
+        }
+        
+        // Calculate ‖μ - μ_base‖_ℳ
+        let n = μ.dist_matching(μ_base);
+       
+        // Find a spike to insert, if needed.
+        // This only checks the overall tolerances, not tolerances on support of μ-μ_base or μ,
+        // which are supposed to have been guaranteed by the finite-dimensional weight optimisation.
+        match reg.find_tolerance_violation_slack(minus_τv, τ, ε, false, config, n) {
+            None => { break 'i_and_w },
+            Some((ξ, _v_ξ, _in_bounds)) => {
+                // The weight is determined by the finite-dimensional optimisation algorithm
+                // above.
+                *μ += DeltaMeasure { x : ξ, α : 0.0 };
+                *μ_base += DeltaMeasure { x : ξ, α : 0.0 };
+            }
+        };
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+pub(crate) fn prune_and_maybe_simple_merge<
+    'a, F, GA, BTA, S, Reg, State, const N : usize
+>(
+    μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+    minus_τv : &mut BTFN<F, GA, BTA, N>,
+    μ_base : &DiscreteMeasure<Loc<F, N>, F>,
+    τ : F,
+    ε : F,
+    config : &FBGenericConfig<F>,
+    reg : &Reg,
+    state : &State,
+    stats : &mut IterInfo<F, N>,
+)
+where F : Float + ToNalgebraRealField,
+      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      Reg : RegTerm<F, N>,
+      State : AlgIteratorState {
+
+    assert!(μ_base.len() <= μ.len());
+
+    if state.iteration() % config.merge_every == 0 {
+        stats.merged += μ.merge_spikes(config.merging, |μ_candidate| {
+            // Important: μ_candidate's new points are afterwards,
+            // and do not conflict with μ_base.
+            // TODO: could simplify to requiring μ_base instead of μ_radon,
+            // but that may complicate things with the sliding base's extra points,
+            // which need to come after μ_candidate's extra points.
+            // TODO: doesn't seem to work, maybe need to merge μ_base as well?
+            // Although that doesn't seem to make sense.
+            let μ_radon = μ_candidate.sub_matching(μ_base);
+            reg.verify_merge_candidate_radonsq(minus_τv, μ_candidate, τ, ε, &config, &μ_radon)
+            //let n = μ_candidate.dist_matching(μ_base);
+            //reg.find_tolerance_violation_slack(minus_τv, τ, ε, false, config, n).is_none()
+        });
+    }
+
+    let n_before_prune = μ.len();
+    μ.prune();
+    debug_assert!(μ.len() <= n_before_prune);
+    stats.pruned += n_before_prune - μ.len();
+}
+
+
+/// Iteratively solve the pointsource localisation problem using simplified forward-backward splitting.
+///
+/// The settings in `config` have their [respective documentation](RadonFBConfig). `opA` is the
+/// forward operator $A$, $b$ the observable, and $\lambda$ the regularisation weight.
+/// Finally, the `iterator` is an outer loop verbosity and iteration count control
+/// as documented in [`alg_tools::iterate`].
+///
+/// For details on the mathematical formulation, see the [module level](self) documentation.
+///
+/// The implementation relies on [`alg_tools::bisection_tree::BTFN`] presentations of
+/// sums of simple functions using bisection trees, and the related
+/// [`alg_tools::bisection_tree::Aggregator`]s, to efficiently search for component functions
+/// active at specific points, and to maximise their sums. Through the implementation of the
+/// [`alg_tools::bisection_tree::BT`] bisection trees, it also relies on the copy-on-write features
+/// of [`std::sync::Arc`] to only update relevant parts of the bisection tree when adding functions.
+///
+/// Returns the final iterate.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_radon_fb_reg<
+    'a, F, I, A, GA, BTA, S, Reg, const N : usize
+>(
+    opA : &'a A,
+    b : &A::Observable,
+    reg : Reg,
+    fbconfig : &RadonFBConfig<F>,
+    iterator : I,
+    mut _plotter : SeqPlotter<F, N>,
+) -> DiscreteMeasure<Loc<F, N>, F>
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<IterInfo<F, N>>,
+      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
+                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
+      A::Observable : std::ops::MulAssign<F>,
+      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+      PlotLookup : Plotting<N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      Reg : RegTerm<F, N> {
+
+    // Set up parameters
+    let config = &fbconfig.insertion;
+    // We need L such that the descent inequality F(ν) - F(μ) - ⟨F'(μ),ν-μ⟩ ≤ (L/2)‖ν-μ‖²_ℳ ∀ ν,μ
+    // holds. Since the left hand side expands as (1/2)‖A(ν-μ)‖₂², this is to say, we need L such
+    // that ‖Aμ‖₂² ≤ L ‖μ‖²_ℳ ∀ μ. Thus `opnorm_bound` gives the square root of L.
+    let τ = fbconfig.τ0/opA.opnorm_bound().powi(2);
+    // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
+    // by τ compared to the conditional gradient approach.
+    let tolerance = config.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut residual = -b;
+    let mut stats = IterInfo::new();
+
+    // Run the algorithm
+    iterator.iterate(|state| {
+        // Calculate smooth part of surrogate model.
+        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
+        // has no significant overhead. For some reason Rust doesn't allow simply moving
+        // the residual and replacing it below before the end of this closure.
+        residual *= -τ;
+        let r = std::mem::replace(&mut residual, opA.empty_observable());
+        let mut minus_τv = opA.preadjoint().apply(r);
+
+        // Save current base point
+        let mut μ_base = μ.clone();
+            
+        // Insert and reweigh
+        insert_and_reweigh(
+            &mut μ, &mut minus_τv, &mut μ_base, None,
+            τ, ε,
+            config, &reg, state, &mut stats
+        );
+
+        // Prune and possibly merge spikes
+        prune_and_maybe_simple_merge(
+            &mut μ, &mut minus_τv, &μ_base,
+            τ, ε,
+            config, &reg, state, &mut stats
+        );
+
+        // Update residual
+        residual = calculate_residual(&μ, opA, b);
+
+        // Update main tolerance for next iteration
+        let ε_prev = ε;
+        ε = tolerance.update(ε, state.iteration());
+        stats.this_iters += 1;
+
+        // Give function value if needed
+        state.if_verbose(|| {
+            // Plot if so requested
+            // plotter.plot_spikes(
+            //     format!("iter {} end;", state.iteration()), &d,
+            //     "start".to_string(), Some(&minus_τv),
+            //     reg.target_bounds(τ, ε_prev), &μ,
+            // );
+            // Calculate mean inner iterations and reset relevant counters.
+            // Return the statistics
+            let res = IterInfo {
+                value : residual.norm2_squared_div2() + reg.apply(&μ),
+                n_spikes : μ.len(),
+                ε : ε_prev,
+                postprocessing: config.postprocessing.then(|| μ.clone()),
+                .. stats
+            };
+            stats = IterInfo::new();
+            res
+        })
+    });
+
+    postprocess(μ, config, L2Squared, opA, b)
+}
+
+/// Iteratively solve the pointsource localisation problem using simplified inertial forward-backward splitting.
+///
+/// The settings in `config` have their [respective documentation](RadonFBConfig). `opA` is the
+/// forward operator $A$, $b$ the observable, and $\lambda$ the regularisation weight.
+/// Finally, the `iterator` is an outer loop verbosity and iteration count control
+/// as documented in [`alg_tools::iterate`].
+///
+/// For details on the mathematical formulation, see the [module level](self) documentation.
+///
+/// The implementation relies on [`alg_tools::bisection_tree::BTFN`] presentations of
+/// sums of simple functions using bisection trees, and the related
+/// [`alg_tools::bisection_tree::Aggregator`]s, to efficiently search for component functions
+/// active at specific points, and to maximise their sums. Through the implementation of the
+/// [`alg_tools::bisection_tree::BT`] bisection trees, it also relies on the copy-on-write features
+/// of [`std::sync::Arc`] to only update relevant parts of the bisection tree when adding functions.
+///
+/// Returns the final iterate.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_radon_fista_reg<
+    'a, F, I, A, GA, BTA, S, Reg, const N : usize
+>(
+    opA : &'a A,
+    b : &A::Observable,
+    reg : Reg,
+    fbconfig : &RadonFBConfig<F>,
+    iterator : I,
+    mut _plotter : SeqPlotter<F, N>,
+) -> DiscreteMeasure<Loc<F, N>, F>
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<IterInfo<F, N>>,
+      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
+                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
+      A::Observable : std::ops::MulAssign<F>,
+      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+      PlotLookup : Plotting<N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      Reg : RegTerm<F, N> {
+
+    // Set up parameters
+    let config = &fbconfig.insertion;
+    // We need L such that the descent inequality F(ν) - F(μ) - ⟨F'(μ),ν-μ⟩ ≤ (L/2)‖ν-μ‖²_ℳ ∀ ν,μ
+    // holds. Since the left hand side expands as (1/2)‖A(ν-μ)‖₂², this is to say, we need L such
+    // that ‖Aμ‖₂² ≤ L ‖μ‖²_ℳ ∀ μ. Thus `opnorm_bound` gives the square root of L.
+    let τ = fbconfig.τ0/opA.opnorm_bound().powi(2);
+    let mut λ = 1.0;
+    // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
+    // by τ compared to the conditional gradient approach.
+    let tolerance = config.tolerance * τ * reg.tolerance_scaling();
+    let mut ε = tolerance.initial();
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut μ_prev = DiscreteMeasure::new();
+    let mut residual = -b;
+    let mut stats = IterInfo::new();
+    let mut warned_merging = false;
+
+    // Run the algorithm
+    iterator.iterate(|state| {
+        // Calculate smooth part of surrogate model.
+        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
+        // has no significant overhead. For some reason Rust doesn't allow simply moving
+        // the residual and replacing it below before the end of this closure.
+        residual *= -τ;
+        let r = std::mem::replace(&mut residual, opA.empty_observable());
+        let mut minus_τv = opA.preadjoint().apply(r);
+
+        // Save current base point
+        let mut μ_base = μ.clone();
+            
+        // Insert new spikes and reweigh
+        insert_and_reweigh(
+            &mut μ, &mut minus_τv, &mut μ_base, None,
+            τ, ε,
+            config, &reg, state, &mut stats
+        );
+
+        // (Do not) merge spikes.
+        if state.iteration() % config.merge_every == 0 {
+            match config.merging {
+                SpikeMergingMethod::None => { },
+                _ => if !warned_merging {
+                    let err = format!("Merging not supported for μFISTA");
+                    println!("{}", err.red());
+                    warned_merging = true;
+                }
+            }
+        }
+
+        // Update inertial parameters
+        let λ_prev = λ;
+        λ = 2.0 * λ_prev / ( λ_prev + (4.0 + λ_prev * λ_prev).sqrt() );
+        let θ = λ / λ_prev - λ;
+
+        // Perform inertial update on μ.
+        // This computes μ ← (1 + θ) * μ - θ * μ_prev, pruning spikes where both μ
+        // and μ_prev have zero weight. Since both have weights from the finite-dimensional
+        // subproblem with a proximal projection step, this is likely to happen when the
+        // spike is not needed. A copy of the pruned μ without arithmetic performed is
+        // stored in μ_prev.
+        let n_before_prune = μ.len();
+        μ.pruning_sub(1.0 + θ, θ, &mut μ_prev);
+        debug_assert!(μ.len() <= n_before_prune);
+        stats.pruned += n_before_prune - μ.len();
+
+        // Update residual
+        residual = calculate_residual(&μ, opA, b);
+
+        // Update main tolerance for next iteration
+        let ε_prev = ε;
+        ε = tolerance.update(ε, state.iteration());
+        stats.this_iters += 1;
+
+        // Give function value if needed
+        state.if_verbose(|| {
+            // Plot if so requested
+            // plotter.plot_spikes(
+            //     format!("iter {} end;", state.iteration()), &d,
+            //     "start".to_string(), Some(&minus_τv),
+            //     reg.target_bounds(τ, ε_prev), &μ_prev,
+            // );
+            // Calculate mean inner iterations and reset relevant counters.
+            // Return the statistics
+            let res = IterInfo {
+                value : L2Squared.calculate_fit_op(&μ_prev, opA, b) + reg.apply(&μ_prev),
+                n_spikes : μ_prev.len(),
+                ε : ε_prev,
+                postprocessing: config.postprocessing.then(|| μ_prev.clone()),
+                .. stats
+            };
+            stats = IterInfo::new();
+            res
+        })
+    });
+
+    postprocess(μ_prev, config, L2Squared, opA, b)
+}
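The inertial step of `pointsource_radon_fista_reg` above updates λ⁺ = 2λ/(λ + √(4 + λ²)) starting from λ = 1 and applies the over-relaxation weight θ = λ⁺/λ − λ⁺. A standalone loop, not part of this changeset, printing the first few parameter values:

// First few FISTA-style inertial parameters, mirroring the update above.
fn main() {
    let mut λ: f64 = 1.0;
    for k in 1..=5 {
        let λ_prev = λ;
        λ = 2.0 * λ_prev / (λ_prev + (4.0 + λ_prev * λ_prev).sqrt());
        let θ = λ / λ_prev - λ;
        println!("k = {}: λ = {:.4}, θ = {:.4}", k, λ, θ);
    }
}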
--- a/src/regularisation.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/regularisation.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -34,9 +34,13 @@
 use crate::subproblem::{
     nonneg::quadratic_nonneg,
     unconstrained::quadratic_unconstrained,
+    l1squared_unconstrained::l1squared_unconstrained,
+    l1squared_nonneg::l1squared_nonneg
 };
 use alg_tools::iterate::AlgIteratorFactory;
 
+use std::cmp::Ordering::{Greater, Less, Equal};
+
 /// The regularisation term $α\\|μ\\|\_{ℳ(Ω)} + δ_{≥ 0}(μ)$ for [`pointsource_fb_reg`] and other
 /// algorithms.
 ///
@@ -131,6 +135,23 @@
         config : &FBGenericConfig<F>
     ) -> usize;
 
+    /// Approximately solve the problem
+    /// <div>$$
+    ///     \min_{x ∈ ℝ^n} \frac{1}{2} |x-y|_1^2 - g^⊤ x + τ G(x)
+    /// $$</div>
+    /// for $G$ depending on the trait implementation.
+    ///
+    /// Returns the number of iterations taken.
+    fn solve_findim_l1squared(
+        &self,
+        y : &DVector<F::MixedType>,
+        g : &DVector<F::MixedType>,
+        τ : F,
+        x : &mut DVector<F::MixedType>,
+        ε : F,
+        config : &FBGenericConfig<F>
+    ) -> usize;
+
     /// Find a point where `d` may violate the tolerance `ε`.
     ///
     /// If `skip_by_rough_check` is set, do not find the point if a rough check indicates that we
@@ -151,8 +172,37 @@
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
           G::SupportType : Mapping<Loc<F, N>,Codomain=F>
+                           + LocalAnalysis<F, Bounds<F>, N> {
+        self.find_tolerance_violation_slack(d, τ, ε, skip_by_rough_check, config, F::ZERO)
+    }
+
+    /// Find a point where `d` may violate the tolerance `ε`.
+    ///
+    /// This version includes a `slack` parameter to expand the tolerances.
+    /// It is used for the Radon-norm squared proximal term in [`crate::radon_fb`].
+    ///
+    /// If `skip_by_rough_check` is set, do not find the point if a rough check indicates that we
+    /// are in bounds. `ε` is the current main tolerance and `τ` a scaling factor for the
+    /// regulariser.
+    ///
+    /// Returns `None` if `d` is in bounds either based on the rough check, or a more precise check
+    /// terminating early. Otherwise returns a possibly violating point, the value of `d` there,
+    /// and a boolean indicating whether the found point is in bounds.
+    fn find_tolerance_violation_slack<G, BT>(
+        &self,
+        d : &mut BTFN<F, G, BT, N>,
+        τ : F,
+        ε : F,
+        skip_by_rough_check : bool,
+        config : &FBGenericConfig<F>,
+        slack : F,
+    ) -> Option<(Loc<F, N>, F, bool)>
+    where BT : BTSearch<F, N, Agg=Bounds<F>>,
+          G : SupportGenerator<F, N, Id=BT::Data>,
+          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
                            + LocalAnalysis<F, Bounds<F>, N>;
 
+
     /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
     ///
     /// `ε` is the current main tolerance and `τ` a scaling factor for the regulariser.
@@ -169,6 +219,28 @@
           G::SupportType : Mapping<Loc<F, N>,Codomain=F>
                            + LocalAnalysis<F, Bounds<F>, N>;
 
+    /// Verify that `d` is in bounds `ε` for a merge candidate `μ`
+    ///
+    /// This version is used for the Radon-norm squared proximal term in [`crate::radon_fb`].
+    /// The [`DiscreteMeasure`]s `μ` and `radon_μ` are assumed to have the same coordinates
+    /// at matching indices.
+    ///
+    /// `ε` is the current main tolerance and `τ` a scaling factor for the regulariser.
+    fn verify_merge_candidate_radonsq<G, BT>(
+        &self,
+        d : &mut BTFN<F, G, BT, N>,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        τ : F,
+        ε : F,
+        config : &FBGenericConfig<F>,
+        radon_μ :&DiscreteMeasure<Loc<F, N>, F>,
+    ) -> bool
+    where BT : BTSearch<F, N, Agg=Bounds<F>>,
+          G : SupportGenerator<F, N, Id=BT::Data>,
+          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
+                           + LocalAnalysis<F, Bounds<F>, N>;
+
+
     /// TODO: document this
     fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>>;
 
@@ -197,6 +269,9 @@
           G : SupportGenerator<F, N, Id=BT::Data>,
           G::SupportType : Mapping<Loc<F, N>,Codomain=F>
                            + LocalAnalysis<F, Bounds<F>, N>;
+
+    /// Convert a bound on the regulariser to a bound on the Radon norm.
+    fn radon_norm_bound(&self, b : F) -> F;
 }
 
 #[replace_float_literals(F::cast_from(literal))]
@@ -215,30 +290,47 @@
     ) -> usize {
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
-        let inner_τ = config.inner.τ0 / mA_normest;
-        quadratic_nonneg(config.inner.method, mA, g, τ * self.α(), x,
-                         inner_τ, inner_it)
+        quadratic_nonneg(mA, g, τ * self.α(), x,
+                         mA_normest, &config.inner, inner_it)
+
     }
 
+    fn solve_findim_l1squared(
+        &self,
+        y : &DVector<F::MixedType>,
+        g : &DVector<F::MixedType>,
+        τ : F,
+        x : &mut DVector<F::MixedType>,
+        ε : F,
+        config : &FBGenericConfig<F>
+    ) -> usize {
+        let inner_tolerance = ε * config.inner.tolerance_mult;
+        let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
+        l1squared_nonneg(y, g, τ * self.α(), 1.0, x,
+                         &config.inner, inner_it)
+    }
+
+
     #[inline]
-    fn find_tolerance_violation<G, BT>(
+    fn find_tolerance_violation_slack<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
         τ : F,
         ε : F,
         skip_by_rough_check : bool,
         config : &FBGenericConfig<F>,
+        slack : F
     ) -> Option<(Loc<F, N>, F, bool)>
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
           G::SupportType : Mapping<Loc<F, N>,Codomain=F>
                            + LocalAnalysis<F, Bounds<F>, N> {
         let τα = τ * self.α();
-        let keep_below = τα + ε;
-        let maximise_above = τα + ε * config.insertion_cutoff_factor;
+        let keep_below = τα + slack + ε;
+        let maximise_above = τα + slack + ε * config.insertion_cutoff_factor;
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
 
-        // If preliminary check indicates that we are in bonds, and if it otherwise matches
+        // If preliminary check indicates that we are in bounds, and if it otherwise matches
         // the insertion strategy, skip insertion.
         if skip_by_rough_check && d.bounds().upper() <= keep_below {
             None
@@ -271,9 +363,9 @@
         return (
             bnd.lower() >= keep_supp_above
             ||
-            μ.iter_spikes().map(|&DeltaMeasure{ α : β, ref x }| {
-                (β == 0.0) || d.apply(x) >= keep_supp_above
-            }).all(std::convert::identity)
+            μ.iter_spikes().all(|&DeltaMeasure{ α, ref x }| {
+                (α == 0.0) || d.apply(x) >= keep_supp_above
+            })
          ) && (
             bnd.upper() <= keep_below
             ||
@@ -281,6 +373,50 @@
         )
     }
 
+    fn verify_merge_candidate_radonsq<G, BT>(
+        &self,
+        d : &mut BTFN<F, G, BT, N>,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        τ : F,
+        ε : F,
+        config : &FBGenericConfig<F>,
+        radon_μ :&DiscreteMeasure<Loc<F, N>, F>,
+    ) -> bool
+    where BT : BTSearch<F, N, Agg=Bounds<F>>,
+          G : SupportGenerator<F, N, Id=BT::Data>,
+          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
+                           + LocalAnalysis<F, Bounds<F>, N> {
+        let τα = τ * self.α();
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+        let merge_tolerance = config.merge_tolerance_mult * ε;
+        let slack = radon_μ.norm(Radon);
+        let bnd = d.bounds();
+
+        return {
+            μ.both_matching(radon_μ)
+             .all(|(α, rα, x)| {
+                let v = d.apply(x);
+                let (l1, u1) = match α.partial_cmp(&0.0).unwrap_or(Equal) {
+                    Greater => (τα, τα),
+                    _ => (F::NEG_INFINITY, τα),
+                    // Less should not happen; treated as Equal
+                };
+                let (l2, u2) = match rα.partial_cmp(&0.0).unwrap_or(Equal) {
+                    Greater => (slack, slack),
+                    Equal => (-slack, slack),
+                    Less => (-slack, -slack),
+                };
+                // TODO: both fail.
+                (l1 + l2 - merge_tolerance <= v) && (v <= u1 + u2 + merge_tolerance)
+            })
+         } && {
+            let keep_below = τα + slack + merge_tolerance;
+            bnd.upper() <= keep_below
+            ||
+            d.has_upper_bound(keep_below, refinement_tolerance, config.refinement.max_steps)
+         }
+    }
+
     fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>> {
         let τα = τ * self.α();
         Some(Bounds(τα - ε,  τα + ε))
@@ -310,9 +446,12 @@
           G : SupportGenerator<F, N, Id=BT::Data>,
           G::SupportType : Mapping<Loc<F, N>,Codomain=F>
                            + LocalAnalysis<F, Bounds<F>, N> {
-        //let w = |x| 1.0.min((ε + d.apply(x))/(τ * self.α()));
-        let τw = |x| τ.min((ε + d.apply(x))/self.α());
-        τw(z) - τw(y)
+        let w = |x| 1.0.min((ε + d.apply(x))/(τ * self.α()));
+        w(z) - w(y)
+    }
+
+    fn radon_norm_bound(&self, b : F) -> F {
+        b / self.α()
     }
 }
 
@@ -331,28 +470,43 @@
     ) -> usize {
         let inner_tolerance = ε * config.inner.tolerance_mult;
         let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
-        let inner_τ = config.inner.τ0 / mA_normest;
-        quadratic_unconstrained(config.inner.method, mA, g, τ * self.α(), x,
-                                inner_τ, inner_it)
+        quadratic_unconstrained(mA, g, τ * self.α(), x,
+                                mA_normest, &config.inner, inner_it)
     }
 
-   fn find_tolerance_violation<G, BT>(
+    fn solve_findim_l1squared(
+        &self,
+        y : &DVector<F::MixedType>,
+        g : &DVector<F::MixedType>,
+        τ : F,
+        x : &mut DVector<F::MixedType>,
+        ε : F,
+        config : &FBGenericConfig<F>
+    ) -> usize {
+        let inner_tolerance = ε * config.inner.tolerance_mult;
+        let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
+        l1squared_unconstrained(y, g, τ * self.α(), 1.0, x,
+                                &config.inner, inner_it)
+    }
+
+    fn find_tolerance_violation_slack<G, BT>(
         &self,
         d : &mut BTFN<F, G, BT, N>,
         τ : F,
         ε : F,
         skip_by_rough_check : bool,
         config : &FBGenericConfig<F>,
+        slack : F,
     ) -> Option<(Loc<F, N>, F, bool)>
     where BT : BTSearch<F, N, Agg=Bounds<F>>,
           G : SupportGenerator<F, N, Id=BT::Data>,
           G::SupportType : Mapping<Loc<F, N>,Codomain=F>
                            + LocalAnalysis<F, Bounds<F>, N> {
         let τα = τ * self.α();
-        let keep_below = τα + ε;
-        let keep_above = -τα - ε;
-        let maximise_above = τα + ε * config.insertion_cutoff_factor;
-        let minimise_below = -τα - ε * config.insertion_cutoff_factor;
+        let keep_below = τα + slack + ε;
+        let keep_above = -(τα + slack) - ε;
+        let maximise_above = τα + slack + ε * config.insertion_cutoff_factor;
+        let minimise_below = -(τα + slack) - ε * config.insertion_cutoff_factor;
         let refinement_tolerance = ε * config.refinement.tolerance_mult;
 
         // If preliminary check indicates that we are in bounds, and if it otherwise matches
@@ -405,14 +559,13 @@
         return (
             (bnd.lower() >= keep_supp_pos_above && bnd.upper() <= keep_supp_neg_below)
             ||
-            μ.iter_spikes().map(|&DeltaMeasure{ α : β, ref x }| {
-                use std::cmp::Ordering::*;
+            μ.iter_spikes().all(|&DeltaMeasure{ α : β, ref x }| {
                 match β.partial_cmp(&0.0) {
                     Some(Greater) => d.apply(x) >= keep_supp_pos_above,
                     Some(Less) => d.apply(x) <= keep_supp_neg_below,
                     _ => true,
                 }
-            }).all(std::convert::identity)
+            })
         ) && (
             bnd.upper() <= keep_below
             ||
@@ -426,6 +579,56 @@
         )
     }
 
+    fn verify_merge_candidate_radonsq<G, BT>(
+        &self,
+        d : &mut BTFN<F, G, BT, N>,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        τ : F,
+        ε : F,
+        config : &FBGenericConfig<F>,
+        radon_μ : &DiscreteMeasure<Loc<F, N>, F>,
+    ) -> bool
+    where BT : BTSearch<F, N, Agg=Bounds<F>>,
+          G : SupportGenerator<F, N, Id=BT::Data>,
+          G::SupportType : Mapping<Loc<F, N>,Codomain=F>
+                           + LocalAnalysis<F, Bounds<F>, N> {
+        let τα = τ * self.α();
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+        let merge_tolerance = config.merge_tolerance_mult * ε;
+        let slack = radon_μ.norm(Radon);
+        let bnd = d.bounds();
+
+        return {
+            μ.both_matching(radon_μ)
+             .all(|(α, rα, x)| {
+                let v = d.apply(x);
+                let (l1, u1) = match α.partial_cmp(&0.0).unwrap_or(Equal) {
+                    Greater => (τα, τα),
+                    Equal => (-τα, τα),
+                    Less => (-τα, -τα),
+                };
+                let (l2, u2) = match rα.partial_cmp(&0.0).unwrap_or(Equal) {
+                    Greater => (slack, slack),
+                    Equal => (-slack, slack),
+                    Less => (-slack, -slack),
+                };
+                (l1 + l2 - merge_tolerance <= v) && (v <= u1 + u2 + merge_tolerance)
+            })
+         } && {
+            let keep_below = τα + slack + merge_tolerance;
+            bnd.upper() <= keep_below
+            ||
+            d.has_upper_bound(keep_below, refinement_tolerance,
+                              config.refinement.max_steps)
+        } && {
+            let keep_above = -τα - slack - merge_tolerance;
+            bnd.lower() >= keep_above
+            ||
+            d.has_lower_bound(keep_above, refinement_tolerance,
+                              config.refinement.max_steps)
+        }
+    }
+
     fn target_bounds(&self, τ : F, ε : F) -> Option<Bounds<F>> {
         let τα = τ * self.α();
         Some(Bounds(-τα - ε,  τα + ε))
@@ -457,14 +660,14 @@
                            + LocalAnalysis<F, Bounds<F>, N> {
 
         let α = self.α();
-        // let w = |x| {
-        //     let dx = d.apply(x);
-        //     ((-ε + dx)/(τ * α)).max(1.0.min(ε + dx)/(τ * α))
-        // };
-        let τw = |x| {
+        let w = |x| {
             let dx = d.apply(x);
-            ((-ε + dx)/α).max(τ.min(ε + dx)/α)
+            ((-ε + dx)/(τ * α)).max(1.0.min(ε + dx)/(τ * α))
         };
-        τw(z) - τw(y)
+        w(z) - w(y)
+    }
+
+    fn radon_norm_bound(&self, b : F) -> F {
+        b / self.α()
     }
 }
\ No newline at end of file
--- a/src/run.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/run.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -31,7 +31,10 @@
 use alg_tools::error::DynError;
 use alg_tools::tabledump::TableDump;
 use alg_tools::sets::Cube;
-use alg_tools::mapping::{RealMapping, Differentiable};
+use alg_tools::mapping::{
+    RealMapping,
+    DifferentiableRealMapping
+};
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 use alg_tools::euclidean::Euclidean;
 use alg_tools::lingrid::lingrid;
@@ -48,6 +51,11 @@
     pointsource_fb_reg,
     pointsource_fista_reg,
 };
+use crate::radon_fb::{
+    RadonFBConfig,
+    pointsource_radon_fb_reg,
+    pointsource_radon_fista_reg,
+};
 use crate::sliding_fb::{
     SlidingFBConfig,
     pointsource_sliding_fb_reg
@@ -85,6 +93,8 @@
     FISTA(FBConfig<F>),
     FW(FWConfig<F>),
     PDPS(PDPSConfig<F>),
+    RadonFB(RadonFBConfig<F>),
+    RadonFISTA(RadonFBConfig<F>),
     SlidingFB(SlidingFBConfig<F>),
 }
 
@@ -114,19 +124,19 @@
         match self {
             FB(fb) => FB(FBConfig {
                 τ0 : cli.tau0.unwrap_or(fb.τ0),
-                insertion : override_fb_generic(fb.insertion),
+                generic : override_fb_generic(fb.generic),
                 .. fb
             }),
             FISTA(fb) => FISTA(FBConfig {
                 τ0 : cli.tau0.unwrap_or(fb.τ0),
-                insertion : override_fb_generic(fb.insertion),
+                generic : override_fb_generic(fb.generic),
                 .. fb
             }),
             PDPS(pdps) => PDPS(PDPSConfig {
                 τ0 : cli.tau0.unwrap_or(pdps.τ0),
                 σ0 : cli.sigma0.unwrap_or(pdps.σ0),
                 acceleration : cli.acceleration.unwrap_or(pdps.acceleration),
-                insertion : override_fb_generic(pdps.insertion),
+                generic : override_fb_generic(pdps.generic),
                 .. pdps
             }),
             FW(fw) => FW(FWConfig {
@@ -134,8 +144,21 @@
                 tolerance : cli.tolerance.as_ref().map(unpack_tolerance).unwrap_or(fw.tolerance),
                 .. fw
             }),
+            RadonFB(fb) => RadonFB(RadonFBConfig {
+                τ0 : cli.tau0.unwrap_or(fb.τ0),
+                insertion : override_fb_generic(fb.insertion),
+                .. fb
+            }),
+            RadonFISTA(fb) => RadonFISTA(RadonFBConfig {
+                τ0 : cli.tau0.unwrap_or(fb.τ0),
+                insertion : override_fb_generic(fb.insertion),
+                .. fb
+            }),
             SlidingFB(sfb) => SlidingFB(SlidingFBConfig {
                 τ0 : cli.tau0.unwrap_or(sfb.τ0),
+                θ0 : cli.theta0.unwrap_or(sfb.θ0),
+                transport_tolerance_ω: cli.transport_tolerance_omega.unwrap_or(sfb.transport_tolerance_ω),
+                transport_tolerance_dv: cli.transport_tolerance_dv.unwrap_or(sfb.transport_tolerance_dv),
                 insertion : override_fb_generic(sfb.insertion),
                 .. sfb
             }),
@@ -169,6 +192,12 @@
     /// The μPDPS primal-dual proximal splitting method
     #[clap(name = "pdps")]
     PDPS,
+    /// The RadonFB forward-backward method
+    #[clap(name = "radon_fb")]
+    RadonFB,
+    /// The RadonFISTA inertial forward-backward method
+    #[clap(name = "radon_fista")]
+    RadonFISTA,
     /// The Sliding FB method
     #[clap(name = "sliding_fb", alias = "sfb")]
     SlidingFB,
@@ -187,6 +216,8 @@
                 .. Default::default()
             }),
             PDPS => AlgorithmConfig::PDPS(Default::default()),
+            RadonFB => AlgorithmConfig::RadonFB(Default::default()),
+            RadonFISTA => AlgorithmConfig::RadonFISTA(Default::default()),
             SlidingFB => AlgorithmConfig::SlidingFB(Default::default()),
         }
     }
@@ -364,13 +395,13 @@
                          // TODO: very weird that rust only compiles with Differentiable
                          // instead of the above one on references, which is required by
                          // pointsource_sliding_fb_reg.
-                         + Differentiable<Loc<F, N>, Output = Loc<F, N>>
-                         + Lipschitz<L2>,
+                         + DifferentiableRealMapping<F, N>
+                         + Lipschitz<L2, FloatType=F>,
       // <DefaultSG<F, S, P, N> as ForwardModel<Loc<F, N>, F>::PreadjointCodomain : for<'b> Differentiable<&'b Loc<F, N>, Output = Loc<F, N>>,
       AutoConvolution<P> : BoundedBy<F, K>,
-      K : SimpleConvolutionKernel<F, N> + LocalAnalysis<F, Bounds<F>, N>
+      K : SimpleConvolutionKernel<F, N>
+          + LocalAnalysis<F, Bounds<F>, N>
           + Copy + Serialize + std::fmt::Debug,
-          //+ Differentiable<Loc<F, N>, Output = Loc<F, N>>, // TODO: shouldn't need to assume differentiability
       Cube<F, N>: P2Minimise<Loc<F, N>, F> + SetOrd,
       PlotLookup : Plotting<N>,
       DefaultBT<F, N> : SensorGridBT<F, S, P, N, Depth=DynamicDepth> + BTSearch<F, N>,
@@ -569,6 +600,50 @@
                         }
                     }
                 },
+                AlgorithmConfig::RadonFB(ref algconfig) => {
+                    match (regularisation, dataterm) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
+                            running();
+                            pointsource_radon_fb_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), algconfig,
+                                iterator, plotter
+                            )
+                        },
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
+                            running();
+                            pointsource_radon_fb_reg(
+                                &opA, &b, RadonRegTerm(α), algconfig,
+                                iterator, plotter
+                            )
+                        },
+                        _ => {
+                            not_implemented();
+                            continue
+                        }
+                    }
+                },
+                AlgorithmConfig::RadonFISTA(ref algconfig) => {
+                    match (regularisation, dataterm) {
+                        (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
+                            running();
+                            pointsource_radon_fista_reg(
+                                &opA, &b, NonnegRadonRegTerm(α), algconfig,
+                                iterator, plotter
+                            )
+                        },
+                        (Regularisation::Radon(α), DataTerm::L2Squared) => {
+                            running();
+                            pointsource_radon_fista_reg(
+                                &opA, &b, RadonRegTerm(α), algconfig,
+                                iterator, plotter
+                            )
+                        },
+                        _ => {
+                            not_implemented();
+                            continue
+                        }
+                    }
+                },
                 AlgorithmConfig::SlidingFB(ref algconfig) => {
                     match (regularisation, dataterm) {
                         (Regularisation::NonnegRadon(α), DataTerm::L2Squared) => {
@@ -685,11 +760,12 @@
       Sensor : RealMapping<F, N> + Support<F, N> + Clone,
       Spread : RealMapping<F, N> + Support<F, N> + Clone,
       Kernel : RealMapping<F, N> + Support<F, N>,
-      Convolution<Sensor, Spread> : RealMapping<F, N> + Support<F, N>,
+      Convolution<Sensor, Spread> : DifferentiableRealMapping<F, N> + Support<F, N>,
+      //Differential<Loc<F, N>, Convolution<Sensor, Spread>> : RealVectorField<F, N, N>,
       𝒟 : DiscreteMeasureOp<Loc<F, N>, F>,
       𝒟::Codomain : RealMapping<F, N>,
       A : ForwardModel<Loc<F, N>, F>,
-      A::PreadjointCodomain : RealMapping<F, N> + Bounded<F>,
+      A::PreadjointCodomain : DifferentiableRealMapping<F, N> + Bounded<F>,
       PlotLookup : Plotting<N>,
       Cube<F, N> : SetOrd {
 
@@ -706,7 +782,7 @@
     PlotLookup::plot_into_file(sensor, plotgrid, pfx("sensor"), "sensor".to_string());
     PlotLookup::plot_into_file(kernel, plotgrid, pfx("kernel"), "kernel".to_string());
     PlotLookup::plot_into_file(spread, plotgrid, pfx("spread"), "spread".to_string());
-    PlotLookup::plot_into_file(&base, plotgrid, pfx("base_sensor"), "base_sensor".to_string());
+    PlotLookup::plot_into_file_diff(&base, plotgrid, pfx("base_sensor"), "base_sensor".to_string());
 
     let plotgrid2 = lingrid(&domain, &[resolution; N]);
 
@@ -725,6 +801,10 @@
         plotgrid2, None, &μ_hat,
         pfx("omega_b")
     );
+    PlotLookup::plot_into_file_diff(&preadj_b, plotgrid2, pfx("preadj_b"),
+                                    "preadj_b".to_string());
+    PlotLookup::plot_into_file_diff(&preadj_b_hat, plotgrid2, pfx("preadj_b_hat"),
+                                    "preadj_b_hat".to_string());
 
     // Save true solution and observables
     let pfx = |n| format!("{}{}", prefix, n);
--- a/src/sliding_fb.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/sliding_fb.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -8,19 +8,17 @@
 //use colored::Colorize;
 //use nalgebra::{DVector, DMatrix};
 use itertools::izip;
-use std::iter::{Map, Flatten};
+use std::iter::Iterator;
 
 use alg_tools::iterate::{
     AlgIteratorFactory,
     AlgIteratorState
 };
-use alg_tools::euclidean::{
-    Euclidean,
-    Dot
-};
+use alg_tools::euclidean::Euclidean;
 use alg_tools::sets::Cube;
 use alg_tools::loc::Loc;
 use alg_tools::mapping::{Apply, Differentiable};
+use alg_tools::norms::{Norm, L2};
 use alg_tools::bisection_tree::{
     BTFN,
     PreBTFN,
@@ -37,10 +35,7 @@
 use alg_tools::nalgebra_support::ToNalgebraRealField;
 
 use crate::types::*;
-use crate::measures::{
-    DiscreteMeasure,
-    DeltaMeasure,
-};
+use crate::measures::{DeltaMeasure, DiscreteMeasure, Radon};
 use crate::measures::merging::{
     //SpikeMergingMethod,
     SpikeMerging,
@@ -69,15 +64,15 @@
 pub struct SlidingFBConfig<F : Float> {
     /// Step length scaling
     pub τ0 : F,
-    /// Transport smoothness assumption
-    pub ℓ0 : F,
-    /// Inverse of the scaling factor $θ$ of the 2-norm-squared transport cost.
-    /// This means that $τθ$ is the step length for the transport step.
-    pub inverse_transport_scaling : F,
-    /// Factor for deciding transport reduction based on smoothness assumption violation
-    pub minimum_goodness_factor : F,
-    /// Maximum rays to retain in transports from each source.
-    pub maximum_rays : usize,
+    /// Transport step length $θ$ normalised to $(0, 1)$.
+    pub θ0 : F,
+    // /// Maximum transport mass scaling.
+    // /// The maximum transported mass is this factor times $\norm{b}^2/(2α)$.
+    // pub max_transport_scale : F,
+    /// Transport tolerance wrt. ω
+    pub transport_tolerance_ω : F,
+    /// Transport tolerance wrt. ∇v
+    pub transport_tolerance_dv : F,
     /// Generic parameters
     pub insertion : FBGenericConfig<F>,
 }
@@ -87,129 +82,32 @@
     fn default() -> Self {
         SlidingFBConfig {
             τ0 : 0.99,
-            ℓ0 : 1.5,
-            inverse_transport_scaling : 1.0,
-            minimum_goodness_factor : 1.0, // TODO: totally arbitrary choice,
-                                           // should be scaled by problem data?
-            maximum_rays : 10,
+            θ0 : 0.99,
+            //max_transport_scale : 10.0,
+            transport_tolerance_ω : 1.0, // TODO: no idea what this should be
+            transport_tolerance_dv : 1.0, // TODO: no idea what this should be
             insertion : Default::default()
         }
     }
 }
 
-/// A transport ray (including various additional computational information).
-#[derive(Clone, Debug)]
-pub struct Ray<Domain, F : Num> {
-    /// The destination of the ray, and the mass. The source is indicated in a [`RaySet`].
-    δ : DeltaMeasure<Domain, F>,
-    /// Goodness of the data term for the aray: $v(z)-v(y)-⟨∇v(x), z-y⟩ + ℓ‖z-y‖^2$.
-    goodness : F,
-    /// Goodness of the regularisation term for the ray: $w(z)-w(y)$.
-    /// Initially zero until $w$ can be constructed.
-    reg_goodness : F,
-    /// Indicates that this ray also forms a component in γ^{k+1} with the mass `to_return`.
-    to_return : F,
-}
-
-/// A set of transport rays with the same source point.
-#[derive(Clone, Debug)]
-pub struct RaySet<Domain, F : Num> {
-    /// Source of every ray in thset
-    source : Domain,
-    /// Mass of the diagonal ray, with destination the same as the source.
-    diagonal: F,
-    /// Goodness of the data term for the diagonal ray with $z=x$:
-    /// $v(x)-v(y)-⟨∇v(x), x-y⟩ + ℓ‖x-y‖^2$.
-    diagonal_goodness : F,
-    /// Goodness of the data term for the diagonal ray with $z=x$: $w(x)-w(y)$.
-    diagonal_reg_goodness : F,
-    /// The non-diagonal rays.
-    rays : Vec<Ray<Domain, F>>,
-}
-
+/// Scale each |γ|_i ≠ 0 by q_i=q̄/g(γ_i)
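+///
+/// For instance (illustrative), in the a priori transport adaptation below this is called
+/// with q̄ = t / sum_abs_γinit and g(δ) = ‖∇v(δ.x)‖; a spike δ with g(δ)·δ.α > 0 then has
+/// its mass multiplied by q̄/g(δ), while zero-mass spikes are left unchanged.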
 #[replace_float_literals(F::cast_from(literal))]
-impl<Domain, F : Float> RaySet<Domain, F> {
-    fn non_diagonal_mass(&self) -> F {
-        self.rays
-            .iter()
-            .map(|Ray{ δ : DeltaMeasure{ α, .. }, .. }| *α)
-            .sum()
-    }
-
-    fn total_mass(&self) -> F {
-        self.non_diagonal_mass() + self.diagonal
-    }
-
-    fn targets<'a>(&'a self)
-    -> Map<
-        std::slice::Iter<'a, Ray<Domain, F>>,
-        fn(&'a Ray<Domain, F>) -> &'a DeltaMeasure<Domain, F>
-    > {
-        fn get_δ<'b, Domain, F : Float>(Ray{ δ, .. }: &'b Ray<Domain, F>)
-        -> &'b DeltaMeasure<Domain, F> {
-            δ
+fn scale_down<'a, I, F, G, const N : usize>(
+    iter : I,
+    q̄ : F,
+    mut g : G
+) where F : Float,
+        I : Iterator<Item = &'a mut DeltaMeasure<Loc<F,N>, F>>,
+        G : FnMut(&DeltaMeasure<Loc<F,N>, F>) -> F {
+    iter.for_each(|δ| {
+        if δ.α != 0.0 {
+            let b = g(δ);
+            if b * δ.α > 0.0 {
+                δ.α *= q̄/b;
+            }
         }
-        self.rays
-            .iter()
-            .map(get_δ)
-    }
-
-    // fn non_diagonal_goodness(&self) -> F {
-    //     self.rays
-    //         .iter()
-    //         .map(|&Ray{ δ : DeltaMeasure{ α, .. }, goodness, reg_goodness, .. }| {
-    //             α * (goodness + reg_goodness)
-    //         })
-    //         .sum()
-    // }
-
-    // fn total_goodness(&self) -> F {
-    //     self.non_diagonal_goodness() + (self.diagonal_goodness + self.diagonal_reg_goodness)
-    // }
-
-    fn non_diagonal_badness(&self) -> F {
-        self.rays
-            .iter()
-            .map(|&Ray{ δ : DeltaMeasure{ α, .. }, goodness, reg_goodness, .. }| {
-                0.0.max(- α * (goodness + reg_goodness))
-            })
-            .sum()
-    }
-
-    fn total_badness(&self) -> F {
-        self.non_diagonal_badness()
-        + 0.0.max(- self.diagonal * (self.diagonal_goodness + self.diagonal_reg_goodness))
-    }
-
-    fn total_return(&self) -> F {
-        self.rays
-            .iter()
-            .map(|&Ray{ to_return, .. }| to_return)
-            .sum()
-    }
-}
-
-#[replace_float_literals(F::cast_from(literal))]
-impl<Domain : Clone, F : Num> RaySet<Domain, F> {
-    fn return_targets<'a>(&'a self)
-    -> Flatten<Map<
-        std::slice::Iter<'a, Ray<Domain, F>>,
-        fn(&'a Ray<Domain, F>) -> Option<DeltaMeasure<Domain, F>>
-    >> {
-        fn get_return<'b, Domain : Clone, F : Num>(ray: &'b Ray<Domain, F>)
-        -> Option<DeltaMeasure<Domain, F>> {
-            (ray.to_return != 0.0).then_some(
-                DeltaMeasure{x : ray.δ.x.clone(), α : ray.to_return}
-            )
-        }
-        let tmp : Map<
-            std::slice::Iter<'a, Ray<Domain, F>>,
-            fn(&'a Ray<Domain, F>) -> Option<DeltaMeasure<Domain, F>>
-        > = self.rays
-                .iter()
-                .map(get_return);
-        tmp.flatten()
-    }
+    });
 }
 
 /// Iteratively solve the pointsource localisation problem using sliding forward-backward
@@ -218,7 +116,7 @@
 /// The parametrisation is as for [`pointsource_fb_reg`].
 /// Inertia is currently not supported.
 #[replace_float_literals(F::cast_from(literal))]
-pub fn pointsource_sliding_fb_reg<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, Reg, const N : usize>(
+pub fn pointsource_sliding_fb_reg<'a, F, I, A, GA, 𝒟, BTA, BT𝒟, G𝒟, S, K, Reg, const N : usize>(
     opA : &'a A,
     b : &A::Observable,
     reg : Reg,
@@ -238,8 +136,9 @@
           + Lipschitz<&'a 𝒟, FloatType=F> + TransportLipschitz<L2Squared, FloatType=F>,
       BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
-      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
-      𝒟::Codomain : RealMapping<F, N>,
+      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>,
+                                          Codomain = BTFN<F, G𝒟, BT𝒟, N>>,
+      BT𝒟 : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
       S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>
          + Differentiable<Loc<F, N>, Output=Loc<F,N>>,
       K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
@@ -251,25 +150,25 @@
       Reg : SlidingRegTerm<F, N> {
 
     assert!(sfbconfig.τ0 > 0.0 &&
-            sfbconfig.inverse_transport_scaling > 0.0 &&
-            sfbconfig.ℓ0 > 0.0);
+            sfbconfig.θ0 > 0.0);
 
     // Set up parameters
     let config = &sfbconfig.insertion;
     let op𝒟norm = op𝒟.opnorm_bound();
-    let θ = sfbconfig.inverse_transport_scaling;
-    let τ = sfbconfig.τ0/opA.lipschitz_factor(&op𝒟).unwrap()
-                            .max(opA.transport_lipschitz_factor(L2Squared) * θ);
-    let ℓ = sfbconfig.ℓ0; // TODO: v scaling?
+    //let max_transport = sfbconfig.max_transport_scale
+    //                    * reg.radon_norm_bound(b.norm2_squared() / 2.0);
+    //let tlip = opA.transport_lipschitz_factor(L2Squared) * max_transport;
+    //let ℓ = 0.0;
+    let θ = sfbconfig.θ0; // (ℓ + tlip);
+    let τ = sfbconfig.τ0/opA.lipschitz_factor(&op𝒟).unwrap();
     // We multiply tolerance by τ for FB since our subproblems depending on tolerances are scaled
     // by τ compared to the conditional gradient approach.
     let tolerance = config.tolerance * τ * reg.tolerance_scaling();
     let mut ε = tolerance.initial();
 
     // Initialise iterates
-    let mut μ : DiscreteMeasure<Loc<F, N>, F> = DiscreteMeasure::new();
-    let mut μ_transported_base = DiscreteMeasure::new();
-    let mut γ_hat : Vec<RaySet<Loc<F, N>, F>> = Vec::new();   // γ̂_k and extra info
+    let mut μ = DiscreteMeasure::new();
+    let mut γ1 = DiscreteMeasure::new();
     let mut residual = -b;
     let mut stats = IterInfo::new();
 
@@ -279,272 +178,170 @@
         // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
         // has no significant overhead. For some reason Rust doesn't allow us simply moving
         // the residual and replacing it below before the end of this closure.
-        residual *= -τ;
         let r = std::mem::replace(&mut residual, opA.empty_observable());
-        let minus_τv = opA.preadjoint().apply(r);
-
-        // Save current base point and shift μ to new positions.
-        let μ_base = μ.clone();
-        for δ in μ.iter_spikes_mut() {
-            δ.x += minus_τv.differential(&δ.x) * θ;
-        }
-        let mut μ_transported = μ.clone();
-
-        assert_eq!(μ.len(), γ_hat.len());
+        let v = opA.preadjoint().apply(r);
 
-        // Calculate the goodness λ formed from γ_hat (≈ γ̂_k) and γ^{k+1}, where the latter
-        // transports points x from μ_base to points y in μ as shifted above, or “returns”
-        // them “home” to z given by the rays in γ_hat. Returning is necessary if the rays
-        // are not “good” for the smoothness assumptions, or if γ_hat has more mass than
-        // μ_base.
-        let mut total_goodness = 0.0;     // data term goodness
-        let mut total_reg_goodness = 0.0; // regulariser goodness
-        let minimum_goodness = - ε * sfbconfig.minimum_goodness_factor;
-
-        for (δ, r) in izip!(μ.iter_spikes(), γ_hat.iter_mut()) {
-            // Calculate data term goodness for all rays.
-            let &DeltaMeasure{ x : ref y, α : δ_mass } = δ;
-            let x = &r.source;
-            let mvy = minus_τv.apply(y);
-            let mdvx = minus_τv.differential(x);
-            let mut r_total_mass = 0.0; // Total mass of all rays with source r.source.
-            let mut bad_mass = 0.0;
-            let mut calc_goodness = |goodness : &mut F, reg_goodness : &mut F, α, z : &Loc<F, N>| {
-                *reg_goodness = 0.0; // Initial guess
-                *goodness = mvy - minus_τv.apply(z) + mdvx.dot(&(z-y))
-                            + ℓ * z.dist2_squared(&y);
-                total_goodness += *goodness * α;
-                r_total_mass += α; // TODO: should this include to_return from staging? (Probably not)
-                if *goodness < 0.0 {
-                    bad_mass += α;
-                }
+        // Save current base point and shift μ to new positions. Idea is that
+        //  μ_base(_masses) = μ^k (vector of masses)
+        //  μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1}
+        //  γ1 = π_♯^1γ^{k+1}
+        //  μ = μ^{k+1}
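+        // In particular, the transported base measure μ^k + (π_♯^1 - π_♯^0)γ^{k+1} used
+        // for the residual below splits as γ1 + μ_base_minus_γ0.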
+        let μ_base_masses : Vec<F> = μ.iter_masses().collect();
+        let mut μ_base_minus_γ0 = μ.clone(); // Weights will be set in the loop below.
+        // Construct μ^{k+1} and π_♯^1γ^{k+1} initial candidates
+        let mut sum_norm_dv_times_γinit = 0.0;
+        let mut sum_abs_γinit = 0.0;
+        //let mut sum_norm_dv = 0.0;
+        let γ_prev_len = γ1.len();
+        assert!(μ.len() >= γ_prev_len);
+        γ1.extend(μ[γ_prev_len..].iter().cloned());
+        for (δ, ρ) in izip!(μ.iter_spikes_mut(), γ1.iter_spikes_mut()) {
+            let d_v_x = v.differential(&δ.x);
+            // If old transport has opposing sign, the new transport will be none.
+            ρ.α = if (ρ.α > 0.0 && δ.α < 0.0) || (ρ.α < 0.0 && δ.α > 0.0) {
+                0.0
+            } else {
+                δ.α
             };
-            for ray in r.rays.iter_mut() {
-                calc_goodness(&mut ray.goodness, &mut ray.reg_goodness, ray.δ.α, &ray.δ.x);
-            }
-            calc_goodness(&mut r.diagonal_goodness, &mut r.diagonal_reg_goodness, r.diagonal, x);
-
-            // If the total mass of the ray set is less than that of μ at the same source,
-            // a diagonal component needs to be added to be able to (attempt to) transport
-            // all mass of μ. In the opposite case, we need to construct γ_{k+1} to ‘return’
-            // the the extra mass of γ̂_k to the target z. We return mass from the oldest “bad”
-            // rays in the set.
-            if δ_mass >= r_total_mass {
-                r.diagonal += δ_mass - r_total_mass;
-            } else {
-                let mut reduce_transport = r_total_mass - δ_mass;
-                let mut good_needed = (bad_mass - reduce_transport).max(0.0);
-                // NOTE: reg_goodness is zero at this point, so it is not used in this code.
-                let mut reduce_ray = |goodness, to_return : Option<&mut F>, α : &mut F| {
-                    if reduce_transport > 0.0 {
-                        let return_amount = if goodness < 0.0 {
-                            α.min(reduce_transport)
-                        } else {
-                            let amount = α.min(good_needed);
-                            good_needed -= amount;
-                            amount
-                        };
-
-                        if return_amount > 0.0 {
-                            reduce_transport -= return_amount;
-                            // Adjust total goodness by returned amount
-                            total_goodness -= goodness * return_amount;
-                            to_return.map(|tr| *tr += return_amount);
-                            *α -= return_amount;
-                            *α > 0.0
-                        } else {
-                            true
-                        }
-                    } else {
-                        true
-                    }
-                };
-                r.rays.retain_mut(|ray| {
-                    reduce_ray(ray.goodness, Some(&mut ray.to_return), &mut ray.δ.α)
-                });
-                // A bad diagonal is simply reduced without any 'return'.
-                // It was, after all, just added to match μ, but there is no need to match it.
-                // It's just a heuristic.
-                // TODO: Maybe a bad diagonal should be the first to go.
-                reduce_ray(r.diagonal_goodness, None, &mut r.diagonal);
+            δ.x -= d_v_x * (θ * δ.α.signum()); // This is δ.α.signum() when δ.α ≠ 0.
+            ρ.x = δ.x;
+            let nrm = d_v_x.norm(L2);
+            let a = ρ.α.abs();
+            let v = nrm * a;
+            if v > 0.0 {
+                sum_norm_dv_times_γinit += v;
+                sum_abs_γinit += a;
             }
         }
 
+        // A priori transport adaptation based on bounding ∫ ⟨∇v(x), z-y⟩ dλ(x, y, z).
+        // This is just one option, there are many.
+        let t = ε * sfbconfig.transport_tolerance_dv;
+        if sum_norm_dv_times_γinit > t {
+            // Scale each |γ|_i by q_i=q̄/‖vx‖_i such that ∑_i |γ|_i q_i ‖vx‖_i = t
+            // TODO: store the closure values above?
+            scale_down(γ1.iter_spikes_mut(),
+                       t / sum_abs_γinit,
+                       |δ| v.differential(&δ.x).norm(L2));
+        }
+        //println!("|γ| = {}, |μ| = {}", γ1.norm(crate::measures::Radon), μ.norm(crate::measures::Radon));
+
         // Solve finite-dimensional subproblem several times until the dual variable for the
         // regularisation term conforms to the assumptions made for the transport above.
         let (d, within_tolerances) = 'adapt_transport: loop {
-            // If transport violates goodness requirements, shift it to ‘return’ mass to z,
-            // forcing y = z. Based on the badness of each ray set (sum of bad rays' goodness),
-            // we proportionally distribute the reductions to each ray set, and within each ray
-            // set, prioritise reducing the oldest bad rays' weight.
-            let tg = total_goodness + total_reg_goodness;
-            let adaptation_needed = minimum_goodness - tg;
-            if adaptation_needed > 0.0 {
-                let total_badness = γ_hat.iter().map(|r| r.total_badness()).sum();
-
-                let mut return_ray = |goodness : F,
-                                      reg_goodness : F,
-                                      to_return : Option<&mut F>,
-                                      α : &mut F,
-                                      left_to_return : &mut F| {
-                    let g = goodness + reg_goodness;
-                    assert!(*α >= 0.0 && *left_to_return >= 0.0);
-                    if *left_to_return > 0.0 && g < 0.0 {
-                        let return_amount = (*left_to_return / (-g)).min(*α);
-                        *left_to_return -= (-g) * return_amount;
-                        total_goodness -= goodness * return_amount;
-                        total_reg_goodness -= reg_goodness * return_amount;
-                        to_return.map(|tr| *tr += return_amount);
-                        *α -= return_amount;
-                        *α > 0.0
-                    } else {
-                        true
-                    }
-                };
-                
-                for r in γ_hat.iter_mut() {
-                    let mut left_to_return = adaptation_needed * r.total_badness() / total_badness;
-                    if left_to_return > 0.0 {
-                        for ray in r.rays.iter_mut() {
-                            return_ray(ray.goodness, ray.reg_goodness,
-                                       Some(&mut ray.to_return), &mut ray.δ.α, &mut left_to_return);
-                        }
-                        return_ray(r.diagonal_goodness, r.diagonal_reg_goodness,
-                                   None, &mut r.diagonal, &mut left_to_return);
-                    }
-                }
+            // Update weights for μ_base_minus_γ0 = μ^k - π_♯^0γ^{k+1}
+            for (δ_γ1, δ_μ_base_minus_γ0, &α_μ_base) in izip!(γ1.iter_spikes(),
+                                                              μ_base_minus_γ0.iter_spikes_mut(),
+                                                              μ_base_masses.iter()) {
+                δ_μ_base_minus_γ0.set_mass(α_μ_base - δ_γ1.get_mass());
             }
 
-            // Construct μ_k + (π_#^1-π_#^0)γ_{k+1}.
-            // This can be broken down into
-            //
-            // μ_transported_base = [μ - π_#^0 (γ_shift + γ_return)] + π_#^1 γ_return, and
-            // μ_transported = π_#^1 γ_shift
-            //
-            // where γ_shift is our “true” γ_{k+1}, and γ_return is the return compoennt.
-            // The former can be constructed from δ.x and δ_new.x for δ in μ_base and δ_new in μ
-            // (which has already been shifted), and the mass stored in a γ_hat ray's δ measure
-            // The latter can be constructed from γ_hat rays' source and destination with the
-            // to_return mass.
-            //
-            // Note that μ_transported is constructed to have the same spike locations as μ, but
-            // to have same length as μ_base. This loop does not iterate over the spikes of μ
-            // (and corresponding transports of γ_hat) that have been newly     added in the current
-            // 'adapt_transport loop.
-            for (δ, δ_transported, r) in izip!(μ_base.iter_spikes(),
-                                               μ_transported.iter_spikes_mut(),
-                                               γ_hat.iter()) {
-                let &DeltaMeasure{ref x, α} = δ;
-                debug_assert_eq!(*x, r.source);
-                let shifted_mass = r.total_mass();
-                let ret_mass = r.total_return();
-                // μ - π_#^0 (γ_shift + γ_return)
-                μ_transported_base += DeltaMeasure { x : *x, α : α - shifted_mass - ret_mass };
-                // π_#^1 γ_return
-                μ_transported_base.extend(r.return_targets());
-                // π_#^1 γ_shift
-                δ_transported.set_mass(shifted_mass);
-            }
             // Calculate transported_minus_τv̆ = -τA_*(A[γ1 + μ_base_minus_γ0]-b)
-            let transported_residual = calculate_residual2(&μ_transported,
-                                                           &μ_transported_base,
-                                                           opA, b);
-            let transported_minus_τv = opA.preadjoint()
-                                          .apply(transported_residual);
+            let residual_μ̆ = calculate_residual2(&γ1, &μ_base_minus_γ0, opA, b);
+            let transported_minus_τv̆ = opA.preadjoint().apply(residual_μ̆ * (-τ));
 
             // Construct μ^{k+1} by solving finite-dimensional subproblems and insert new spikes.
-            let (mut d, within_tolerances) = insert_and_reweigh(
-                &mut μ, &transported_minus_τv, &μ_transported, Some(&μ_transported_base),
+            let (d, within_tolerances) = insert_and_reweigh(
+                &mut μ, &transported_minus_τv̆, &γ1, Some(&μ_base_minus_γ0),
                 op𝒟, op𝒟norm,
                 τ, ε,
-                config, &reg, state, &mut stats
+                config,
+                &reg, state, &mut stats,
             );
 
-            // We have  d = ω0 - τv - 𝒟μ = -𝒟(μ - μ^k) - τv; more precisely
-            //          d = minus_τv + op𝒟.preapply(μ_diff(μ, μ_transported, config));
-            // We “essentially” assume that the subdifferential w of the regularisation term
-            // satisfies w'(y)=0, so for a “goodness” estimate τ[w(y)-w(z)-w'(y)(z-y)]
-            // that incorporates the assumption, we need to calculate τ[w(z) - w(y)] for
-            // some w in the subdifferential of the regularisation term, such that
-            // -ε ≤ τw - d ≤ ε. This is done by [`RegTerm::goodness`].
-            for r in γ_hat.iter_mut() {
-                for ray in r.rays.iter_mut() {
-                    ray.reg_goodness = reg.goodness(&mut d, &μ, &r.source, &ray.δ.x, τ, ε, config);
-                    total_reg_goodness += ray.reg_goodness * ray.δ.α;
+            // A posteriori transport adaptation based on bounding (1/τ)∫ ω(z) - ω(y) dλ(x, y, z).
+            let all_ok = if false { // Basic check
+                // If π_♯^1γ^{k+1} = γ1 has non-zero mass at some point y, but μ = μ^{k+1} does not,
+                // then the ansatz ∇w̃_x(y) = w^{k+1}(y) may not be satisfied. So set the mass of γ1
+                // at that point to zero, and retry.
+                let mut all_ok = true;
+                for (α_μ, α_γ1) in izip!(μ.iter_masses(), γ1.iter_masses_mut()) {
+                    if α_μ == 0.0 && *α_γ1 != 0.0 {
+                        all_ok = false;
+                        *α_γ1 = 0.0;
+                    }
                 }
-            }
+                all_ok
+            } else {
+                // TODO: Could maybe optimise, as this is also formed in insert_and_reweigh above.
+                let mut minus_ω = op𝒟.apply(γ1.sub_matching(&μ) + &μ_base_minus_γ0);
+
+                // let vpos = γ1.iter_spikes()
+                //              .filter(|δ| δ.α > 0.0)
+                //              .map(|δ| minus_ω.apply(&δ.x))
+                //              .reduce(F::max)
+                //              .and_then(|threshold| {
+                //                 minus_ω.minimise_below(threshold,
+                //                                         ε * config.refinement.tolerance_mult,
+                //                                         config.refinement.max_steps)
+                //                        .map(|(_z, minus_ω_z)| minus_ω_z)
+                //              });
 
-            // If update of regularisation term goodness didn't invalidate minimum goodness
-            // requirements, we have found our step. Otherwise we need to keep reducing
-            // transport by repeating the loop.
-            if total_goodness + total_reg_goodness >= minimum_goodness {
+                // let vneg = γ1.iter_spikes()
+                //              .filter(|δ| δ.α < 0.0)
+                //              .map(|δ| minus_ω.apply(&δ.x))
+                //              .reduce(F::min)
+                //              .and_then(|threshold| {
+                //                 minus_ω.maximise_above(threshold,
+                //                                         ε * config.refinement.tolerance_mult,
+                //                                         config.refinement.max_steps)
+                //                        .map(|(_z, minus_ω_z)| minus_ω_z)
+                //              });
+                let (_, vpos) = minus_ω.minimise(ε * config.refinement.tolerance_mult,
+                                                 config.refinement.max_steps);
+                let (_, vneg) = minus_ω.maximise(ε * config.refinement.tolerance_mult,
+                                                 config.refinement.max_steps);
+            
+                let t = τ * ε * sfbconfig.transport_tolerance_ω;
+                let val = |δ : &DeltaMeasure<Loc<F, N>, F>| {
+                    δ.α * (minus_ω.apply(&δ.x) - if δ.α >= 0.0 { vpos } else { vneg })
+                    // match if δ.α >= 0.0 { vpos } else { vneg } {
+                    //     None => 0.0,
+                    //     Some(v) => δ.α * (minus_ω.apply(&δ.x) - v)
+                    // }
+                };
+                // Calculate positive/bad (rp) values under the integral.
+                // Also store sum of masses for the positive entries.
+                let (rp, w) = γ1.iter_spikes().fold((0.0, 0.0), |(p, w), δ| {
+                    let v = val(δ);
+                    if v <= 0.0 { (p, w) } else { (p + v, w + δ.α.abs()) }
+                });
+
+                if rp > t {
+                    // TODO: store v above?
+                    scale_down(γ1.iter_spikes_mut(), t / w, val);
+                    false
+                } else {
+                    true
+                }
+            };
+
+            if all_ok {
                 break 'adapt_transport (d, within_tolerances)
             }
         };
 
-        // Update γ_hat to new location
-        for (δ_new, r) in izip!(μ.iter_spikes(), γ_hat.iter_mut()) {
-            // Prune rays that only had a return component, as the return component becomes
-            // a diagonal in γ̂^{k+1}.
-            r.rays.retain(|ray| ray.δ.α != 0.0);
-            // Otherwise zero out the return component, or stage rays for pruning
-            // to keep memory and computational demands reasonable.
-            let n_rays = r.rays.len();
-            for (ray, ir) in izip!(r.rays.iter_mut(), (0..n_rays).rev()) {
-                if ir >= sfbconfig.maximum_rays {
-                    // Only keep sfbconfig.maximum_rays - 1 previous rays, staging others for
-                    // pruning in next step.
-                    ray.to_return = ray.δ.α;
-                    ray.δ.α = 0.0;
-                } else {
-                    ray.to_return = 0.0;
-                }
-                ray.goodness = 0.0; // TODO: probably not needed
-                ray.reg_goodness = 0.0;
-            }
-            // Add a new ray for the currently diagonal component
-            if r.diagonal > 0.0 {
-                r.rays.push(Ray{
-                    δ : DeltaMeasure{x : r.source, α : r.diagonal},
-                    goodness : 0.0,
-                    reg_goodness : 0.0,
-                    to_return : 0.0,
-                });
-                // TODO: Maybe this does not need to be done here, and is sufficent to to do where
-                // the goodness is calculated.
-                r.diagonal = 0.0;
-            }
-            r.diagonal_goodness = 0.0;
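+        // Accumulate statistics: the untransported fraction compares ‖μ^k - π_♯^0γ^{k+1}‖
+        // to the total mass of μ^k, and the transport error compares the marginal mismatch
+        // ∑_i |μ^{k+1}_i - (π_♯^1γ^{k+1})_i| to ‖π_♯^1γ^{k+1}‖.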
+        stats.untransported_fraction = Some({
+            assert_eq!(μ_base_masses.len(), γ1.len());
+            let (a, b) = stats.untransported_fraction.unwrap_or((0.0, 0.0));
+            let source = μ_base_masses.iter().map(|v| v.abs()).sum();
+            (a + μ_base_minus_γ0.norm(Radon), b + source)
+        });
+        stats.transport_error = Some({
+            assert_eq!(μ_base_masses.len(), γ1.len());
+            let (a, b) = stats.transport_error.unwrap_or((0.0, 0.0));
+            let err = izip!(μ.iter_masses(), γ1.iter_masses()).map(|(v,w)| (v-w).abs()).sum();
+            (a + err, b + γ1.norm(Radon))
+        });
 
-            // Shift source
-            r.source = δ_new.x;
-        }
-        // Extend to new spikes
-        γ_hat.extend(μ[γ_hat.len()..].iter().map(|δ_new| {
-            RaySet{
-                source : δ_new.x,
-                rays : [].into(),
-                diagonal : 0.0,
-                diagonal_goodness : 0.0,
-                diagonal_reg_goodness : 0.0
-            }
-        }));
-
-        // Prune spikes with zero weight. This also moves the marginal differences of corresponding
-        // transports from γ_hat to γ_pruned_marginal_diff.
-        // TODO: optimise standard prune with swap_remove.
-        μ_transported_base.clear();
-        let mut i = 0;
-        assert_eq!(μ.len(), γ_hat.len());
-        while i < μ.len() {
-            if μ[i].α == F::ZERO {
-                μ.swap_remove(i);
-                let r = γ_hat.swap_remove(i);
-                μ_transported_base.extend(r.targets().cloned());
-                μ_transported_base -= DeltaMeasure{ α : r.non_diagonal_mass(), x : r.source };
-            } else {
-                i += 1;
-            }
+        // Prune spikes with zero weight. To maintain correct ordering between μ and γ1, also the
+        // latter needs to be pruned when μ is.
+        // TODO: This could do with a two-vector Vec::retain to avoid copies.
+        let μ_new = DiscreteMeasure::from_iter(μ.iter_spikes().filter(|δ| δ.α != F::ZERO).cloned());
+        if μ_new.len() != μ.len() {
+            let mut μ_iter = μ.iter_spikes();
+            γ1.prune_by(|_| μ_iter.next().unwrap().α != F::ZERO);
+            μ = μ_new;
         }
 
         // TODO: how to merge?
@@ -562,7 +359,7 @@
             // Plot if so requested
             plotter.plot_spikes(
                 format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
-                "start".to_string(), Some(&minus_τv),
+                "start".to_string(), None::<&A::PreadjointCodomain>, // TODO: Should be Some(&((-τ) * v)), but not implemented
                 reg.target_bounds(τ, ε_prev), &μ,
             );
             // Calculate mean inner iterations and reset relevant counters.
--- a/src/subproblem.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/subproblem.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -14,6 +14,9 @@
 
 pub mod nonneg;
 pub mod unconstrained;
+pub mod l1squared_unconstrained;
+pub mod l1squared_nonneg;
+
 
 /// Method for solving finite-dimensional subproblems
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
@@ -23,6 +26,10 @@
     FB,
     /// Semismooth Newton
     SSN,
+    /// PDPS
+    PDPS,
+    /// Problem-specific choice (in practice FB or PDPS, whichever is implemented)
+    Default,
 }
 
 /// Settings for the solution of finite-dimensional subproblems
@@ -30,8 +37,10 @@
 pub struct InnerSettings<F : Float> {
     /// Method
     pub method : InnerMethod,
-    /// Proportional step length (∈ [0, 1) for `InnerMethod::FB`).
-    pub τ0 : F,
+    /// Proportional step length ∈ [0, 1) for `InnerMethod::FB`.
+    pub fb_τ0 : F,
+    /// Proportional primal and dual step lengths for `InnerMethod::PDPS`.
+    pub pdps_τσ0 : (F, F),
     /// Fraction of `tolerance` given to inner algorithm
     pub tolerance_mult : F,
     /// Iterator options
@@ -43,7 +52,8 @@
 impl<F : Float> Default for InnerSettings<F> {
     fn default() -> Self {
         InnerSettings {
-            τ0 : 0.99,
+            fb_τ0 : 0.99,
+            pdps_τσ0 : (1.98, 0.5),
             iterator_options : AlgIteratorOptions {
                 // max_iter cannot be very small, as initially FB needs many iterations, although
                 // on later invocations even one or two tends to be enough
@@ -55,7 +65,7 @@
                 quiet : true,
                 .. Default::default()
             },
-            method : InnerMethod::FB,
+            method : InnerMethod::Default,
             tolerance_mult : 0.01,
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/subproblem/l1squared_nonneg.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -0,0 +1,425 @@
+/*!
+Iterative algorithms for solving the finite-dimensional subproblem with constraint.
+*/
+
+use nalgebra::DVector;
+use numeric_literals::replace_float_literals;
+use itertools::izip;
+//use std::iter::zip;
+use std::cmp::Ordering::*;
+
+use alg_tools::iterate::{
+    AlgIteratorFactory,
+    AlgIteratorState,
+};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::norms::{Dist, L1};
+use alg_tools::nanleast::NaNLeast;
+
+use crate::types::*;
+use super::{
+    InnerMethod,
+    InnerSettings
+};
+use super::nonneg::nonneg_soft_thresholding;
+use super::l1squared_unconstrained::l1squared_prox;
+
+/// Return the maximum of `dist` and the distance of the interval `[lb, ub]` to zero.
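+///
+/// For instance (illustrative), `max_interval_dist_to_zero(0.3, -0.1, 0.2)` is `0.3`,
+/// as the interval contains zero, while `max_interval_dist_to_zero(0.3, 0.5, 0.7)` is `0.5`.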
+#[replace_float_literals(F::cast_from(literal))]
+pub(super) fn max_interval_dist_to_zero<F : Float>(dist  : F, lb : F, ub : F) -> F {
+    if lb < 0.0 {
+        if ub > 0.0 {
+            dist
+        } else {
+            dist.max(-ub)
+        }
+    } else /* lb ≥ 0.0 */ {
+        dist.max(lb)
+    }
+}
+
+/// Returns the ∞-norm minimal subdifferential of $x ↦ (β/2)|x-y|_1^2 - g^⊤ x + λ\|x\|₁ +δ_{≥}(x)$ at $x$.
+///
+/// `v` will be modified and cannot be trusted to contain useful values afterwards.
+#[replace_float_literals(F::cast_from(literal))]
+fn min_subdifferential<F : Float + nalgebra::RealField>(
+    y : &DVector<F>,
+    x : &DVector<F>,
+    g : &DVector<F>,
+    λ : F,
+    β : F
+) -> F {
+    let mut val = 0.0;
+    let tmp = β*y.dist(x, L1);
+    for (&g_i, &x_i, y_i) in izip!(g.iter(), x.iter(), y.iter()) {
+        let (mut lb, mut ub) = (-g_i, -g_i);
+        match x_i.partial_cmp(y_i) {
+            Some(Greater) => { lb += tmp; ub += tmp },
+            Some(Less) => { lb -= tmp; ub -= tmp },
+            Some(Equal) => { lb -= tmp; ub += tmp },
+            None => {},
+        }
+        match x_i.partial_cmp(&0.0) {
+            Some(Greater) => { lb += λ; ub += λ },
+            // Less should not happen
+            Some(Less|Equal) => { lb = F::NEG_INFINITY; ub += λ },
+            None => {},
+        };
+        val = max_interval_dist_to_zero(val, lb, ub);
+    }
+    val
+}
+
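+/// Soft-threshold `v` by `λ`, keeping the result at or above the lower bound `b`.
+///
+/// For `b ≥ 0` this reduces to `b.max(soft_thresholding(v, λ))`; for `b < 0` the
+/// thresholding target is zero (see the match arms below).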
+#[replace_float_literals(F::cast_from(literal))]
+fn lbd_soft_thresholding<F : Float>(v : F, λ : F, b : F) -> F
+{
+    match (b >= 0.0, v >= b) {
+        (true, false)  => b,
+        (true, true)   => b.max(v - λ),         // soft-to-b from above
+        (false, true)  => super::unconstrained::soft_thresholding(v, λ),
+        (false, false)  => 0.0.min(b.max(v - λ)), // soft-to-0 with lower bound
+    }
+}
+
+/// Calculate $prox_f(x)$ for $f(x)=\frac{β}{2}\norm{x-y}_1^2 + δ_{≥0}(x)$.
+///
+/// To derive an algorithm for this, we can use
+/// $prox_f(x) = y + prox_{f_0}(x - y)$ for
+/// $f_0(z)=\frac{β}{2}\norm{z}_1^2 + δ_{≥-y}(z)$.
+/// Now, the optimality conditions for $w = prox_{f_0}(x)$ are
+/// $$\tag{*}
+///     x ∈ w + β\norm{w}_1\sign w + N_{≥ -y}(w).
+/// $$
+/// If we know $\norm{w}_1$, then this is easily solved by lower-bounded soft-thresholding.
+/// We find this by sorting the elements by the distance to the 'locked' lower-bounded
+/// soft-thresholding target ($0$ or $-y_i$).
+/// Then we loop over this sorted vector, increasing our estimate of $\norm{w}_1$ as we decide
+/// that the soft-thresholding parameter `β\norm{w}_1` has to be such that the passed elements
+/// will reach their locked value (after which they cannot change anymore, for a larger
+/// soft-thresholding parameter. This has to be slightly more fine-grained for account
+/// for the case that $-y_i<0$ and $x_i < -y_i$.
+///
+/// Indeed, denoting by x' and w' the subset of elements such that w_i ≠ 0 and w_i > -y_i,
+/// we can calculate, by applying $⟨\cdot, \sign w'⟩$ to the corresponding lines of (*), that
+/// $$
+///     \norm{x'} = \norm{w'} + β \norm{w}_1 m.
+/// $$
+/// Having a value for $t = \norm{w}-\norm{w'}$, we can then calculate
+/// $$
+///     \norm{x'} - t = (1+β m)\norm{w}_1,
+/// $$
+/// from where we can calculate the soft-thresholding parameter $λ=β\norm{w}_1$.
+/// Since we do not actually know the unlocked elements, but just loop over all the possibilities
+/// for them, we have to check that $λ$ is above the current lower bound for this parameter
+/// (`shift` in the code), and below a value that would cause changes in the locked set
+/// (`max_shift` in the code).
+#[replace_float_literals(F::cast_from(literal))]
+pub(super) fn l1squared_nonneg_prox<F :Float + nalgebra::RealField>(
+    sorted : &mut Vec<(F, F, F, Option<(F, F)>)>,
+    x : &mut DVector<F>,
+    y : &DVector<F>,
+    β : F
+) {
+    // nalgebra double-definition bullshit workaround
+    //let max = alg_tools::NumTraitsFloat::max;
+    let abs = alg_tools::NumTraitsFloat::abs;
+
+    *x -= y;
+
+    for (az_x_i, &x_i, &y_i) in izip!(sorted.iter_mut(), x.iter(), y.iter()) {
+        // The first component of each az_x_i contains the distance of x_i to the
+        // soft-thresholding limit. If it is negative, it is always reached.
+        // The second component contains the absolute value of the result for that component
+        // w_i of the solution, if the soft-thresholding limit is reached.
+        // This is stored here due to the sorting, although could otherwise be computed directly.
+        // Likewise the third component contains the absolute value of x_i.
+        // The fourth component contains an initial lower bound.
+        let a_i = abs(x_i);
+        let b = -y_i;
+        *az_x_i = match (b >= 0.0, x_i >= b) {
+            (true, false)  => (x_i-b, b, a_i, None),  // w_i=b, so sorting element negative!
+            (true, true)   => (x_i-b, b, a_i, None),  // soft-to-b from above
+            (false, true)  => (a_i, 0.0, a_i, None),  // soft-to-0
+            (false, false) => (a_i, 0.0, a_i, Some((b, b-x_i))),   // soft-to-0 with initial limit
+        };
+    }
+    sorted.as_mut_slice()
+          .sort_unstable_by(|(a, _, _, _), (b, _, _, _)| NaNLeast(*a).cmp(&NaNLeast(*b)));
+
+    let mut nwlow = 0.0;
+    let mut shift = 0.0;
+    // This main loop is over different combinations of elements of the solution locked
+    // to the soft-thresholding lower bound (`0` or `-y_i`), in the sorted order of locking.
+    for (i, az_x_i) in izip!(0.., sorted.iter()) {
+        // This 'attempt loop is over different combinations of elements locked to the
+        // lower bound (`-y_i ≤ 0`). It calculates `max_shift` as the maximum shift that
+        // can be done until the locking would change (or become non-strictly-complementary).
+        // If the main rule (*) gives an estimate of `λ` that stays below `max_shift`, it is
+        // accepted. Otherwise `shift` is updated to `max_shift`, and we attempt again,
+        // with the non-locking set that participates in the calculation of `λ` then including
+        // the elements that are no longer locked to the lower bound.
+        'attempt: loop {
+            let mut nwthis = 0.0; // contribution to ‖w‖ from elements with locking
+                                  // soft-thresholding parameter = `shift`
+            let mut nxmore = 0.0; // ‖x'‖ for those elements thought not to be locked to
+                                  // either the soft-thresholding limit or the lower bound
+            let mut nwlbd = 0.0;  // contribution to ‖w‖ from those elements locked to their
+                                  // lower bound
+            let mut m = 0;
+            let mut max_shift = F::INFINITY; // maximal shift for which our estimate of the set of
+                                             // unlocked elements is valid.
+            let mut max_shift_from_lbd = false; // Whether max_shift comes from the next element
+                                                // or from a lower bound being reached.
+            for az_x_j in sorted[i as usize..].iter() {
+                if az_x_j.0 <= shift {
+                    nwthis += az_x_j.1;
+                } else {
+                    match az_x_j.3 {
+                        Some((l, s)) if shift < s => {
+                            if max_shift > s {
+                                max_shift_from_lbd = true;
+                                max_shift = s;
+                            }
+                            nwlbd += -l;
+                        },
+                        _ => {
+                            nxmore += az_x_j.2;
+                            if m == 0 && max_shift > az_x_j.0 {
+                                max_shift = az_x_j.0;
+                                max_shift_from_lbd = false;
+                            }
+                            m += 1;
+                        }
+                    }
+                }
+            }
+
+            // We need ‖x'‖ = ‖w'‖ + β m ‖w‖, i.e. ‖x'‖ - (‖w‖-‖w'‖)= (1 + β m)‖w‖.
+            let tmp = β*(nxmore - (nwlow + nwthis + nwlbd))/(1.0 + β*F::cast_from(m));
+            if tmp > max_shift {
+                if max_shift_from_lbd {
+                    shift = max_shift;
+                    continue 'attempt;
+                } else {
+                    break 'attempt
+                }
+            } else if tmp < shift {
+                // TODO: this should probably be an assert!(false)
+                break 'attempt;
+            } else {
+                // success
+                x.zip_apply(y, |x_i, y_i| *x_i = y_i + lbd_soft_thresholding(*x_i, tmp, -y_i));
+                return
+            }
+        }
+        shift = az_x_i.0;
+        nwlow += az_x_i.1;
+    }
+    // TODO: this fallback has not been verified to be correct
+    x.zip_apply(y, |x_i, y_i| *x_i = y_i + lbd_soft_thresholding(*x_i, shift, -y_i));
+}
+
+/// Proximal point method implementation of [`l1squared_nonneg`].
+/// For detailed documentation of the inputs and outputs, refer to there.
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_nonneg_pp<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    θ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let mut τ = τ_.to_nalgebra_mixed();
+    let θ = θ_.to_nalgebra_mixed();
+    let mut tmp = std::iter::repeat((0.0, 0.0, 0.0, None)).take(x.len()).collect();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = prox_{(τβ/2)|.-y|_1^2+δ_{≥0}}(x^k - τ(λ𝟙-g))
+        x.apply(|x_i| *x_i -= τ*λ);
+        x.axpy(τ, g, 1.0);
+        l1squared_nonneg_prox(&mut tmp, x, y, τ*β);
+        
+        iters += 1;
+        // This gives O(1/N^2) rates due to monotonicity of function values.
+        // Higher acceleration does not seem to be numerically stable.
+        τ += θ;
+
+        // This gives O(1/N^3) rates due to monotonicity of function values.
+        // Higher acceleration does not seem to be numerically stable.
+        //τ += F::cast_from(iters).to_nalgebra_mixed()*θ;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
+
+/// PDPS implementation of [`l1squared_nonneg`].
+/// For detailed documentation of the inputs and outputs, refer to there.
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+/// The parameter `θ` is used to rescale the (identity) operator of the PDPS model.
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_nonneg_pdps<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    σ_ : F,
+    θ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let σ = σ_.to_nalgebra_mixed();
+    let θ = θ_.to_nalgebra_mixed();
+    let mut w = DVector::zeros(x.len());
+    let mut tmp = DVector::zeros(x.len());
+    let mut xprev = x.clone();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = prox_{(τβ/2)|.-y|_1^2}(x^k - τ (w^k - g))
+        x.axpy(-τ*θ, &w, 1.0);
+        x.axpy(τ, g, 1.0);
+        l1squared_prox(&mut tmp, x, y, τ*β);
+        
+        // Dual step: w^{k+1} = proj_{(-∞,λ]}(w^k + σθ(2x^{k+1}-x^k))
+        w.axpy(2.0*σ*θ, x, 1.0);
+        w.axpy(-σ*θ, &xprev, 1.0);
+        w.apply(|w_i| *w_i = w_i.min(λ));
+        xprev.copy_from(x);
+        
+        iters +=1;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
+
+/// Alternative PDPS implementation of [`l1squared_nonneg`].
+/// For detailed documentation of the inputs and outputs, refer to there.
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+/// The parameter `θ` is used to rescale the (identity) operator of the PDPS model.
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_nonneg_pdps_alt<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    σ_ : F,
+    θ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let σ = σ_.to_nalgebra_mixed();
+    let θ = θ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let βdivθ = β / θ;
+    let σθ = σ*θ;
+    let τθ = τ*θ;
+    let mut w = DVector::zeros(x.len());
+    let mut tmp = DVector::zeros(x.len());
+    let mut xprev = x.clone();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = nonnegsoft_τλ(x^k - τ(θ w^k -g))
+        x.axpy(-τθ, &w, 1.0);
+        x.axpy(τ, g, 1.0);
+        x.apply(|x_i| *x_i = nonneg_soft_thresholding(*x_i, τ*λ));
+        
+        // Dual step: w^{k+1} = prox_{σ[(β/2)‖./θ-y‖₁²]^*}(q) for q = w^k + σθ(2x^{k+1}-x^k)
+        //                    = q - σ prox_{(β/(2σ))‖./θ-y‖₁²}(q/σ)
+        //                    = q - (σθ) prox_{(β/(2σθ^2))‖.-y‖₁²}(q/(σθ))
+        //                    = σθ(q/(σθ) - prox_{(β/(2σθ^2))‖.-y‖₁²}(q/(σθ)))
+        w /= σθ;
+        w.axpy(2.0, x, 1.0);
+        w.axpy(-1.0, &xprev, 1.0);
+        xprev.copy_from(&w); // use xprev as temporary variable
+        l1squared_prox(&mut tmp, &mut xprev, y, βdivθ/σθ);
+        w -= &xprev;
+        w *= σθ;
+        xprev.copy_from(x);
+        
+        iters +=1;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
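The rewriting of the dual prox in the comments above compresses two standard identities; spelled
out for readability (an editorial restatement, not code from the patch):
$$
    \mathop{\mathrm{prox}}_{σf^*}(q) = q - σ\mathop{\mathrm{prox}}_{f/σ}(q/σ)
    \quad\text{(Moreau)},
    \qquad
    g = h(\,·\,/θ) \;⟹\; \mathop{\mathrm{prox}}_{g}(z) = θ\,\mathop{\mathrm{prox}}_{h/θ^2}(z/θ).
$$
Applying these to $f = \frac{β}{2}\|\,·\,/θ-y\|_1^2$, so that $f/σ = h(\,·\,/θ)$ with
$h = \frac{β}{2σ}\|\,·\,-y\|_1^2$, gives the final expression, which the code evaluates through
`l1squared_prox` with the parameter $β/(σθ^2)$.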
+
+
+/// This function applies an iterative method for the solution of the problem
+/// <div>$$
+///     \min_{x ∈ ℝ^n} \frac{β}{2} |x-y|_1^2 - g^⊤ x + λ\|x\|₁ + δ_{≥ 0}(x).
+/// $$</div>
+///
+/// This function returns the number of iterations taken.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn l1squared_nonneg<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ : F,
+    β : F,
+    x : &mut DVector<F::MixedType>,
+    inner : &InnerSettings<F>,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    match inner.method {
+        InnerMethod::PDPS | InnerMethod::Default => {
+            let inner_θ = 0.001;
+            // Estimate of ‖K‖ for K=θ\Id.
+            let normest = inner_θ;
+            let (inner_τ, inner_σ) = (inner.pdps_τσ0.0 / normest, inner.pdps_τσ0.1 / normest);
+            l1squared_nonneg_pdps_alt(y, g, λ, β, x, inner_τ, inner_σ, inner_θ, iterator)
+        },
+        InnerMethod::FB /*| InnerMethod::Default*/ => {
+            // The gradient of the linear part x ↦ λ∑x - g^⊤ x is the constant λ𝟙 - g, so its
+            // Lipschitz factor is zero and FB is just a proximal point method with no
+            // constraints on τ. We “accelerate” it by adding the constant θ to τ on each
+            // iteration. Exponential growth does not seem stable.
+            let inner_τ = inner.fb_τ0;
+            let inner_θ = inner_τ;
+            l1squared_nonneg_pp(y, g, λ, β, x, inner_τ, inner_θ, iterator)
+        },
+        _ => unimplemented!("{:?}", inner.method),
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/subproblem/l1squared_unconstrained.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -0,0 +1,195 @@
+/*!
+Iterative algorithms for solving the finite-dimensional ℓ¹-squared subproblem without constraints.
+*/
+
+use nalgebra::DVector;
+use numeric_literals::replace_float_literals;
+use itertools::izip;
+use std::cmp::Ordering::*;
+
+use std::iter::zip;
+use alg_tools::iterate::{
+    AlgIteratorFactory,
+    AlgIteratorState,
+};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::nanleast::NaNLeast;
+use alg_tools::norms::{Dist, L1};
+
+use crate::types::*;
+use super::{
+    InnerMethod,
+    InnerSettings
+};
+use super::unconstrained::soft_thresholding;
+use super::l1squared_nonneg::max_interval_dist_to_zero;
+
+/// Calculate $prox_f(x)$ for $f(x)=\frac{β}{2}\norm{x-y}_1^2$.
+///
+/// To derive an algorithm for this, we can assume that $y=0$, as
+/// $prox_f(x) = y + prox_{f_0}(x - y)$ for $f_0=\frac{β}{2}\norm{x}_1^2$.
+/// Now, the optimality conditions for $w = prox_f(x)$ are
+/// $$\tag{*}
+///     0 ∈ w-x + β\norm{w}_1\sign w.
+/// $$
+/// Clearly then $w = \soft_{β\norm{w}_1}(x)$.
+/// Thus the components of $x$ with smallest absolute value will be zeroed out.
+/// Denoting by $w'$ the non-zero components, and by $x'$ the corresponding components
+/// of $x$, and by $m$ their count, multiplying the corresponding lines of (*) by $\sign x'$,
+/// we obtain
+/// $$
+///     \norm{x'}_1 = (1+βm)\norm{w'}_1.
+/// $$
+/// That is, $\norm{w}_1=\norm{w'}_1=\norm{x'}_1/(1+βm)$.
+/// Thus, sorting $x$ by absolute value and sequentially eliminating the smallest
+/// elements, we can easily calculate what $\norm{w}_1$ should be for that choice, and
+/// then easily calculate $w = \soft_{β\norm{w}_1}(x)$. We just have to verify that
+/// the resulting $w$ has the same norm. There's a shortcut to this, as we work
+/// sequentially: just check that the smallest assumed-nonzero component $i$ satisfies the
+/// condition of soft-thresholding to remain non-zero: $|x_i|>β\norm{x'}_1/(1+βm)$.
+/// Clearly, if this condition fails for $x_i$, it will also fail for all the components
+/// already excluded, while, if it holds, it will hold for all components not excluded.
+#[replace_float_literals(F::cast_from(literal))]
+pub(super) fn l1squared_prox<F : Float + nalgebra::RealField>(
+    sorted_abs : &mut DVector<F>,
+    x : &mut DVector<F>,
+    y : &DVector<F>,
+    β : F
+) {
+    sorted_abs.copy_from(x);
+    sorted_abs.axpy(-1.0, y, 1.0);
+    sorted_abs.apply(|z_i| *z_i = num_traits::abs(*z_i));
+    sorted_abs.as_mut_slice().sort_unstable_by(|a, b| NaNLeast(*a).cmp(&NaNLeast(*b)));
+
+    let mut n = sorted_abs.sum();
+    for (m, az_i) in zip((1..=x.len() as u32).rev(), sorted_abs) {
+        // Test whether the smallest remaining component survives soft-thresholding for this m.
+        let tmp = β*n/(1.0 + β*F::cast_from(m));
+        if *az_i <= tmp {
+            // Fail
+            n -= *az_i;
+        } else {
+            // Success
+            x.zip_apply(y, |x_i, y_i| *x_i = y_i + soft_thresholding(*x_i-y_i, tmp));
+            return
+        }
+    }
+    // If every component gets eliminated, then m = 0 trivially works and the prox is zero.
+    x.fill(0.0);
+}
+
+/// Returns the ∞-norm minimal subdifferential of $x ↦ (β/2)|x-y|_1^2 - g^⊤ x + λ\|x\|₁$ at $x$.
+#[replace_float_literals(F::cast_from(literal))]
+fn min_subdifferential<F : Float + nalgebra::RealField>(
+    y : &DVector<F>,
+    x : &DVector<F>,
+    g : &DVector<F>,
+    λ : F,
+    β : F
+) -> F {
+    let mut val = 0.0;
+    let tmp = β*y.dist(x, L1);
+    for (&g_i, &x_i, y_i) in izip!(g.iter(), x.iter(), y.iter()) {
+        let (mut lb, mut ub) = (-g_i, -g_i);
+        match x_i.partial_cmp(y_i) {
+            Some(Greater) => { lb += tmp; ub += tmp },
+            Some(Less) => { lb -= tmp; ub -= tmp },
+            Some(Equal) => { lb -= tmp; ub += tmp },
+            None => {},
+        }
+        match x_i.partial_cmp(&0.0) {
+            Some(Greater) => { lb += λ; ub += λ },
+            Some(Less) => { lb -= λ; ub -= λ },
+            Some(Equal) => { lb -= λ; ub += λ },
+            None => {},
+        };
+        val = max_interval_dist_to_zero(val, lb, ub);
+    }
+    val
+}
+
+
+/// PDPS implementation of [`l1squared_unconstrained`].
+/// For detailed documentation of the inputs and outputs, refer to that function.
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn l1squared_unconstrained_pdps<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ_ : F,
+    β_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    σ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let λ = λ_.to_nalgebra_mixed();
+    let β = β_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let σ = σ_.to_nalgebra_mixed();
+    let mut w = DVector::zeros(x.len());
+    let mut tmp = DVector::zeros(x.len());
+    let mut xprev = x.clone();
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Primal step: x^{k+1} = prox_{(τβ/2)|.-y|_1^2}(x^k - τ(w^k - g))
+        x.axpy(-τ, &w, 1.0);
+        x.axpy(τ, g, 1.0);
+        l1squared_prox(&mut tmp, x, y, τ*β);
+        
+        // Dual step: w^{k+1} = proj_{[-λ,λ]}(w^k + σ(2x^{k+1}-x^k))
+        w.axpy(2.0*σ, x, 1.0);
+        w.axpy(-σ, &xprev, 1.0);
+        w.apply(|w_i| *w_i = num_traits::clamp(*w_i, -λ, λ));
+        xprev.copy_from(x);
+        
+        iters +=1;
+
+        state.if_verbose(|| {
+            F::from_nalgebra_mixed(min_subdifferential(y, x, g, λ, β))
+        })
+    });
+
+    iters
+}
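As a brief aside (not from the patch): the dual clamp to [−λ, λ] above is exactly the proximal
map of the conjugate of the ℓ¹ term, since $(λ\|\,·\,\|_1)^*(w) = δ_{\{\|w\|_∞ ≤ λ\}}(w)$, so
$\mathop{\mathrm{prox}}_{σ(λ\|\,·\,\|_1)^*}$ is the componentwise projection
$w_i ↦ \min\{λ, \max\{-λ, w_i\}\}$, independently of the step length σ.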
+
+
+/// This function applies an iterative method for the solution of the problem
+/// <div>$$
+///     \min_{x ∈ ℝ^n} \frac{β}{2} |x-y|_1^2 - g^⊤ x + λ\|x\|₁.
+/// $$</div>
+/// Only PDPS is supported.
+///
+/// This function returns the number of iterations taken.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn l1squared_unconstrained<F, I>(
+    y : &DVector<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    λ : F,
+    β : F,
+    x : &mut DVector<F::MixedType>,
+    inner : &InnerSettings<F>,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    // Estimate of ‖K‖ for K=Id.
+    let normest = 1.0;
+
+    let (inner_τ, inner_σ) = (inner.pdps_τσ0.0 / normest, inner.pdps_τσ0.1 / normest);
+
+    match inner.method {
+        InnerMethod::PDPS | InnerMethod::Default =>
+            l1squared_unconstrained_pdps(y, g, λ, β, x, inner_τ, inner_σ, iterator),
+        _ => unimplemented!(),
+    }
+}
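To sanity-check the sorting argument behind `l1squared_prox`, here is a self-contained editorial
sketch: plain `f64` slices instead of `nalgebra`/`alg_tools`, restricted to the y = 0 case, with
invented helper names. It recomputes the closed form and verifies the derived optimality
condition w = soft_{β‖w‖₁}(x) on a small vector; it is illustrative only, not the crate's code.

// Editorial sketch; not part of the changeset.

fn soft(v: f64, t: f64) -> f64 {
    if v > t { v - t } else if v < -t { v + t } else { 0.0 }
}

/// prox_{(β/2)‖·‖₁²}(x) via the elimination argument: find ‖w‖₁ = ‖x'‖₁/(1+βm)
/// over the surviving components, then soft-threshold by β‖w‖₁.
fn l1squared_prox_demo(x: &[f64], beta: f64) -> Vec<f64> {
    let mut a: Vec<f64> = x.iter().map(|v| v.abs()).collect();
    a.sort_by(|p, q| p.partial_cmp(q).unwrap());
    let mut n: f64 = a.iter().sum();
    for (i, &az) in a.iter().enumerate() {
        let m = (a.len() - i) as f64;
        let t = beta * n / (1.0 + beta * m);
        if az > t {
            // The smallest surviving component stays nonzero ⇒ this m is consistent.
            return x.iter().map(|&v| soft(v, t)).collect();
        }
        n -= az; // eliminate this component and retry with m − 1
    }
    vec![0.0; x.len()] // every component was eliminated
}

fn main() {
    let x = [1.0, -0.25, 0.5];
    let beta = 0.7;
    let w = l1squared_prox_demo(&x, beta);

    // Optimality condition from the doc comment: w = soft_{β‖w‖₁}(x).
    let t = beta * w.iter().map(|v| v.abs()).sum::<f64>();
    for (wi, xi) in w.iter().zip(&x) {
        assert!((wi - soft(*xi, t)).abs() < 1e-12);
    }
    println!("prox = {:?}, β‖w‖₁ = {}", w, t);
}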
--- a/src/subproblem/nonneg.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/subproblem/nonneg.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -6,6 +6,7 @@
 use numeric_literals::replace_float_literals;
 use itertools::{izip, Itertools};
 use colored::Colorize;
+use std::cmp::Ordering::*;
 
 use alg_tools::iter::Mappable;
 use alg_tools::error::NumericalError;
@@ -22,38 +23,42 @@
     InnerMethod,
     InnerSettings
 };
+use super::l1squared_nonneg::max_interval_dist_to_zero;
 
 /// Compute the proximal operator of $x \mapsto x + \delta\_{[0, \infty)}$, i.e.,
 /// the non-negativity constrained soft-thresholding operator.
 #[inline]
 #[replace_float_literals(F::cast_from(literal))]
-fn nonneg_soft_thresholding<F : Float>(v : F, λ : F) -> F {
+pub(super) fn nonneg_soft_thresholding<F : Float>(v : F, λ : F) -> F {
     (v - λ).max(0.0)
 }
 
-/// Returns the ∞-norm minimal subdifferential of $x ↦ x^⊤Ax - g^⊤ x + λ\vec 1^⊤ x δ_{≥ 0}(x)$
+/// Returns the ∞-norm minimal subdifferential of $x ↦ x^⊤Ax/2 - g^⊤ x + λ\vec 1^⊤ x + δ_{≥ 0}(x)$
 /// at $x$.
 ///
 /// `v` will be modified and cannot be trusted to contain useful values afterwards.
-#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
-fn min_subdifferential<F : Float + ToNalgebraRealField>(
-    v : &mut DVector<F::MixedType>,
-    mA : &DMatrix<F::MixedType>,
-    x : &DVector<F::MixedType>,
-    g : &DVector<F::MixedType>,
-    λ : F::MixedType
+#[replace_float_literals(F::cast_from(literal))]
+fn min_subdifferential<F : Float + nalgebra::RealField>(
+    v : &mut DVector<F>,
+    mA : &DMatrix<F>,
+    x : &DVector<F>,
+    g : &DVector<F>,
+    λ : F
 ) -> F {
     v.copy_from(g);
     mA.gemv(v, 1.0, x, -1.0);   // v =  Ax - g
     let mut val = 0.0;
     for (&v_i, &x_i) in izip!(v.iter(), x.iter()) {
-        // The subdifferential of the objective is $Ax - g + λ + ∂ δ_{≥ 0}(x)$.
-        let d = v_i + λ;
-        if x_i > 0.0 || d < 0.0 {
-            val = val.max(d.abs());
+        let (mut lb, mut ub) = (v_i, v_i);
+        match x_i.partial_cmp(&0.0) {
+            Some(Greater) => { lb += λ; ub += λ },
+            // Less should not happen
+            Some(Less|Equal) => { lb = F::NEG_INFINITY; ub += λ },
+            None => {},
         }
+        val = max_interval_dist_to_zero(val, lb, ub);
     }
-    F::from_nalgebra_mixed(val)
+    val
 }
 
 /// Forward-backward splitting implementation of [`quadratic_nonneg`].
@@ -98,7 +103,7 @@
         iters +=1;
 
         backup.map(|_| {
-            min_subdifferential(&mut v, mA, x, g, λ)
+            F::from_nalgebra_mixed(min_subdifferential(&mut v, mA, x, g, λ))
         })
     });
 
@@ -281,7 +286,7 @@
         // 4. Report solution quality
         state.if_verbose(|| {
             // Calculate subdifferential at the FB step `x` that hasn't yet had `s` yet added.
-            min_subdifferential(&mut v, mA, x, g, λ)
+            F::from_nalgebra_mixed(min_subdifferential(&mut v, mA, x, g, λ))
         })
     });
 
@@ -291,7 +296,7 @@
 /// This function applies an iterative method for the solution of the quadratic non-negativity
 /// constrained problem
 /// <div>$$
-///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ{\vec 1}^⊤ x + c + δ_{≥ 0}(x).
+///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ{\vec 1}^⊤ x + δ_{≥ 0}(x).
 /// $$</div>
 /// Semismooth Newton or forward-backward are supported based on the setting in `method`.
 /// The parameter `mA` is matrix $A$, and `g` and `λ` are as in the mathematical formulation.
@@ -307,27 +312,28 @@
 ///
 /// This function returns the number of iterations taken.
 pub fn quadratic_nonneg<F, I>(
-    method : InnerMethod,
     mA : &DMatrix<F::MixedType>,
     g : &DVector<F::MixedType>,
-    //c_ : F,
     λ : F,
     x : &mut DVector<F::MixedType>,
-    τ : F,
+    mA_normest : F,
+    inner : &InnerSettings<F>,
     iterator : I
 ) -> usize
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<F>
 {
-    
-    match method {
-        InnerMethod::FB =>
-            quadratic_nonneg_fb(mA, g, λ, x, τ, iterator),
+    let inner_τ = inner.fb_τ0 / mA_normest;
+
+    match inner.method {
+        InnerMethod::FB | InnerMethod::Default =>
+            quadratic_nonneg_fb(mA, g, λ, x, inner_τ, iterator),
         InnerMethod::SSN =>
-            quadratic_nonneg_ssn(mA, g, λ, x, τ, iterator).unwrap_or_else(|e| {
+            quadratic_nonneg_ssn(mA, g, λ, x, inner_τ, iterator).unwrap_or_else(|e| {
                 println!("{}", format!("{e}. Using FB fallback.").red());
                 let ins = InnerSettings::<F>::default();
-                quadratic_nonneg_fb(mA, g, λ, x, τ, ins.iterator_options)
-            })
+                quadratic_nonneg_fb(mA, g, λ, x, inner_τ, ins.iterator_options)
+            }),
+        InnerMethod::PDPS => unimplemented!(),
     }
 }
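The rewritten subdifferential checks in this and the next file funnel per-component
subdifferential intervals [lb, ub] into `max_interval_dist_to_zero` from `l1squared_nonneg.rs`,
which is not shown in these hunks. The following is a guessed, simplified re-implementation for
illustration only (the real helper presumably also handles NaN); the name and behaviour here are
assumptions, not the changeset's code.

// Editorial sketch of the assumed semantics; not the actual helper.

/// Accumulate the maximum over components of the distance from 0 to the interval [lb, ub],
/// i.e. the ∞-norm of the minimal-norm element of a box-shaped subdifferential.
fn max_interval_dist_to_zero_demo(val: f64, lb: f64, ub: f64) -> f64 {
    let d = if lb > 0.0 {
        lb          // interval entirely positive
    } else if ub < 0.0 {
        -ub         // interval entirely negative
    } else {
        0.0         // 0 ∈ [lb, ub]: this component is already optimal
    };
    val.max(d)
}

fn main() {
    // x_i > 0: ∂(λ‖·‖₁) is the single point {λ}, so the interval collapses.
    assert_eq!(max_interval_dist_to_zero_demo(0.0, 0.3, 0.3), 0.3);
    // x_i = 0: the interval [v_i−λ, v_i+λ] contains 0, so it contributes nothing.
    assert_eq!(max_interval_dist_to_zero_demo(0.0, -0.2, 0.5), 0.0);
    // Lower bound −∞ (the x_i = 0 case under the non-negativity constraint) also works.
    assert_eq!(max_interval_dist_to_zero_demo(0.0, f64::NEG_INFINITY, -0.1), 0.1);
}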
--- a/src/subproblem/unconstrained.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/subproblem/unconstrained.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -23,11 +23,12 @@
     InnerMethod,
     InnerSettings
 };
+use super::l1squared_nonneg::max_interval_dist_to_zero;
 
 /// Compute the proximal operator of $x \mapsto |x|$, i.e., the soft-thresholding operator.
 #[inline]
 #[replace_float_literals(F::cast_from(literal))]
-fn soft_thresholding<F : Float>(v : F, λ : F) -> F {
+pub(crate) fn soft_thresholding<F : Float>(v : F, λ : F) -> F {
     if v > λ {
         v - λ
     } else if v < -λ {
@@ -40,27 +41,28 @@
 /// Returns the ∞-norm minimal subdifferential of $x ↦ x^⊤Ax/2 - g^⊤ x + λ\|x\|₁$ at $x$.
 ///
 /// `v` will be modified and cannot be trusted to contain useful values afterwards.
-#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
-fn min_subdifferential<F : Float + ToNalgebraRealField>(
-    v : &mut DVector<F::MixedType>,
-    mA : &DMatrix<F::MixedType>,
-    x : &DVector<F::MixedType>,
-    g : &DVector<F::MixedType>,
-    λ : F::MixedType
+#[replace_float_literals(F::cast_from(literal))]
+fn min_subdifferential<F : Float + nalgebra::RealField>(
+    v : &mut DVector<F>,
+    mA : &DMatrix<F>,
+    x : &DVector<F>,
+    g : &DVector<F>,
+    λ : F
 ) -> F {
     v.copy_from(g);
     mA.gemv(v, 1.0, x, -1.0);   // v =  Ax - g
     let mut val = 0.0;
     for (&v_i, &x_i) in izip!(v.iter(), x.iter()) {
-        // The subdifferential at x is $Ax - g + λ ∂‖·‖₁(x)$.
-        val = val.max(match x_i.partial_cmp(&0.0) {
-            Some(Greater) => v_i + λ,
-            Some(Less) => v_i - λ,
-            Some(Equal) => soft_thresholding(v_i, λ),
-            None => F::MixedType::nan(),
-        })
+        let (mut lb, mut ub) = (v_i, v_i);
+        match x_i.partial_cmp(&0.0) {
+            Some(Greater) => { lb += λ; ub += λ },
+            Some(Less) => { lb -= λ; ub -= λ },
+            Some(Equal) => { lb -= λ; ub += λ },
+            None => {},
+        }
+        val = max_interval_dist_to_zero(val, lb, ub);
     }
-    F::from_nalgebra_mixed(val)
+    val
 }
 
 
@@ -106,7 +108,7 @@
         iters +=1;
 
         backup.map(|_| {
-            min_subdifferential(&mut v, mA, x, g, λ)
+            F::from_nalgebra_mixed(min_subdifferential(&mut v, mA, x, g, λ))
         })
     });
 
@@ -242,7 +244,7 @@
         // 4. Report solution quality
         state.if_verbose(|| {
             // Calculate subdifferential at the FB step `x` that hasn't yet had `s` yet added.
-            min_subdifferential(&mut v, mA, x, g, λ)
+            F::from_nalgebra_mixed(min_subdifferential(&mut v, mA, x, g, λ))
         })
     });
 
@@ -251,7 +253,7 @@
 
 /// This function applies an iterative method for the solution of the problem
 /// <div>$$
-///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ\|x\|₁ + c.
+///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ\|x\|₁.
 /// $$</div>
 /// Semismooth Newton or forward-backward are supported based on the setting in `method`.
 /// The parameter `mA` is matrix $A$, and `g` and `λ` are as in the mathematical formulation.
@@ -262,27 +264,28 @@
 ///
 /// This function returns the number of iterations taken.
 pub fn quadratic_unconstrained<F, I>(
-    method : InnerMethod,
     mA : &DMatrix<F::MixedType>,
     g : &DVector<F::MixedType>,
-    //c_ : F,
     λ : F,
     x : &mut DVector<F::MixedType>,
-    τ : F,
+    mA_normest : F,
+    inner : &InnerSettings<F>,
     iterator : I
 ) -> usize
 where F : Float + ToNalgebraRealField,
       I : AlgIteratorFactory<F>
 {
+    let inner_τ = inner.fb_τ0 / mA_normest;
     
-    match method {
-        InnerMethod::FB =>
-            quadratic_unconstrained_fb(mA, g, λ, x, τ, iterator),
+    match inner.method {
+        InnerMethod::FB | InnerMethod::Default =>
+            quadratic_unconstrained_fb(mA, g, λ, x, inner_τ, iterator),
         InnerMethod::SSN =>
-            quadratic_unconstrained_ssn(mA, g, λ, x, τ, iterator).unwrap_or_else(|e| {
+            quadratic_unconstrained_ssn(mA, g, λ, x, inner_τ, iterator).unwrap_or_else(|e| {
                 println!("{}", format!("{e}. Using FB fallback.").red());
                 let ins = InnerSettings::<F>::default();
-                quadratic_unconstrained_fb(mA, g, λ, x, τ, ins.iterator_options)
-            })
+                quadratic_unconstrained_fb(mA, g, λ, x, inner_τ, ins.iterator_options)
+            }),
+        InnerMethod::PDPS => unimplemented!(),
     }
 }
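`quadratic_unconstrained_fb` itself is untouched by this hunk, but the new step-size rule
τ = fb_τ0/‖A‖ is easier to read against the iteration it presumably drives,
x ← soft_{τλ}(x − τ(Ax − g)), which requires τ‖A‖ < 2 for convergence. Below is a bare-bones
editorial sketch with a fixed iteration count in place of the AlgIterator machinery; the data,
the norm bound, and the assumed form of the iteration are illustrative assumptions only.

// Editorial sketch; assumed form of the FB iteration, not code from the changeset.
use nalgebra::{DMatrix, DVector};

fn soft(v: f64, t: f64) -> f64 {
    if v > t { v - t } else if v < -t { v + t } else { 0.0 }
}

fn main() {
    // min_x ½ xᵀAx − gᵀx + λ‖x‖₁ on a 2×2 example.
    let m_a = DMatrix::from_row_slice(2, 2, &[2.0, 0.5, 0.5, 1.0]);
    let g = DVector::from_vec(vec![1.0, -0.5]);
    let lambda = 0.1;
    let tau = 0.9 / 2.5; // crude bound ‖A‖ ≤ 2.5; plays the role of fb_τ0 / mA_normest

    let mut x = DVector::zeros(2);
    for _ in 0..200 {
        let grad = &m_a * &x - &g; // gradient of the quadratic part
        x.zip_apply(&grad, |x_i, g_i| *x_i = soft(*x_i - tau * g_i, tau * lambda));
    }
    println!("approximate minimiser: {}", x);
}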
--- a/src/transport.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/transport.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -11,7 +11,7 @@
     /// If `Self` is a linear operator $A$ on $ℳ(Ω)$, and `Cost` represents the spatial
     /// cost function $c$, this factor $L$ is such that, for all $0 ≤ λ ∈ ℳ(Ω^2)$,
     /// $$
-    ///     \norm{A(π_\#^1-π_\#^0)λ}^2 ≤ L^2 \norm{λ}_{ℳ(Ω^2)} ∫ c(x, y) dλ(x, y).
+    ///     \norm{A(π_\#^1-π_\#^0)λ}^2 ≤ L \norm{λ}_{ℳ(Ω^2)} ∫ c(x, y) dλ(x, y).
     /// $$
     fn transport_lipschitz_factor(&self, cost : Cost) -> Self::FloatType;
 }
--- a/src/types.rs	Tue Aug 01 10:32:12 2023 +0300
+++ b/src/types.rs	Thu Aug 29 00:00:00 2024 -0500
@@ -27,7 +27,7 @@
 pub struct IterInfo<F : Float, const N : usize> {
     /// Function value
     pub value : F,
-    /// Number of speaks
+    /// Number of spikes
     pub n_spikes : usize,
     /// Number of iterations this statistic covers
     pub this_iters : usize,
@@ -37,6 +37,10 @@
     pub pruned : usize,
     /// Number of inner iterations since last IterInfo statistic
     pub inner_iters : usize,
+    /// Tuple of (transported mass, source mass)
+    pub untransported_fraction : Option<(F, F)>,
+    /// Tuple of (|destination mass - untransported_mass|, transported mass)
+    pub transport_error : Option<(F, F)>,
     /// Current tolerance
     pub ε : F,
     /// Solve fin.dim problem for this measure to get the optimal `value`.
@@ -55,19 +59,38 @@
             inner_iters : 0,
             ε : F::NAN,
             postprocessing : None,
+            untransported_fraction : None,
+            transport_error : None,
         }
     }
 }
 
+#[replace_float_literals(F::cast_from(literal))]
 impl<F, const N : usize> LogRepr for IterInfo<F, N> where F : LogRepr + Float {
     fn logrepr(&self) -> ColoredString {
-        format!("{}\t| N = {}, ε = {:.8}, inner_iters_mean = {}, merged+pruned_mean = {}+{}",
+        format!("{}\t| N = {}, ε = {:.8}, inner_iters_mean = {}, merged+pruned_mean = {}+{}{}{}",
                 self.value.logrepr(),
                 self.n_spikes,
                 self.ε,
                 self.inner_iters as float / self.this_iters as float,
                 self.merged as float / self.this_iters as float,
                 self.pruned as float / self.this_iters as float,
+                match self.untransported_fraction {
+                    None => format!(""),
+                    Some((a, b)) => if b > 0.0 {
+                        format!(", untransported {:.2}%", 100.0*a/b)
+                    } else {
+                        format!("")
+                    }
+                },
+                match self.transport_error {
+                    None => format!(""),
+                    Some((a, b)) => if b > 0.0 {
+                        format!(", transport error {:.2}%", 100.0*a/b)
+                    } else {
+                        format!("")
+                    }
+                }
         ).as_str().into()
     }
 }

mercurial