Initial version

Thu, 01 Dec 2022 23:07:35 +0200

author
Tuomo Valkonen <tuomov@iki.fi>
date
Thu, 01 Dec 2022 23:07:35 +0200
changeset 0
eb3c7813b67a
child 1
d4fd5f32d10e
child 2
7a953a87b6c1
child 3
0778a71cbb6a
child 4
5aa5c279e341
child 5
df971c81282e

Initial version

.hgignore file | annotate | diff | comparison | revisions
Cargo.lock file | annotate | diff | comparison | revisions
Cargo.toml file | annotate | diff | comparison | revisions
LICENSE file | annotate | diff | comparison | revisions
README.md file | annotate | diff | comparison | revisions
misc/doc_alias.sh file | annotate | diff | comparison | revisions
misc/katex-header.html file | annotate | diff | comparison | revisions
rust-toolchain.toml file | annotate | diff | comparison | revisions
src/experiments.rs file | annotate | diff | comparison | revisions
src/fb.rs file | annotate | diff | comparison | revisions
src/forward_model.rs file | annotate | diff | comparison | revisions
src/fourier.rs file | annotate | diff | comparison | revisions
src/frank_wolfe.rs file | annotate | diff | comparison | revisions
src/kernels.rs file | annotate | diff | comparison | revisions
src/kernels/ball_indicator.rs file | annotate | diff | comparison | revisions
src/kernels/base.rs file | annotate | diff | comparison | revisions
src/kernels/gaussian.rs file | annotate | diff | comparison | revisions
src/kernels/hat.rs file | annotate | diff | comparison | revisions
src/kernels/hat_convolution.rs file | annotate | diff | comparison | revisions
src/kernels/mollifier.rs file | annotate | diff | comparison | revisions
src/main.rs file | annotate | diff | comparison | revisions
src/measures.rs file | annotate | diff | comparison | revisions
src/measures/base.rs file | annotate | diff | comparison | revisions
src/measures/delta.rs file | annotate | diff | comparison | revisions
src/measures/discrete.rs file | annotate | diff | comparison | revisions
src/measures/merging.rs file | annotate | diff | comparison | revisions
src/pdps.rs file | annotate | diff | comparison | revisions
src/plot.rs file | annotate | diff | comparison | revisions
src/rand_distr.rs file | annotate | diff | comparison | revisions
src/run.rs file | annotate | diff | comparison | revisions
src/seminorms.rs file | annotate | diff | comparison | revisions
src/subproblem.rs file | annotate | diff | comparison | revisions
src/tolerance.rs file | annotate | diff | comparison | revisions
src/types.rs file | annotate | diff | comparison | revisions
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.hgignore	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,6 @@
+^target/
+^debug_out/
+^pointsource.._.*\.txt
+flamegraph.svg
+DEADJOE
+.*\.orig
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Cargo.lock	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,1480 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "GSL"
+version = "6.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c9becaf6d7d1ba36a457288e661fa6a0472e8328629276f45369eafcd48ef1ce"
+dependencies = [
+ "GSL-sys",
+ "paste",
+]
+
+[[package]]
+name = "GSL-sys"
+version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4577670dcc0720995dc39f04c438595eaae8ccc27f4aafd3e572dd408d01bd9d"
+dependencies = [
+ "libc",
+ "pkg-config",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "alg_tools"
+version = "0.1.0"
+dependencies = [
+ "colored",
+ "cpu-time",
+ "csv",
+ "itertools",
+ "nalgebra",
+ "num",
+ "num-traits",
+ "numeric_literals",
+ "rayon",
+ "serde",
+ "serde_json",
+ "trait-set",
+]
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "approx"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi 0.1.19",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bit_field"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bstr"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
+dependencies = [
+ "lazy_static",
+ "memchr",
+ "regex-automata",
+ "serde",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
+
+[[package]]
+name = "bytemuck"
+version = "1.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aaa3a8d9a1ca92e282c96a32d6511b695d7d994d1d102ba85d279f9b2756947f"
+
+[[package]]
+name = "byteorder"
+version = "1.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+
+[[package]]
+name = "cc"
+version = "1.0.77"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "chrono"
+version = "0.4.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
+dependencies = [
+ "iana-time-zone",
+ "js-sys",
+ "num-integer",
+ "num-traits",
+ "serde",
+ "time",
+ "wasm-bindgen",
+ "winapi",
+]
+
+[[package]]
+name = "clap"
+version = "4.0.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0acbd8d28a0a60d7108d7ae850af6ba34cf2d1257fc646980e5f97ce14275966"
+dependencies = [
+ "bitflags",
+ "clap_derive",
+ "clap_lex",
+ "is-terminal",
+ "once_cell",
+ "strsim",
+ "termcolor",
+ "terminal_size",
+ "unicase",
+ "unicode-width",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "codespan-reporting"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
+dependencies = [
+ "termcolor",
+ "unicode-width",
+]
+
+[[package]]
+name = "color_quant"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
+
+[[package]]
+name = "colorbrewer"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "525be5012d97bc222e124ded87f18601e6fbd24a406761bcb1664475663919a6"
+dependencies = [
+ "rgb",
+]
+
+[[package]]
+name = "colored"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd"
+dependencies = [
+ "atty",
+ "lazy_static",
+ "winapi",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
+
+[[package]]
+name = "cpu-time"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9e393a7668fe1fad3075085b86c781883000b4ede868f43627b34a87c8b7ded"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
+dependencies = [
+ "autocfg",
+ "cfg-if",
+ "crossbeam-utils",
+ "memoffset",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crunchy"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+
+[[package]]
+name = "csv"
+version = "1.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
+dependencies = [
+ "bstr",
+ "csv-core",
+ "itoa 0.4.8",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "cxx"
+version = "1.0.82"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4a41a86530d0fe7f5d9ea779916b7cadd2d4f9add748b99c2c029cbbdfaf453"
+dependencies = [
+ "cc",
+ "cxxbridge-flags",
+ "cxxbridge-macro",
+ "link-cplusplus",
+]
+
+[[package]]
+name = "cxx-build"
+version = "1.0.82"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06416d667ff3e3ad2df1cd8cd8afae5da26cf9cec4d0825040f88b5ca659a2f0"
+dependencies = [
+ "cc",
+ "codespan-reporting",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "scratch",
+ "syn",
+]
+
+[[package]]
+name = "cxxbridge-flags"
+version = "1.0.82"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "820a9a2af1669deeef27cb271f476ffd196a2c4b6731336011e0ba63e2c7cf71"
+
+[[package]]
+name = "cxxbridge-macro"
+version = "1.0.82"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "either"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
+
+[[package]]
+name = "errno"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+dependencies = [
+ "cc",
+ "libc",
+]
+
+[[package]]
+name = "exr"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8eb5f255b5980bb0c8cf676b675d1a99be40f316881444f44e0462eaf5df5ded"
+dependencies = [
+ "bit_field",
+ "flume",
+ "half",
+ "lebe",
+ "miniz_oxide",
+ "smallvec",
+ "threadpool",
+]
+
+[[package]]
+name = "flate2"
+version = "1.0.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
+dependencies = [
+ "crc32fast",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "float_extras"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b22b70f8649ea2315955f1a36d964b0e4da482dfaa5f0d04df0d1fb7c338ab7a"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "flume"
+version = "0.10.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "nanorand",
+ "pin-project",
+ "spin",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac"
+
+[[package]]
+name = "futures-sink"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9"
+
+[[package]]
+name = "getrandom"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "libc",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "gif"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06"
+dependencies = [
+ "color_quant",
+ "weezl",
+]
+
+[[package]]
+name = "half"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad6a9459c9c30b177b925162351f97e7d967c7ea8bab3b8352805327daf45554"
+dependencies = [
+ "crunchy",
+]
+
+[[package]]
+name = "heck"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "iana-time-zone"
+version = "0.1.53"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "wasm-bindgen",
+ "winapi",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
+dependencies = [
+ "cxx",
+ "cxx-build",
+]
+
+[[package]]
+name = "image"
+version = "0.24.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69b7ea949b537b0fd0af141fff8c77690f2ce96f4f41f042ccb6c69c6c965945"
+dependencies = [
+ "bytemuck",
+ "byteorder",
+ "color_quant",
+ "exr",
+ "gif",
+ "jpeg-decoder",
+ "num-rational",
+ "num-traits",
+ "png",
+ "scoped_threadpool",
+ "tiff",
+]
+
+[[package]]
+name = "io-lifetimes"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074"
+
+[[package]]
+name = "io-lifetimes"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e394faa0efb47f9f227f1cd89978f854542b318a6f64fa695489c9c993056656"
+dependencies = [
+ "libc",
+ "windows-sys",
+]
+
+[[package]]
+name = "is-terminal"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aae5bc6e2eb41c9def29a3e0f1306382807764b9b53112030eff57435667352d"
+dependencies = [
+ "hermit-abi 0.2.6",
+ "io-lifetimes 1.0.2",
+ "rustix 0.36.3",
+ "windows-sys",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
+
+[[package]]
+name = "itoa"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
+
+[[package]]
+name = "jpeg-decoder"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e"
+dependencies = [
+ "rayon",
+]
+
+[[package]]
+name = "js-sys"
+version = "0.3.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "lebe"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8"
+
+[[package]]
+name = "libc"
+version = "0.2.137"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
+
+[[package]]
+name = "libm"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
+
+[[package]]
+name = "link-cplusplus"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.0.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f9f08d8963a6c613f4b1a78f4f4a4dbfadf8e6545b2d72861731e4858b8b47f"
+
+[[package]]
+name = "lock_api"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "matrixmultiply"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84"
+dependencies = [
+ "rawpointer",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "memoffset"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
+dependencies = [
+ "adler",
+]
+
+[[package]]
+name = "nalgebra"
+version = "0.31.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "20bd243ab3dbb395b39ee730402d2e5405e448c75133ec49cc977762c4cba3d1"
+dependencies = [
+ "approx",
+ "matrixmultiply",
+ "nalgebra-macros",
+ "num-complex",
+ "num-rational",
+ "num-traits",
+ "rand",
+ "simba",
+ "typenum",
+]
+
+[[package]]
+name = "nalgebra-macros"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "nanorand"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "num"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606"
+dependencies = [
+ "num-bigint",
+ "num-complex",
+ "num-integer",
+ "num-iter",
+ "num-rational",
+ "num-traits",
+]
+
+[[package]]
+name = "num-bigint"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-complex"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
+dependencies = [
+ "autocfg",
+ "num-traits",
+]
+
+[[package]]
+name = "num-iter"
+version = "0.1.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-rational"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0"
+dependencies = [
+ "autocfg",
+ "num-bigint",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
+dependencies = [
+ "autocfg",
+ "libm",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
+dependencies = [
+ "hermit-abi 0.1.19",
+ "libc",
+]
+
+[[package]]
+name = "numeric_literals"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "095aa67b0b9f2081746998f4f17106bdb51d56dc8c211afca5531b92b83bf98a"
+dependencies = [
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
+
+[[package]]
+name = "os_str_bytes"
+version = "6.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
+
+[[package]]
+name = "paste"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
+
+[[package]]
+name = "pin-project"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc"
+dependencies = [
+ "pin-project-internal",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pkg-config"
+version = "0.3.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
+
+[[package]]
+name = "png"
+version = "0.17.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d708eaf860a19b19ce538740d2b4bdeeb8337fa53f7738455e706623ad5c638"
+dependencies = [
+ "bitflags",
+ "crc32fast",
+ "flate2",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "pointsource_algs"
+version = "0.1.0"
+dependencies = [
+ "GSL",
+ "alg_tools",
+ "chrono",
+ "clap",
+ "colorbrewer",
+ "colored",
+ "cpu-time",
+ "float_extras",
+ "image",
+ "itertools",
+ "nalgebra",
+ "num-traits",
+ "numeric_literals",
+ "poloto",
+ "rand",
+ "rand_distr",
+ "rgb",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "poloto"
+version = "3.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2541c28c0622b297e342444bd8b1d87b02c8478dd3ed0ecc3eee47dc4d13282"
+dependencies = [
+ "tagger",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.47"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_distr"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"
+dependencies = [
+ "num-traits",
+ "rand",
+]
+
+[[package]]
+name = "rawpointer"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
+
+[[package]]
+name = "rayon"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e060280438193c554f654141c9ea9417886713b7acd75974c85b18a69a88e0b"
+dependencies = [
+ "crossbeam-deque",
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "num_cpus",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+
+[[package]]
+name = "rgb"
+version = "0.8.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3603b7d71ca82644f79b5a06d1220e9a58ede60bd32255f698cb1af8838b8db3"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
+name = "rustix"
+version = "0.35.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "727a1a6d65f786ec22df8a81ca3121107f235970dc1705ed681d3e6e8b9cd5f9"
+dependencies = [
+ "bitflags",
+ "errno",
+ "io-lifetimes 0.7.5",
+ "libc",
+ "linux-raw-sys 0.0.46",
+ "windows-sys",
+]
+
+[[package]]
+name = "rustix"
+version = "0.36.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b1fbb4dfc4eb1d390c02df47760bb19a84bb80b301ecc947ab5406394d8223e"
+dependencies = [
+ "bitflags",
+ "errno",
+ "io-lifetimes 1.0.2",
+ "libc",
+ "linux-raw-sys 0.1.3",
+ "windows-sys",
+]
+
+[[package]]
+name = "ryu"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
+
+[[package]]
+name = "safe_arch"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "794821e4ccb0d9f979512f9c1973480123f9bd62a90d74ab0f9426fcf8f4a529"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
+name = "scoped_threadpool"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8"
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "scratch"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
+
+[[package]]
+name = "serde"
+version = "1.0.148"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.148"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.89"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
+dependencies = [
+ "itoa 1.0.4",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "simba"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f3fd720c48c53cace224ae62bef1bbff363a70c68c4802a78b5cc6159618176"
+dependencies = [
+ "approx",
+ "num-complex",
+ "num-traits",
+ "paste",
+ "wide",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
+
+[[package]]
+name = "spin"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
+dependencies = [
+ "lock_api",
+]
+
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "syn"
+version = "1.0.104"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "tagger"
+version = "4.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6aaa6f5d645d1dae4cd0286e9f8bf15b75a31656348e5e106eb1a940abd34b63"
+
+[[package]]
+name = "termcolor"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "terminal_size"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "40ca90c434fd12083d1a6bdcbe9f92a14f96c8a1ba600ba451734ac334521f7a"
+dependencies = [
+ "rustix 0.35.13",
+ "windows-sys",
+]
+
+[[package]]
+name = "threadpool"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
+dependencies = [
+ "num_cpus",
+]
+
+[[package]]
+name = "tiff"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f17def29300a156c19ae30814710d9c63cd50288a49c6fd3a10ccfbe4cf886fd"
+dependencies = [
+ "flate2",
+ "jpeg-decoder",
+ "weezl",
+]
+
+[[package]]
+name = "time"
+version = "0.1.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a"
+dependencies = [
+ "libc",
+ "wasi 0.10.0+wasi-snapshot-preview1",
+ "winapi",
+]
+
+[[package]]
+name = "trait-set"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "875c4c873cc824e362fa9a9419ffa59807244824275a44ad06fec9684fff08f2"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "typenum"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+
+[[package]]
+name = "unicase"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6"
+dependencies = [
+ "version_check",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
+
+[[package]]
+name = "unicode-width"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "wasi"
+version = "0.10.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
+
+[[package]]
+name = "weezl"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb"
+
+[[package]]
+name = "wide"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae41ecad2489a1655c8ef8489444b0b113c0a0c795944a3572a0931cf7d2525c"
+dependencies = [
+ "bytemuck",
+ "safe_arch",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Cargo.toml	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,45 @@
+[package]
+name = "pointsource_algs"
+version = "0.1.0"
+edition = "2021"
+authors = ["Tuomo Valkonen <tuomov@iki.fi>"]
+description = "Algorithms for point source localisation"
+homepage = "https://tuomov.iki.fi/software/pointsource_algs/"
+repository = "https://tuomov.iki.fi/repos/pointsource_algs/"
+license-file = "LICENSE"
+keywords = [
+    "optimization",
+    "measure",
+    "pointsource",
+    "forward-backward",
+    "primal-dual",
+    "pdps",
+    "fista",
+    "frank-wolfe",
+    "conditional gradient"
+]
+categories = ["mathematics", "science", "computer-vision"]
+
+[dependencies]
+alg_tools = { version = "~0.1.0", path = "../alg_tools", default-features = false }
+serde = { version = "1.0", features = ["derive"] }
+num-traits = { version = "~0.2.14", features = ["std"] }
+rand = "~0.8.5"
+colored = "~2.0.0"
+rand_distr = "~0.4.3"
+nalgebra = { version = "~0.31.0", features = ["rand-no-std"] }
+itertools = "~0.10.3" 
+numeric_literals = "~0.2.0"
+poloto = "~3.13.1"
+GSL = "~6.0.0"
+float_extras = "~0.1.6"
+clap = { version = "~4.0.27", features = ["derive", "unicode", "wrap_help"] }
+image = "~0.24.3"
+cpu-time = "~1.0.0"
+colorbrewer = "~0.2.0"
+rgb = "~0.8.33"
+serde_json = "~1.0.85"
+chrono = { version = "~0.4.23", features = ["alloc", "std", "serde"] }
+
+[profile.release]
+debug = true
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LICENSE	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,43 @@
+
+# Anti-abuse license
+
+## Rationale
+
+The purpose of this license is to give end-users and developers maximal
+freedom to use this software while preventing the authors from being
+abused by powerful middle-men that repackage software for convenient
+installation by users. Such potentially abusive middle-men include in
+particular Linux distributions and similar centralising software
+distribution schemes developed for other operating systems.
+The ethos of this license is *bollocks to copyright and distributions!*
+
+## Rules
+
+This software is distributed without any warranty whatsoever.
+
+If you redistribute modified versions of this software to the public,
+you must clearly mark them as modified.
+
+If you redistribute this software to the public as part of a large
+collection of software with the purpose of providing end-users with
+a convenient installation method, you must do one of the following:
+
+(a) Always redistribute the **unmodified** and **latest** version
+provided by the authors. If the lead author releases a new version (on a
+specific branch, such as 'stable' or 'development'), you must promptly
+make that new version the default version offered to your users (on
+that specific branch).
+
+(b) Rename the software, and make it obvious that your modified or obsolete
+software is in no way connected to the authors of the original software.
+The users of your version should under no circumstances be under the 
+illusion that they can contact the lead author or any of the authors
+of the original software if they have any complaints or queries.
+
+(c) Do not in any way directly expose this software to your users. 
+
+Otherwise, do whatever you want with this software. In particular, you may
+freely use the software as part of other projects, and redistribute to
+the public archival copies of the software (as long as your archive cannot
+be considered a “convenient installation method” that will be governed by
+the rules above).
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/README.md	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,42 @@
+
+# pointsource_algs
+
+This repository contains [Rust][] codes for the manuscript “_Proximal methods for point source localisation_” by Tuomo Valkonen ⟨tuomov@iki.fi⟩.
+It concerns solution of problems of the type
+$$
+    \min_{μ ∈ ℳ(Ω)}~ F(μ) + λ \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ),
+$$
+where $F(μ)=\frac12\|Aμ-b\|_2^2$ and $A \in 𝕃(ℳ(Ω); ℝ^m)$, and $ℳ(Ω)$ is the space of Radon measures on the (rectangular) domain $Ω ⊂ ℝ^n$.
+
+## Installation and usage
+
+First install the [Rust][] compiler and `cargo`.
+Also install the [GNU Scientific Library][gsl]. On a Mac with [Homebrew][]
+installed, the latter can be done with
+```sh
+$ brew install gsl
+```
+Then download [alg_tools][] and unpack it under the same directory as this package.
+To compile the code and run the experiments in the manuscript, use
+```sh
+$ cargo run --release
+```
+The `--release` flag is required to build optimised high performance code.
+Without that flag the performance will be significantly worse.
+
+## Documentation
+
+The integrated documentation may be built and opened with
+```sh
+$ cargo doc             # build dependency docs
+$ . misc/doc_alias.sh   # load KaTeX helper macro
+$ cargo-d --open        # build and open KaTeX-aware docs for this crate
+```
+The `cargo-d` alias ensures that KaTeX mathematics is rendered in the generated documentation. `Rustdoc` is obsolete rubbish that does not support modern markdown features, so `cargo doc` does not render mathematics. Instead an ugly workaround is needed.
+
+  [alg_tools]: https://tuomov.iki.fi/software/alg_tools/
+  [Rust]: https://www.rust-lang.org/
+  [gsl]: https://www.gnu.org/software/gsl/
+  [Homebrew]: https://brew.sh
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/doc_alias.sh	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,5 @@
+# Source this file. Use `cargo rustdoc` or `cargo d --no-deps` to build the documentation.
+echo 'Creating cargo-d alias'
+alias cargo-d='RUSTDOCFLAGS="--html-in-header misc/katex-header.html" BROWSER=/Applications/Firefox.app/Contents/MacOS/firefox-bin cargo d --no-deps'
+
+ 
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/katex-header.html	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,15 @@
+<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/katex.min.css" integrity="sha384-Xi8rHCmBmhbuyyhbI88391ZKP2dmfnOl4rT9ZfRI7mLTdk1wblIUnrIq35nqwEvC" crossorigin="anonymous">
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/katex.min.js" integrity="sha384-X/XCfMm41VSsqRNQgDerQczD69XqmjOOOwYQvr/uuC+j4OPoNhVgjdGFwhvN02Ja" crossorigin="anonymous"></script>
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/contrib/auto-render.min.js" integrity="sha384-+XBljXPPiv+OzfbB3cVmLHf4hdUFHlWNZN5spNQ7rmHTXpd7WvJum6fIACpNNfIR" crossorigin="anonymous" onload="renderMathInElement(document.body);"></script>
+<script>
+    document.addEventListener("DOMContentLoaded", function() {
+        renderMathInElement(document.body, {
+            delimiters: [
+                {left: "$$", right: "$$", display: true},
+                {left: "\\(", right: "\\)", display: false},
+                {left: "$", right: "$", display: false},
+                {left: "\\[", right: "\\]", display: true}
+            ]
+        });
+    });
+</script>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust-toolchain.toml	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,2 @@
+[toolchain]
+channel = "nightly"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/experiments.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,299 @@
+/*!
+Experimental setups.
+*/
+
+//use numeric_literals::replace_float_literals;
+use serde::{Serialize, Deserialize};
+use clap::ValueEnum;
+use std::collections::HashMap;
+use std::hash::{Hash, Hasher};
+use std::collections::hash_map::DefaultHasher;
+
+use alg_tools::bisection_tree::*;
+use alg_tools::error::DynResult;
+use alg_tools::norms::Linfinity;
+
+use crate::ExperimentOverrides;
+use crate::kernels::*;
+use crate::kernels::{SupportProductFirst as Prod};
+use crate::pdps::PDPSConfig;
+use crate::types::*;
+use crate::run::{
+    RunnableExperiment,
+    Experiment,
+    Named,
+    DefaultAlgorithm,
+    AlgorithmConfig
+};
+//use crate::fb::FBGenericConfig;
+use crate::rand_distr::{SerializableNormal, SaltAndPepper};
+
+/// Experiment shorthands, to be used with the command line parser
+
+#[derive(ValueEnum, Debug, Copy, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
+#[allow(non_camel_case_types)]
+pub enum DefaultExperiment {
+    /// One dimension, cut gaussian spread, 2-norm-squared data fidelity
+    #[clap(name = "1d")]
+    Experiment1D,
+    /// One dimension, “fast” spread, 2-norm-squared data fidelity
+    #[clap(name = "1d_fast")]
+    Experiment1DFast,
+    /// Two dimensions, cut gaussian spread, 2-norm-squared data fidelity
+    #[clap(name = "2d")]
+    Experiment2D,
+    /// Two dimensions, “fast” spread, 2-norm-squared data fidelity
+    #[clap(name = "2d_fast")]
+    Experiment2DFast,
+    /// One dimension, cut gaussian spread, 1-norm data fidelity
+    #[clap(name = "1d_l1")]
+    Experiment1D_L1,
+    /// One dimension, “fast” spread, 1-norm data fidelity
+    #[clap(name = "1d_l1_fast")]
+    Experiment1D_L1_Fast,
+    /// Two dimensions, cut gaussian spread, 1-norm data fidelity
+    #[clap(name = "2d_l1")]
+    Experiment2D_L1,
+    /// Two dimensions, “fast” spread, 1-norm data fidelity
+    #[clap(name = "2d_l1_fast")]
+    Experiment2D_L1_Fast,
+}
+
+/// Define a zero-sized type `$name` usable as a compile-time floating point
+/// constant `$value` (e.g. for the kernel radii and variances below), while
+/// remaining serialisable as a plain float.
+macro_rules! make_float_constant {
+    ($name:ident = $value:expr) => {
+        /// Unit struct representing a named compile-time floating point constant.
+        #[derive(Debug, Copy, Eq, PartialEq, Clone, Serialize, Deserialize)]
+        #[serde(into = "float")]
+        struct $name;
+        // Implement `From` rather than `Into`: the standard library's blanket
+        // `impl<T, U : From<T>> Into<U> for T` then provides the `Into<float>`
+        // conversion required by `#[serde(into = "float")]` for free, and the
+        // Rust API guidelines recommend implementing `From` directly.
+        impl From<$name> for float {
+            #[inline]
+            fn from(_ : $name) -> float { $value }
+        }
+        impl Constant for $name {
+            type Type = float;
+            fn value(&self) -> float { $value }
+        }
+    }
+}
+
+/// Ground-truth measure spike locations and magnitudes for 1D experiments
+///
+/// Each entry is a `(position, magnitude)` pair; positions lie in the unit
+/// interval `[0, 1]` used as the domain of the 1D experiments below.
+static MU_TRUE_1D_BASIC : [(float, float); 4] = [
+    (0.10, 10.0),
+    (0.30, 2.0),
+    (0.70, 3.0),
+    (0.80, 5.0)
+];
+
+/// Ground-truth measure spike locations and magnitudes for 2D experiments
+///
+/// Each entry is a `([x, y], magnitude)` pair; positions lie in the unit
+/// square `[0, 1]²` used as the domain of the 2D experiments below.
+static MU_TRUE_2D_BASIC : [([float; 2], float); 4] = [
+    ([0.15, 0.15], 10.0),
+    ([0.75, 0.45], 2.0),
+    ([0.80, 0.50], 4.0),
+    ([0.30, 0.70], 5.0)
+];
+
+//#[replace_float_literals(F::cast_from(literal))]
+impl DefaultExperiment {
+    /// Convert the experiment shorthand into a runnable experiment configuration.
+    ///
+    /// Values supplied in `cli` (regularisation parameter α, noise distribution
+    /// parameters, and the noise seed) override the per-experiment defaults
+    /// hard-coded below.
+    pub fn get_experiment(&self, cli : &ExperimentOverrides<float>) -> DynResult<Box<dyn RunnableExperiment<float>>> {
+        // Experiment name: "pointsource" followed by the clap shorthand, e.g. "1d_fast".
+        let name = "pointsource".to_string()
+                                + self.to_possible_value().unwrap().get_name();
+
+        // Width of the kernel plot produced for visualisation.
+        let kernel_plot_width = 0.2;
+
+        const BASE_SEED : u64 = 915373234;
+
+        // Each sensor covers 0.4/N of the (unit) domain in each dimension.
+        const N_SENSORS_1D : usize = 100;
+        make_float_constant!(SensorWidth1D = 0.4/(N_SENSORS_1D as float));
+
+        const N_SENSORS_2D : usize = 16;
+        make_float_constant!(SensorWidth2D = 0.4/(N_SENSORS_2D as float));
+
+        // Spread kernel parameters: Gaussian variance, cut-off radius, and
+        // hat-convolution radius.
+        make_float_constant!(Variance1 = 0.05.powi(2));
+        make_float_constant!(CutOff1 = 0.15);
+        make_float_constant!(Hat1 = 0.16);
+
+        // We use a different step length for PDPS in 2D experiments
+        let pdps_2d = || {
+            let τ0 = 3.0;
+            PDPSConfig {
+                τ0,
+                σ0 : 0.99 / τ0,
+                .. Default::default()
+            }
+        };
+
+        //  We add a hash of the experiment name to the configured
+        // noise seed to not use the same noise for different experiments.
+        // Wrapping arithmetic: the hash spans the full u64 range, so a plain
+        // `+` could overflow and panic in debug builds.
+        let mut h = DefaultHasher::new();
+        name.hash(&mut h);
+        let noise_seed = cli.noise_seed.unwrap_or(BASE_SEED).wrapping_add(h.finish());
+
+        use DefaultExperiment::*;
+        Ok(match self {
+            Experiment1D => {
+                let base_spread = Gaussian { variance : Variance1 };
+                let spread_cutoff = BallIndicator { r : CutOff1, exponent : Linfinity };
+                Box::new(Named { name, data : Experiment {
+                    domain : [[0.0, 1.0]].into(),
+                    sensor_count : [N_SENSORS_1D],
+                    α : cli.alpha.unwrap_or(0.09),
+                    noise_distr : SerializableNormal::new(0.0, cli.variance.unwrap_or(0.2))?,
+                    dataterm : DataTerm::L2Squared,
+                    μ_hat : MU_TRUE_1D_BASIC.into(),
+                    sensor : BallIndicator { r : SensorWidth1D, exponent : Linfinity },
+                    spread : Prod(spread_cutoff, base_spread),
+                    kernel : Prod(AutoConvolution(spread_cutoff), base_spread),
+                    kernel_plot_width,
+                    noise_seed,
+                    algorithm_defaults: HashMap::new(),
+                }})
+            },
+            Experiment1DFast => {
+                let base_spread = HatConv { radius : Hat1 };
+                Box::new(Named { name, data : Experiment {
+                    domain : [[0.0, 1.0]].into(),
+                    sensor_count : [N_SENSORS_1D],
+                    α : cli.alpha.unwrap_or(0.06),
+                    noise_distr : SerializableNormal::new(0.0, cli.variance.unwrap_or(0.2))?,
+                    dataterm : DataTerm::L2Squared,
+                    μ_hat : MU_TRUE_1D_BASIC.into(),
+                    sensor : BallIndicator { r : SensorWidth1D, exponent : Linfinity },
+                    spread : base_spread,
+                    kernel : base_spread,
+                    kernel_plot_width,
+                    noise_seed,
+                    algorithm_defaults: HashMap::new(),
+                }})
+            },
+            Experiment2D => {
+                let base_spread = Gaussian { variance : Variance1 };
+                let spread_cutoff = BallIndicator { r : CutOff1, exponent : Linfinity };
+                Box::new(Named { name, data : Experiment {
+                    domain : [[0.0, 1.0]; 2].into(),
+                    sensor_count : [N_SENSORS_2D; 2],
+                    α : cli.alpha.unwrap_or(0.19), // 0.18, //0.17, //0.16,
+                    noise_distr : SerializableNormal::new(0.0, cli.variance.unwrap_or(0.25))?,
+                    dataterm : DataTerm::L2Squared,
+                    μ_hat : MU_TRUE_2D_BASIC.into(),
+                    sensor : BallIndicator { r : SensorWidth2D, exponent : Linfinity },
+                    spread : Prod(spread_cutoff, base_spread),
+                    kernel : Prod(AutoConvolution(spread_cutoff), base_spread),
+                    kernel_plot_width,
+                    noise_seed,
+                    algorithm_defaults: HashMap::from([
+                        (DefaultAlgorithm::PDPS, AlgorithmConfig::PDPS(pdps_2d()))
+                    ]),
+                }})
+            },
+            Experiment2DFast => {
+                let base_spread = HatConv { radius : Hat1 };
+                Box::new(Named { name, data : Experiment {
+                    domain : [[0.0, 1.0]; 2].into(),
+                    sensor_count : [N_SENSORS_2D; 2],
+                    α : cli.alpha.unwrap_or(0.12), //0.10, //0.14,
+                    noise_distr : SerializableNormal::new(0.0, cli.variance.unwrap_or(0.15))?, //0.25
+                    dataterm : DataTerm::L2Squared,
+                    μ_hat : MU_TRUE_2D_BASIC.into(),
+                    sensor : BallIndicator { r : SensorWidth2D, exponent : Linfinity },
+                    spread : base_spread,
+                    kernel : base_spread,
+                    kernel_plot_width,
+                    noise_seed,
+                    algorithm_defaults: HashMap::from([
+                        (DefaultAlgorithm::PDPS, AlgorithmConfig::PDPS(pdps_2d()))
+                    ]),
+                }})
+            },
+            Experiment1D_L1 => {
+                let base_spread = Gaussian { variance : Variance1 };
+                let spread_cutoff = BallIndicator { r : CutOff1, exponent : Linfinity };
+                Box::new(Named { name, data : Experiment {
+                    domain : [[0.0, 1.0]].into(),
+                    sensor_count : [N_SENSORS_1D],
+                    α : cli.alpha.unwrap_or(0.1),
+                    noise_distr : SaltAndPepper::new(
+                        cli.salt_and_pepper.as_ref().map_or(0.6, |v| v[0]),
+                        cli.salt_and_pepper.as_ref().map_or(0.4, |v| v[1])
+                    )?,
+                    dataterm : DataTerm::L1,
+                    μ_hat : MU_TRUE_1D_BASIC.into(),
+                    sensor : BallIndicator { r : SensorWidth1D, exponent : Linfinity },
+                    spread : Prod(spread_cutoff, base_spread),
+                    kernel : Prod(AutoConvolution(spread_cutoff), base_spread),
+                    kernel_plot_width,
+                    noise_seed,
+                    algorithm_defaults: HashMap::new(),
+                }})
+            },
+            Experiment1D_L1_Fast => {
+                let base_spread = HatConv { radius : Hat1 };
+                Box::new(Named { name, data : Experiment {
+                    domain : [[0.0, 1.0]].into(),
+                    sensor_count : [N_SENSORS_1D],
+                    α : cli.alpha.unwrap_or(0.12),
+                    noise_distr : SaltAndPepper::new(
+                        cli.salt_and_pepper.as_ref().map_or(0.6, |v| v[0]),
+                        cli.salt_and_pepper.as_ref().map_or(0.4, |v| v[1])
+                    )?,
+                    dataterm : DataTerm::L1,
+                    μ_hat : MU_TRUE_1D_BASIC.into(),
+                    sensor : BallIndicator { r : SensorWidth1D, exponent : Linfinity },
+                    spread : base_spread,
+                    kernel : base_spread,
+                    kernel_plot_width,
+                    noise_seed,
+                    algorithm_defaults: HashMap::new(),
+                }})
+            },
+            Experiment2D_L1 => {
+                let base_spread = Gaussian { variance : Variance1 };
+                let spread_cutoff = BallIndicator { r : CutOff1, exponent : Linfinity };
+                Box::new(Named { name, data : Experiment {
+                    domain : [[0.0, 1.0]; 2].into(),
+                    sensor_count : [N_SENSORS_2D; 2],
+                    α : cli.alpha.unwrap_or(0.35),
+                    noise_distr : SaltAndPepper::new(
+                        cli.salt_and_pepper.as_ref().map_or(0.8, |v| v[0]),
+                        cli.salt_and_pepper.as_ref().map_or(0.2, |v| v[1])
+                    )?,
+                    dataterm : DataTerm::L1,
+                    μ_hat : MU_TRUE_2D_BASIC.into(),
+                    sensor : BallIndicator { r : SensorWidth2D, exponent : Linfinity },
+                    spread : Prod(spread_cutoff, base_spread),
+                    kernel : Prod(AutoConvolution(spread_cutoff), base_spread),
+                    kernel_plot_width,
+                    noise_seed,
+                    algorithm_defaults: HashMap::from([
+                        (DefaultAlgorithm::PDPS, AlgorithmConfig::PDPS(pdps_2d()))
+                    ]),
+                }})
+            },
+            Experiment2D_L1_Fast => {
+                let base_spread = HatConv { radius : Hat1 };
+                Box::new(Named { name, data : Experiment {
+                    domain : [[0.0, 1.0]; 2].into(),
+                    sensor_count : [N_SENSORS_2D; 2],
+                    α : cli.alpha.unwrap_or(0.40),
+                    noise_distr : SaltAndPepper::new(
+                        cli.salt_and_pepper.as_ref().map_or(0.8, |v| v[0]),
+                        cli.salt_and_pepper.as_ref().map_or(0.2, |v| v[1])
+                    )?,
+                    dataterm : DataTerm::L1,
+                    μ_hat : MU_TRUE_2D_BASIC.into(),
+                    sensor : BallIndicator { r : SensorWidth2D, exponent : Linfinity },
+                    spread : base_spread,
+                    kernel : base_spread,
+                    kernel_plot_width,
+                    noise_seed,
+                    algorithm_defaults: HashMap::from([
+                        (DefaultAlgorithm::PDPS, AlgorithmConfig::PDPS(pdps_2d()))
+                    ]),
+                }})
+            },
+        })
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/fb.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,860 @@
+/*!
+Solver for the point source localisation problem using a forward-backward splitting method.
+
+This corresponds to the manuscript
+
+ * Valkonen T. - _Proximal methods for point source localisation_. ARXIV TO INSERT.
+
+The main routine is [`pointsource_fb`]. It is based on [`generic_pointsource_fb`], which is also
+used by our [primal-dual proximal splitting][crate::pdps] implementation.
+
+FISTA-type inertia can also be enabled through [`FBConfig::meta`].
+
+## Problem
+
+<p>
+Our objective is to solve
+$$
+    \min_{μ ∈ ℳ(Ω)}~ F_0(Aμ-b) + α \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ),
+$$
+where $F_0(y)=\frac{1}{2}\|y\|_2^2$ and the forward operator $A \in 𝕃(ℳ(Ω); ℝ^n)$.
+</p>
+
+## Approach
+
+<p>
+As documented in more detail in the paper, on each step we approximately solve
+$$
+    \min_{μ ∈ ℳ(Ω)}~ F(μ) + α \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ) + \frac{1}{2}\|μ-μ^k\|_𝒟^2,
+$$
+where $𝒟 ∈ 𝕃(ℳ(Ω); C_c(Ω))$ is typically a convolution operator.
+</p>
+
+## Finite-dimensional subproblems.
+
+With $C$ a projection from [`DiscreteMeasure`] to the weights, and $x^k$ such that $x^k=Cμ^k$, we
+form the discretised linearised inner problem
+<p>
+$$
+    \min_{x ∈ ℝ^n}~ τ\bigl(F(Cx^k) + [C^*∇F(Cx^k)]^⊤(x-x^k) + α {\vec 1}^⊤ x\bigr)
+                    + δ_{≥ 0}(x) + \frac{1}{2}\|x-x^k\|_{C^*𝒟C}^2,
+$$
+equivalently
+$$
+    \begin{aligned}
+    \min_x~ & τF(Cx^k) - τ[C^*∇F(Cx^k)]^⊤x^k + \frac{1}{2} (x^k)^⊤ C^*𝒟C x^k
+            \\
+            &
+            - [C^*𝒟C x^k - τC^*∇F(Cx^k)]^⊤ x
+            \\
+            &
+            + \frac{1}{2} x^⊤ C^*𝒟C x
+            + τα {\vec 1}^⊤ x + δ_{≥ 0}(x),
+    \end{aligned}
+$$
+In other words, we obtain the quadratic non-negativity constrained problem
+$$
+    \min_{x ∈ ℝ^n}~ \frac{1}{2} x^⊤ Ã x - g̃^⊤ x + c + τα {\vec 1}^⊤ x + δ_{≥ 0}(x).
+$$
+where
+$$
+   \begin{aligned}
+    Ã & = C^*𝒟C,
+    \\
+    g̃ & = C^*𝒟C x^k - τ C^*∇F(Cx^k)
+        = C^* 𝒟 μ^k - τ C^*A^*(Aμ^k - b)
+    \\
+    c & = τ F(Cx^k) - τ[C^*∇F(Cx^k)]^⊤x^k + \frac{1}{2} (x^k)^⊤ C^*𝒟C x^k
+        \\
+        &
+        = \frac{τ}{2} \|Aμ^k-b\|^2 - τ[Aμ^k-b]^⊤Aμ^k + \frac{1}{2} \|μ^k\|_{𝒟}^2
+        \\
+        &
+        = -\frac{τ}{2} \|Aμ^k-b\|^2 + τ[Aμ^k-b]^⊤ b + \frac{1}{2} \|μ^k\|_{𝒟}^2.
+   \end{aligned}
+$$
+</p>
+
+We solve this with either SSN or FB via [`quadratic_nonneg`] as determined by
+[`InnerSettings`] in [`FBGenericConfig::inner`].
+*/
+
+use numeric_literals::replace_float_literals;
+use std::cmp::Ordering::*;
+use serde::{Serialize, Deserialize};
+use colored::Colorize;
+use nalgebra::DVector;
+
+use alg_tools::iterate::{
+    AlgIteratorFactory,
+    AlgIteratorState,
+};
+use alg_tools::euclidean::Euclidean;
+use alg_tools::norms::Norm;
+use alg_tools::linops::Apply;
+use alg_tools::sets::Cube;
+use alg_tools::loc::Loc;
+use alg_tools::bisection_tree::{
+    BTFN,
+    PreBTFN,
+    Bounds,
+    BTNodeLookup,
+    BTNode,
+    BTSearch,
+    P2Minimise,
+    SupportGenerator,
+    LocalAnalysis,
+    Bounded,
+};
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+
+use crate::types::*;
+use crate::measures::{
+    DiscreteMeasure,
+    DeltaMeasure,
+    Radon
+};
+use crate::measures::merging::{
+    SpikeMergingMethod,
+    SpikeMerging,
+};
+use crate::forward_model::ForwardModel;
+use crate::seminorms::{
+    DiscreteMeasureOp, Lipschitz
+};
+use crate::subproblem::{
+    quadratic_nonneg,
+    InnerSettings,
+    InnerMethod,
+};
+use crate::tolerance::Tolerance;
+use crate::plot::{
+    SeqPlotter,
+    Plotting,
+    PlotLookup
+};
+
+/// Method for constructing $μ$ on each iteration
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[allow(dead_code)]
+pub enum InsertionStyle {
+    /// Reuse $μ$ from the previous iteration, optimising its weights
+    /// before inserting new spikes.
+    Reuse,
+    /// Start each iteration with $μ=0$.
+    Zero,
+}
+
+/// Meta-algorithm type
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[allow(dead_code)]
+pub enum FBMetaAlgorithm {
+    /// No meta-algorithm
+    None,
+    /// FISTA-style inertia
+    InertiaFISTA,
+}
+
+/// Ergodic tolerance application style
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[allow(dead_code)]
+pub enum ErgodicTolerance<F> {
+    /// Non-ergodic iteration-wise tolerance
+    NonErgodic,
+    /// Bound after `n`th iteration to `factor` times value on that iteration.
+    AfterNth{ n : usize, factor : F },
+}
+
+/// Settings for [`pointsource_fb`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct FBConfig<F : Float> {
+    /// Step length scaling
+    pub τ0 : F,
+    /// Meta-algorithm to apply
+    pub meta : FBMetaAlgorithm,
+    /// Generic parameters
+    pub insertion : FBGenericConfig<F>,
+}
+
+/// Settings for the solution of the stepwise optimality condition in algorithms based on
+/// [`generic_pointsource_fb`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct FBGenericConfig<F : Float> {
+    /// Method for constructing $μ$ on each iteration; see [`InsertionStyle`].
+    pub insertion_style : InsertionStyle,
+    /// Tolerance for point insertion.
+    pub tolerance : Tolerance<F>,
+    /// Stop looking for the predual maximum (where to insert a new point) below
+    /// `tolerance` multiplied by this factor.
+    pub insertion_cutoff_factor : F,
+    /// Apply tolerance ergodically
+    pub ergodic_tolerance : ErgodicTolerance<F>,
+    /// Settings for branch and bound refinement when looking for predual maxima
+    pub refinement : RefinementSettings<F>,
+    /// Maximum insertions within each outer iteration
+    pub max_insertions : usize,
+    /// Pair `(n, m)` for maximum insertions `m` on first `n` iterations.
+    pub bootstrap_insertions : Option<(usize, usize)>,
+    /// Inner method settings
+    pub inner : InnerSettings<F>,
+    /// Spike merging method
+    pub merging : SpikeMergingMethod<F>,
+    /// Tolerance multiplier for merges
+    pub merge_tolerance_mult : F,
+    /// Spike merging method after the last step
+    pub final_merging : SpikeMergingMethod<F>,
+    /// Iterations between merging heuristic tries
+    pub merge_every : usize,
+    /// Save $μ$ for postprocessing optimisation
+    pub postprocessing : bool
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for FBConfig<F> {
+    fn default() -> Self {
+        FBConfig {
+            τ0 : 0.99,
+            meta : FBMetaAlgorithm::None,
+            insertion : Default::default()
+        }
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for FBGenericConfig<F> {
+    fn default() -> Self {
+        FBGenericConfig {
+            insertion_style : InsertionStyle::Reuse,
+            tolerance : Default::default(),
+            insertion_cutoff_factor : 1.0,
+            ergodic_tolerance : ErgodicTolerance::NonErgodic,
+            refinement : Default::default(),
+            max_insertions : 100,
+            //bootstrap_insertions : None,
+            bootstrap_insertions : Some((10, 1)),
+            inner : InnerSettings {
+                method : InnerMethod::SSN,
+                .. Default::default()
+            },
+            merging : SpikeMergingMethod::None,
+            //merging : Default::default(),
+            final_merging : Default::default(),
+            merge_every : 10,
+            merge_tolerance_mult : 2.0,
+            postprocessing : false,
+        }
+    }
+}
+
+/// Trait for specialisation of [`generic_pointsource_fb`] to basic FB, FISTA.
+///
+/// The idea is that the residual $Aμ - b$ in the forward step can be replaced by an arbitrary
+/// value. For example, to implement [primal-dual proximal splitting][crate::pdps] we replace it
+/// with the dual variable $y$. We can then also implement alternative data terms, as the
+/// (pre)differential of $F(μ)=F\_0(Aμ-b)$ is $F\'(μ) = A\_*F\_0\'(Aμ-b)$. In the case of the
+/// quadratic fidelity $F_0(y)=\frac{1}{2}\\|y\\|_2^2$ in a Hilbert space, of course,
+/// $F\_0\'(Aμ-b)=Aμ-b$ is the residual.
+pub trait FBSpecialisation<F : Float, Observable : Euclidean<F>, const N : usize> : Sized {
+    /// Updates the residual and does any necessary pruning of `μ`.
+    ///
+    /// Returns the new residual and possibly a new step length.
+    ///
+    /// The measure `μ` may also be modified to apply, e.g., inertia to it.
+    /// The updated residual should correspond to the residual at `μ`.
+    /// See the [trait documentation][FBSpecialisation] for the use and meaning of the residual.
+    ///
+    /// The parameter `μ_base` is the base point of the iteration, typically the previous iterate,
+    /// but for, e.g., FISTA has inertia applied to it.
+    fn update(
+        &mut self,
+        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ_base : &DiscreteMeasure<Loc<F, N>, F>,
+    ) -> (Observable, Option<F>);
+
+    /// Calculates the data term value corresponding to iterate `μ` and available residual.
+    ///
+    /// Inertia and other modifications, as deemed, necessary, should be applied to `μ`.
+    ///
+    /// The blanket implementation corresponds to the 2-norm-squared data fidelity
+    /// $\\|\text{residual}\\|\_2^2/2$.
+    fn calculate_fit(
+        &self,
+        _μ : &DiscreteMeasure<Loc<F, N>, F>,
+        residual : &Observable
+    ) -> F {
+        residual.norm2_squared_div2()
+    }
+
+    /// Calculates the data term value at $μ$.
+    ///
+    /// Unlike [`Self::calculate_fit`], no inertia, etc., should be applied to `μ`.
+    fn calculate_fit_simple(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+    ) -> F;
+
+    /// Returns the final iterate after any necessary postprocess pruning, merging, etc.
+    fn postprocess(self, mut μ : DiscreteMeasure<Loc<F, N>, F>, merging : SpikeMergingMethod<F>)
+    -> DiscreteMeasure<Loc<F, N>, F>
+    where  DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
+        μ.merge_spikes_fitness(merging,
+                               |μ̃| self.calculate_fit_simple(μ̃),
+                               |&v| v);
+        μ.prune();
+        μ
+    }
+
+    /// Returns measure to be used for value calculations, which may differ from μ.
+    fn value_μ<'c, 'b : 'c>(&'b self, μ : &'c DiscreteMeasure<Loc<F, N>, F>)
+    -> &'c DiscreteMeasure<Loc<F, N>, F> {
+        μ
+    }
+}
+
+/// Specialisation of [`generic_pointsource_fb`] to basic μFB.
+struct BasicFB<
+    'a,
+    F : Float + ToNalgebraRealField,
+    A : ForwardModel<Loc<F, N>, F>,
+    const N : usize
+> {
+    /// The data
+    b : &'a A::Observable,
+    /// The forward operator
+    opA : &'a A,
+}
+
+/// Implementation of [`FBSpecialisation`] for basic μFB forward-backward splitting.
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float + ToNalgebraRealField , A : ForwardModel<Loc<F, N>, F>, const N : usize>
+FBSpecialisation<F, A::Observable, N> for BasicFB<'a, F, A, N> {
+    fn update(
+        &mut self,
+        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        _μ_base : &DiscreteMeasure<Loc<F, N>, F>
+    ) -> (A::Observable, Option<F>) {
+        μ.prune();
+        //*residual = self.opA.apply(μ) - self.b;
+        let mut residual = self.b.clone();
+        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
+        (residual, None)
+    }
+
+    fn calculate_fit_simple(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+    ) -> F {
+        let mut residual = self.b.clone();
+        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
+        residual.norm2_squared_div2()
+    }
+}
+
+/// Specialisation of [`generic_pointsource_fb`] to FISTA.
+struct FISTA<
+    'a,
+    F : Float + ToNalgebraRealField,
+    A : ForwardModel<Loc<F, N>, F>,
+    const N : usize
+> {
+    /// The data
+    b : &'a A::Observable,
+    /// The forward operator
+    opA : &'a A,
+    /// Current inertial parameter
+    λ : F,
+    /// Previous iterate without inertia applied.
+    /// We need to store this here because `μ_base` passed to [`FBSpecialisation::update`] will
+    /// have inertia applied to it, so is not useful to use.
+    μ_prev : DiscreteMeasure<Loc<F, N>, F>,
+}
+
+/// Implementation of [`FBSpecialisation`] for μFISTA inertial forward-backward splitting.
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float + ToNalgebraRealField, A : ForwardModel<Loc<F, N>, F>, const N : usize>
+FBSpecialisation<F, A::Observable, N> for FISTA<'a, F, A, N> {
+    fn update(
+        &mut self,
+        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        _μ_base : &DiscreteMeasure<Loc<F, N>, F>
+    ) -> (A::Observable, Option<F>) {
+        // Update inertial parameters
+        let λ_prev = self.λ;
+        self.λ = 2.0 * λ_prev / ( λ_prev + (4.0 + λ_prev * λ_prev).sqrt() );
+        let θ = self.λ / λ_prev - self.λ;
+        // Perform inertial update on μ.
+        // This computes μ ← (1 + θ) * μ - θ * μ_prev, pruning spikes where both μ
+        // and μ_prev have zero weight. Since both have weights from the finite-dimensional
+        // subproblem with a proximal projection step, this is likely to happen when the
+        // spike is not needed. A copy of the pruned μ without arithmetic performed is
+        // stored in μ_prev.
+        μ.pruning_sub(1.0 + θ, θ, &mut self.μ_prev);
+
+        //*residual = self.opA.apply(μ) - self.b;
+        let mut residual = self.b.clone();
+        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
+        (residual, None)
+    }
+
+    fn calculate_fit_simple(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+    ) -> F {
+        let mut residual = self.b.clone();
+        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
+        residual.norm2_squared_div2()
+    }
+
+    fn calculate_fit(
+        &self,
+        _μ : &DiscreteMeasure<Loc<F, N>, F>,
+        _residual : &A::Observable
+    ) -> F {
+        self.calculate_fit_simple(&self.μ_prev)
+    }
+
+    // For FISTA we need to do a final pruning as well, due to the limited
+    // pruning that can be done on each step.
+    fn postprocess(mut self, μ_base : DiscreteMeasure<Loc<F, N>, F>, merging : SpikeMergingMethod<F>)
+    -> DiscreteMeasure<Loc<F, N>, F>
+    where  DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
+        let mut μ = self.μ_prev;
+        self.μ_prev = μ_base;
+        μ.merge_spikes_fitness(merging,
+                               |μ̃| self.calculate_fit_simple(μ̃),
+                               |&v| v);
+        μ.prune();
+        μ
+    }
+
+    fn value_μ<'c, 'b : 'c>(&'c self, _μ : &'c DiscreteMeasure<Loc<F, N>, F>)
+    -> &'c DiscreteMeasure<Loc<F, N>, F> {
+        &self.μ_prev
+    }
+}
+
+/// Iteratively solve the pointsource localisation problem using forward-backward splitting
+///
+/// The settings in `config` have their [respective documentation](FBConfig). `opA` is the
+/// forward operator $A$, $b$ the observable, and $\lambda$ the regularisation weight.
+/// The operator `op𝒟` is used for forming the proximal term. Typically it is a convolution
+/// operator. Finally, the `iterator` is an outer loop verbosity and iteration count control
+/// as documented in [`alg_tools::iterate`].
+///
+/// For details on the mathematical formulation, see the [module level](self) documentation.
+///
+/// Returns the final iterate.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_fb<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, const N : usize>(
+    opA : &'a A,
+    b : &A::Observable,
+    α : F,
+    op𝒟 : &'a 𝒟,
+    config : &FBConfig<F>,
+    iterator : I,
+    plotter : SeqPlotter<F, N>
+) -> DiscreteMeasure<Loc<F, N>, F>
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<IterInfo<F, N>>,
+      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
+                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
+      A::Observable : std::ops::MulAssign<F>,
+      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
+          + Lipschitz<𝒟, FloatType=F>,
+      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
+      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
+      𝒟::Codomain : RealMapping<F, N>,
+      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+      PlotLookup : Plotting<N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
+
+    let initial_residual = -b;
+    let τ = config.τ0/opA.lipschitz_factor(&op𝒟).unwrap();
+
+    match config.meta {
+        FBMetaAlgorithm::None => generic_pointsource_fb(
+            opA, α, op𝒟, τ, &config.insertion, iterator, plotter, initial_residual,
+            BasicFB{ b, opA }
+        ),
+        FBMetaAlgorithm::InertiaFISTA => generic_pointsource_fb(
+            opA, α, op𝒟, τ, &config.insertion, iterator, plotter, initial_residual,
+            FISTA{ b, opA, λ : 1.0, μ_prev : DiscreteMeasure::new() }
+        ),
+    }
+}
+
+/// Generic implementation of [`pointsource_fb`].
+///
+/// The method can be specialised to even primal-dual proximal splitting through the
+/// [`FBSpecialisation`] parameter `specialisation`.
+/// The settings in `config` have their [respective documentation](FBGenericConfig). `opA` is the
+/// forward operator $A$, $b$ the observable, and $\lambda$ the regularisation weight.
+/// The operator `op𝒟` is used for forming the proximal term. Typically it is a convolution
+/// operator. Finally, the `iterator` is an outer loop verbosity and iteration count control
+/// as documented in [`alg_tools::iterate`].
+///
+/// The implementation relies on [`alg_tools::bisection_tree::BTFN`] presentations of
+/// sums of simple functions using bisection trees, and the related
+/// [`alg_tools::bisection_tree::Aggregator`]s, to efficiently search for component functions
+/// active at specific points, and to maximise their sums. Through the implementation of the
+/// [`alg_tools::bisection_tree::BT`] bisection trees, it also relies on the copy-on-write features
+/// of [`std::sync::Arc`] to only update relevant parts of the bisection tree when adding functions.
+///
+/// Returns the final iterate.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn generic_pointsource_fb<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, Spec, const N : usize>(
+    opA : &'a A,
+    α : F,
+    op𝒟 : &'a 𝒟,
+    mut τ : F,
+    config : &FBGenericConfig<F>,
+    iterator : I,
+    mut plotter : SeqPlotter<F, N>,
+    mut residual : A::Observable,
+    mut specialisation : Spec,
+) -> DiscreteMeasure<Loc<F, N>, F>
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<IterInfo<F, N>>,
+      Spec : FBSpecialisation<F, A::Observable, N>,
+      A::Observable : std::ops::MulAssign<F>,
+      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
+          + Lipschitz<𝒟, FloatType=F>,
+      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
+      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
+      𝒟::Codomain : RealMapping<F, N>,
+      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+      PlotLookup : Plotting<N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
+
+    // Set up parameters
+    let quiet = iterator.is_quiet();
+    let op𝒟norm = op𝒟.opnorm_bound();
+    // We multiply tolerance by τ for FB since
+    // our subproblems depending on tolerances are scaled by τ compared to the conditional
+    // gradient approach.
+    let mut tolerance = config.tolerance * τ * α;
+    let mut ε = tolerance.initial();
+
+    // Initialise operators
+    let preadjA = opA.preadjoint();
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+
+    let mut after_nth_bound = F::INFINITY;
+    // FIXME: Don't allocate if not needed.
+    let mut after_nth_accum = opA.zero_observable();
+
+    let mut inner_iters = 0;
+    let mut this_iters = 0;
+    let mut pruned = 0;
+    let mut merged = 0;
+
+    let μ_diff = |μ_new : &DiscreteMeasure<Loc<F, N>, F>,
+                  μ_base : &DiscreteMeasure<Loc<F, N>, F>| {
+        let mut ν : DiscreteMeasure<Loc<F, N>, F> = match config.insertion_style {
+            InsertionStyle::Reuse => {
+                μ_new.iter_spikes()
+                        .zip(μ_base.iter_masses().chain(std::iter::repeat(0.0)))
+                        .map(|(δ, α_base)| (δ.x, α_base - δ.α))
+                        .collect()
+            },
+            InsertionStyle::Zero => {
+                μ_new.iter_spikes()
+                        .map(|δ| -δ)
+                        .chain(μ_base.iter_spikes().copied())
+                        .collect()
+            }
+        };
+        ν.prune(); // Potential small performance improvement
+        ν
+    };
+
+    // Run the algorithm
+    iterator.iterate(|state| {
+        // Calculate subproblem tolerances, and update main tolerance for next iteration
+        let τα = τ * α;
+        // if μ.len() == 0 /*state.iteration() == 1*/ {
+        //     let t = minus_τv.bounds().upper() * 0.001;
+        //     if t > 0.0 {
+        //         let (ξ, v_ξ) = minus_τv.maximise(t, config.refinement.max_steps);
+        //         if τα + ε > v_ξ && v_ξ > τα {
+        //             // The zero measure is already within bounds, so improve them
+        //             tolerance = config.tolerance * (v_ξ - τα);
+        //             ε = tolerance.initial();
+        //         }
+        //         μ += DeltaMeasure { x : ξ, α : 0.0 };
+        //     } else {
+        //         // Zero is the solution.
+        //         return Step::Terminated
+        //     }
+        // }
+        let target_bounds = Bounds(τα - ε,  τα + ε);
+        let merge_tolerance = config.merge_tolerance_mult * ε;
+        let merge_target_bounds = Bounds(τα - merge_tolerance,  τα + merge_tolerance);
+        let inner_tolerance = ε * config.inner.tolerance_mult;
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+        let maximise_above = τα + ε * config.insertion_cutoff_factor;
+        let mut ε1 = ε;
+        let ε_prev = ε;
+        ε = tolerance.update(ε, state.iteration());
+
+        // Maximum insertion count and measure difference calculation depend on insertion style.
+        let (m, warn_insertions) = match (state.iteration(), config.bootstrap_insertions) {
+            (i, Some((l, k))) if i <= l => (k, false),
+            _ => (config.max_insertions, !quiet),
+        };
+        let max_insertions = match config.insertion_style {
+            InsertionStyle::Zero => {
+                todo!("InsertionStyle::Zero does not currently work with FISTA, so diabled.");
+                // let n = μ.len();
+                // μ = DiscreteMeasure::new();
+                // n + m
+            },
+            InsertionStyle::Reuse => m,
+        };
+
+        // Calculate smooth part of surrogate model.
+        residual *= -τ;
+        if let ErgodicTolerance::AfterNth{ .. } = config.ergodic_tolerance {
+            // Negative residual times τ expected here, as set above.
+            // TODO: is this the correct location?
+            after_nth_accum += &residual;
+        }
+        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
+        // has no significant overhead. For some reason Rust doesn't allow us simply moving
+        // the residual and replacing it below before the end of this closure.
+        let r = std::mem::replace(&mut residual, opA.empty_observable());
+        let minus_τv = preadjA.apply(r);     // minus_τv = -τA^*(Aμ^k-b)
+        // TODO: should avoid a second copy of μ here; μ_base already stores a copy.
+        let ω0 = op𝒟.apply(μ.clone());       // 𝒟μ^k
+        //let g = &minus_τv + ω0;            // Linear term of surrogate model
+
+        // Save current base point
+        let μ_base = μ.clone();
+            
+        // Add points to support until within error tolerance or maximum insertion count reached.
+        let mut count = 0;
+        let (within_tolerances, d) = 'insertion: loop {
+            if μ.len() > 0 {
+                // Form finite-dimensional subproblem. The subproblem references to the original μ^k
+                // from the beginning of the iteration are all contained in the immutable c and g.
+                let à = op𝒟.findim_matrix(μ.iter_locations());
+                let g̃ = DVector::from_iterator(μ.len(),
+                                               μ.iter_locations()
+                                                .map(|ζ| minus_τv.apply(ζ) + ω0.apply(ζ))
+                                                .map(F::to_nalgebra_mixed));
+                let mut x = μ.masses_dvector();
+
+                // The gradient of the forward component of the inner objective is C^*𝒟Cx - g̃.
+                // We have |C^*𝒟Cx|_2 = sup_{|z|_2 ≤ 1} ⟨z, C^*𝒟Cx⟩ = sup_{|z|_2 ≤ 1} ⟨Cz|𝒟Cx⟩
+                // ≤ sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟Cx|_∞ ≤  sup_{|z|_2 ≤ 1} |Cz|_ℳ |𝒟| |Cx|_ℳ
+                // ≤ sup_{|z|_2 ≤ 1} |z|_1 |𝒟| |x|_1 ≤ sup_{|z|_2 ≤ 1} n |z|_2 |𝒟| |x|_2
+                // = n |𝒟| |x|_2, where n is the number of points. Therefore
+                let inner_τ = config.inner.τ0 / (op𝒟norm * F::cast_from(μ.len()));
+
+                // Solve finite-dimensional subproblem.
+                let inner_it = config.inner.iterator_options.stop_target(inner_tolerance);
+                inner_iters += quadratic_nonneg(config.inner.method, &Ã, &g̃, τ*α, &mut x,
+                                                inner_τ, inner_it);
+
+                // Update masses of μ based on solution of finite-dimensional subproblem.
+                μ.set_masses_dvector(&x);
+            }
+
+            // Form d = ω0 - τv - 𝒟μ = -𝒟(μ - μ^k) - τv for checking the proximate optimality
+            // conditions in the predual space, and finding new points for insertion, if necessary.
+            let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ, &μ_base));
+
+            // If no merging heuristic is used, let's be more conservative about spike insertion,
+            // and skip it after first round. If merging is done, being more greedy about spike
+            // insertion also seems to improve performance.
+            let may_break = if let SpikeMergingMethod::None = config.merging {
+                false
+            } else {
+                count > 0
+            };
+
+            // First do a rough check whether we are within bounds and can stop.
+            let in_bounds = match config.ergodic_tolerance {
+                ErgodicTolerance::NonErgodic => {
+                    target_bounds.superset(&d.bounds())
+                },
+                ErgodicTolerance::AfterNth{ n, factor } => {
+                    // Bound -τ∑_{k=0}^{N-1}[A_*(Aμ^k-b)+α] from above.
+                    match state.iteration().cmp(&n) {
+                        Less => true,
+                        Equal => {
+                            let iter = F::cast_from(state.iteration());
+                            let mut tmp = preadjA.apply(&after_nth_accum);
+                            let (_, v0) = tmp.maximise(refinement_tolerance,
+                                                    config.refinement.max_steps);
+                            let v = v0 - iter * τ * α;
+                            after_nth_bound = factor * v;
+                            println!("{}", format!("Set ergodic tolerance to {}", after_nth_bound));
+                            true
+                        },
+                        Greater => {
+                            // TODO: can divide after_nth_accum by N, so use basic tolerance on that.
+                            let iter = F::cast_from(state.iteration());
+                            let mut tmp = preadjA.apply(&after_nth_accum);
+                            tmp.has_upper_bound(after_nth_bound + iter * τ * α,
+                                                refinement_tolerance,
+                                                config.refinement.max_steps)
+                        }
+                    }
+                }
+            };
+
+            // If the preliminary check indicates that we are in bounds, and if it otherwise matches
+            // the insertion strategy, skip insertion.
+            if may_break && in_bounds {
+                break 'insertion (true, d)
+            }
+
+            // If the rough check didn't indicate stopping, find maximising point, maintaining for
+            // the calculations in the beginning of the loop that v_ξ = (ω0-τv-𝒟μ)(ξ) = d(ξ),
+            // where 𝒟μ is now distinct from μ0 after the insertions already performed.
+            // We do not need to check lower bounds, as a solution of the finite-dimensional
+            // subproblem should always satisfy them.
+
+            // // Find the minimum over the support of μ.
+            // let d_min_supp = d_max;μ.iter_spikes().filter_map(|&DeltaMeasure{ α, ref x }| {
+            //    (α != F::ZERO).then(|| d.value(x))
+            // }).reduce(F::min).unwrap_or(0.0);
+
+            let (ξ, v_ξ) = if false /* μ.len() == 0*/ /*count == 0 &&*/ {
+                // If μ has no spikes, just find the maximum of d. Then adjust the tolerance, if
+                // necessary, to adapt it to the problem.
+                let (ξ, v_ξ) = d.maximise(refinement_tolerance, config.refinement.max_steps);
+                //dbg!((τα, v_ξ, target_bounds.upper(), maximise_above));
+                if τα < v_ξ  && v_ξ < target_bounds.upper() {
+                    ε1 = v_ξ - τα;
+                    ε *= ε1 / ε_prev;
+                    tolerance *= ε1 / ε_prev;
+                }
+                (ξ, v_ξ)
+            } else {
+                // If μ has some spikes, only find a maximum of d if it is above a threshold
+                // defined by the refinement tolerance.
+                match d.maximise_above(maximise_above, refinement_tolerance,
+                                    config.refinement.max_steps) {
+                    None => break 'insertion (true, d),
+                    Some(res) => res,
+                }
+            };
+
+            // // Do a one final check whether we can stop already without inserting more points
+            // // because `d` actually in bounds based on a more refined estimate.
+            // if may_break && target_bounds.upper() >= v_ξ {
+            //     break (true, d)
+            // }
+
+            // Break if maximum insertion count reached
+            if count >= max_insertions {
+                let in_bounds2 = target_bounds.upper() >= v_ξ;
+                break 'insertion (in_bounds2, d)
+            }
+
+            // No point in optimising the weight here; the finite-dimensional algorithm is fast.
+            μ += DeltaMeasure { x : ξ, α : 0.0 };
+            count += 1;
+        };
+
+        if !within_tolerances && warn_insertions {
+            // Complain (but continue) if we failed to get within tolerances
+            // by inserting more points.
+            let err = format!("Maximum insertions reached without achieving \
+                                subproblem solution tolerance");
+            println!("{}", err.red());
+        }
+
+        // Merge spikes
+        if state.iteration() % config.merge_every == 0 {
+            let n_before_merge = μ.len();
+            μ.merge_spikes(config.merging, |μ_candidate| {
+                //println!("Merge attempt!");
+                let mut d = &minus_τv + op𝒟.preapply(μ_diff(&μ_candidate, &μ_base));
+
+                if merge_target_bounds.superset(&d.bounds()) {
+                    //println!("…Early Ok");
+                    return Some(())
+                }
+
+                let d_min_supp = μ_candidate.iter_spikes().filter_map(|&DeltaMeasure{ α, ref x }| {
+                    (α != 0.0).then(|| d.apply(x))
+                }).reduce(F::min);
+
+                if d_min_supp.map_or(true, |b| b >= merge_target_bounds.lower()) &&
+                d.has_upper_bound(merge_target_bounds.upper(), refinement_tolerance,
+                                    config.refinement.max_steps) {
+                    //println!("…Ok");
+                    Some(())
+                } else {
+                    //println!("…Fail");
+                    None
+                }
+            });
+            debug_assert!(μ.len() >= n_before_merge);
+            merged += μ.len() - n_before_merge;
+        }
+
+        let n_before_prune = μ.len();
+        (residual, τ) = match specialisation.update(&mut μ, &μ_base) {
+            (r, None) => (r, τ),
+            (r, Some(new_τ)) => (r, new_τ)
+        };
+        debug_assert!(μ.len() <= n_before_prune);
+        pruned += n_before_prune - μ.len();
+
+        this_iters += 1;
+
+        // Give function value if needed
+        state.if_verbose(|| {
+            let value_μ = specialisation.value_μ(&μ);
+            // Plot if so requested
+            plotter.plot_spikes(
+                format!("iter {} end; {}", state.iteration(), within_tolerances), &d,
+                "start".to_string(), Some(&minus_τv),
+                Some(target_bounds), value_μ,
+            );
+            // Calculate mean inner iterations and reset relevant counters
+            // Return the statistics
+            let res = IterInfo {
+                value : specialisation.calculate_fit(&μ, &residual) + α * value_μ.norm(Radon),
+                n_spikes : value_μ.len(),
+                inner_iters,
+                this_iters,
+                merged,
+                pruned,
+                ε : ε_prev,
+                maybe_ε1 : Some(ε1),
+                postprocessing: config.postprocessing.then(|| value_μ.clone()),
+            };
+            inner_iters = 0;
+            this_iters = 0;
+            merged = 0;
+            pruned = 0;
+            res
+        })
+    });
+
+    specialisation.postprocess(μ, config.final_merging)
+}
+
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/forward_model.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,678 @@
+/*!
+Forward models from discrete measures to observations.
+*/
+
+use numeric_literals::replace_float_literals;
+use nalgebra::base::{
+    DMatrix,
+    DVector
+};
+use std::iter::Zip;
+use std::ops::RangeFrom;
+use std::marker::PhantomData;
+
+pub use alg_tools::linops::*;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::norms::{
+    L1, Linfinity, Norm
+};
+use alg_tools::bisection_tree::*;
+use alg_tools::mapping::RealMapping;
+use alg_tools::lingrid::*;
+use alg_tools::iter::{MapX, Mappable};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::tabledump::write_csv;
+use alg_tools::error::DynError;
+
+use crate::types::*;
+use crate::measures::*;
+use crate::seminorms::{
+    Lipschitz,
+    ConvolutionOp,
+    SimpleConvolutionKernel,
+};
+use crate::kernels::{
+    Convolution,
+    AutoConvolution,
+    BoundedBy,
+};
+
+/// Shorthand for a discrete measure (a finite sum of weighted Diracs) on ℝ^N
+/// with `F`-valued weights and locations.
+pub type RNDM<F, const N : usize> = DiscreteMeasure<Loc<F,N>, F>;
+
+/// `ForwardModel`s are bounded preadjointable linear operators  $A ∈ 𝕃(𝒵(Ω); E)$
+/// where $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by
+/// [`DiscreteMeasure`], and $E$ is a [`Euclidean`] space.
+pub trait ForwardModel<Domain, F : Float + ToNalgebraRealField>
+: BoundedLinear<DiscreteMeasure<Domain, F>, Codomain=Self::Observable, FloatType=F>
++ GEMV<F, DiscreteMeasure<Domain, F>, Self::Observable>
++ Linear<DeltaMeasure<Domain, F>, Codomain=Self::Observable>
++ Preadjointable<DiscreteMeasure<Domain, F>, Self::Observable> {
+    /// The codomain or value space (of “observables”) for this operator.
+    /// It is assumed to be a [`Euclidean`] space, and therefore also (identified with)
+    /// the domain of the preadjoint.
+    type Observable : Euclidean<F, Output=Self::Observable>
+                      + AXPY<F>
+                      + Clone;
+
+    /// Return A_*A and A_* b, i.e., the Gram matrix and preadjoint data for the
+    /// finite-dimensional quadratic subproblem over the spike locations of `μ`.
+    fn findim_quadratic_model(
+        &self,
+        μ : &DiscreteMeasure<Domain, F>,
+        b : &Self::Observable
+    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>);
+
+    /// Write an observable into a file with the given name `prefix`.
+    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError;
+
+    /// Returns a zero observable
+    fn zero_observable(&self) -> Self::Observable;
+
+    /// Returns an empty (uninitialised) observable.
+    ///
+    /// This is used as a placeholder for temporary [`std::mem::replace`] move operations.
+    fn empty_observable(&self) -> Self::Observable;
+}
+
+/// A base sensor–spread convolution translated to a grid position.
+pub type ShiftedSensor<F, S, P, const N : usize> = Shift<Convolution<S, P>, F, N>;
+
+/// Trait for physical convolution models. Has blanket implementation for all cases.
+pub trait Spread<F : Float, const N : usize>
+: 'static + Clone + Support<F, N> + RealMapping<F, N> + Bounded<F> {}
+
+impl<F, T, const N : usize> Spread<F, N> for T
+where F : Float,
+      T : 'static + Clone + Support<F, N> + Bounded<F> + RealMapping<F, N> {}
+
+/// Trait for compactly supported sensors. Has blanket implementation for all cases.
+pub trait Sensor<F : Float, const N : usize> : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}
+
+impl<F, T, const N : usize> Sensor<F, N> for T
+where F : Float,
+      T : Spread<F, N> + Norm<F, L1> + Norm<F, Linfinity> {}
+
+
+/// Requirements on the bisection tree used by [`SensorGrid`]: node data are sensor
+/// indices (`usize`) and aggregates are value [`Bounds`]. Has a blanket implementation.
+pub trait SensorGridBT<F, S, P, const N : usize> :
+Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N> {}
+
+impl<F, S, P, T, const N : usize>
+SensorGridBT<F, S, P, N>
+for T
+where T : Clone + BTImpl<F, N, Data=usize, Agg=Bounds<F>>,
+      F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N> {}
+
+// We need type alias bounds to access associated types
+#[allow(type_alias_bounds)]
+type SensorGridBTFN<F, S, P, BT : SensorGridBT<F, S, P, N>, const N : usize>
+= BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
+
+/// Sensor grid forward model
+#[derive(Clone)]
+pub struct SensorGrid<F, S, P, BT, const N : usize>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      BT : SensorGridBT<F, S, P, N>, {
+    // Spatial domain over which the sensors are laid out.
+    domain : Cube<F, N>,
+    // Number of sensors along each of the N dimensions.
+    sensor_count : [usize; N],
+    // The sensor kernel S.
+    sensor : S,
+    // The physical spread kernel P.
+    spread : P,
+    // Precomputed convolution of `sensor` and `spread`; shifted per grid point.
+    base_sensor : Convolution<S, P>,
+    // Bisection tree indexing the supports of the shifted sensors for fast lookup.
+    bt : BT,
+}
+
+impl<F, S, P, BT, const N : usize> SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+
+    /// Construct a sensor grid on `domain` with `sensor_count` sensors per
+    /// dimension, inserting every shifted sensor into a fresh bisection tree
+    /// of the given `depth`.
+    pub fn new(
+        domain : Cube<F, N>,
+        sensor_count : [usize; N],
+        sensor : S,
+        spread : P,
+        depth : BT::Depth
+    ) -> Self {
+        let base_sensor = Convolution(sensor.clone(), spread.clone());
+        let bt = BT::new(domain, depth);
+        let mut sensorgrid = SensorGrid {
+            domain,
+            sensor_count,
+            sensor,
+            spread,
+            base_sensor,
+            bt,
+        };
+
+        // Register each shifted sensor in the bisection tree under its linear index.
+        for (x, id) in sensorgrid.grid().into_iter().zip(0usize..) {
+            let s = sensorgrid.shifted_sensor(x);
+            sensorgrid.bt.insert(id, &s);
+        }
+
+        sensorgrid
+    }
+
+    /// Return the grid of sensor centres.
+    pub fn grid(&self) -> LinGrid<F, N> {
+        lingrid_centered(&self.domain, &self.sensor_count)
+    }
+
+    /// Total number of sensors (product over all dimensions).
+    pub fn n_sensors(&self) -> usize {
+        self.sensor_count.iter().product()
+    }
+
+    /// The base sensor shifted to position `x`.
+    #[inline]
+    fn shifted_sensor(&self, x : Loc<F, N>) -> ShiftedSensor<F, S, P, N> {
+        self.base_sensor.clone().shift(x)
+    }
+
+    /// A zero vector of observable (sensor-count) dimension.
+    #[inline]
+    fn _zero_observable(&self) -> DVector<F> {
+        DVector::zeros(self.n_sensors())
+    }
+}
+
+/// By-value application of the forward operator to a discrete measure;
+/// delegates to the by-reference implementation below.
+impl<F, S, P, BT, const N : usize> Apply<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output =  DVector<F>;
+
+    #[inline]
+    fn apply(&self, μ : RNDM<F, N>) -> DVector<F> {
+        self.apply(&μ)
+    }
+}
+
+/// Evaluation of $Aμ$: accumulate each spike's contribution into a zero observable.
+impl<'a, F, S, P, BT, const N : usize> Apply<&'a RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output =  DVector<F>;
+
+    fn apply(&self, μ : &'a RNDM<F, N>) ->  DVector<F> {
+        let mut res = self._zero_observable();
+        self.apply_add(&mut res, μ);
+        res
+    }
+}
+
+/// Marks the forward operator as linear on discrete measures.
+impl<F, S, P, BT, const N : usize> Linear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+    type Codomain = DVector<F>;
+}
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, S, P, BT, const N : usize> GEMV<F, RNDM<F, N>, DVector<F>> for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+
+    /// Compute `y ← αAμ + βy`, special-casing `β ∈ {0, 1}` and `α = 1`.
+    fn gemv(&self, y : &mut DVector<F>, α : F, μ : &RNDM<F, N>, β : F) {
+        let grid = self.grid();
+        if β == 0.0 {
+            y.fill(0.0)
+        } else if β != 1.0 {
+            *y *= β; // Need to multiply first, as we have to be able to add to y.
+        }
+        if α == 1.0 {
+            self.apply_add(y, μ)
+        } else {
+            // General case: for each spike, add its weighted sensor values to the
+            // observable entries of all sensors whose support contains the spike.
+            for δ in μ.iter_spikes() {
+                for &d in self.bt.iter_at(&δ.x) {
+                    let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                    y[d] += sensor.apply(&δ.x) * (α * δ.α);
+                }
+            }
+        }
+    }
+
+    /// Compute `y ← Aμ + y`. The bisection tree restricts the inner loop to
+    /// sensors whose support contains the spike location.
+    fn apply_add(&self, y : &mut DVector<F>, μ : &RNDM<F, N>) {
+        let grid = self.grid();
+        for δ in μ.iter_spikes() {
+            for &d in self.bt.iter_at(&δ.x) {
+                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                y[d] += sensor.apply(&δ.x) * δ.α;
+            }
+        }
+    }
+
+}
+
+/// By-value application of the forward operator to a single delta measure;
+/// delegates to the by-reference implementation below.
+impl<F, S, P, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>>
+for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output =  DVector<F>;
+
+    #[inline]
+    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> DVector<F> {
+        self.apply(&δ)
+    }
+}
+
+/// Evaluation of $Aδ$ for a single spike: only sensors whose support contains
+/// the spike location (found via the bisection tree) receive contributions.
+impl<'a, F, S, P, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>>
+for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output =  DVector<F>;
+
+    fn apply(&self, δ : &DeltaMeasure<Loc<F, N>, F>) -> DVector<F> {
+        let mut res = DVector::zeros(self.n_sensors());
+        let grid = self.grid();
+        for &d in self.bt.iter_at(&δ.x) {
+            let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+            res[d] += sensor.apply(&δ.x) * δ.α;
+        }
+        res
+    }
+}
+
+/// Marks the forward operator as linear on single delta measures.
+impl<F, S, P, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>> for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+    type Codomain = DVector<F>;
+}
+
+impl<F, S, P, BT, const N : usize> BoundedLinear<RNDM<F, N>> for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N, Agg=Bounds<F>>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N> {
+    type FloatType = F;
+
+    /// An estimate on the operator norm in $𝕃(ℳ(Ω); ℝ^n)$ with $ℳ(Ω)$ equipped
+    /// with the Radon norm, and $ℝ^n$ with the Euclidean norm.
+    fn opnorm_bound(&self) -> F {
+        // With {x_i}_{i=1}^n the grid centres and φ the kernel, we have
+        // |Aμ|_2 = sup_{|z|_2 ≤ 1} ⟨z,Αμ⟩ = sup_{|z|_2 ≤ 1} ⟨A^*z|μ⟩
+        // ≤ sup_{|z|_2 ≤ 1} |A^*z|_∞ |μ|_ℳ
+        // = sup_{|z|_2 ≤ 1} |∑ φ(· - x_i)z_i|_∞ |μ|_ℳ
+        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ ∑ |z_i| |μ|_ℳ
+        // ≤ sup_{|z|_2 ≤ 1} |φ|_∞ √n |z|_2 |μ|_ℳ
+        // = |φ|_∞ √n |μ|_ℳ.
+        // Hence
+        let n = F::cast_from(self.n_sensors());
+        self.base_sensor.bounds().uniform() * n.sqrt()
+    }
+}
+
+/// Preadjoint of a sensor grid forward operator `A`, acting on discrete measures.
+type SensorGridPreadjoint<'a, A, F, const N : usize> = PreadjointHelper<'a, A, RNDM<F,N>>;
+
+
+/// The preadjoint $A^*$ maps observables to [`BTFN`]s (bisection-tree-backed
+/// functions) generated by weighted shifted sensors; see [`PreadjointHelper`].
+impl<F, S, P, BT, const N : usize>
+Preadjointable<RNDM<F, N>, DVector<F>>
+for SensorGrid<F, S, P, BT, N>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+    type PreadjointCodomain = BTFN<F, SensorGridSupportGenerator<F, S, P, N>, BT, N>;
+    type Preadjoint<'a> = SensorGridPreadjoint<'a, Self, F, N> where Self : 'a;
+
+    fn preadjoint(&self) -> Self::Preadjoint<'_> {
+        PreadjointHelper::new(self)
+    }
+}
+
+/// Generator of the supports (weighted shifted sensors) that make up the
+/// preadjoint value $A^*z$ for an observable with the given `weights`.
+#[derive(Clone,Debug)]
+pub struct SensorGridSupportGenerator<F, S, P, const N : usize>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N> {
+    // The sensor–spread convolution that gets shifted and weighted per sensor.
+    base_sensor : Convolution<S, P>,
+    // Grid of sensor centres; maps linear sensor ids to positions.
+    grid : LinGrid<F, N>,
+    // One weight per sensor (the observable/dual vector entries).
+    weights : DVector<F>
+}
+
+impl<F, S, P, const N : usize> SensorGridSupportGenerator<F, S, P, N>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> {
+
+    /// The base sensor shifted to the centre of sensor `id` and weighted by `w`.
+    #[inline]
+    fn construct_sensor(&self, id : usize, w : F) -> Weighted<ShiftedSensor<F, S, P, N>, F> {
+        let x = self.grid.entry_linear_unchecked(id);
+        self.base_sensor.clone().shift(x).weigh(w)
+    }
+
+    /// Tuple variant of [`Self::construct_sensor`] for use with iterator mapping.
+    #[inline]
+    fn construct_sensor_and_id<'a>(&'a self, (id, w) : (usize, &'a F))
+    -> (usize, Weighted<ShiftedSensor<F, S, P, N>, F>) {
+        (id.into(), self.construct_sensor(id, *w))
+    }
+}
+
+impl<F, S, P, const N : usize> SupportGenerator<F, N>
+for SensorGridSupportGenerator<F, S, P, N>
+where F : Float,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N> {
+    type Id = usize;
+    type SupportType = Weighted<ShiftedSensor<F, S, P, N>, F>;
+    type AllDataIter<'a> = MapX<'a, Zip<RangeFrom<usize>,
+                                        std::slice::Iter<'a, F>>,
+                                Self,
+                                (Self::Id, Self::SupportType)>
+                           where Self : 'a;
+
+    #[inline]
+    fn support_for(&self, d : Self::Id) -> Self::SupportType {
+        self.construct_sensor(d, self.weights[d])
+    }
+
+    #[inline]
+    fn support_count(&self) -> usize {
+        self.weights.len()
+    }
+
+    #[inline]
+    fn all_data(&self) -> Self::AllDataIter<'_> {
+        (0..).zip(self.weights.as_slice().iter()).mapX(self, Self::construct_sensor_and_id)
+    }
+}
+
+/// Helper structure for constructing preadjoints of `S` where `S : Linear<X>`.
+/// [`Linear`] needs to be implemented for each instance, but [`Adjointable`]
+/// and [`BoundedLinear`] have blanket implementations.
+#[derive(Clone,Debug)]
+pub struct PreadjointHelper<'a, S : 'a, X> {
+    // The operator whose preadjoint this represents.
+    forward_op : &'a S,
+    // Records the domain `X` of `forward_op` without storing a value of it.
+    _domain : PhantomData<X>
+}
+
+impl<'a, S : 'a, X> PreadjointHelper<'a, S, X> {
+    /// Wrap `forward_op` as its own preadjoint helper.
+    pub fn new(forward_op : &'a S) -> Self {
+        PreadjointHelper { forward_op, _domain: PhantomData }
+    }
+}
+
+/// The adjoint of the preadjoint is the original forward operator (cloned).
+impl<'a, X, Ypre, S> Adjointable<Ypre, X>
+for PreadjointHelper<'a, S, X>
+where Self : Linear<Ypre>,
+      S : Clone + Linear<X> {
+    type AdjointCodomain = S::Codomain;
+    type Adjoint<'b> = S where Self : 'b;
+    fn adjoint(&self) -> Self::Adjoint<'_> {
+        self.forward_op.clone()
+    }
+}
+
+/// The preadjoint shares the operator norm bound of the forward operator.
+impl<'a, X, Ypre, S> BoundedLinear<Ypre>
+for PreadjointHelper<'a, S, X>
+where Self : Linear<Ypre>,
+      S : 'a + Clone + BoundedLinear<X> {
+    type FloatType = S::FloatType;
+    fn opnorm_bound(&self) -> Self::FloatType {
+        self.forward_op.opnorm_bound()
+    }
+}
+
+
+/// By-reference preadjoint application: clones the observable and delegates to
+/// the by-value implementation below.
+impl<'a, 'b, F, S, P, BT, const N : usize> Apply<&'b DVector<F>>
+for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output = SensorGridBTFN<F, S, P, BT, N>;
+
+    fn apply(&self, x : &'b DVector<F>) -> Self::Output {
+        self.apply(x.clone())
+    }
+}
+
+/// Preadjoint application $A^*x$: produce a [`BTFN`] over the sensors weighted
+/// by the entries of `x`, reusing the forward operator's bisection tree.
+impl<'a, F, S, P, BT, const N : usize> Apply<DVector<F>>
+for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output = SensorGridBTFN<F, S, P, BT, N>;
+
+    fn apply(&self, x : DVector<F>) -> Self::Output {
+        let fwd = &self.forward_op;
+        let generator = SensorGridSupportGenerator{
+            base_sensor : fwd.base_sensor.clone(),
+            grid : fwd.grid(),
+            weights : x
+        };
+        BTFN::new_refresh(&fwd.bt, generator)
+    }
+}
+
+/// Marks the preadjoint as linear on observables.
+impl<'a, F, S, P, BT, const N : usize> Linear<DVector<F>>
+for PreadjointHelper<'a, SensorGrid<F, S, P, BT, N>, RNDM<F,N>>
+where F : Float,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Codomain = SensorGridBTFN<F, S, P, BT, N>;
+}
+
+impl<F, S, P, BT, const N : usize> ForwardModel<Loc<F, N>, F>
+for SensorGrid<F, S, P, BT, N>
+where F : Float + ToNalgebraRealField<MixedType=F> + nalgebra::RealField,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      ShiftedSensor<F, S, P, N> : LocalAnalysis<F, BT::Agg, N>,
+      Weighted<ShiftedSensor<F, S, P, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+    type Observable = DVector<F>;
+
+    /// Assemble A (one column per spike of `μ`, one row per sensor), then
+    /// return (A_*A, A_* b) for the finite-dimensional subproblem.
+    fn findim_quadratic_model(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        b : &Self::Observable
+    ) -> (DMatrix<F::MixedType>, DVector<F::MixedType>) {
+        assert_eq!(b.len(), self.n_sensors());
+        let mut mA = DMatrix::zeros(self.n_sensors(), μ.len());
+        let grid = self.grid();
+        // Column for each spike: sensor values at the spike location, restricted
+        // via the bisection tree to sensors whose support contains the spike.
+        for (mut mAcol, δ) in mA.column_iter_mut().zip(μ.iter_spikes()) {
+            for &d in self.bt.iter_at(&δ.x) {
+                let sensor = self.shifted_sensor(grid.entry_linear_unchecked(d));
+                mAcol[d] += sensor.apply(&δ.x);
+            }
+        }
+        let mAt = mA.transpose();
+        (&mAt * mA, &mAt * b)
+    }
+
+    /// Write the observable as CSV rows of (sensor centre, value) to `prefix`.txt.
+    fn write_observable(&self, b : &Self::Observable, prefix : String) -> DynError {
+        let it = self.grid().into_iter().zip(b.iter()).map(|(x, &v)| (x, v));
+        write_csv(it, prefix + ".txt")
+    }
+
+    #[inline]
+    fn zero_observable(&self) -> Self::Observable {
+        self._zero_observable()
+    }
+
+    #[inline]
+    fn empty_observable(&self) -> Self::Observable {
+        DVector::zeros(0)
+    }
+
+}
+
+/// Implements the calculation of a factor $L$ such that $A_*A ≤ L 𝒟$ for $A$ the forward model
+/// and $𝒟$ a seminorm of suitable form.
+///
+/// **This assumes (but does not check) that the sensors are not overlapping.**
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, BT, S, P, K, const N : usize> Lipschitz<ConvolutionOp<F, K, BT, N>>
+for SensorGrid<F, S, P, BT, N>
+where F : Float + nalgebra::RealField + ToNalgebraRealField,
+      BT : SensorGridBT<F, S, P, N>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      Convolution<S, P> : Spread<F, N>,
+      K : SimpleConvolutionKernel<F, N>,
+      AutoConvolution<P> : BoundedBy<F, K> {
+
+    type FloatType = F;
+
+    /// Returns `None` when no such factor can be guaranteed (negative-valued
+    /// sensors, or no bounding factor for the autoconvolution).
+    fn lipschitz_factor(&self, seminorm : &ConvolutionOp<F, K, BT, N>) -> Option<F> {
+        // Sensors should not take on negative values to allow
+        // A_*A to be upper bounded by a simple convolution of `spread`.
+        if self.sensor.bounds().lower() < 0.0 {
+            return None
+        }
+
+        // Calculate the factor $L_1$ such that $ℱ[ψ * ψ] ≤ L_1 ℱ[ρ]$ for $ψ$ the base spread
+        // and $ρ$ the kernel of the seminorm.
+        let l1 = AutoConvolution(self.spread.clone()).bounding_factor(seminorm.kernel())?;
+
+        // Calculate the factor for transitioning from $A_*A$ to `AutoConvolution<P>`, where A
+        // consists of several `Convolution<S, P>` for the physical model `P` and the sensor `S`.
+        let l0 = self.sensor.norm(Linfinity) * self.sensor.norm(L1);
+
+        // The final transition factor is:
+        Some(l0 * l1)
+    }
+}
+
+/// Generates right-hand-side scalar arithmetic (`Mul`, `Div`, and the
+/// corresponding `*Assign` variants) for [`SensorGridSupportGenerator`] by
+/// forwarding the operation to its `weights` vector.
+macro_rules! make_sensorgridsupportgenerator_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        // In-place variant: scale the weights.
+        impl<F, S, P, const N : usize>
+        std::ops::$trait_assign<F>
+        for SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            fn $fn_assign(&mut self, t : F) {
+                self.weights.$fn_assign(t);
+            }
+        }
+
+        // Consuming variant: scale the weights and return self.
+        impl<F, S, P, const N : usize>
+        std::ops::$trait<F>
+        for SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(mut self, t : F) -> Self::Output {
+                std::ops::$trait_assign::$fn_assign(&mut self.weights, t);
+                self
+            }
+        }
+
+        // Borrowing variant: build a new generator with scaled weights.
+        impl<'a, F, S, P, const N : usize>
+        std::ops::$trait<F>
+        for &'a SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(self, t : F) -> Self::Output {
+                SensorGridSupportGenerator{
+                    base_sensor : self.base_sensor.clone(),
+                    grid : self.grid,
+                    weights : (&self.weights).$fn(t)
+                }
+            }
+        }
+    }
+}
+
+make_sensorgridsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_sensorgridsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+/// Generates unary operations (currently only `Neg`) for
+/// [`SensorGridSupportGenerator`] by forwarding to its `weights` vector.
+macro_rules! make_sensorgridsupportgenerator_unaryop {
+    ($trait:ident, $fn:ident) => {
+        // Consuming variant: transform the weights and return self.
+        impl<F, S, P, const N : usize>
+        std::ops::$trait
+        for SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(mut self) -> Self::Output {
+                self.weights = self.weights.$fn();
+                self
+            }
+        }
+
+        // Borrowing variant: build a new generator with transformed weights.
+        impl<'a, F, S, P, const N : usize>
+        std::ops::$trait
+        for &'a SensorGridSupportGenerator<F, S, P, N>
+        where F : Float,
+              S : Sensor<F, N>,
+              P : Spread<F, N>,
+              Convolution<S, P> : Spread<F, N> {
+            type Output = SensorGridSupportGenerator<F, S, P, N>;
+            fn $fn(self) -> Self::Output {
+                SensorGridSupportGenerator{
+                    base_sensor : self.base_sensor.clone(),
+                    grid : self.grid,
+                    weights : (&self.weights).$fn()
+                }
+            }
+        }
+    }
+}
+
+make_sensorgridsupportgenerator_unaryop!(Neg, neg);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/fourier.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,32 @@
+/*!
+Fourier transform traits
+*/
+
+use alg_tools::types::{Num, Float};
+use alg_tools::mapping::{RealMapping, Mapping};
+use alg_tools::bisection_tree::Weighted;
+use alg_tools::loc::Loc;
+
+/// Trait for Fourier transforms. When F is a non-complex number, the transform
+/// also has to be non-complex, i.e., the function itself symmetric.
+pub trait Fourier<F : Num> : Mapping<Self::Domain, Codomain=F> {
+    /// Domain of the function being transformed.
+    type Domain;
+    /// Type of the transformed function; a mapping on the same domain.
+    type Transformed : Mapping<Self::Domain, Codomain=F>;
+
+    /// Return the Fourier transform of this function.
+    fn fourier(&self) -> Self::Transformed;
+}
+
+/// By linearity, the Fourier transform of a weighted function is the
+/// equally-weighted transform of the base function.
+impl<F : Float, T, const N : usize> Fourier<F>
+for Weighted<T, F>
+where T : Fourier<F, Domain = Loc<F, N>> + RealMapping<F, N> {
+    type Domain = T::Domain;
+    type Transformed = Weighted<T::Transformed, F>;
+
+    #[inline]
+    fn fourier(&self) -> Self::Transformed {
+        Weighted {
+            base_fn : self.base_fn.fourier(),
+            weight : self.weight
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/frank_wolfe.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,333 @@
+/*!
+Solver for the point source localisation problem using a conditional gradient method.
+
+We implement two variants, the “fully corrective” method from
+
+  * Pieper K., Walter D. _Linear convergence of accelerated conditional gradient algorithms
+    in spaces of measures_, DOI: [10.1051/cocv/2021042](https://doi.org/10.1051/cocv/2021042),
+    arXiv: [1904.09218](https://doi.org/10.48550/arXiv.1904.09218).
+
+and what we call the “relaxed” method from
+
+  * Bredies K., Pikkarainen H. - _Inverse problems in spaces of measures_,
+    DOI: [10.1051/cocv/2011205](https://doi.org/10.1051/cocv/2011205).
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Serialize, Deserialize};
+//use colored::Colorize;
+
+use alg_tools::iterate::{
+    AlgIteratorFactory,
+    AlgIteratorState,
+    AlgIteratorOptions,
+};
+use alg_tools::euclidean::Euclidean;
+use alg_tools::norms::Norm;
+use alg_tools::linops::Apply;
+use alg_tools::sets::Cube;
+use alg_tools::loc::Loc;
+use alg_tools::bisection_tree::{
+    BTFN,
+    Bounds,
+    BTNodeLookup,
+    BTNode,
+    BTSearch,
+    P2Minimise,
+    SupportGenerator,
+    LocalAnalysis,
+};
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+
+use crate::types::*;
+use crate::measures::{
+    DiscreteMeasure,
+    DeltaMeasure,
+    Radon,
+};
+use crate::measures::merging::{
+    SpikeMergingMethod,
+    SpikeMerging,
+};
+use crate::forward_model::ForwardModel;
+#[allow(unused_imports)] // Used in documentation
+use crate::subproblem::{
+    quadratic_nonneg,
+    InnerSettings,
+    InnerMethod,
+};
+use crate::tolerance::Tolerance;
+use crate::plot::{
+    SeqPlotter,
+    Plotting,
+    PlotLookup
+};
+
+/// Settings for [`pointsource_fw`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct FWConfig<F : Float> {
+    /// Tolerance for branch-and-bound new spike location discovery
+    pub tolerance : Tolerance<F>,
+    /// Inner problem solution configuration. Has to have `method` set to [`InnerMethod::FB`]
+    /// as the conditional gradient subproblems' optimality conditions do not in general have an
+    /// invertible Newton derivative for SSN.
+    pub inner : InnerSettings<F>,
+    /// Variant of the conditional gradient method
+    pub variant : FWVariant,
+    /// Settings for branch and bound refinement when looking for predual maxima
+    pub refinement : RefinementSettings<F>,
+    /// Spike merging heuristic
+    pub merging : SpikeMergingMethod<F>,
+}
+
+/// Conditional gradient method variant; see also [`FWConfig`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[allow(dead_code)]
+pub enum FWVariant {
+    /// Algorithm 2 of Walter-Pieper
+    FullyCorrective,
+    /// Bredies–Pikkarainen. Forces `FWConfig.inner.max_iter = 1`.
+    Relaxed,
+}
+
+/// Default configuration: fully corrective variant with default tolerances,
+/// refinement, inner-solver, and merging settings.
+impl<F : Float> Default for FWConfig<F> {
+    fn default() -> Self {
+        FWConfig {
+            tolerance : Default::default(),
+            refinement : Default::default(),
+            inner : Default::default(),
+            variant : FWVariant::FullyCorrective,
+            merging : Default::default(),
+        }
+    }
+}
+
+/// Helper struct for pre-initialising the finite-dimensional subproblems solver
+/// [`optimise_weights`].
+///
+/// The pre-initialisation is done by [`prepare_optimise_weights`].
+pub struct FindimData<F : Float> {
+    // Cached square of the operator norm bound of the forward operator.
+    opAnorm_squared : F
+}
+
+/// Return a pre-initialisation struct for [`optimise_weights`].
+///
+/// The parameter `opA` is the forward operator $A$. The cached quantity is the
+/// square of its operator norm bound, used for inner step length selection.
+pub fn prepare_optimise_weights<F, A, const N : usize>(opA : &A) -> FindimData<F>
+where F : Float + ToNalgebraRealField,
+      A : ForwardModel<Loc<F, N>, F> {
+    FindimData{
+        opAnorm_squared : opA.opnorm_bound().powi(2)
+    }
+}
+
+/// Solve the finite-dimensional weight optimisation problem for the 2-norm-squared data fidelity
+/// point source localisation problem.
+///
+/// That is, we minimise
+/// <div>$$
+///     μ ↦ \frac{1}{2}\|Aμ-b\|_w^2 + α\|μ\|_ℳ + δ_{≥ 0}(μ)
+/// $$</div>
+/// only with respect to the weights of $μ$.
+///
+/// The parameter `μ` is the discrete measure whose weights are to be optimised.
+/// The `opA` parameter is the forward operator $A$, while `b` and `α` are as in the
+/// objective above. The method parameters are set in `inner` (see [`InnerSettings`]), while
+/// `iterator` is used to iterate the steps of the method, and `plotter` may be used to
+/// save intermediate iteration states as images. The parameter `findim_data` should be
+/// prepared using [`prepare_optimise_weights`]:
+///
+/// Returns the number of iterations taken by the method configured in `inner`.
+pub fn optimise_weights<'a, F, A, I, const N : usize>(
+    μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+    opA : &'a A,
+    b : &A::Observable,
+    α : F,
+    findim_data : &FindimData<F>,
+    inner : &InnerSettings<F>,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>,
+      A : ForwardModel<Loc<F, N>, F>
+{
+    // Form and solve finite-dimensional subproblem.
+    let (Ã, g̃) = opA.findim_quadratic_model(&μ, b);
+    let mut x = μ.masses_dvector();
+
+    // `inner_τ1` is based on an estimate of the operator norm of $A$ from ℳ(Ω) to
+    // ℝ^n. This estimate is a good one for the matrix norm from ℝ^m to ℝ^n when the
+    // former is equipped with the 1-norm. We need the 2-norm. To pass from 1-norm to
+    // 2-norm, we estimate
+    //      ‖A‖_{2,2} := sup_{‖x‖_2 ≤ 1} ‖Ax‖_2 ≤ sup_{‖x‖_1 ≤ C} ‖Ax‖_2
+    //                 = C sup_{‖x‖_1 ≤ 1} ‖Ax‖_2 = C ‖A‖_{1,2},
+    // where C = √m satisfies ‖x‖_1 ≤ C ‖x‖_2. Since we are intested in ‖A_*A‖, no
+    // square root is needed when we scale:
+    let inner_τ = inner.τ0 / (findim_data.opAnorm_squared * F::cast_from(μ.len()));
+    let iters = quadratic_nonneg(inner.method, &Ã, &g̃, α, &mut x, inner_τ, iterator);
+    // Update masses of μ based on solution of finite-dimensional subproblem.
+    μ.set_masses_dvector(&x);
+
+    iters
+}
+
+/// Solve point source localisation problem using a conditional gradient method
+/// for the 2-norm-squared data fidelity, i.e., the problem
+/// <div>$$
+///     \min_μ \frac{1}{2}\|Aμ-b\|_w^2 + α\|μ\|_ℳ + δ_{≥ 0}(μ).
+/// $$</div>
+///
+/// The `opA` parameter is the forward operator $A$, while `b` and `α` are as in the
+/// objective above. The method parameter are set in `config` (see [`FWConfig`]), while
+/// `iterator` is used to iterate the steps of the method, and `plotter` may be used to
+/// save intermediate iteration states as images.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_fw<'a, F, I, A, GA, BTA, S, const N : usize>(
+    opA : &'a A,
+    b : &A::Observable,
+    α : F,
+    //domain : Cube<F, N>,
+    config : &FWConfig<F>,
+    iterator : I,
+    mut plotter : SeqPlotter<F, N>,
+) -> DiscreteMeasure<Loc<F, N>, F>
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<IterInfo<F, N>>,
+      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>,
+                                  //+ std::ops::Mul<F, Output=A::Observable>,  <-- FIXME: compiler overflow
+      A::Observable : std::ops::MulAssign<F>,
+      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>,
+      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+      PlotLookup : Plotting<N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F> {
+
+    // Set up parameters
+    // We multiply tolerance by α for all algoritms.
+    let tolerance = config.tolerance * α;
+    let mut ε = tolerance.initial();
+    let findim_data = prepare_optimise_weights(opA);
+    let m0 = b.norm2_squared() / (2.0 * α);
+    let φ = |t| if t <= m0 { α * t } else { α / (2.0 * m0) * (t*t + m0 * m0) };
+
+    // Initialise operators
+    let preadjA = opA.preadjoint();
+
+    // Initialise iterates
+    let mut μ = DiscreteMeasure::new();
+    let mut residual = -b;
+
+    let mut inner_iters = 0;
+    let mut this_iters = 0;
+    let mut pruned = 0;
+    let mut merged = 0;
+
+    // Run the algorithm
+    iterator.iterate(|state| {
+        // Update tolerance
+        let inner_tolerance = ε * config.inner.tolerance_mult;
+        let refinement_tolerance = ε * config.refinement.tolerance_mult;
+        let ε_prev = ε;
+        ε = tolerance.update(ε, state.iteration());
+
+        // Calculate smooth part of surrogate model.
+        //
+        // Using `std::mem::replace` here is not ideal, and expects that `empty_observable`
+        // has no significant overhead. For some reosn Rust doesn't allow us simply moving
+        // the residual and replacing it below before the end of this closure.
+        let r = std::mem::replace(&mut residual, opA.empty_observable());
+        let mut g = -preadjA.apply(r);
+
+        // Find absolute value maximising point
+        let (ξmax, v_ξmax) = g.maximise(refinement_tolerance,
+                                        config.refinement.max_steps);
+        let (ξmin, v_ξmin) = g.minimise(refinement_tolerance,
+                                        config.refinement.max_steps);
+        let (ξ, v_ξ) = if v_ξmin < 0.0 && -v_ξmin > v_ξmax {
+            (ξmin, v_ξmin)
+        } else {
+            (ξmax, v_ξmax)
+        };
+
+        let inner_it = match config.variant {
+            FWVariant::FullyCorrective => {
+                // No point in optimising the weight here: the finite-dimensional algorithm is fast.
+                μ += DeltaMeasure { x : ξ, α : 0.0 };
+                config.inner.iterator_options.stop_target(inner_tolerance)
+            },
+            FWVariant::Relaxed => {
+                // Perform a relaxed initialisation of μ
+                let v = if v_ξ.abs() <= α { 0.0 } else { m0 / α * v_ξ };
+                let δ = DeltaMeasure { x : ξ, α : v };
+                let dp = μ.apply(&g) - δ.apply(&g);
+                let d = opA.apply(&μ) - opA.apply(&δ);
+                let r = d.norm2_squared();
+                let s = if r == 0.0 {
+                    1.0
+                } else {
+                    1.0.min( (α * μ.norm(Radon) - φ(v.abs()) - dp) / r)
+                };
+                μ *= 1.0 - s;
+                μ += δ * s;
+                // The stop_target is only needed for the type system.
+                AlgIteratorOptions{ max_iter : 1, .. config.inner.iterator_options}.stop_target(0.0)
+            }
+        };
+
+        inner_iters += optimise_weights(&mut μ, opA, b, α, &findim_data, &config.inner, inner_it);
+   
+        // Merge spikes and update residual for next step and `if_verbose` below.
+        let n_before_merge = μ.len();
+        residual = μ.merge_spikes_fitness(config.merging,
+                                         |μ̃| opA.apply(μ̃) - b,
+                                          A::Observable::norm2_squared);
+        assert!(μ.len() >= n_before_merge);
+        merged += μ.len() - n_before_merge;
+
+
+        // Prune points with zero mass
+        let n_before_prune = μ.len();
+        μ.prune();
+        debug_assert!(μ.len() <= n_before_prune);
+        pruned += n_before_prune - μ.len();
+
+        this_iters +=1;
+
+        // Give function value if needed
+        state.if_verbose(|| {
+            plotter.plot_spikes(
+                format!("iter {} start", state.iteration()), &g,
+                "".to_string(), None::<&A::PreadjointCodomain>,
+                None, &μ
+            );
+            let res = IterInfo {
+                value : residual.norm2_squared_div2() + α * μ.norm(Radon),
+                n_spikes : μ.len(),
+                inner_iters,
+                this_iters,
+                merged,
+                pruned,
+                ε : ε_prev,
+                maybe_ε1 : None,
+                postprocessing : None,
+            };
+            inner_iters = 0;
+            this_iters = 0;
+            merged = 0;
+            pruned = 0;
+            res
+        })
+    });
+
+    // Return final iterate
+    μ
+}
+
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,27 @@
+/*!
+Various function presentations, useful as convolution kernels.
+
+The kernels typically implement
+ * [`Mapping`][alg_tools::mapping::Mapping] for value evaluation
+ * [`Support`][alg_tools::bisection_tree::Support] for insertion into
+   [`BT`][alg_tools::bisection_tree::BT] bisection tree
+ * [`GlobalAnalysis`][alg_tools::bisection_tree::GlobalAnalysis] on
+   [`Bounds`][alg_tools::bisection_tree::Bounds] for bounding the kernel globally.
+ * [`LocalAnalysis`][alg_tools::bisection_tree::LocalAnalysis] on
+   [`Bounds`][alg_tools::bisection_tree::Bounds] for
+   bounding the kernel locally on a [`Cube`][alg_tools::sets::Cube].
+*/
+
+mod base;
+pub use base::*;
+mod mollifier;
+pub use mollifier::*;
+mod hat;
+pub use hat::*;
+mod gaussian;
+pub use gaussian::*;
+mod ball_indicator;
+pub use ball_indicator::*;
+mod hat_convolution;
+pub use hat_convolution::*;
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/ball_indicator.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,260 @@
+
+//! Implementation of the indicator function of a ball with respect to various norms.
+use float_extras::f64::{tgamma as gamma};
+use numeric_literals::replace_float_literals;
+use serde::Serialize;
+use alg_tools::types::*;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Constant,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+};
+use alg_tools::mapping::Apply;
+use alg_tools::maputil::array_init;
+use alg_tools::coefficients::factorial;
+
+use super::base::*;
+
+/// Representation of the indicator of the ball $𝔹_q = \\{ x ∈ ℝ^N \mid \\|x\\|\_q ≤ r \\}$,
+/// where $q$ is the `Exponent`, and $r$ is the radius [`Constant`] `C`.
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct BallIndicator<C : Constant, Exponent : NormExponent, const N : usize> {
+    /// The radius of the ball.
+    pub r : C,
+    /// The exponent $q$ of the norm creating the ball
+    pub exponent : Exponent,
+}
+
+/// Alias for the representation of the indicator of the $∞$-norm-ball
+/// $𝔹_∞ = \\{ x ∈ ℝ^N \mid \\|x\\|\_∞ ≤ c \\}$.
+pub type CubeIndicator<C, const N : usize> = BallIndicator<C, Linfinity, N>;
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+Apply<&'a Loc<C::Type, N>>
+for BallIndicator<C, Exponent, N>
+where Loc<F, N> : Norm<F, Exponent> {
+    type Output = C::Type;
+    #[inline]
+    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+        // 1 on the closed q-norm ball of radius r, 0 outside.
+        let r = self.r.value();
+        let n = x.norm(self.exponent);
+        if n <= r {
+            1.0
+        } else {
+            0.0
+        }
+    }
+}
+
+impl<F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+Apply<Loc<C::Type, N>>
+for BallIndicator<C, Exponent, N>
+where Loc<F, N> : Norm<F, Exponent> {
+    type Output = C::Type;
+    #[inline]
+    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
+        // Delegate to the by-reference implementation.
+        self.apply(&x)
+    }
+}
+
+
+impl<'a, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+Support<C::Type, N>
+for BallIndicator<C, Exponent, N>
+where Loc<F, N> : Norm<F, Exponent>,
+      Linfinity : Dominated<F, Exponent, Loc<F, N>> {
+
+    #[inline]
+    fn support_hint(&self) -> Cube<F,N> {
+        // Convert the q-norm radius into an ∞-norm (cube) radius that
+        // encloses the q-ball, via the `Dominated` relation.
+        let r = Linfinity.from_norm(self.r.value(), self.exponent);
+        array_init(|| [-r, r]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F,N>) -> bool {
+        // NOTE(review): this compares the q-norm of `x` against the
+        // ∞-norm-converted radius, which may report a superset of the true
+        // q-ball — presumably intentional (conservative, consistent with
+        // `support_hint`); confirm.
+        let r = Linfinity.from_norm(self.r.value(), self.exponent);
+        x.norm(self.exponent) <= r
+    }
+
+    /// This can only really work in a reasonable fashion for N=1.
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        // Suggest subdividing at the discontinuities ±r of the indicator.
+        let r = Linfinity.from_norm(self.r.value(), self.exponent);
+        cube.map(|a, b| symmetric_interval_hint(r, a, b))
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+GlobalAnalysis<F, Bounds<F>>
+for BallIndicator<C, Exponent, N>
+where Loc<F, N> : Norm<F, Exponent> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        // An indicator function only takes the values 0 and 1.
+        Bounds(0.0, 1.0)
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, C : Constant<Type=F>, Exponent : NormExponent, const N : usize>
+Norm<F, Linfinity>
+for BallIndicator<C, Exponent, N>
+where Loc<F, N> : Norm<F, Exponent> {
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> F {
+        // The sup-norm of an indicator of a non-empty set is 1.
+        1.0
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, C : Constant<Type=F>, const N : usize>
+Norm<F, L1>
+for BallIndicator<C, L1, N> {
+    /// The $L^1$ norm of the indicator of an $ℓ^1$ ball is its volume,
+    /// $V_N^1(r) = (2r)^N / N!$; see
+    /// <https://en.wikipedia.org/wiki/Volume_of_an_n-ball#Balls_in_Lp_norms>.
+    #[inline]
+    fn norm(&self, _ : L1) -> F {
+        let r = self.r.value();
+        if N==1 {
+            2.0 * r
+        } else if N==2 {
+            // (2r)² / 2! = 2r²; the previous code was missing the factor 2.
+            2.0 * (r * r)
+        } else {
+            // Fixed: the volume formula *divides* by N!; the previous code
+            // multiplied by it.
+            (2.0 * r).powi(N as i32) / F::cast_from(factorial(N))
+        }
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, C : Constant<Type=F>, const N : usize>
+Norm<F, L1>
+for BallIndicator<C, L2, N> {
+    /// The $L^1$ norm of the indicator of the Euclidean ball is its volume
+    /// $π^{N/2} r^N / Γ(N/2+1)$.
+    #[inline]
+    fn norm(&self, _ : L1) -> F {
+        // See https://en.wikipedia.org/wiki/Volume_of_an_n-ball#The_volume.
+        let r = self.r.value();
+        let π = F::PI;
+        if N==1 {
+            2.0 * r
+        } else if N==2 {
+            π * (r * r)
+        } else {
+            let ndiv2 = F::cast_from(N) / 2.0;
+            let γ = F::cast_from(gamma((ndiv2 + 1.0).as_()));
+            π.powf(ndiv2) / γ * r.powi(N as i32)
+        }
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, C : Constant<Type=F>, const N : usize>
+Norm<F, L1>
+for BallIndicator<C, Linfinity, N> {
+    /// The $L^1$ norm of the indicator of the cube $[-r, r]^N$ is its
+    /// volume $(2r)^N$.
+    #[inline]
+    fn norm(&self, _ : L1) -> F {
+        let two_r = 2.0 * self.r.value();
+        two_r.powi(N as i32)
+    }
+}
+
+
+// Implements `LocalAnalysis` for the ball indicator with the given exponent.
+macro_rules! indicator_local_analysis {
+    ($exponent:ident) => {
+        impl<'a, F : Float, C : Constant<Type=F>, const N : usize>
+        LocalAnalysis<F, Bounds<F>, N>
+        for BallIndicator<C, $exponent, N>
+        where Loc<F, N> : Norm<F, $exponent>,
+            Linfinity : Dominated<F, $exponent, Loc<F, N>> {
+            #[inline]
+            fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+                // The indicator is maximised where the norm is minimised and
+                // vice versa, so evaluate at the cube's extreme-norm points.
+                // NOTE(review): `maxnorm_point`/`minnorm_point` appear to use a
+                // fixed norm — confirm this is valid for each `$exponent`.
+                let lower = self.apply(cube.maxnorm_point());
+                let upper = self.apply(cube.minnorm_point());
+                Bounds(lower, upper)
+            }
+        }
+    }
+}
+
+indicator_local_analysis!(L1);
+indicator_local_analysis!(L2);
+indicator_local_analysis!(Linfinity);
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, const N : usize> Apply<&'a Loc<F, N>>
+for AutoConvolution<CubeIndicator<R, N>>
+where R : Constant<Type=F> {
+    type Output = F;
+
+    #[inline]
+    fn apply(&self, y : &'a Loc<F, N>) -> F {
+        let two_r = 2.0 * self.0.r.value();
+        // In 1-D, (χ_{[-r,r]} ∗ χ_{[-r,r]})(x) is the triangle max(0, 2r − |x|).
+        // This is just a product of one-dimensional versions
+        y.iter().map(|&x| {
+            0.0.max(two_r - x.abs())
+        }).product()
+    }
+}
+
+impl<F : Float, R, const N : usize> Apply<Loc<F, N>>
+for AutoConvolution<CubeIndicator<R, N>>
+where R : Constant<Type=F> {
+    type Output = F;
+
+    #[inline]
+    fn apply(&self, y : Loc<F, N>) -> F {
+        // Delegate to the by-reference implementation.
+        self.apply(&y)
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float, R, const N : usize> Support<F, N>
+for AutoConvolution<CubeIndicator<R, N>>
+where R : Constant<Type=F> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        // Autoconvolution doubles the support radius to 2r.
+        let two_r = 2.0 * self.0.r.value();
+        array_init(|| [-two_r, two_r]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, y : &Loc<F, N>) -> bool {
+        let two_r = 2.0 * self.0.r.value();
+        y.iter().all(|x| x.abs() <= two_r)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        // The triangle function peaks at 0 and vanishes at ±2r.
+        let two_r = 2.0 * self.0.r.value();
+        cube.map(|c, d| symmetric_interval_hint(two_r, c, d))
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float, R, const N : usize> GlobalAnalysis<F, Bounds<F>>
+for AutoConvolution<CubeIndicator<R, N>>
+where R : Constant<Type=F> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        // Nonnegative, with the maximum attained at the origin.
+        Bounds(0.0, self.apply(Loc::ORIGIN))
+    }
+}
+
+impl<F : Float, R, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for AutoConvolution<CubeIndicator<R, N>>
+where R : Constant<Type=F> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        // The function is maximised/minimised where the absolute value is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        Bounds(lower, upper)
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/base.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,404 @@
+
+//! Things for constructing new kernels from component kernels and traits for analysing them
+use serde::Serialize;
+use numeric_literals::replace_float_literals;
+
+use alg_tools::types::*;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+    Bounded,
+};
+use alg_tools::mapping::Apply;
+use alg_tools::maputil::{array_init, map2};
+use alg_tools::sets::SetOrd;
+
+use crate::fourier::Fourier;
+
+/// Representation of the product of two kernels.
+///
+/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+///
+/// The implementation of [`Support`] only uses the [`Support::support_hint`] of the first parameter!
+#[derive(Copy,Clone,Serialize,Debug)]
+pub struct SupportProductFirst<A, B>(
+    /// First kernel
+    pub A,
+    /// Second kernel
+    pub B
+);
+
+impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
+for SupportProductFirst<A, B>
+where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
+      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : Loc<F, N>) -> Self::Output {
+        // Pointwise product of the two component kernels.
+        self.0.apply(&x) * self.1.apply(&x)
+    }
+}
+
+impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+for SupportProductFirst<A, B>
+where A : Apply<&'a Loc<F, N>, Output=F>,
+      B : Apply<&'a Loc<F, N>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
+        self.0.apply(x) * self.1.apply(x)
+    }
+}
+
+impl<'a, A, B, F : Float, const N : usize> Support<F, N>
+for SupportProductFirst<A, B>
+where A : Support<F, N>,
+      B : Support<F, N> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        // Only the first factor's support is consulted; see the type-level doc.
+        self.0.support_hint()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F, N>) -> bool {
+        self.0.in_support(x)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        self.0.bisection_hint(cube)
+    }
+}
+
+impl<'a, A, B, F : Float> GlobalAnalysis<F, Bounds<F>>
+for SupportProductFirst<A, B>
+where A : GlobalAnalysis<F, Bounds<F>>,
+      B : GlobalAnalysis<F, Bounds<F>> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        // Interval-arithmetic product of the component bounds.
+        self.0.global_analysis() * self.1.global_analysis()
+    }
+}
+
+impl<'a, A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for SupportProductFirst<A, B>
+where A : LocalAnalysis<F, Bounds<F>, N>,
+      B : LocalAnalysis<F, Bounds<F>, N> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        self.0.local_analysis(cube) * self.1.local_analysis(cube)
+    }
+}
+
+/// Representation of the sum of two kernels
+///
+/// The kernels typically implement [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+///
+/// The implementation of [`Support`] combines the support hints of both kernels.
+#[derive(Copy,Clone,Serialize,Debug)]
+pub struct SupportSum<A, B>(
+    /// First kernel
+    pub A,
+    /// Second kernel
+    pub B
+);
+
+impl<'a, A, B, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+for SupportSum<A, B>
+where A : Apply<&'a Loc<F, N>, Output=F>,
+      B : Apply<&'a Loc<F, N>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : &'a Loc<F, N>) -> Self::Output {
+        // Pointwise sum of the two component kernels.
+        self.0.apply(x) + self.1.apply(x)
+    }
+}
+
+impl<A, B, F : Float, const N : usize> Apply<Loc<F, N>>
+for SupportSum<A, B>
+where A : for<'a> Apply<&'a Loc<F, N>, Output=F>,
+      B : for<'a> Apply<&'a Loc<F, N>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : Loc<F, N>) -> Self::Output {
+        self.0.apply(&x) + self.1.apply(&x)
+    }
+}
+
+impl<'a, A, B, F : Float, const N : usize> Support<F, N>
+for SupportSum<A, B>
+where A : Support<F, N>,
+      B : Support<F, N>,
+      Cube<F, N> : SetOrd {
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        // A cube covering the supports of both summands.
+        self.0.support_hint().common(&self.1.support_hint())
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F, N>) -> bool {
+        // The sum is (possibly) nonzero wherever either summand is.
+        self.0.in_support(x) || self.1.in_support(x)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        // Prefer the first summand's hint on each axis; fall back to the second.
+        map2(self.0.bisection_hint(cube),
+             self.1.bisection_hint(cube),
+             |a, b| a.or(b))
+    }
+}
+
+impl<'a, A, B, F : Float> GlobalAnalysis<F, Bounds<F>>
+for SupportSum<A, B>
+where A : GlobalAnalysis<F, Bounds<F>>,
+      B : GlobalAnalysis<F, Bounds<F>> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        // Interval-arithmetic sum of the component bounds.
+        self.0.global_analysis() + self.1.global_analysis()
+    }
+}
+
+impl<'a, A, B, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for SupportSum<A, B>
+where A : LocalAnalysis<F, Bounds<F>, N>,
+      B : LocalAnalysis<F, Bounds<F>, N>,
+      Cube<F, N> : SetOrd {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        self.0.local_analysis(cube) + self.1.local_analysis(cube)
+    }
+}
+
+/// Representation of the convolution of two kernels.
+///
+/// The kernels typically implement [`Support`]s and [`Mapping`][alg_tools::mapping::Mapping].
+///
+/// Trait implementations have to be on a case-by-case basis.
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct Convolution<A, B>(
+    /// First kernel
+    pub A,
+    /// Second kernel
+    pub B
+);
+
+/// Representation of the autoconvolution of a kernel.
+///
+/// The kernel typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping].
+///
+/// Trait implementations have to be on a case-by-case basis.
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct AutoConvolution<A>(
+    /// The kernel to be autoconvolved
+    pub A
+);
+
+/// Representation of a multi-dimensional product of a one-dimensional kernel.
+///
+/// For $G: ℝ → ℝ$, this is the function $F(x\_1, …, x\_n) := \prod_{i=1}^n G(x\_i)$.
+/// The kernel $G$ typically implements [`Support`] and [`Mapping`][alg_tools::mapping::Mapping]
+/// on [`Loc<F, 1>`]. Then the product implements them on [`Loc<F, N>`].
+// NOTE(review): unlike the other combinators, this type is private to the module.
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+struct UniformProduct<G, const N : usize>(
+    /// The one-dimensional kernel
+    G
+);
+
+impl<'a, G, F : Float, const N : usize> Apply<&'a Loc<F, N>>
+for UniformProduct<G, N>
+where G : Apply<Loc<F, 1>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : &'a Loc<F, N>) -> F {
+        // F(x₁, …, x_N) = ∏ᵢ G(xᵢ).
+        x.iter().map(|&y| self.0.apply(Loc([y]))).product()
+    }
+}
+
+impl<G, F : Float, const N : usize> Apply<Loc<F, N>>
+for UniformProduct<G, N>
+where G : Apply<Loc<F, 1>, Output=F> {
+    type Output = F;
+    #[inline]
+    fn apply(&self, x : Loc<F, N>) -> F {
+        x.into_iter().map(|y| self.0.apply(Loc([y]))).product()
+    }
+}
+
+impl<G, F : Float, const N : usize> Support<F, N>
+for UniformProduct<G, N>
+where G : Support<F, 1> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        // Replicate the one-dimensional support interval along every axis.
+        let [a] : [[F; 2]; 1] = self.0.support_hint().into();
+        array_init(|| a.clone()).into()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F, N>) -> bool {
+        // In the support iff every coordinate is in the 1-D support.
+        x.iter().all(|&y| self.0.in_support(&Loc([y])))
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        // Ask the one-dimensional kernel for a hint on each axis separately.
+        cube.map(|a, b| {
+            let [h] = self.0.bisection_hint(&[[a, b]].into());
+            h
+        })
+    }
+}
+
+impl<G, F : Float, const N : usize> GlobalAnalysis<F, Bounds<F>>
+for UniformProduct<G, N>
+where G : GlobalAnalysis<F, Bounds<F>> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        // Interval-arithmetic product of N copies of the 1-D bounds.
+        let g = self.0.global_analysis();
+        (0..N).map(|_| g).product()
+    }
+}
+
+impl<G, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for UniformProduct<G, N>
+where G : LocalAnalysis<F, Bounds<F>, 1> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        // Product of the per-axis 1-D local analyses.
+        cube.iter_coords().map(
+            |&[a, b]| self.0.local_analysis(&([[a, b]].into()))
+        ).product()
+    }
+}
+
+// For a separable product function the Lᵖ norm factorises over the
+// coordinates, so ‖F‖ = ‖G‖^N for each supported exponent.
+macro_rules! product_lpnorm {
+    ($lp:ident) => {
+        impl<G, F : Float, const N : usize> Norm<F, $lp>
+        for UniformProduct<G, N>
+        where G : Norm<F, $lp> {
+            #[inline]
+            fn norm(&self, lp : $lp) -> F {
+                self.0.norm(lp).powi(N as i32)
+            }
+        }
+    }
+}
+
+product_lpnorm!(L1);
+product_lpnorm!(L2);
+product_lpnorm!(Linfinity);
+
+
+/// Trait for bounding one kernel with respect to another.
+///
+/// The type `F` is the scalar field, and `T` another kernel to which `Self` is compared.
+pub trait BoundedBy<F : Num, T> {
+    /// Calculate a bounding factor $c$ such that the Fourier transforms $ℱ\[v\] ≤ c ℱ\[u\]$ for
+    /// $v$ `self` and $u$ `other`.
+    ///
+    /// If no such factor exists, `None` is returned.
+    fn bounding_factor(&self, other : &T) -> Option<F>;
+}
+
+/// This [`BoundedBy`] implementation bounds $(uv) * (uv)$ by $(ψ * ψ) u$.
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, C, BaseP>
+BoundedBy<F, SupportProductFirst<AutoConvolution<C>, BaseP>>
+for AutoConvolution<SupportProductFirst<C, BaseP>>
+where F : Float,
+      C : Clone + PartialEq,
+      BaseP :  Fourier<F> + PartialOrd, // TODO: replace by BoundedBy,
+      <BaseP as Fourier<F>>::Transformed : Bounded<F> + Norm<F, L1> {
+
+    fn bounding_factor(&self, kernel : &SupportProductFirst<AutoConvolution<C>, BaseP>) -> Option<F> {
+        let SupportProductFirst(AutoConvolution(ref cutoff2), base_spread2) = kernel;
+        let AutoConvolution(SupportProductFirst(ref cutoff, ref base_spread)) = self;
+        let v̂ = base_spread.fourier();
+
+        // Verify that the cut-off and ideal physical model (base spread) are the same.
+        if cutoff == cutoff2
+           && base_spread <= base_spread2
+           && v̂.bounds().lower() >= 0.0 {
+            // Calculate the factor between the convolution approximation
+            // `AutoConvolution<SupportProductFirst<C, BaseP>>` of $A_*A$ and the
+            // kernel of the seminorm. This depends on the physical model P being
+            // `SupportProductFirst<C, BaseP>` with the kernel `K` being
+            // a `SupportSum` involving `SupportProductFirst<AutoConvolution<C>, BaseP>`.
+            Some(v̂.norm(L1))
+        } else {
+            // We cannot compare
+            None
+        }
+    }
+}
+
+impl<F : Float, A, B, C> BoundedBy<F, SupportSum<B, C>> for A
+where A : BoundedBy<F, B>,
+      C : Bounded<F> {
+
+    #[replace_float_literals(F::cast_from(literal))]
+    fn bounding_factor(&self, SupportSum(ref kernel1, kernel2) : &SupportSum<B, C>) -> Option<F> {
+        // A nonnegative second summand can only increase the bounding kernel,
+        // so a factor valid for `kernel1` alone remains valid for the sum.
+        // NOTE(review): the check is on the *spatial* lower bound of `kernel2`,
+        // while the trait contract is stated in Fourier space — confirm.
+        if kernel2.bounds().lower() >= 0.0 {
+            self.bounding_factor(kernel1)
+        } else {
+            None
+        }
+    }
+}
+
+/// Generates on $[a, b]$ a [`Support::bisection_hint`] for a symmetric interval $[-r, r]$.
+///
+/// It will attempt to place the subdivision point at $-r$ or $r$.
+/// If neither of these points lies within $[a, b]$, `None` is returned.
+#[inline]
+pub(super) fn symmetric_interval_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
+    if a < -r && -r < b {
+        Some(-r)
+    } else if a < r && r < b {
+        Some(r)
+    } else {
+        None
+    }
+}
+
+/// Generates on $[a, b]$ a [`Support::bisection_hint`] for a function with monotone derivative,
+/// support on $[-r, r]$ and peak at $0$.
+///
+/// It will attempt to place the subdivision point at $-r$, $r$, or $0$, depending on which
+/// gives the longer length for the shorter of the two subintervals. If none of these points
+/// lies within $[a, b]$, or the resulting interval would be shorter than $0.3r$, `None` is
+/// returned.
+#[replace_float_literals(F::cast_from(literal))]
+#[inline]
+pub(super) fn symmetric_peak_hint<F : Float>(r : F, a : F, b : F) -> Option<F> {
+    // Pick the candidate point (−r, 0, or r) inside (a, b) that splits the
+    // interval most evenly.
+    let stage1 = if a < -r {
+        if b <= -r {
+            None
+        } else if a + r < -b {
+            Some(-r)
+        } else {
+            Some(0.0)
+        }
+    } else if a < 0.0 {
+        if b <= 0.0 {
+            None
+        } else if a < r - b {
+            Some(0.0)
+        } else {
+            Some(r)
+        }
+    } else if a < r && b > r {
+        Some(r)
+    } else {
+        None
+    };
+
+    // Ignore stage1 hint if either side of subdivision would be just a small fraction of the
+    // interval
+    match stage1 {
+        Some(h) if (h - a).min(b-h) >= 0.3 * r => Some(h),
+        _ => None
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/gaussian.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,295 @@
+//! Implementation of the gaussian kernel.
+
+use float_extras::f64::erf;
+use numeric_literals::replace_float_literals;
+use serde::Serialize;
+use alg_tools::types::*;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Constant,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+    Weighted,
+    Bounded,
+};
+use alg_tools::mapping::Apply;
+use alg_tools::maputil::array_init;
+
+use crate::fourier::Fourier;
+use super::base::*;
+use super::ball_indicator::CubeIndicator;
+
+/// Storage presentation of the isotropic gaussian kernel of `variance` $σ^2$.
+///
+/// This is the function $f(x) = C e^{-\\|x\\|\_2^2/(2σ^2)}$ for $x ∈ ℝ^N$
+/// with $C=1/(2πσ^2)^{N/2}$.
+#[derive(Copy,Clone,Debug,Serialize,Eq)]
+pub struct Gaussian<S : Constant, const N : usize> {
+    /// The variance $σ^2$.
+    pub variance : S,
+}
+
+impl<S1, S2, const N : usize> PartialEq<Gaussian<S2, N>> for Gaussian<S1, N>
+where S1 : Constant,
+      S2 : Constant<Type=S1::Type> {
+    fn eq(&self, other : &Gaussian<S2, N>) -> bool {
+        // Two gaussians are equal iff their variances are.
+        self.variance.value() == other.variance.value()
+    }
+}
+
+impl<S1, S2, const N : usize> PartialOrd<Gaussian<S2, N>> for Gaussian<S1, N>
+where S1 : Constant,
+      S2 : Constant<Type=S1::Type> {
+
+    fn partial_cmp(&self, other : &Gaussian<S2, N>) -> Option<std::cmp::Ordering> {
+        // A gaussian is ≤ another gaussian if the Fourier transforms satisfy the
+        // corresponding inequality. That in turn holds if and only if the variances
+        // satisfy the opposite inequality.
+        let σ1sq = self.variance.value();
+        let σ2sq = other.variance.value();
+        σ2sq.partial_cmp(&σ1sq)
+    }
+}
+
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for Gaussian<S, N>
+where S : Constant {
+    type Output = S::Type;
+    // Scaled by 1/(2πσ²)^{N/2}, which gives the kernel unit integral over
+    // ℝ^N; the value at zero is 1/scale(), not 1.
+    #[inline]
+    fn apply(&self, x : &'a Loc<S::Type, N>) -> Self::Output {
+        let d_squared = x.norm2_squared();
+        let σ2 = self.variance.value();
+        let scale = self.scale();
+        (-d_squared / (2.0 * σ2)).exp() / scale
+    }
+}
+
+impl<S, const N : usize> Apply<Loc<S::Type, N>> for Gaussian<S, N>
+where S : Constant {
+    type Output = S::Type;
+    // Delegates to the by-reference implementation.
+    #[inline]
+    fn apply(&self, x : Loc<S::Type, N>) -> Self::Output {
+        self.apply(&x)
+    }
+}
+
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, S, const N : usize> Gaussian<S, N>
+where S : Constant {
+
+    /// Returns the (reciprocal) scaling constant $1/C=(2πσ^2)^{N/2}$.
+    #[inline]
+    pub fn scale(&self) -> S::Type {
+        let π = S::Type::PI;
+        let σ2 = self.variance.value();
+        (2.0*π*σ2).powi(N as i32).sqrt()
+    }
+}
+
+impl<'a, S, const N : usize> Support<S::Type, N> for Gaussian<S, N>
+where S : Constant {
+    #[inline]
+    fn support_hint(&self) -> Cube<S::Type,N> {
+        // A gaussian never vanishes, so the support is all of ℝ^N.
+        array_init(|| [S::Type::NEG_INFINITY, S::Type::INFINITY]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, _x : &Loc<S::Type,N>) -> bool {
+        true
+    }
+
+    // NOTE(review): `bisection_hint` is not implemented here, presumably
+    // relying on a trait default — confirm.
+}
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>>  for Gaussian<S, N>
+where S : Constant {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<S::Type> {
+        // Positive everywhere, maximum 1/scale() attained at the origin.
+        Bounds(0.0, 1.0/self.scale())
+    }
+}
+
+impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N>  for Gaussian<S, N>
+where S : Constant {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> {
+        // The function is maximised/minimised where the 2-norm is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        Bounds(lower, upper)
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Norm<C::Type, L1>
+for Gaussian<C, N> {
+    #[inline]
+    fn norm(&self, _ : L1) -> C::Type {
+        // Unit integral by the choice of the scaling constant C.
+        1.0
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity>
+for Gaussian<C, N> {
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> C::Type {
+        // The supremum is the global upper bound, attained at the origin.
+        self.bounds().upper()
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Fourier<C::Type>
+for Gaussian<C, N> {
+    type Domain = Loc<C::Type, N>;
+    type Transformed = Weighted<Gaussian<C::Type, N>, C::Type>;
+
+    #[inline]
+    fn fourier(&self) -> Self::Transformed {
+        // The transform of the unit-mass gaussian is e^{-2π²σ²‖ξ‖²}:
+        // a gaussian of variance 1/(4π²σ²), whose own normalisation is
+        // cancelled by the weight g.scale().
+        let π = C::Type::PI;
+        let σ2 = self.variance.value();
+        let g = Gaussian { variance : 1.0 / (4.0*π*π*σ2) };
+        g.weigh(g.scale())
+    }
+}
+
+/// Representation of the “cut” gaussian $f χ\_{[-a, a]^n}$
+/// where $a>0$ and $f$ is a gaussian kernel on $ℝ^n$.
+pub type BasicCutGaussian<C, S, const N : usize> = SupportProductFirst<CubeIndicator<C, N>,
+                                                                       Gaussian<S, N>>;
+
+
+/// This implements $χ\_{[-b, b]^n} \* (f χ\_{[-a, a]^n})$
+/// where $a,b>0$ and $f$ is a gaussian kernel on $ℝ^n$.
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, S, const N : usize> Apply<&'a Loc<F, N>>
+for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+
+    type Output = F;
+
+    #[inline]
+    fn apply(&self, y : &'a Loc<F, N>) -> F {
+        let Convolution(ref ind,
+                        SupportProductFirst(ref cut,
+                                            ref gaussian)) = self;
+        let a = cut.r.value();
+        let b = ind.r.value();
+        let σ = gaussian.variance.value().sqrt();
+        let π = F::PI;
+        let t = F::SQRT_2 * σ;
+        // One-dimensional factor: ∫_{c₁}^{c₂} e^{-s²/(2σ²)} ds
+        //   = σ√(π/2) [erf(c₂/(√2 σ)) − erf(c₁/(√2 σ))].
+        // Fixed: the constant was σ√(8/π), which overestimates the
+        // convolution by a factor of 4/π — e.g. it yields 4/π instead of 1
+        // in the limit a, b → ∞ after the division by `gaussian.scale()`.
+        let c = σ * (π / 2.0).sqrt();
+
+        // This is just a product of one-dimensional versions
+        let unscaled = y.product_map(|x| {
+            // Integration limits of the indicator-clipped gaussian slice.
+            let c1 = -(a.min(b + x)); //(-a).max(-x-b);
+            let c2 = a.min(b - x);
+            if c1 >= c2 {
+                0.0
+            } else {
+                let e1 = F::cast_from(erf((c1 / t).as_()));
+                let e2 = F::cast_from(erf((c2 / t).as_()));
+                debug_assert!(e2 >= e1);
+                c * (e2 - e1)
+            }
+        });
+
+        // Normalise by the gaussian's scaling constant (2πσ²)^{N/2}.
+        unscaled / gaussian.scale()
+    }
+}
+
+impl<F : Float, R, C, S, const N : usize> Apply<Loc<F, N>>
+for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+
+    type Output = F;
+
+    #[inline]
+    fn apply(&self, y : Loc<F, N>) -> F {
+        // Delegate to the by-reference implementation.
+        self.apply(&y)
+    }
+}
+
+impl<F : Float, R, C, S, const N : usize>
+Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+
+    /// Support radius of the convolution: the sum of the indicator's and
+    /// the cut-off's radii.
+    #[inline]
+    fn get_r(&self) -> F {
+        let Convolution(ref ind,
+                        SupportProductFirst(ref cut, ..)) = self;
+        ind.r.value() + cut.r.value()
+    }
+}
+
+impl<F : Float, R, C, S, const N : usize> Support<F, N>
+for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        let r = self.get_r();
+        array_init(|| [-r, r]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, y : &Loc<F, N>) -> bool {
+        let r = self.get_r();
+        y.iter().all(|x| x.abs() <= r)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        let r = self.get_r();
+        // From c1 = -a.min(b + x) and c2 = a.min(b - x) with c_1 < c_2,
+        // solve bounds for x. that is 0 ≤ a.min(b + x) + a.min(b - x).
+        // If b + x ≤ a and b - x ≤ a, the sum is 2b ≥ 0.
+        // If b + x ≥ a and b - x ≥ a, the sum is 2a ≥ 0.
+        // If b + x ≤ a and b - x ≥ a, the sum is b + x + a ⟹ need x ≥ -a - b = -r.
+        // If b + x ≥ a and b - x ≤ a, the sum is a + b - x ⟹ need x ≤ a + b = r.
+        cube.map(|c, d| symmetric_peak_hint(r, c, d))
+    }
+}
+
+impl<F : Float, R, C, S, const N : usize> GlobalAnalysis<F, Bounds<F>>
+for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        // Nonnegative, with the maximum attained at the origin by symmetry.
+        Bounds(F::ZERO, self.apply(Loc::ORIGIN))
+    }
+}
+
+impl<F : Float, R, C, S, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for Convolution<CubeIndicator<R, N>, BasicCutGaussian<C, S, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        // The function is maximised/minimised where the absolute value is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        Bounds(lower, upper)
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/hat.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,118 @@
+//! Implementation of the hat function
+
+use numeric_literals::replace_float_literals;
+use serde::Serialize;
+use alg_tools::types::*;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Constant,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+    Bounded,
+};
+use alg_tools::mapping::Apply;
+use alg_tools::maputil::{array_init};
+
+/// Representation of the hat function $f(x)=1-\\|x\\|\_1/ε$ of `width` $ε$ on $ℝ^N$.
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct Hat<C : Constant, const N : usize> {
+    /// The parameter $ε>0$.
+    pub width : C,
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Apply<&'a Loc<C::Type, N>> for Hat<C, N> {
+    type Output = C::Type;
+    #[inline]
+    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+        let ε = self.width.value();
+        0.0.max(1.0-x.norm(L1)/ε)
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<C : Constant, const N : usize> Apply<Loc<C::Type, N>> for Hat<C, N> {
+    type Output = C::Type;
+    #[inline]
+    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
+        self.apply(&x)
+    }
+}
+
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Support<C::Type, N> for Hat<C, N> {
+    #[inline]
+    fn support_hint(&self) -> Cube<C::Type,N> {
+        let ε = self.width.value();
+        array_init(|| [-ε, ε]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<C::Type,N>) -> bool {
+        x.norm(L1) < self.width.value()
+    }
+    
+    /*fn fully_in_support(&self, _cube : &Cube<C::Type,N>) -> bool {
+        todo!("Not implemented, but not used at the moment")
+    }*/
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<C::Type,N>) -> [Option<C::Type>; N] {
+        let ε = self.width.value();
+        cube.map(|a, b| {
+            if a < 1.0 {
+                if 1.0 < b {
+                    Some(1.0)
+                } else {
+                    if a < -ε {
+                        if b > -ε { Some(-ε) } else { None }
+                    } else {
+                        None
+                    }
+                }
+            } else {
+                if b > ε { Some(ε) } else { None }
+            }
+        });
+        todo!("also diagonals")
+    }
+}
+
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize>
+GlobalAnalysis<C::Type, Bounds<C::Type>>
+for Hat<C, N> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<C::Type> {
+        Bounds(0.0, 1.0)
+    }
+}
+
+impl<'a, C : Constant, const N : usize>
+LocalAnalysis<C::Type, Bounds<C::Type>, N>
+for Hat<C, N> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<C::Type, N>) -> Bounds<C::Type> {
+        // The function is maximised/minimised where the 1-norm is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        Bounds(lower, upper)
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize>
+Norm<C::Type, Linfinity>
+for Hat<C, N> {
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> C::Type {
+        self.bounds().upper()
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/hat_convolution.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,450 @@
+//! Implementation of the convolution of two hat functions,
+//! and its convolution with a [`CubeIndicator`].
+use numeric_literals::replace_float_literals;
+use serde::Serialize;
+use alg_tools::types::*;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Constant,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis,
+    Bounded,
+};
+use alg_tools::mapping::Apply;
+use alg_tools::maputil::array_init;
+
+use super::base::*;
+use super::ball_indicator::CubeIndicator;
+
+/// Hat convolution kernel.
+///
+/// This struct represents the function
+/// $$
+///     f(x\_1, …, x\_n) = \prod\_{i=1}^n \frac{4}{σ} (h\*h)(x\_i/σ)
+/// $$
+/// where the “hat function” $h(y)= \max(0, 1 - |2y|)$.
+/// The factor $4/σ$ normalises $∫ f d x = 1$.
+/// We have
+/// $$
+///     (h*h)(y) =
+///     \begin{cases}
+///         \frac{2}{3} (y+1)^3 & -1<y\leq -\frac{1}{2}, \\\\
+///         -2 y^3-2 y^2+\frac{1}{3} & -\frac{1}{2}<y\leq 0, \\\\
+///         2 y^3-2 y^2+\frac{1}{3} & 0<y<\frac{1}{2}, \\\\
+///         -\frac{2}{3} (y-1)^3 & \frac{1}{2}\leq y<1. \\\\
+///     \end{cases}
+/// $$
+#[derive(Copy,Clone,Debug,Serialize,Eq)]
+pub struct HatConv<S : Constant, const N : usize> {
+    /// The parameter $σ$ of the kernel.
+    pub radius : S,
+}
+
+impl<S1, S2, const N : usize> PartialEq<HatConv<S2, N>> for HatConv<S1, N>
+where S1 : Constant,
+      S2 : Constant<Type=S1::Type> {
+    fn eq(&self, other : &HatConv<S2, N>) -> bool {
+        self.radius.value() == other.radius.value()
+    }
+}
+
+impl<'a, S, const N : usize> HatConv<S, N> where S : Constant {
+    /// Returns the $σ$ parameter of the kernel.
+    #[inline]
+    pub fn radius(&self) -> S::Type {
+        self.radius.value()
+    }
+}
+
+impl<'a, S, const N : usize> Apply<&'a Loc<S::Type, N>> for HatConv<S, N>
+where S : Constant {
+    type Output = S::Type;
+    #[inline]
+    fn apply(&self, y : &'a Loc<S::Type, N>) -> Self::Output {
+        let σ = self.radius();
+        y.product_map(|x| {
+            self.value_1d_σ1(x  / σ) / σ
+        })
+    }
+}
+
+impl<'a, S, const N : usize> Apply<Loc<S::Type, N>> for HatConv<S, N>
+where S : Constant {
+    type Output = S::Type;
+    #[inline]
+    fn apply(&self, y : Loc<S::Type, N>) -> Self::Output {
+        self.apply(&y)
+    }
+}
+
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<'a, F : Float, S, const N : usize> HatConv<S, N>
+where S : Constant<Type=F> {
+    /// Computes the value of the kernel for $n=1$ with $σ=1$.
+    #[inline]
+    fn value_1d_σ1(&self, x : F) -> F {
+        let y = x.abs();
+        if y >= 1.0 {
+            0.0
+        } else if y > 0.5 {
+            - (8.0/3.0) * (y - 1.0).powi(3)
+        } else /* 0 ≤ y ≤ 0.5 */ {
+            (4.0/3.0) + 8.0 * y * y * (y - 1.0)
+        }
+    }
+}
+
+impl<'a, S, const N : usize> Support<S::Type, N> for HatConv<S, N>
+where S : Constant {
+    #[inline]
+    fn support_hint(&self) -> Cube<S::Type,N> {
+        let σ = self.radius();
+        array_init(|| [-σ, σ]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, y : &Loc<S::Type,N>) -> bool {
+        let σ = self.radius();
+        y.iter().all(|x| x.abs() <= σ)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<S::Type, N>) -> [Option<S::Type>; N] {
+        let σ = self.radius();
+        cube.map(|c, d| symmetric_peak_hint(σ, c, d))
+    }
+}
+
+#[replace_float_literals(S::Type::cast_from(literal))]
+impl<S, const N : usize> GlobalAnalysis<S::Type, Bounds<S::Type>>  for HatConv<S, N>
+where S : Constant {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<S::Type> {
+        Bounds(0.0, self.apply(Loc::ORIGIN))
+    }
+}
+
+impl<S, const N : usize> LocalAnalysis<S::Type, Bounds<S::Type>, N>  for HatConv<S, N>
+where S : Constant {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<S::Type, N>) -> Bounds<S::Type> {
+        // The function is maximised/minimised where the 2-norm is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        Bounds(lower, upper)
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Norm<C::Type, L1>
+for HatConv<C, N> {
+    #[inline]
+    fn norm(&self, _ : L1) -> C::Type {
+        1.0
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity>
+for HatConv<C, N> {
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> C::Type {
+        self.bounds().upper()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float, R, C, const N : usize> Apply<&'a Loc<F, N>>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type Output = F;
+
+    #[inline]
+    fn apply(&self, y : &'a Loc<F, N>) -> F {
+        let Convolution(ref ind, ref hatconv) = self;
+        let β = ind.r.value();
+        let σ = hatconv.radius();
+
+        // This is just a product of one-dimensional versions
+        y.product_map(|x| {
+            // With $u_σ(x) = u_1(x/σ)/σ$ the normalised hat convolution
+            // we have
+            // $$
+            //      [χ_{-β,β} * u_σ](x)
+            //      = ∫_{x-β}^{x+β} u_σ(z) d z
+            //      = (1/σ)∫_{x-β}^{x+β} u_1(z/σ) d z
+            //      = ∫_{(x-β)/σ}^{(x+β)/σ} u_1(z) d z
+            //      = [χ_{-β/σ, β/σ} * u_1](x/σ)
+            // $$
+            self.value_1d_σ1(x / σ, β / σ)
+        })
+    }
+}
+
+impl<'a, F : Float, R, C, const N : usize> Apply<Loc<F, N>>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    type Output = F;
+
+    #[inline]
+    fn apply(&self, y : Loc<F, N>) -> F {
+        self.apply(&y)
+    }
+}
+
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float, C, R, const N : usize> Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+    #[inline]
+    pub fn value_1d_σ1(&self, x : F, β : F) -> F {
+        // The integration interval
+        let a = x - β;
+        let b = x + β;
+
+        #[inline]
+        fn pow4<F : Float>(x : F) -> F {
+            let y = x * x;
+            y * y
+        }
+        
+        /// Integrate $f$, whose support is $[c, d]$, on $[a, b]$.
+        /// If $b > d$, add $g()$ to the result.
+        #[inline]
+        fn i<F: Float>(a : F, b : F, c : F, d : F, f : impl Fn(F) -> F,
+                       g : impl Fn() -> F) -> F {
+            if b < c {
+                0.0
+            } else if b <= d {
+                if a <= c {
+                    f(b) - f(c)
+                } else {
+                    f(b) - f(a)
+                }
+            } else /* b > d */ {
+                g() + if a <= c {
+                    f(d) - f(c)
+                } else if a < d {
+                    f(d) - f(a)
+                } else {
+                    0.0
+                }
+            }
+        }
+
+        // Observe the factor 1/6 at the front from the antiderivatives below.
+        // The factor 4 is from normalisation of the original function.
+        (4.0/6.0) * i(a, b, -1.0, -0.5,
+                // (2/3) (y+1)^3  on  -1 < y ≤ - 1/2
+                // The antiderivative is  (2/12)(y+1)^4 = (1/6)(y+1)^4
+                |y| pow4(y+1.0),
+                || i(a, b, -0.5, 0.0,
+                    // -2 y^3 - 2 y^2 + 1/3  on  -1/2 < y ≤ 0
+                    // The antiderivative is -1/2 y^4 - 2/3 y^3 + 1/3 y
+                    |y| y*(-y*y*(y*3.0 + 4.0) + 2.0),
+                    || i(a, b, 0.0, 0.5,
+                            // 2 y^3 - 2 y^2 + 1/3 on 0 < y < 1/2
+                            // The antiderivative is 1/2 y^4 - 2/3 y^3 + 1/3 y
+                            |y| y*(y*y*(y*3.0 - 4.0) + 2.0),
+                            || i(a, b, 0.5, 1.0,
+                                // -(2/3) (y-1)^3  on  1/2 < y ≤ 1
+                                // The antiderivative is  -(2/12)(y-1)^4 = -(1/6)(y-1)^4
+                                |y| -pow4(y-1.0),
+                                || 0.0
+                            )
+                    )
+                )
+        )
+    }
+}
+
+impl<F : Float, R, C, const N : usize>
+Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+
+    #[inline]
+    fn get_r(&self) -> F {
+        let Convolution(ref ind, ref hatconv) = self;
+        ind.r.value() + hatconv.radius()
+    }
+}
+
+impl<F : Float, R, C, const N : usize> Support<F, N>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+    
+    #[inline]
+    fn support_hint(&self) -> Cube<F, N> {
+        let r = self.get_r();
+        array_init(|| [-r, r]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, y : &Loc<F, N>) -> bool {
+        let r = self.get_r();
+        y.iter().all(|x| x.abs() <= r)
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        // It is not difficult to verify that [`HatConv`] is C^2.
+        // Therefore, so is [`Convolution<CubeIndicator<R, N>, HatConv<C, N>>`] so that a finer
+        // subdivision for the hint than this is not particularly useful.
+        let r = self.get_r();
+        cube.map(|c, d| symmetric_peak_hint(r, c, d))
+    }
+}
+
+impl<F : Float, R, C, const N : usize> GlobalAnalysis<F, Bounds<F>>
+for Convolution<CubeIndicator<R, N>, HatConv<C, N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        Bounds(F::ZERO, self.apply(Loc::ORIGIN))
+    }
+}
+
+impl<F : Float, R, C, const N : usize> LocalAnalysis<F, Bounds<F>, N>
+for Convolution<CubeIndicator<R, N>, HatConv<C,  N>>
+where R : Constant<Type=F>,
+      C : Constant<Type=F> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        // The function is maximised/minimised where the absolute value is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        //assert!(upper >= lower);
+        if upper < lower {
+            let Convolution(ref ind, ref hatconv) = self;
+            let β = ind.r.value();
+            let σ = hatconv.radius();
+            eprintln!("WARNING: Hat convolution {β} {σ} upper bound {upper} < lower bound {lower} on {cube:?} with min-norm point {:?} and max-norm point {:?}", cube.minnorm_point(), cube.maxnorm_point());
+            Bounds(upper, lower)
+        } else {
+            Bounds(lower, upper)
+        }
+    }
+}
+
+
+/// This [`BoundedBy`] implementation bounds $u * u$ by $(ψ * ψ) u$ for $u$ a hat convolution and
+/// $ψ = χ_{[-a,a]^N}$ for some $a>0$.
+///
+/// This is based on the general formula for bounding $(uχ) * (uχ)$ by $(ψ * ψ) u$,
+/// where we take $ψ = χ_{[-a,a]^N}$ and $χ = χ_{[-σ,σ]^N}$ for $σ$ the width of the hat
+/// convolution.
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, C, S, const N : usize>
+BoundedBy<F, SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>>
+for AutoConvolution<HatConv<C, N>>
+where F : Float,
+      C : Constant<Type=F>,
+      S : Constant<Type=F> {
+
+    fn bounding_factor(
+        &self,
+        kernel : &SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>
+    ) -> Option<F> {
+        // We use the comparison $ℱ[𝒜(ψ v)] ≤ L_1 ℱ[𝒜(ψ)u] ⟺ I_{v̂} v̂ ≤ L_1 û$ with
+        // $ψ = χ_{[-w, w]}$ satisfying $supp v ⊂ [-w, w]$, i.e. $w ≥ σ$. Here $v̂ = ℱ[v]$ and
+        // $I_{v̂} = ∫ v̂ d ξ$. For this relationship to be valid, we need $v̂ ≥ 0$, which is guaranteed
+        // by $v̂ = u_σ$ being an autoconvolution. With $u = v$, therefore $L_1 = I_v̂ = ∫ u_σ(ξ) d ξ$.
+        let SupportProductFirst(AutoConvolution(ref ind), hatconv2) = kernel;
+        let σ = self.0.radius();
+        let a = ind.r.value();
+        let bounding_1d = 4.0 / (3.0 * σ);
+
+        // Check that the cutting indicator of the comparison
+        // `SupportProductFirst<AutoConvolution<CubeIndicator<S, N>>, HatConv<C, N>>`
+        // is wide enough, and that the hat convolution has the same radius as ours.
+        if σ <= a && hatconv2 == &self.0 {
+            Some(bounding_1d.powi(N as i32))
+        } else {
+            // We cannot compare
+            None
+        }
+    }
+}
+
+/// This [`BoundedBy`] implementation bounds $u * u$ by $u$ for $u$ a hat convolution.
+///
+/// This is based on Example 3.3 in the manuscript.
+#[replace_float_literals(F::cast_from(literal))]
+impl<F, C, const N : usize>
+BoundedBy<F, HatConv<C, N>>
+for AutoConvolution<HatConv<C, N>>
+where F : Float,
+      C : Constant<Type=F> {
+
+    /// Returns an estimate of the factor $L_1$.
+    ///
+    /// Returns `None` if `kernel` does not have the same width as the hat convolution that
+    /// `self` is based on.
+    fn bounding_factor(
+        &self,
+        kernel : &HatConv<C, N>
+    ) -> Option<F> {
+        if kernel == &self.0 {
+            Some(1.0)
+        } else {
+            // We cannot compare
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use alg_tools::lingrid::linspace;
+    use alg_tools::mapping::Apply;
+    use alg_tools::norms::Linfinity;
+    use alg_tools::loc::Loc;
+    use crate::kernels::{BallIndicator, CubeIndicator, Convolution};
+    use super::HatConv;
+
+    /// Tests numerically that [`HatConv<f64, 1>`] is monotone.
+    #[test]
+    fn hatconv_monotonicity() {
+        let grid = linspace(0.0, 1.0, 100000);
+        let hatconv : HatConv<f64, 1> = HatConv{ radius : 1.0 };
+        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
+        let first = vals.next().unwrap();
+        let monotone = vals.fold((first, true), |(prev, ok), t| (prev, ok && prev >= t)).1;
+        assert!(monotone);
+    }
+
+    /// Tests numerically that [`Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>`] is monotone.
+    #[test]
+    fn convolution_cubeind_hatconv_monotonicity() {
+        let grid = linspace(-2.0, 0.0, 100000);
+        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
+            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
+                          HatConv{ radius : 1.0 } );
+        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
+        let first = vals.next().unwrap();
+        let monotone = vals.fold((first, true), |(prev, ok), t| (prev, ok && prev <= t)).1;
+        assert!(monotone);
+
+        let grid = linspace(0.0, 2.0, 100000);
+        let hatconv : Convolution<CubeIndicator<f64, 1>, HatConv<f64, 1>>
+            = Convolution(BallIndicator { r : 0.5, exponent : Linfinity },
+                          HatConv{ radius : 1.0 } );
+        let mut vals = grid.into_iter().map(|t| hatconv.apply(Loc::from(t)));
+        let first = vals.next().unwrap();
+        let monotone = vals.fold((first, true), |(prev, ok), t| (prev, ok && prev >= t)).1;
+        assert!(monotone);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/kernels/mollifier.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,136 @@
+
+//! Implementation of the standard mollifier
+
+use rgsl::hypergeometric::hyperg_U;
+use float_extras::f64::{tgamma as gamma};
+use numeric_literals::replace_float_literals;
+use serde::Serialize;
+use alg_tools::types::*;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::norms::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::{
+    Support,
+    Constant,
+    Bounds,
+    LocalAnalysis,
+    GlobalAnalysis
+};
+use alg_tools::mapping::Apply;
+use alg_tools::maputil::array_init;
+
+/// Representation of the (unnormalised) standard mollifier.
+///
+/// For the `width` parameter $ε>0$, this is
+/// <div>$$
+///     f(x)=\begin{cases}
+///         e^{\frac{\|x\|_2^2}{\|x\|_2^2-ε^2}}, & \|x\|_2 < ε, \\
+///         0, & \text{otherwise}.
+///     \end{cases}
+/// $$</div>
+#[derive(Copy,Clone,Serialize,Debug,Eq,PartialEq)]
+pub struct Mollifier<C : Constant, const N : usize> {
+    /// The parameter $ε$ of the mollifier.
+    pub width : C,
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Apply<&'a Loc<C::Type, N>> for Mollifier<C, N> {
+    type Output = C::Type;
+    #[inline]
+    fn apply(&self, x : &'a Loc<C::Type, N>) -> Self::Output {
+        let ε = self.width.value();
+        let ε2 = ε*ε;
+        let n2 = x.norm2_squared();
+        if n2 < ε2 {
+            (n2 / (n2 - ε2)).exp()
+        } else {
+            0.0
+        }
+    }
+}
+
+impl<C : Constant, const N : usize> Apply<Loc<C::Type, N>> for Mollifier<C, N> {
+    type Output = C::Type;
+    #[inline]
+    fn apply(&self, x : Loc<C::Type, N>) -> Self::Output {
+        self.apply(&x)
+    }
+}
+
+impl<'a, C : Constant, const N : usize> Support<C::Type, N> for Mollifier<C, N> {
+    #[inline]
+    fn support_hint(&self) -> Cube<C::Type,N> {
+        let ε = self.width.value();
+        array_init(|| [-ε, ε]).into()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<C::Type,N>) -> bool {
+        x.norm2() < self.width.value()
+    }
+    
+    /*fn fully_in_support(&self, _cube : &Cube<C::Type,N>) -> bool {
+        todo!("Not implemented, but not used at the moment")
+    }*/
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> GlobalAnalysis<C::Type, Bounds<C::Type>>
+for Mollifier<C, N> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<C::Type> {
+        // The function is maximised/minimised where the 2-norm is minimised/maximised.
+        Bounds(0.0, 1.0)
+    }
+}
+
+impl<'a, C : Constant, const N : usize> LocalAnalysis<C::Type, Bounds<C::Type>, N>
+for Mollifier<C, N> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<C::Type, N>) -> Bounds<C::Type> {
+        // The function is maximised/minimised where the 2-norm is minimised/maximised.
+        let lower = self.apply(cube.maxnorm_point());
+        let upper = self.apply(cube.minnorm_point());
+        Bounds(lower, upper)
+    }
+}
+
+/// Calculate integral of the standard mollifier of width 1 in $ℝ^n$.
+///
+/// This is based on the formula from
+/// <https://math.stackexchange.com/questions/4359683/integral-of-the-usual-mollifier-function-finding-its-necessary-constant>.
+///
+/// If `rescaled` is `true`, return the integral of the scaled mollifier that has value one at the
+/// origin.
+#[inline]
+pub fn mollifier_norm1(n_ : usize, rescaled : bool) -> f64 {
+    assert!(n_ > 0);
+    let n = n_ as f64;
+    let q = 2.0;
+    let p = 2.0;
+    let base = (2.0*gamma(1.0 + 1.0/p)).powi(n_ as i32)
+               /*/ gamma(1.0 + n / p)
+               * gamma(1.0 + n / q)*/
+               * hyperg_U(1.0 + n / q, 2.0, 1.0);
+    if rescaled { base } else { base / f64::E }
+}
+
+impl<'a, C : Constant, const N : usize> Norm<C::Type, L1>
+for Mollifier<C, N> {
+    #[inline]
+    fn norm(&self, _ : L1) -> C::Type {
+        let ε = self.width.value();
+        C::Type::cast_from(mollifier_norm1(N, true)) * ε.powi(N as i32)
+    }
+}
+
+#[replace_float_literals(C::Type::cast_from(literal))]
+impl<'a, C : Constant, const N : usize> Norm<C::Type, Linfinity>
+for Mollifier<C, N> {
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> C::Type {
+        1.0
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/main.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,233 @@
+// The main documentation is in the README.
+#![doc = include_str!("../README.md")]
+
+// We use unicode. We would like to use much more of it than Rust allows.
+// Live with it. Embrace it.
+#![allow(uncommon_codepoints)]
+#![allow(mixed_script_confusables)]
+#![allow(confusable_idents)]
+// Linear operators may be writtten e.g. as `opA` for a resemblance
+// to mathematical convention.
+#![allow(non_snake_case)]
+// We need the drain filter for inertial prune
+#![feature(drain_filter)]
+
+use clap::Parser;
+use itertools::Itertools;
+use serde_json;
+use alg_tools::iterate::Verbose;
+use alg_tools::parallelism::{
+    set_num_threads,
+    set_max_threads,
+};
+use std::num::NonZeroUsize;
+
+pub mod types;
+pub mod measures;
+pub mod fourier;
+pub mod kernels;
+pub mod seminorms;
+pub mod forward_model;
+pub mod plot;
+pub mod subproblem;
+pub mod tolerance;
+pub mod fb;
+pub mod frank_wolfe;
+pub mod pdps;
+pub mod run;
+pub mod rand_distr;
+pub mod experiments;
+
+use types::{float, ClapFloat};
+use run::{
+    DefaultAlgorithm,
+    Configuration,
+    PlotLevel,
+    Named,
+    AlgorithmConfig,
+};
+use experiments::DefaultExperiment;
+use measures::merging::SpikeMergingMethod;
+use DefaultExperiment::*;
+use DefaultAlgorithm::*;
+
+/// Command line parameters
+#[derive(Parser, Debug)]
+#[clap(
+    about = env!("CARGO_PKG_DESCRIPTION"),
+    author = env!("CARGO_PKG_AUTHORS"),
+    version = env!("CARGO_PKG_VERSION"),
+    after_help = "Pass --help for longer descriptions.",
+    after_long_help = "",
+)]
+pub struct CommandLineArgs {
+    #[arg(long, short = 'm', value_name = "M")]
+    /// Maximum iteration count
+    max_iter : Option<usize>,
+
+    #[arg(long, short = 'n', value_name = "N")]
+    /// Output status every N iterations. Set to 0 to disable.
+    verbose_iter : Option<usize>,
+
+    #[arg(long, short = 'q')]
+    /// Don't display iteration progress
+    quiet : bool,
+
+    /// List of experiments to perform.
+    #[arg(value_enum, value_name = "EXPERIMENT",
+           default_values_t = [Experiment1D, Experiment1DFast,
+                               Experiment2D, Experiment2DFast,
+                               Experiment1D_L1])]
+    experiments : Vec<DefaultExperiment>,
+
+    /// Default algorithm configration(s) to use on the experiments.
+    ///
+    /// Not all algorithms are available for all the experiments.
+    /// In particular, only PDPS is available for the experiments with L¹ data term.
+    #[arg(value_enum, value_name = "ALGORITHM", long, short = 'a',
+           default_values_t = [FB, FISTA, PDPS, FW, FWRelax])]
+    algorithm : Vec<DefaultAlgorithm>,
+
+    /// Saved algorithm configration(s) to use on the experiments
+    #[arg(value_name = "JSON_FILE", long)]
+    saved_algorithm : Vec<String>,
+
+    /// Write plots for every verbose iteration
+    #[arg(value_enum, long, short = 'p', default_value_t = PlotLevel::Data)]
+    plot : PlotLevel,
+
+    /// Directory for saving results
+    #[arg(long, short = 'o', default_value = "out")]
+    outdir : String,
+
+    #[arg(long, help_heading = "Multi-threading", default_value = "4")]
+    /// Maximum number of threads
+    max_threads : usize,
+
+    #[arg(long, help_heading = "Multi-threading")]
+    /// Number of threads. Overrides the maximum number.
+    num_threads : Option<usize>,
+
+    #[clap(flatten, next_help_heading = "Experiment overrides")]
+    /// Experiment setup overrides
+    experiment_overrides : ExperimentOverrides<float>,
+
+    #[clap(flatten, next_help_heading = "Algorithm overrides")]
+    /// Algorithm parametrisation overrides
+    algoritm_overrides : AlgorithmOverrides<float>,
+}
+
+/// Command line experiment setup overrides
+#[derive(Parser, Debug)]
+pub struct ExperimentOverrides<F : ClapFloat> {
+    #[arg(long)]
+    /// Regularisation parameter override.
+    ///
+    /// Only use if running just a single experiment, as different experiments have different
+    /// regularisation parameters.
+    alpha : Option<F>,
+
+    #[arg(long)]
+    /// Gaussian noise variance override
+    variance : Option<F>,
+
+    #[arg(long, value_names = &["MAGNITUDE", "PROBABILITY"])]
+    /// Salt and pepper noise override.
+    salt_and_pepper : Option<Vec<F>>,
+
+    #[arg(long)]
+    /// Noise seed
+    noise_seed : Option<u64>,
+}
+
+/// Command line algorithm parametrisation overrides
+#[derive(Parser, Debug)]
+pub struct AlgorithmOverrides<F : ClapFloat> {
+    #[arg(long, value_names = &["COUNT", "EACH"])]
+    /// Override bootstrap insertion iterations for --algorithm.
+    ///
+    /// The first parameter is the number of bootstrap insertion iterations, and the second
+    /// the maximum number of iterations on each of them.
+    bootstrap_insertions : Option<Vec<usize>>,
+
+    #[arg(long, requires = "algorithm")]
+    /// Primal step length parameter override for --algorithm.
+    ///
+    /// Only use if running just a single algorithm, as different algorithms have different
+    /// regularisation parameters. Does not affect the algorithms fw and fwrelax.
+    tau0 : Option<F>,
+
+    #[arg(long, requires = "algorithm")]
+    /// Dual step length parameter override for --algorithm.
+    ///
+    /// Only use if running just a single algorithm, as different algorithms have different
+    /// regularisation parameters. Only affects PDPS.
+    sigma0 : Option<F>,
+
+    #[arg(value_enum, long)]
+    /// PDPS acceleration, when available.
+    acceleration : Option<pdps::Acceleration>,
+
+    #[arg(long)]
+    /// Perform postprocess weight optimisation for saved iterations
+    ///
+    /// Only affects FB, FISTA, and PDPS.
+    postprocessing : Option<bool>,
+
+    #[arg(value_name = "n", long)]
+    /// Merging frequency, if merging enabled (every n iterations)
+    ///
+    /// Only affects FB, FISTA, and PDPS.
+    merge_every : Option<usize>,
+
+    #[arg(value_enum, long)]//, value_parser = SpikeMergingMethod::<float>::value_parser())]
+    /// Merging strategy
+    ///
+    /// Either the string "none", or a radius value for heuristic merging.
+    merging : Option<SpikeMergingMethod<F>>,
+
+    #[arg(value_enum, long)]//, value_parser = SpikeMergingMethod::<float>::value_parser())]
+    /// Final merging strategy
+    ///
+    /// Either the string "none", or a radius value for heuristic merging.
+    /// Only affects FB, FISTA, and PDPS.
+    final_merging : Option<SpikeMergingMethod<F>>,
+}
+
+/// The entry point for the program.
+pub fn main() {
+    let cli = CommandLineArgs::parse();
+
+    if let Some(n_threads) = cli.num_threads {
+        let n = NonZeroUsize::new(n_threads).expect("Invalid thread count");
+        set_num_threads(n);
+    } else {
+        let m = NonZeroUsize::new(cli.max_threads).expect("Invalid maximum thread count");
+        set_max_threads(m);
+    }
+
+    for experiment_shorthand in cli.experiments.iter().unique() {
+        let experiment = experiment_shorthand.get_experiment(&cli.experiment_overrides).unwrap();
+        let mut config : Configuration<float> = experiment.default_config();
+        let mut algs : Vec<Named<AlgorithmConfig<float>>>
+            = cli.algorithm.iter()
+                            .map(|alg| experiment.algorithm_defaults(*alg, &cli.algoritm_overrides))
+                            .collect();
+        for filename in cli.saved_algorithm.iter() {
+            let f = std::fs::File::open(filename).unwrap();
+            let alg = serde_json::from_reader(f).unwrap();
+            algs.push(alg);
+        }
+        cli.max_iter.map(|m| config.iterator_options.max_iter = m);
+        cli.verbose_iter.map(|n| config.iterator_options.verbose_iter = Verbose::Every(n));
+        config.plot = cli.plot;
+        config.iterator_options.quiet = cli.quiet;
+        config.outdir = cli.outdir.clone();
+        if !algs.is_empty() {
+            config.algorithms = algs.clone();
+        }
+
+        experiment.runall(config)
+                  .unwrap()
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/measures.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,9 @@
+//! This module implements measures, in particular [`DeltaMeasure`]s and [`DiscreteMeasure`]s.
+
+mod base;
+pub use base::*;
+mod delta;
+pub use delta::*;
+mod discrete;
+pub use discrete::*;
+pub mod merging;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/measures/base.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,18 @@
+//! Basic definitions for measures
+
+use serde::Serialize;
+use alg_tools::types::Num;
+use alg_tools::norms::{Norm, NormExponent};
+
+/// This is used with [`Norm::norm`] to indicate that a Radon norm is to be computed.
+#[derive(Copy,Clone,Serialize,Debug)]
+pub struct Radon;
+impl NormExponent for Radon {}
+
+/// A trait for (Radon) measures.
+///
+/// Currently has no methods, just the requirement that the Radon norm be implemented.
+pub trait Measure<F : Num> : Norm<F, Radon> {
+    type Domain;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/measures/delta.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,291 @@
+/*!
+This module implements delta measures, i.e., single spikes $\alpha \delta_x$ for some
+location $x$ and mass $\alpha$.
+*/
+
+use super::base::*;
+use crate::types::*;
+use std::ops::{Div, Mul, DivAssign, MulAssign, Neg};
+use serde::ser::{Serialize, Serializer, SerializeStruct};
+use alg_tools::norms::{Norm, Dist};
+use alg_tools::linops::{Apply, Linear};
+
+/// Representation of a delta measure.
+///
+/// This is a single spike $\alpha \delta\_x$ for some location $x$ in `Domain` and
+/// a mass $\alpha$ in `F`.
+#[derive(Clone,Copy,Debug)]
+pub struct DeltaMeasure<Domain, F : Num> {
+    // This causes [`csv`] to crash.
+    //#[serde(flatten)]
+    /// Location of the spike
+    pub x : Domain,
+    /// Mass of the spike
+    pub α : F
+}
+
+const COORDINATE_NAMES : &'static [&'static str] = &[
+    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+];
+
+// Need to manually implement serialisation as [`csv`] writer fails on
+// structs with nested arrays as well as with #[serde(flatten)].
+impl<F : Num, const N : usize> Serialize for DeltaMeasure<Loc<F, N>, F>
+where
+    F: Serialize,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        assert!(N <= COORDINATE_NAMES.len());
+
+        let mut s = serializer.serialize_struct("DeltaMeasure", N+1)?;
+        for (i, e) in (0..).zip(self.x.iter()) {
+            s.serialize_field(COORDINATE_NAMES[i], e)?;
+        }
+        s.serialize_field("weight", &self.α)?;
+        s.end()
+    }
+}
+
+
+impl<Domain : PartialEq, F : Float> Measure<F> for DeltaMeasure<Domain, F> {
+    type Domain = Domain;
+}
+
+impl<Domain : PartialEq, F : Float> Norm<F, Radon> for DeltaMeasure<Domain, F> {
+    #[inline]
+    fn norm(&self, _ : Radon) -> F {
+        self.α.abs()
+    }
+}
+
+impl<Domain : PartialEq, F : Float> Dist<F, Radon> for DeltaMeasure<Domain, F> {
+    #[inline]
+    fn dist(&self, other : &Self, _ : Radon) -> F {
+        if self.x == other. x {
+            (self.α - other.α).abs()
+        } else {
+            self.α.abs() + other.α.abs()
+        }
+    }
+}
+
+impl<'b, Domain, G, F : Num, V : Mul<F, Output=V>> Apply<G> for DeltaMeasure<Domain, F>
+where G: for<'a> Apply<&'a Domain, Output = V>,
+      V : Mul<F> {
+    type Output = V;
+
+    #[inline]
+    fn apply(&self, g : G) -> Self::Output {
+        g.apply(&self.x) * self.α
+    }
+}
+
+impl<Domain, G, F : Num, V : Mul<F, Output=V>> Linear<G> for DeltaMeasure<Domain, F>
+where G: for<'a> Apply<&'a Domain, Output = V> {
+    type Codomain = V;
+}
+
+// /// Partial blanket implementation of [`DeltaMeasure`] as a linear functional of [`Mapping`]s.
+// /// A full blanket implementation is not possible due to annoying Rust limitations: only [`Apply`]
+// /// on a reference is implemented, but a consuming [`Apply`] has to be implemented on a case-by-case
+// /// basis, not because an implementation could not be written, but because the Rust trait system
+// /// chokes up.
+// impl<Domain, G, F : Num, V> Linear<G> for DeltaMeasure<Domain, F>
+// where G: for<'a> Apply<&'a Domain, Output = V>,
+//       V : Mul<F>,
+//       Self: Apply<G, Output =  <V as Mul<F>>::Output> {
+//     type Codomain = <V as Mul<F>>::Output;
+// }
+
+// impl<'b, Domain, G, F : Num, V> Apply<&'b G> for DeltaMeasure<Domain, F>
+// where G: for<'a> Apply<&'a Domain, Output = V>,
+//       V : Mul<F> {
+//     type Output = <V as Mul<F>>::Output;
+
+//     #[inline]
+//     fn apply(&self, g : &'b G) -> Self::Output {
+//         g.apply(&self.x) * self.α
+//     }
+// }
+
+// /// Implementation of the necessary apply for BTFNs
+// mod btfn_apply {
+//     use super::*;
+//     use alg_tools::bisection_tree::{BTFN, BTImpl, SupportGenerator, LocalAnalysis};
+
+//     impl<F : Float, BT, G, V, const N : usize> Apply<BTFN<F, G, BT, N>>
+//     for DeltaMeasure<Loc<F, N>, F>
+//     where BT : BTImpl<F, N>,
+//         G : SupportGenerator<F, N, Id=BT::Data>,
+//         G::SupportType : LocalAnalysis<F, BT::Agg, N> + for<'a> Apply<&'a Loc<F, N>, Output = V>,
+//         V : std::iter::Sum + Mul<F> {
+        
+//         type Output = <V as Mul<F>>::Output;
+
+//         #[inline]
+//         fn apply(&self, g : BTFN<F, G, BT, N>) -> Self::Output {
+//             g.apply(&self.x) * self.α
+//         }
+//     }
+// }
+
+
+impl<D, Domain, F : Num> From<(D, F)> for DeltaMeasure<Domain, F>
+where D : Into<Domain> {
+    #[inline]
+    fn from((x, α) : (D, F)) -> Self {
+        DeltaMeasure{x: x.into(), α: α}
+    }
+}
+
+/*impl<F : Num> From<(F, F)> for DeltaMeasure<Loc<F, 1>, F> {
+    #[inline]
+    fn from((x, α) : (F, F)) -> Self {
+        DeltaMeasure{x: Loc([x]), α: α}
+    }
+}*/
+
+impl<Domain, F : Num> DeltaMeasure<Domain, F> {
+    /// Set the mass of the spike.
+    #[inline]
+    pub fn set_mass(&mut self, α : F) {
+        self.α = α
+    }
+
+    /// Set the location of the spike.
+    #[inline]
+    pub fn set_location(&mut self, x : Domain) {
+        self.x = x
+    }
+
+    /// Get the mass of the spike.
+    #[inline]
+    pub fn get_mass(&self) -> F {
+        self.α
+    }
+
+    /// Get a mutable reference to the mass of the spike.
+    #[inline]
+    pub fn get_mass_mut(&mut self) -> &mut F {
+        &mut self.α
+    }
+
+    /// Get a reference to the location of the spike.
+    #[inline]
+    pub fn get_location(&self) -> &Domain {
+        &self.x
+    }
+
+    /// Get a mutable reference to the location of the spike.
+    #[inline]
+    pub fn get_location_mut(&mut self) -> &mut Domain {
+        &mut self.x
+    }
+}
+
+
+macro_rules! make_delta_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F : Num, Domain> $trait<F> for DeltaMeasure<Domain, F> {
+            type Output = Self;
+            fn $fn(mut self, b : F) -> Self {
+                self.α.$fn_assign(b);
+                self
+            }
+        }
+
+        impl<'a, F : Num, Domain> $trait<&'a F> for DeltaMeasure<Domain, F> {
+            type Output = Self;
+            fn $fn(mut self, b : &'a F) -> Self {
+                self.α.$fn_assign(*b);
+                self
+            }
+        }
+
+        impl<'b, F : Num, Domain : Clone> $trait<F> for &'b DeltaMeasure<Domain, F> {
+            type Output = DeltaMeasure<Domain, F>;
+            fn $fn(self, b : F) -> Self::Output {
+                DeltaMeasure { α : self.α.$fn(b), x : self.x.clone() }
+            }
+        }
+
+        impl<'a, 'b, F : Num, Domain : Clone> $trait<&'a F> for &'b DeltaMeasure<Domain, F> {
+            type Output = DeltaMeasure<Domain, F>;
+            fn $fn(self, b : &'a F) -> Self::Output {
+                DeltaMeasure { α : self.α.$fn(*b), x : self.x.clone() }
+            }
+        }
+
+        impl<F : Num, Domain> $trait_assign<F> for DeltaMeasure<Domain, F> {
+            fn $fn_assign(&mut self, b : F) {
+                self.α.$fn_assign(b)
+            }
+        }
+
+        impl<'a, F : Num, Domain> $trait_assign<&'a F> for DeltaMeasure<Domain, F> {
+            fn $fn_assign(&mut self, b : &'a F) {
+                self.α.$fn_assign(*b)
+            }
+        }
+    }
+}
+
+make_delta_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_delta_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+macro_rules! make_delta_scalarop_lhs {
+    ($trait:ident, $fn:ident; $($f:ident)+) => { $(
+        impl<Domain> $trait<DeltaMeasure<Domain, $f>> for $f {
+            type Output = DeltaMeasure<Domain, $f>;
+            fn $fn(self, mut δ : DeltaMeasure<Domain, $f>) -> Self::Output {
+                δ.α = self.$fn(δ.α);
+                δ
+            }
+        }
+
+        impl<'a, Domain : Clone> $trait<&'a DeltaMeasure<Domain, $f>> for $f {
+            type Output = DeltaMeasure<Domain, $f>;
+            fn $fn(self, δ : &'a DeltaMeasure<Domain, $f>) -> Self::Output {
+                DeltaMeasure{ x : δ.x.clone(), α : self.$fn(δ.α) }
+            }
+        }
+
+        impl<'b, Domain> $trait<DeltaMeasure<Domain, $f>> for &'b $f {
+            type Output = DeltaMeasure<Domain, $f>;
+            fn $fn(self, mut δ : DeltaMeasure<Domain, $f>) -> Self::Output {
+                δ.α = self.$fn(δ.α);
+                δ
+            }
+        }
+
+        impl<'a, 'b, Domain : Clone> $trait<&'a DeltaMeasure<Domain, $f>> for &'b $f {
+            type Output = DeltaMeasure<Domain, $f>;
+            fn $fn(self, δ : &'a DeltaMeasure<Domain, $f>) -> Self::Output {
+                DeltaMeasure{ x : δ.x.clone(), α : self.$fn(δ.α) }
+            }
+        }
+    )+ }
+}
+
+make_delta_scalarop_lhs!(Mul, mul; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
+make_delta_scalarop_lhs!(Div, div; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
+
+macro_rules! make_delta_unary {
+    ($trait:ident, $fn:ident, $type:ty) => {
+        impl<'a, F : Num + Neg<Output=F>, Domain : Clone> Neg for $type {
+            type Output = DeltaMeasure<Domain, F>;
+            fn $fn(self) -> Self::Output {
+                let mut tmp = self.clone();
+                tmp.α = tmp.α.$fn();
+                tmp
+            }
+        }
+    }
+}
+
+make_delta_unary!(Neg, neg, DeltaMeasure<Domain, F>);
+make_delta_unary!(Neg, neg, &'a DeltaMeasure<Domain, F>);
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/measures/discrete.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,576 @@
+//! This module implements discrete measures.
+
+use std::ops::{
+    Div,Mul,DivAssign,MulAssign,Neg,
+    Add,Sub,AddAssign,SubAssign,
+    Index,IndexMut,
+};
+use std::iter::Sum;
+use serde::ser::{Serializer, Serialize, SerializeSeq};
+use nalgebra::DVector;
+
+use alg_tools::norms::Norm;
+use alg_tools::tabledump::TableDump;
+use alg_tools::linops::{Apply, Linear};
+use alg_tools::iter::{MapF,Mappable};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+
+use crate::types::*;
+use super::base::*;
+use super::delta::*;
+
+/// Representation of a discrete measure.
+///
+/// This is the measure $μ = ∑_{k=1}^n α_k δ_{x_k}$, consisting of several
+/// [`DeltaMeasure`], i.e., “spikes” $α_k δ_{x_k}$ with weights $\alpha_k$ in `F` at locations
+/// $x_k$ in `Domain`.
+#[derive(Clone,Debug)]
+pub struct DiscreteMeasure<Domain, F : Num> {
+    pub(super) spikes : Vec<DeltaMeasure<Domain, F>>,
+}
+
+/// Iterator over the [`DeltaMeasure`] spikes of a [`DiscreteMeasure`].
+pub type SpikeIter<'a, Domain, F> = std::slice::Iter<'a, DeltaMeasure<Domain, F>>;
+
+/// Iterator over mutable [`DeltaMeasure`] spikes of a [`DiscreteMeasure`].
+pub type SpikeIterMut<'a, Domain, F> = std::slice::IterMut<'a, DeltaMeasure<Domain, F>>;
+
+/// Iterator over the locations of the spikes of a [`DiscreteMeasure`].
+pub type LocationIter<'a, Domain, F>
+    = std::iter::Map<SpikeIter<'a, Domain, F>, fn(&'a DeltaMeasure<Domain, F>) -> &'a Domain>;
+
+/// Iterator over the masses of the spikes of a [`DiscreteMeasure`].
+pub type MassIter<'a, Domain, F>
+    = std::iter::Map<SpikeIter<'a, Domain, F>, fn(&'a DeltaMeasure<Domain, F>) -> F>;
+
+/// Iterator over the mutable masses of the spikes of a [`DiscreteMeasure`].
+pub type MassIterMut<'a, Domain, F>
+    = std::iter::Map<SpikeIterMut<'a, Domain, F>, for<'r> fn(&'r mut DeltaMeasure<Domain, F>) -> &'r mut F>;
+
+impl<Domain, F : Num> DiscreteMeasure<Domain, F> {
+    /// Create a new zero measure (empty spike set).
+    pub fn new() -> Self {
+        DiscreteMeasure{ spikes : Vec::new() }
+    }
+
+    /// Number of [`DeltaMeasure`] spikes in the measure
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.spikes.len()
+    }
+
+    /// Iterate over (references to) the [`DeltaMeasure`] spikes in this measure
+    #[inline]
+    pub fn iter_spikes(&self) -> SpikeIter<'_, Domain, F> {
+        self.spikes.iter()
+    }
+
+    /// Iterate over mutable references to the [`DeltaMeasure`] spikes in this measure
+    #[inline]
+    pub fn iter_spikes_mut(&mut self) -> SpikeIterMut<'_, Domain, F> {
+        self.spikes.iter_mut()
+    }
+
+    /// Iterate over the location of the spikes in this measure
+    #[inline]
+    pub fn iter_locations(&self) -> LocationIter<'_, Domain, F> {
+        self.iter_spikes().map(DeltaMeasure::get_location)
+    }
+
+    /// Iterate over the masses of the spikes in this measure
+    #[inline]
+    pub fn iter_masses(&self) -> MassIter<'_, Domain, F> {
+        self.iter_spikes().map(DeltaMeasure::get_mass)
+    }
+
+    /// Iterate over mutable references to the masses of the spikes in this measure
+    #[inline]
+    pub fn iter_masses_mut(&mut self) -> MassIterMut<'_, Domain, F> {
+        self.iter_spikes_mut().map(DeltaMeasure::get_mass_mut)
+    }
+
+    /// Update the masses of all the spikes to those produced by an iterator.
+    #[inline]
+    pub fn set_masses<I : Iterator<Item=F>>(&mut self, iter : I) {
+        self.spikes.iter_mut().zip(iter).for_each(|(δ, α)| δ.set_mass(α));
+    }
+
+    // /// Map the masses of all the spikes using a function and an iterator
+    // #[inline]
+    // pub fn zipmap_masses<
+    //     I : Iterator<Item=F>,
+    //     G : Fn(F, I::Item) -> F
+    // > (&mut self, iter : I, g : G) {
+    //     self.spikes.iter_mut().zip(iter).for_each(|(δ, v)| δ.set_mass(g(δ.get_mass(), v)));
+    // }
+
+    /// Prune all spikes with zero mass.
+    #[inline]
+    pub fn prune(&mut self) {
+        self.spikes.retain(|δ| δ.α != F::ZERO);
+    }
+}
+
+impl<Domain : Clone, F : Float> DiscreteMeasure<Domain, F> {
+    /// Computes `μ1 ← θ * μ1 - ζ * μ2`, pruning entries where both `μ1` (`self`) and `μ2` have
+    /// zero weight. `μ2` will contain a copy of the pruned original `μ1` without arithmetic performed.
+    /// **This expects `self` and `μ2` to have matching coordinates in each index**.
+    /// `μ2` can be shorter than `self`, but not longer.
+    pub fn pruning_sub(&mut self, θ : F, ζ : F, μ2 : &mut Self) {
+        let mut μ2_get = 0;
+        let mut μ2_insert = 0;
+        self.spikes.drain_filter(|&mut DeltaMeasure{ α : ref mut α_ref, ref x }| {
+            // Get weight of spike in μ2, zero if out of bounds.
+            let β = μ2.spikes.get(μ2_get).map_or(F::ZERO, DeltaMeasure::get_mass);
+            μ2_get += 1;
+
+            if *α_ref == F::ZERO && β == F::ZERO {
+                // Prune
+                true
+            } else {
+                // Save self weight
+                let α = *α_ref;
+                // Modify self
+                *α_ref = θ * α - ζ * β;
+                // Make copy of old self weight in μ2
+                let δ = DeltaMeasure{ α, x : x.clone() };
+                match μ2.spikes.get_mut(μ2_insert) {
+                    Some(replace) => {
+                        *replace = δ;
+                    },
+                    None => {
+                        debug_assert_eq!(μ2.len(), μ2_insert);
+                        μ2.spikes.push(δ);
+                    },
+                }
+                μ2_insert += 1;
+                // Keep
+                false
+            }
+        });
+        // Truncate μ2 to same length as self.
+        μ2.spikes.truncate(μ2_insert);
+        debug_assert_eq!(μ2.len(), self.len());
+    }
+}
+
+impl<Domain, F : Float> DiscreteMeasure<Domain, F> {
+    /// Prune all spikes with mass absolute value less than the given `tolerance`.
+    #[inline]
+    pub fn prune_approx(&mut self, tolerance : F) {
+        self.spikes.retain(|δ| δ.α.abs() > tolerance);
+    }
+}
+
+impl<Domain, F : Float + ToNalgebraRealField> DiscreteMeasure<Domain, F> {
+    /// Extracts the masses of the spikes as a [`DVector`].
+    pub fn masses_dvector(&self) -> DVector<F::MixedType> {
+        DVector::from_iterator(self.len(),
+                               self.iter_masses()
+                                   .map(|α| α.to_nalgebra_mixed()))
+    }
+
+    /// Sets the masses of the spikes from the values of a [`DVector`].
+    pub fn set_masses_dvector(&mut self, x : &DVector<F::MixedType>) {
+        self.set_masses(x.iter().map(|&α| F::from_nalgebra_mixed(α)));
+    }
+}
+
+impl<Domain, F :Num> Index<usize> for DiscreteMeasure<Domain, F> {
+    type Output = DeltaMeasure<Domain, F>;
+    #[inline]
+    fn index(&self, i : usize) -> &Self::Output {
+        self.spikes.index(i)
+    }
+}
+
+impl<Domain, F :Num> IndexMut<usize> for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn index_mut(&mut self, i : usize) -> &mut Self::Output {
+        self.spikes.index_mut(i)
+    }
+}
+
+impl<Domain, F : Num, D : Into<DeltaMeasure<Domain, F>>, const K : usize> From<[D; K]>
+for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn from(list : [D; K]) -> Self {
+        list.into_iter().collect()
+    }
+}
+
+impl<Domain, F : Num, D : Into<DeltaMeasure<Domain, F>>> FromIterator<D>
+for DiscreteMeasure<Domain, F> {
+    #[inline]
+    fn from_iter<T>(iter : T) -> Self
+    where T : IntoIterator<Item=D> {
+        DiscreteMeasure{
+            spikes : iter.into_iter().map(|m| m.into()).collect()
+        }
+    }
+}
+
+impl<'a, F : Num, const N : usize> TableDump<'a>
+for DiscreteMeasure<Loc<F, N>,F>
+where DeltaMeasure<Loc<F, N>, F> : Serialize + 'a {
+    type Iter = std::slice::Iter<'a, DeltaMeasure<Loc<F, N>, F>>;
+
+    // fn tabledump_headers(&'a self) -> Vec<String> {
+    //     let mut v : Vec<String> = (0..N).map(|i| format!("x{}", i)).collect();
+    //     v.push("weight".into());
+    //     v
+    // }
+
+    fn tabledump_entries(&'a self) -> Self::Iter {
+        // Ensure order matching the headers above
+        self.spikes.iter()
+    }
+}
+
+// Need to manually implement serialisation for DeltaMeasure<Loc<F, N>, F>, as the [`csv`] writer fails on
+// structs with nested arrays as well as with #[serde(flatten)].
+// Then derive no longer works for DiscreteMeasure
+impl<F : Num, const N : usize> Serialize for DiscreteMeasure<Loc<F, N>, F>
+where
+    F: Serialize,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut s = serializer.serialize_seq(Some(self.spikes.len()))?;
+        for δ in self.spikes.iter() {
+            s.serialize_element(δ)?;
+        }
+        s.end()
+    }
+}
+
+impl<Domain : PartialEq, F : Float> Measure<F> for DiscreteMeasure<Domain, F> {
+    type Domain = Domain;
+}
+
+impl<Domain : PartialEq, F : Float> Norm<F, Radon> for DiscreteMeasure<Domain, F>
+where DeltaMeasure<Domain, F> : Norm<F, Radon> {
+    #[inline]
+    fn norm(&self, _ : Radon) -> F {
+        self.spikes.iter().map(|m| m.norm(Radon)).sum()
+    }
+}
+
+impl<Domain, G, F : Num, Y : Sum + Mul<F, Output=Y>> Apply<G> for DiscreteMeasure<Domain, F>
+where G: for<'a> Apply<&'a Domain, Output = Y> {
+    type Output = Y;
+    #[inline]
+    fn apply(&self, g : G) -> Y {
+        self.spikes.iter().map(|m| g.apply(&m.x) * m.α).sum()
+    }
+}
+
+impl<Domain, G, F : Num, Y : Sum + Mul<F, Output=Y>> Linear<G> for DiscreteMeasure<Domain, F>
+where G : for<'a> Apply<&'a Domain, Output = Y> {
+    type Codomain = Y;
+}
+
+
+/// Helper trait for constructing arithmetic operations for combinations
+/// of [`DiscreteMeasure`] and [`DeltaMeasure`], and their references.
+trait Lift<F : Num, Domain> {
+    type Producer : Iterator<Item=DeltaMeasure<Domain, F>>;
+
+    /// Lifts `self` into a [`DiscreteMeasure`].
+    fn lift(self) -> DiscreteMeasure<Domain, F>;
+
+    /// Lifts `self` into a [`DiscreteMeasure`], applying either `f` or `f_mut` depending on
+    /// whether the type this method is implemented for is a reference or not.
+    fn lift_with(self,
+                 f : impl Fn(&DeltaMeasure<Domain, F>) -> DeltaMeasure<Domain, F>,
+                 f_mut : impl FnMut(&mut DeltaMeasure<Domain, F>))
+                 -> DiscreteMeasure<Domain, F>;
+
+    /// Extend `self` into a [`DiscreteMeasure`] with the spikes produced by `iter`.
+    fn lift_extend<I : Iterator<Item=DeltaMeasure<Domain, F>>>(
+        self,
+        iter : I
+    ) -> DiscreteMeasure<Domain, F>;
+
+    /// Returns an iterator for producing copies of the spikes of `self`.
+    fn produce(self) -> Self::Producer;
+}
+
+impl<F : Num, Domain> Lift<F, Domain> for DiscreteMeasure<Domain, F> {
+    type Producer = std::vec::IntoIter<DeltaMeasure<Domain, F>>;
+
+    #[inline]
+    fn lift(self) -> DiscreteMeasure<Domain, F> { self }
+
+    fn lift_with(mut self,
+                 _f : impl Fn(&DeltaMeasure<Domain, F>) -> DeltaMeasure<Domain, F>,
+                 f_mut : impl FnMut(&mut DeltaMeasure<Domain, F>))
+                 -> DiscreteMeasure<Domain, F> {
+        self.spikes.iter_mut().for_each(f_mut);
+        self
+    }
+
+    #[inline]
+    fn lift_extend<I : Iterator<Item=DeltaMeasure<Domain, F>>>(
+        mut self,
+        iter : I
+    ) -> DiscreteMeasure<Domain, F> {
+        self.spikes.extend(iter);
+        self
+    }
+
+    #[inline]
+    fn produce(self) -> Self::Producer {
+        self.spikes.into_iter()
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Lift<F, Domain> for &'a DiscreteMeasure<Domain, F> {
+    type Producer = MapF<std::slice::Iter<'a, DeltaMeasure<Domain, F>>, DeltaMeasure<Domain, F>>;
+    
+    #[inline]
+    fn lift(self) -> DiscreteMeasure<Domain, F> { self.clone() }
+
+    fn lift_with(self,
+                 f : impl Fn(&DeltaMeasure<Domain, F>) -> DeltaMeasure<Domain, F>,
+                 _f_mut : impl FnMut(&mut DeltaMeasure<Domain, F>))
+                 -> DiscreteMeasure<Domain, F> {
+        DiscreteMeasure{ spikes : self.spikes.iter().map(f).collect() }
+    }
+
+    #[inline]
+    fn lift_extend<I : Iterator<Item=DeltaMeasure<Domain, F>>>(
+        self,
+        iter : I
+    ) -> DiscreteMeasure<Domain, F> {
+        let mut res = self.clone();
+        res.spikes.extend(iter);
+        res
+    }
+
+    #[inline]
+    fn produce(self) -> Self::Producer {
+        // TODO: maybe not optimal to clone here and would benefit from
+        // a reference version of lift_extend.
+        self.spikes.iter().mapF(Clone::clone)
+    }
+}
+
+impl<F : Num, Domain> Lift<F, Domain> for DeltaMeasure<Domain, F> {
+    type Producer = std::iter::Once<DeltaMeasure<Domain, F>>;
+
+    #[inline]
+    fn lift(self) -> DiscreteMeasure<Domain, F> { DiscreteMeasure { spikes : vec![self] } }
+
+    #[inline]
+    fn lift_with(mut self,
+                 _f : impl Fn(&DeltaMeasure<Domain, F>) -> DeltaMeasure<Domain, F>,
+                 mut f_mut : impl FnMut(&mut DeltaMeasure<Domain, F>))
+                 -> DiscreteMeasure<Domain, F> {
+        f_mut(&mut self);
+        DiscreteMeasure{ spikes : vec![self] }
+    }
+
+    #[inline]
+    fn lift_extend<I : Iterator<Item=DeltaMeasure<Domain, F>>>(
+        self,
+        iter : I
+    ) -> DiscreteMeasure<Domain, F> {
+        let mut spikes = vec![self];
+        spikes.extend(iter);
+        DiscreteMeasure{ spikes : spikes }
+    }
+
+    #[inline]
+    fn produce(self) -> Self::Producer {
+        std::iter::once(self)
+    }
+}
+
+impl<'a, F : Num, Domain : Clone> Lift<F, Domain> for &'a DeltaMeasure<Domain, F> {
+    type Producer = std::iter::Once<DeltaMeasure<Domain, F>>;
+
+    #[inline]
+    fn lift(self) -> DiscreteMeasure<Domain, F> { DiscreteMeasure { spikes : vec![self.clone()] } }
+
+    #[inline]
+    fn lift_with(self,
+                 f : impl Fn(&DeltaMeasure<Domain, F>) -> DeltaMeasure<Domain, F>,
+                 _f_mut : impl FnMut(&mut DeltaMeasure<Domain, F>))
+                 -> DiscreteMeasure<Domain, F> {
+        DiscreteMeasure{ spikes : vec![f(self)] }
+    }
+
+    #[inline]
+    fn lift_extend<I : Iterator<Item=DeltaMeasure<Domain, F>>>(
+        self,
+        iter : I
+    ) -> DiscreteMeasure<Domain, F> {
+        let mut spikes = vec![self.clone()];
+        spikes.extend(iter);
+        DiscreteMeasure{ spikes : spikes }
+    }
+
+    #[inline]
+    fn produce(self) -> Self::Producer {
+        std::iter::once(self.clone())
+    }
+}
+
+macro_rules! make_discrete_addsub_assign {
+    ($rhs:ty) => {
+        // Discrete += (&)Discrete
+        impl<'a, F : Num, Domain : Clone> AddAssign<$rhs>
+        for DiscreteMeasure<Domain, F> {
+            fn add_assign(&mut self, other : $rhs) {
+                self.spikes.extend(other.produce());
+            }
+        }
+
+        impl<'a, F : Num + Neg<Output=F>, Domain : Clone> SubAssign<$rhs>
+        for DiscreteMeasure<Domain, F> {
+            fn sub_assign(&mut self, other : $rhs) {
+                self.spikes.extend(other.produce().map(|δ| -δ));
+            }
+        }
+    }
+}
+
+make_discrete_addsub_assign!(DiscreteMeasure<Domain, F>);
+make_discrete_addsub_assign!(&'a DiscreteMeasure<Domain, F>);
+make_discrete_addsub_assign!(DeltaMeasure<Domain, F>);
+make_discrete_addsub_assign!(&'a DeltaMeasure<Domain, F>);
+
+macro_rules! make_discrete_addsub {
+    ($lhs:ty, $rhs:ty, $alt_order:expr) => {
+        impl<'a, 'b, F : Num, Domain : Clone> Add<$rhs> for $lhs {
+            type Output = DiscreteMeasure<Domain, F>;
+            fn add(self, other : $rhs) -> DiscreteMeasure<Domain, F> {
+                if !$alt_order {
+                    self.lift_extend(other.produce())
+                } else {
+                    other.lift_extend(self.produce())
+                }
+            }
+        }
+
+        impl<'a, 'b, F : Num + Neg<Output=F>, Domain : Clone> Sub<$rhs> for $lhs {
+            type Output = DiscreteMeasure<Domain, F>;
+            fn sub(self, other : $rhs) -> DiscreteMeasure<Domain, F> {
+                self.lift_extend(other.produce().map(|δ| -δ))
+            }
+        }
+    };
+}
+
+make_discrete_addsub!(DiscreteMeasure<Domain, F>,     DiscreteMeasure<Domain, F>,     false);
+make_discrete_addsub!(DiscreteMeasure<Domain, F>,     &'b DiscreteMeasure<Domain, F>, false);
+make_discrete_addsub!(&'a DiscreteMeasure<Domain, F>, DiscreteMeasure<Domain, F>,     true);
+make_discrete_addsub!(&'a DiscreteMeasure<Domain, F>, &'b DiscreteMeasure<Domain, F>, false);
+make_discrete_addsub!(DeltaMeasure<Domain, F>,        DiscreteMeasure<Domain, F>,     false);
+make_discrete_addsub!(DeltaMeasure<Domain, F>,        &'b DiscreteMeasure<Domain, F>, false);
+make_discrete_addsub!(&'a DeltaMeasure<Domain, F>,    DiscreteMeasure<Domain, F>,     true);
+make_discrete_addsub!(&'a DeltaMeasure<Domain, F>,    &'b DiscreteMeasure<Domain, F>, false);
+make_discrete_addsub!(DiscreteMeasure<Domain, F>,     DeltaMeasure<Domain, F>,        false);
+make_discrete_addsub!(DiscreteMeasure<Domain, F>,     &'b DeltaMeasure<Domain, F>,    false);
+make_discrete_addsub!(&'a DiscreteMeasure<Domain, F>, DeltaMeasure<Domain, F>,        false);
+make_discrete_addsub!(&'a DiscreteMeasure<Domain, F>, &'b DeltaMeasure<Domain, F>,    false);
+make_discrete_addsub!(DeltaMeasure<Domain, F>,        DeltaMeasure<Domain, F>,        false);
+make_discrete_addsub!(DeltaMeasure<Domain, F>,        &'b DeltaMeasure<Domain, F>,    false);
+make_discrete_addsub!(&'a DeltaMeasure<Domain, F>,    DeltaMeasure<Domain, F>,        false);
+make_discrete_addsub!(&'a DeltaMeasure<Domain, F>,    &'b DeltaMeasure<Domain, F>,    false);
+
+macro_rules! make_discrete_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        make_discrete_scalarop_rhs!(@assign DiscreteMeasure<Domain, F>, F, $trait_assign, $fn_assign);
+        make_discrete_scalarop_rhs!(@assign DiscreteMeasure<Domain, F>, &'a F, $trait_assign, $fn_assign);
+        make_discrete_scalarop_rhs!(@new DiscreteMeasure<Domain, F>, F, $trait, $fn, $fn_assign);
+        make_discrete_scalarop_rhs!(@new DiscreteMeasure<Domain, F>, &'a F, $trait, $fn, $fn_assign);
+        make_discrete_scalarop_rhs!(@new &'b DiscreteMeasure<Domain, F>, F, $trait, $fn, $fn_assign);
+        make_discrete_scalarop_rhs!(@new &'b DiscreteMeasure<Domain, F>, &'a F, $trait, $fn, $fn_assign);
+    };
+
+    (@assign $lhs:ty, $rhs:ty, $trait_assign:ident, $fn_assign:ident) => {
+        impl<'a, 'b, F : Num, Domain> $trait_assign<$rhs> for $lhs {
+            fn $fn_assign(&mut self, b : $rhs) {
+                self.spikes.iter_mut().for_each(|δ| δ.$fn_assign(b));
+            }
+        }
+    };
+    (@new $lhs:ty, $rhs:ty, $trait:ident, $fn:ident, $fn_assign:ident) => {
+        impl<'a, 'b, F : Num, Domain : Clone> $trait<$rhs> for $lhs {
+            type Output = DiscreteMeasure<Domain, F>;
+            fn $fn(self, b : $rhs) -> Self::Output {
+                self.lift_with(|δ| δ.$fn(b), |δ| δ.$fn_assign(b))
+            }
+        }
+    };
+}
+
+make_discrete_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_discrete_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+macro_rules! make_discrete_unary {
+    ($trait:ident, $fn:ident, $type:ty) => {
+        impl<'a, F : Num + Neg<Output=F>, Domain : Clone> Neg for $type {
+            type Output = DiscreteMeasure<Domain, F>;
+            fn $fn(self) -> Self::Output {
+                self.lift_with(|δ| δ.$fn(), |δ| δ.α = δ.α.$fn())
+            }
+        }
+    }
+}
+
+make_discrete_unary!(Neg, neg, DiscreteMeasure<Domain, F>);
+make_discrete_unary!(Neg, neg, &'a DiscreteMeasure<Domain, F>);
+
+// impl<F : Num, Domain> Neg for DiscreteMeasure<Domain, F> {
+//     type Output = Self;
+//     fn $fn(mut self, b : F) -> Self {
+//         self.lift().spikes.iter_mut().for_each(|δ| δ.neg(b));
+//         self
+//     }
+// }
+
+macro_rules! make_discrete_scalarop_lhs {
+    ($trait:ident, $fn:ident; $($f:ident)+) => { $(
+        impl<Domain> $trait<DiscreteMeasure<Domain, $f>> for $f {
+            type Output = DiscreteMeasure<Domain, $f>;
+            fn $fn(self, mut v : DiscreteMeasure<Domain, $f>) -> Self::Output {
+                v.spikes.iter_mut().for_each(|δ| δ.α = self.$fn(δ.α));
+                v
+            }
+        }
+
+        impl<'a, Domain : Copy> $trait<&'a DiscreteMeasure<Domain, $f>> for $f {
+            type Output = DiscreteMeasure<Domain, $f>;
+            fn $fn(self, v : &'a DiscreteMeasure<Domain, $f>) -> Self::Output {
+                DiscreteMeasure{
+                    spikes : v.spikes.iter().map(|δ| self.$fn(δ)).collect()
+                }
+            }
+        }
+
+        impl<'b, Domain> $trait<DiscreteMeasure<Domain, $f>> for &'b $f {
+            type Output = DiscreteMeasure<Domain, $f>;
+            fn $fn(self, mut v : DiscreteMeasure<Domain, $f>) -> Self::Output {
+                v.spikes.iter_mut().for_each(|δ| δ.α = self.$fn(δ.α));
+                v
+            }
+        }
+
+        impl<'a, 'b, Domain : Copy> $trait<&'a DiscreteMeasure<Domain, $f>> for &'b $f {
+            type Output = DiscreteMeasure<Domain, $f>;
+            fn $fn(self, v : &'a DiscreteMeasure<Domain, $f>) -> Self::Output {
+                DiscreteMeasure{
+                    spikes : v.spikes.iter().map(|δ| self.$fn(δ)).collect()
+                }
+            }
+        }
+    )+ }
+}
+
+make_discrete_scalarop_lhs!(Mul, mul; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
+make_discrete_scalarop_lhs!(Div, div; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/measures/merging.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,345 @@
+/*!
+Spike merging heuristics for [`DiscreteMeasure`]s.
+
+This module primarily provides the [`SpikeMerging`] trait, and within it,
+the [`SpikeMerging::merge_spikes`] method. The trait is implemented on
+[`DiscreteMeasure<Loc<F, N>, F>`]s in dimensions `N=1` and `N=2`.
+*/
+
+use numeric_literals::replace_float_literals;
+use std::cmp::Ordering;
+use serde::{Serialize, Deserialize};
+//use clap::builder::{PossibleValuesParser, PossibleValue};
+use alg_tools::nanleast::NaNLeast;
+
+use crate::types::*;
+use super::delta::*;
+use super::discrete::*;
+
+/// Spike merging heuristic selection
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[allow(dead_code)]
+pub enum SpikeMergingMethod<F> {
+    /// Try to merge spikes within a given radius of each other
+    HeuristicRadius(F),
+    /// No merging
+    None,
+}
+
+// impl<F : Float> SpikeMergingMethod<F> {
+//     /// This is for [`clap`] to display command line help.
+//     pub fn value_parser() -> PossibleValuesParser {
+//         PossibleValuesParser::new([
+//             PossibleValue::new("none").help("No merging"),
+//             PossibleValue::new("<radius>").help("Heuristic merging within indicated radius")
+//         ])
+//     }
+// }
+
+impl<F : ClapFloat> std::fmt::Display for SpikeMergingMethod<F> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
+        match self {
+            Self::None => write!(f, "none"),
+            Self::HeuristicRadius(r) => std::fmt::Display::fmt(r, f),
+        }
+    }
+}
+
+impl<F : ClapFloat> std::str::FromStr for SpikeMergingMethod<F> {
+    type Err = F::Err;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if s == "none" {
+            Ok(Self::None)
+        } else {
+            Ok(Self::HeuristicRadius(F::from_str(s)?))
+        }
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for SpikeMergingMethod<F> {
+    fn default() -> Self {
+        SpikeMergingMethod::HeuristicRadius(0.02)
+    }
+}
+
+/// Trait for dimension-dependent implementation of heuristic peak merging strategies.
+pub trait SpikeMerging<F> {
+    /// Attempt spike merging according to [`SpikeMerging`] method.
+    ///
+    /// Returns the last [`Some`] returned by the merging candidate acceptance decision closure
+    /// `accept` if any merging is performed. The closure should accept as its only parameter a
+    /// new candidate measure (it will generally be internally mutated `self`, although this is
+    /// not guaranteed), and return [`None`] if the merge is accepted, and otherwise a [`Some`] of
+    /// an arbitrary value. This method will return that value for the *last* accepted merge, or
+    /// [`None`] if no merge was accepted.
+    ///
+    /// This method is stable with respect to spike locations:  on merge, the weight of existing
+    /// spikes is set to zero, and a new one inserted at the end of the spike vector.
+    fn merge_spikes<G, V>(&mut self, method : SpikeMergingMethod<F>, accept : G) -> Option<V>
+    where G : Fn(&'_ Self) -> Option<V> {
+        match method {
+            SpikeMergingMethod::HeuristicRadius(ρ) => self.do_merge_spikes_radius(ρ, accept),
+            SpikeMergingMethod::None => None,
+        }
+    }
+
+    /// Attempt to merge spikes based on a value and a fitness function.
+    ///
+    /// Calls [`SpikeMerging::merge_spikes`] with `accept` constructed from the composition of
+    /// `value` and `fitness`, compared to initial fitness. Returns the last return value of `value`
+    /// for a merge accepted by `fitness`. If no merge was accepted, `value` applied to the initial
+    /// `self` is returned.
+    fn merge_spikes_fitness<G, H, V, O>(
+        &mut self,
+        method : SpikeMergingMethod<F>,
+        value : G,
+        fitness : H
+    ) -> V
+    where G : Fn(&'_ Self) -> V,
+          H : Fn(&'_ V) -> O,
+          O : PartialOrd {
+        let initial_res = value(self);
+        let initial_fitness = fitness(&initial_res);
+        self.merge_spikes(method, |μ| {
+            let res = value(μ);
+            (fitness(&res) <= initial_fitness).then_some(res)
+        }).unwrap_or(initial_res)
+    }
+
+    /// Attempt to merge spikes that are within radius $ρ$ of each other (unspecified norm).
+    ///
+    /// This method implements [`SpikeMerging::merge_spikes`] for
+    /// [`SpikeMergingMethod::HeuristicRadius`]. The closure `accept` and the return value are
+    /// as for that method.
+    fn do_merge_spikes_radius<G, V>(&mut self, ρ : F, accept : G) -> Option<V>
+    where G : Fn(&'_ Self) -> Option<V>;
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float, const N : usize>  DiscreteMeasure<Loc<F, N>, F> {
+    /// Attempts to merge spikes with indices `i` and `j`.
+    ///
+    /// This assumes that the weights of the two spikes have already been checked not to be zero.
+    ///
+    /// The parameter `res` points to the current “result” for [`SpikeMerging::merge_spikes`].
+    /// If the merge is accepted by `accept` returning a [`Some`], `res` will be replaced by its
+    /// return value.
+    fn attempt_merge<G, V>(
+        &mut self,
+        res : &mut Option<V>,
+        i : usize,
+        j : usize,
+        accept : &G
+    ) -> bool
+    where G : Fn(&'_ Self) -> Option<V> {
+        let &DeltaMeasure{ x : xi, α : αi } = &self.spikes[i];
+        let &DeltaMeasure{ x : xj, α : αj } = &self.spikes[j];
+
+        // Merge inplace
+        self.spikes[i].α = 0.0;
+        self.spikes[j].α = 0.0;
+        //self.spikes.push(DeltaMeasure{ α : αi + αj, x : (xi + xj)/2.0 });
+        self.spikes.push(DeltaMeasure{ α : αi + αj, x : (xi * αi + xj * αj) / (αi + αj) });
+        match accept(self) {
+            some@Some(..) => {
+                // Merge accepted, update our return value
+                *res = some;
+                // On next iteration process the newly merged spike.
+                //indices[k+1] = self.spikes.len() - 1;
+                true
+            },
+            None => {
+                // Merge not accepted, restore modification
+                self.spikes[i].α = αi;
+                self.spikes[j].α = αj;
+                self.spikes.pop();
+                false
+            }
+        }
+    }
+
+    /*
+    /// Attempts to merge spikes with indices i and j, acceptance through a delta.
+    fn attempt_merge_change<G, V>(
+        &mut self,
+        res : &mut Option<V>,
+        i : usize,
+        j : usize,
+        accept_change : &G
+    ) -> bool
+    where G : Fn(&'_ Self) -> Option<V> {
+        let &DeltaMeasure{ x : xi, α : αi } = &self.spikes[i];
+        let &DeltaMeasure{ x : xj, α : αj } = &self.spikes[j];
+        let δ = DeltaMeasure{ α : αi + αj, x : (xi + xj)/2.0 };
+        let λ = [-self.spikes[i], -self.spikes[j], δ.clone()].into();
+
+        match accept_change(&λ) {
+            some@Some(..) => {
+                // Merge accepted, update our return value
+                *res = some;
+                self.spikes[i].α = 0.0;
+                self.spikes[j].α = 0.0;
+                self.spikes.push(δ);
+                true
+            },
+            None => {
+                false
+            }
+        }
+    }*/
+
+}
+
+/// Sorts a vector of indices into `slice` by `compare`.
+///
+/// The closure `compare` operators on references to elements of `slice`.
+/// Returns the sorted vector of indices into `slice`.
+pub fn sort_indices_by<V, F>(slice : &[V], mut compare : F) -> Vec<usize>
+where F : FnMut(&V, &V) -> Ordering
+{
+    let mut indices = Vec::from_iter(0..slice.len());
+    indices.sort_by(|&i, &j| compare(&slice[i], &slice[j]));
+    indices
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 1>, F> {
+
+    fn do_merge_spikes_radius<G, V>(
+        &mut self,
+        ρ : F,
+        accept : G
+    ) -> Option<V>
+    where G : Fn(&'_ Self) -> Option<V> {
+        // Sort by coordinate into an indexing array.
+        let mut indices = sort_indices_by(&self.spikes, |&δ1, &δ2| {
+            let &Loc([x1]) = &δ1.x;
+            let &Loc([x2]) = &δ2.x;
+            // nan-ignoring ordering of floats
+            NaNLeast(x1).cmp(&NaNLeast(x2))
+        });
+
+        // Initialise result
+        let mut res = None;
+
+        // Scan consecutive pairs and merge if close enough and accepted by `accept`.
+        if indices.len() == 0 {
+            return res
+        }
+        for k in 0..(indices.len()-1) {
+            let i = indices[k];
+            let j = indices[k+1];
+            let &DeltaMeasure{ x : Loc([xi]), α : αi } = &self.spikes[i];
+            let &DeltaMeasure{ x : Loc([xj]), α : αj } = &self.spikes[j];
+            debug_assert!(xi <= xj);
+            // If close enough, attempt merging
+            if αi != 0.0 && αj != 0.0 && xj <= xi + ρ {
+                if self.attempt_merge(&mut res, i, j, &accept) {
+                    indices[k+1] = self.spikes.len() - 1;
+                }
+            }
+        }
+
+        res
+    }
+}
+
+/// Orders `δ1` and `δ2` according to the first coordinate.
+fn compare_first_coordinate<F : Float>(
+    δ1 : &DeltaMeasure<Loc<F, 2>, F>,
+    δ2 : &DeltaMeasure<Loc<F, 2>, F>
+) -> Ordering {
+    let &Loc([x11, ..]) = &δ1.x;
+    let &Loc([x21, ..]) = &δ2.x;
+    // nan-ignoring ordering of floats
+    NaNLeast(x11).cmp(&NaNLeast(x21))
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> SpikeMerging<F> for DiscreteMeasure<Loc<F, 2>, F> {
+
+    fn do_merge_spikes_radius<G, V>(&mut self, ρ : F, accept : G) -> Option<V>
+    where G : Fn(&'_ Self) -> Option<V> {
+        // Sort by first coordinate into an indexing array.
+        let mut indices = sort_indices_by(&self.spikes, compare_first_coordinate);
+
+        // Initialise result
+        let mut res = None;
+        let mut start_scan_2nd = 0;
+
+        // Scan in order
+        if indices.len() == 0 {
+            return res
+        }
+        for k in 0..indices.len()-1 {
+            let i = indices[k];
+            let &DeltaMeasure{ x : Loc([xi1, xi2]), α : αi } = &self[i];
+
+            if αi == 0.0 {
+                // Nothing to be done if the weight is already zero
+                continue
+            }
+
+            let mut closest = None;
+
+            // Scan for second spike. We start from `start_scan_2nd + 1` with `start_scan_2nd`
+            // the smallest invalid merging index on the previous loop iteration, because
+            // the _closest_ mergeable spike might have index less than `k` in `indices`, and a
+            // merge with it might have not been attempted with this spike if a different closer
+            // spike was discovered based on the second coordinate.
+            'scan_2nd: for l in (start_scan_2nd+1)..indices.len() {
+                if l == k {
+                    // Do not attempt to merge a spike with itself
+                    continue
+                }
+                let j = indices[l];
+                let &DeltaMeasure{ x : Loc([xj1, xj2]), α : αj } = &self[j];
+
+                if xj1 < xi1 - ρ {
+                    // Spike `j = indices[l]` has too low first coordinate. Update starting index
+                    // for next iteration, and continue scanning.
+                    start_scan_2nd = l;
+                    continue 'scan_2nd
+                } else if xj1 > xi1 + ρ {
+                    // Break out: spike `j = indices[l]` has already too high first coordinate, no
+                    // more close enough spikes can be found due to the sorting of `indices`.
+                    break 'scan_2nd
+                }
+
+                // If also second coordinate is close enough, attempt merging if closer than
+                // previously discovered mergeable spikes.
+                let d2 = (xi2-xj2).abs();
+                if αj != 0.0 && d2 <= ρ {
+                    let r1 = xi1-xj1;
+                    let d = (d2*d2 + r1*r1).sqrt();
+                    match closest {
+                        None => closest = Some((l, j, d)),
+                        Some((_, _, r)) if r > d => closest = Some((l, j, d)),
+                        _ => {},
+                    }
+                }
+            }
+
+            // Attempt merging closest close-enough spike
+            if let Some((l, j, _)) = closest {
+                if self.attempt_merge(&mut res, i, j, &accept) {
+                    // If merge was successful, make new spike candidate for merging.
+                    indices[l] = self.spikes.len() - 1;
+                    let compare = |i, j| compare_first_coordinate(&self.spikes[i],
+                                                                  &self.spikes[j]);
+                    // Re-sort relevant range of indices
+                    if l < k {
+                        indices[l..k].sort_by(|&i, &j| compare(i, j));
+                    } else {
+                        indices[k+1..=l].sort_by(|&i, &j| compare(i, j));
+                    }
+                }
+            }
+        }
+
+        res
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/pdps.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,355 @@
+/*!
+Solver for the point source localisation problem with primal-dual proximal splitting.
+
+This corresponds to the manuscript
+
+ * Valkonen T. - _Proximal methods for point source localisation_. ARXIV TO INSERT.
+
+The main routine is [`pointsource_pdps`]. It is based on specialisation of
+[`generic_pointsource_fb`] through relevant [`FBSpecialisation`] implementations.
+Both norm-2-squared and norm-1 data terms are supported. That is, implemented are solvers for
+<div>
+$$
+    \min_{μ ∈ ℳ(Ω)}~ F_0(Aμ - b) + α \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ),
+$$
+for both $F_0(y)=\frac{1}{2}\|y\|_2^2$ and  $F_0(y)=\|y\|_1$ with the forward operator
+$A \in 𝕃(ℳ(Ω); ℝ^n)$.
+</div>
+
+## Approach
+
+<p>
+The problem above can be written as
+$$
+    \min_μ \max_y G(μ) + ⟨y, Aμ-b⟩ - F_0^*(y),
+$$
+where $G(μ) = α \|μ\|_{ℳ(Ω)} + δ_{≥ 0}(μ)$.
+The Fenchel–Rockafellar optimality conditions, employing the predual in $ℳ(Ω)$, are
+$$
+    0 ∈ A_*y + ∂G(μ)
+    \quad\text{and}\quad
+    Aμ - b ∈ ∂ F_0^*(y).
+$$
+The solution of the first part is as for forward-backward, treated in the manuscript.
+This is the task of <code>generic_pointsource_fb</code>, where we use <code>FBSpecialisation</code>
+to replace the specific residual $Aμ-b$ by $y$.
+For $F_0(y)=\frac{1}{2}\|y\|_2^2$ the second part reads $y = Aμ -b$.
+For $F_0(y)=\|y\|_1$ the second part reads $y ∈ ∂\|·\|_1(Aμ - b)$.
+</p>
+
+Based on zero initialisation for $μ$, we use the [`Subdifferentiable`] trait to make an
+initialisation corresponding to the second part of the optimality conditions.
+In the algorithm itself, standard proximal steps are taken with respect to $F\_0^* + ⟨b, ·⟩$.
+*/
+
+use numeric_literals::replace_float_literals;
+use serde::{Serialize, Deserialize};
+use nalgebra::DVector;
+use clap::ValueEnum;
+
+use alg_tools::iterate:: AlgIteratorFactory;
+use alg_tools::sets::Cube;
+use alg_tools::loc::Loc;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::norms::{
+    L1, Linfinity,
+    Projection, Norm,
+};
+use alg_tools::bisection_tree::{
+    BTFN,
+    PreBTFN,
+    Bounds,
+    BTNodeLookup,
+    BTNode,
+    BTSearch,
+    P2Minimise,
+    SupportGenerator,
+    LocalAnalysis,
+};
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::linops::AXPY;
+
+use crate::types::*;
+use crate::measures::DiscreteMeasure;
+use crate::measures::merging::{
+    SpikeMerging,
+};
+use crate::forward_model::ForwardModel;
+use crate::seminorms::{
+    DiscreteMeasureOp, Lipschitz
+};
+use crate::plot::{
+    SeqPlotter,
+    Plotting,
+    PlotLookup
+};
+use crate::fb::{
+    FBGenericConfig,
+    FBSpecialisation,
+    generic_pointsource_fb
+};
+
+/// Acceleration
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, ValueEnum, Debug)]
+pub enum Acceleration {
+    /// No acceleration
+    #[clap(name = "none")]
+    None,
+    /// Partial acceleration, $ω = 1/\sqrt{1+σ}$
+    #[clap(name = "partial", help = "Partial acceleration, ω = 1/√(1+σ)")]
+    Partial,
+    /// Full acceleration, $ω = 1/\sqrt{1+2σ}$; no gap convergence guaranteed
+    #[clap(name = "full", help = "Full acceleration, ω = 1/√(1+2σ); no gap convergence guaranteed")]
+    Full
+}
+
+/// Settings for [`pointsource_pdps`].
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct PDPSConfig<F : Float> {
+    /// Primal step length scaling. We must have `τ0 * σ0 < 1`.
+    pub τ0 : F,
+    /// Dual step length scaling. We must have `τ0 * σ0 < 1`.
+    pub σ0 : F,
+    /// Accelerate if available
+    pub acceleration : Acceleration,
+    /// Generic parameters
+    pub insertion : FBGenericConfig<F>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for PDPSConfig<F> {
+    fn default() -> Self {
+        let τ0 = 0.5;
+        PDPSConfig {
+            τ0,
+            σ0 : 0.99/τ0,
+            acceleration : Acceleration::Partial,
+            insertion : Default::default()
+        }
+    }
+}
+
+/// Trait for subdifferentiable objects
+pub trait Subdifferentiable<F : Float, V, U=V> {
+    /// Calculate some subdifferential at `x`
+    fn some_subdifferential(&self, x : V) -> U;
+}
+
+/// Type for indicating norm-2-squared data fidelity.
+pub struct L2Squared;
+
+impl<F : Float, V : Euclidean<F>> Subdifferentiable<F, V> for L2Squared {
+    fn some_subdifferential(&self, x : V) -> V { x }
+}
+
+impl<F : Float + nalgebra::RealField> Subdifferentiable<F, DVector<F>> for L1 {
+    fn some_subdifferential(&self, mut x : DVector<F>) -> DVector<F> {
+        // nalgebra sucks for providing second copies of the same stuff that's elsewhere as well.
+        x.iter_mut()
+         .for_each(|v| if *v != F::ZERO { *v = *v/<F as NumTraitsFloat>::abs(*v) });
+        x
+    }
+}
+
+/// Specialisation of [`generic_pointsource_fb`] to PDPS.
+pub struct PDPS<
+    'a,
+    F : Float + ToNalgebraRealField,
+    A : ForwardModel<Loc<F, N>, F>,
+    D,
+    const N : usize
+> {
+    /// The data
+    b : &'a A::Observable,
+    /// The forward operator
+    opA : &'a A,
+    /// Primal step length
+    τ : F,
+    /// Dual step length
+    σ : F,
+    /// Whether acceleration should be applied (if data term supports)
+    acceleration : Acceleration,
+    /// The dataterm. Only used by the type system.
+    _dataterm : D,
+    /// Previous dual iterate.
+    y_prev : A::Observable,
+}
+
+/// Implementation of [`FBSpecialisation`] for μPDPS with norm-2-squared data fidelity.
+#[replace_float_literals(F::cast_from(literal))]
+impl<
+    'a,
+    F : Float + ToNalgebraRealField,
+    A : ForwardModel<Loc<F, N>, F>,
+    const N : usize
+> FBSpecialisation<F, A::Observable, N> for PDPS<'a, F, A, L2Squared, N>
+where for<'b> &'b A::Observable : std::ops::Add<A::Observable, Output=A::Observable> {
+
+    fn update(
+        &mut self,
+        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ_base : &DiscreteMeasure<Loc<F, N>, F>
+    ) -> (A::Observable, Option<F>) {
+        let σ = self.σ;
+        let τ = self.τ;
+        let ω = match self.acceleration {
+            Acceleration::None => 1.0,
+            Acceleration::Partial => {
+                let ω = 1.0 / (1.0 + σ).sqrt();
+                self.σ = σ * ω;
+                self.τ = τ / ω;
+                ω
+            },
+            Acceleration::Full => {
+                let ω = 1.0 / (1.0 + 2.0 * σ).sqrt();
+                self.σ = σ * ω;
+                self.τ = τ / ω;
+                ω
+            },
+        };
+
+        μ.prune();
+
+        let mut y = self.b.clone();
+        self.opA.gemv(&mut y, 1.0 + ω, μ, -1.0);
+        self.opA.gemv(&mut y, -ω, μ_base, 1.0);
+        y.axpy(1.0 / (1.0 + σ), &self.y_prev,  σ / (1.0 + σ));
+        self.y_prev.copy_from(&y);
+
+        (y, Some(self.τ))
+    }
+
+    fn calculate_fit(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        _y : &A::Observable
+    ) -> F {
+        self.calculate_fit_simple(μ)
+    }
+
+    fn calculate_fit_simple(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+    ) -> F {
+        let mut residual = self.b.clone();
+        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
+        residual.norm2_squared_div2()
+    }
+}
+
+/// Implementation of [`FBSpecialisation`] for μPDPS with norm-1 data fidelity.
+#[replace_float_literals(F::cast_from(literal))]
+impl<
+    'a,
+    F : Float + ToNalgebraRealField,
+    A : ForwardModel<Loc<F, N>, F>,
+    const N : usize
+> FBSpecialisation<F, A::Observable, N> for PDPS<'a, F, A, L1, N>
+where A::Observable : Projection<F, Linfinity> + Norm<F, L1>,
+      for<'b> &'b A::Observable : std::ops::Add<A::Observable, Output=A::Observable> {
+    fn update(
+        &mut self,
+        μ : &mut DiscreteMeasure<Loc<F, N>, F>,
+        μ_base : &DiscreteMeasure<Loc<F, N>, F>
+    ) -> (A::Observable, Option<F>) {
+        let σ = self.σ;
+
+        μ.prune();
+
+        //let ȳ = self.opA.apply(μ) * 2.0 - self.opA.apply(μ_base);
+        //*y = proj_{[-1,1]}(&self.y_prev + (ȳ - self.b) * σ)
+        let mut y = self.y_prev.clone();
+        self.opA.gemv(&mut y, 2.0 * σ, μ, 1.0);
+        self.opA.gemv(&mut y, -σ, μ_base, 1.0);
+        y.axpy(-σ, self.b, 1.0);
+        y.proj_ball_mut(1.0, Linfinity);
+        self.y_prev.copy_from(&y);
+
+        (y, None)
+    }
+
+    fn calculate_fit(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        _y : &A::Observable
+    ) -> F {
+        self.calculate_fit_simple(μ)
+    }
+
+    fn calculate_fit_simple(
+        &self,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+    ) -> F {
+        let mut residual = self.b.clone();
+        self.opA.gemv(&mut residual, 1.0, μ, -1.0);
+        residual.norm(L1)
+    }
+}
+
+/// Iteratively solve the pointsource localisation problem using primal-dual proximal splitting.
+///
+/// The `dataterm` should be either [`L1`] for norm-1 data term or [`L2Squared`] for norm-2-squared.
+/// The settings in `config` have their [respective documentation](PDPSConfig). `opA` is the
+/// forward operator $A$, $b$ the observable, and $α$ the regularisation weight.
+/// The operator `op𝒟` is used for forming the proximal term. Typically it is a convolution
+/// operator. Finally, the `iterator` is an outer loop verbosity and iteration count control
+/// as documented in [`alg_tools::iterate`].
+///
+/// For the mathematical formulation, see the [module level](self) documentation and the manuscript.
+///
+/// Returns the final iterate.
+#[replace_float_literals(F::cast_from(literal))]
+pub fn pointsource_pdps<'a, F, I, A, GA, 𝒟, BTA, G𝒟, S, K, D, const N : usize>(
+    opA : &'a A,
+    b : &'a A::Observable,
+    α : F,
+    op𝒟 : &'a 𝒟,
+    config : &PDPSConfig<F>,
+    iterator : I,
+    plotter : SeqPlotter<F, N>,
+    dataterm : D,
+) -> DiscreteMeasure<Loc<F, N>, F>
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<IterInfo<F, N>>,
+      for<'b> &'b A::Observable : std::ops::Neg<Output=A::Observable>
+                                  + std::ops::Add<A::Observable, Output=A::Observable>,
+                                  //+ std::ops::Mul<F, Output=A::Observable>, // <-- FIXME: compiler overflow
+      A::Observable : std::ops::MulAssign<F>,
+      GA : SupportGenerator<F, N, SupportType = S, Id = usize> + Clone,
+      A : ForwardModel<Loc<F, N>, F, PreadjointCodomain = BTFN<F, GA, BTA, N>>
+          + Lipschitz<𝒟, FloatType=F>,
+      BTA : BTSearch<F, N, Data=usize, Agg=Bounds<F>>,
+      G𝒟 : SupportGenerator<F, N, SupportType = K, Id = usize> + Clone,
+      𝒟 : DiscreteMeasureOp<Loc<F, N>, F, PreCodomain = PreBTFN<F, G𝒟, N>>,
+      𝒟::Codomain : RealMapping<F, N>,
+      S: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      K: RealMapping<F, N> + LocalAnalysis<F, Bounds<F>, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      Cube<F, N>: P2Minimise<Loc<F, N>, F>,
+      PlotLookup : Plotting<N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      PDPS<'a, F, A, D, N> : FBSpecialisation<F, A::Observable, N>,
+      D : Subdifferentiable<F, A::Observable> {
+
+    let y = dataterm.some_subdifferential(-b);
+    let l = opA.lipschitz_factor(&op𝒟).unwrap().sqrt();
+    let τ = config.τ0 / l;
+    let σ = config.σ0 / l;
+
+    let pdps = PDPS {
+        b,
+        opA,
+        τ,
+        σ,
+        acceleration : config.acceleration,
+        _dataterm : dataterm,
+        y_prev : y.clone(),
+    };
+
+    generic_pointsource_fb(
+        opA, α, op𝒟, τ, &config.insertion, iterator, plotter, y,
+        pdps
+    )
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/plot.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,413 @@
+//! Plotting helper utilities
+
+use numeric_literals::replace_float_literals;
+use std::io::Write;
+use image::{
+    ImageFormat,
+    ImageBuffer,
+    Rgb
+};
+use itertools::izip;
+use colorbrewer::Palette as CbPalette;
+
+use alg_tools::types::*;
+use alg_tools::lingrid::LinGrid;
+use alg_tools::mapping::RealMapping;
+use alg_tools::loc::Loc;
+use alg_tools::bisection_tree::Bounds;
+use alg_tools::maputil::map4;
+use alg_tools::tabledump::write_csv;
+use crate::measures::*;
+
+/// Default RGB ramp from [`colorbrewer`].
+///
+/// This is a tuple of parameters to [`colorbrewer::get_color_ramp`].
+const RAMP : (CbPalette, u32) = (CbPalette::RdBu, 11);
+
+/// Helper trait for implementing dimension-dependent plotting routines.
+pub trait Plotting<const N : usize> {
+    /// Plot several mappings and a discrete measure into a file.
+    fn plot_into_file_spikes<
+        F : Float,
+        T1 : RealMapping<F, N>,
+        T2 : RealMapping<F, N>
+    > (
+        g_explanation : String,
+        g : &T1,
+        ω_explanation : String,
+        ω : Option<&T2>,
+        grid : LinGrid<F, N>,
+        bnd : Option<Bounds<F>>,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+        filename : String,
+    );
+
+    /// Plot a mapping into a file, sampling values on a given grid.
+    fn plot_into_file<
+        F : Float,
+        T1 : RealMapping<F, N>,
+    > (
+        g : &T1,
+        grid : LinGrid<F, N>,
+        filename : String,
+        explanation : String
+    );
+}
+
+/// Helper type for looking up a [`Plotting`] based on dimension.
+pub struct PlotLookup;
+
+impl Plotting<1> for PlotLookup {
+    fn plot_into_file_spikes<
+        F : Float,
+        T1 : RealMapping<F, 1>,
+        T2 : RealMapping<F, 1>
+    > (
+        g_explanation : String,
+        g : &T1,
+        ω_explanation : String,
+        ω0 : Option<&T2>,
+        grid : LinGrid<F, 1>,
+        bnd0 : Option<Bounds<F>>,
+        μ : &DiscreteMeasure<Loc<F, 1>, F>,
+        filename : String,
+    ) {
+        let start = grid.start[0].as_();
+        let end = grid.end[0].as_();
+        let m = μ.iter_masses().fold(F::ZERO, |m, α| m.max(α));
+        let s = μ.iter_masses().fold(F::ZERO, |m, α| m.add(α));
+        let mut spike_scale = F::ONE;
+
+        let mut plotter = poloto::plot(
+            "f", "x",
+            format!("f(x); spike max={:.4}, n={}, ∑={:.4}", m, μ.len(), s)
+        ).move_into();
+
+        if let Some(ω) = ω0 {
+            let graph_ω = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
+                [x0.as_(), ω.apply(&x).as_()]
+            });
+            plotter.line(ω_explanation.as_str(), graph_ω.clone());
+            // let csv_f = format!("{}.txt", filename);
+            // write_csv(graph_ω, csv_f).expect("CSV save error");
+        }
+
+        let graph_g = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
+            [x0.as_(), g.apply(&x).as_()]
+        });
+        plotter.line(g_explanation.as_str(), graph_g.clone());
+        // let csv_f = format!("{}.txt", filename);
+        // write_csv(graph_g, csv_f).expect("CSV save error");
+
+        bnd0.map(|bnd| {
+            let upperb = bnd.upper().as_();
+            let lowerb =  bnd.lower().as_();
+            let upper : [[f64; 2]; 2] = [[start, upperb], [end, upperb]];
+            let lower = [[start, lowerb], [end, lowerb]];
+            spike_scale *= bnd.upper();
+
+            plotter.line("upper bound", upper)
+                   .line("lower bound", lower)
+                   .ymarker(lowerb)
+                   .ymarker(upperb);
+        });
+
+        for &DeltaMeasure{ α, x : Loc([x]) } in μ.iter_spikes() {
+            let spike = [[x.as_(), 0.0], [x.as_(), (α/m * spike_scale).as_()]];
+            plotter.line("", spike);
+        }
+
+        let svg = format!("{}", poloto::disp(|a| poloto::simple_theme(a, plotter)));
+
+        std::fs::File::create(filename + ".svg").and_then(|mut file|
+            file.write_all(svg.as_bytes())
+        ).expect("SVG save error");
+    }
+
+    fn plot_into_file<
+        F : Float,
+        T1 : RealMapping<F, 1>,
+    > (
+        g : &T1,
+        grid : LinGrid<F, 1>,
+        filename : String,
+        explanation : String
+    ) {
+        let graph_g = grid.into_iter().map(|x@Loc([x0]) : Loc<F, 1>| {
+            [x0.as_(), g.apply(&x).as_()]
+        });
+
+        let plotter: poloto::Plotter<'_, float, float> = poloto::plot("f", "x", "f(x)")
+            .line(explanation.as_str(), graph_g.clone())
+            .move_into();
+
+        let svg = format!("{}", poloto::disp(|a| poloto::simple_theme(a, plotter)));
+
+        let svg_f = format!("{}.svg", filename);
+        std::fs::File::create(svg_f).and_then(|mut file|
+            file.write_all(svg.as_bytes())
+        ).expect("SVG save error");
+
+        let csv_f = format!("{}.txt", filename);
+        write_csv(graph_g, csv_f).expect("CSV save error");
+    }
+
+}
+
+/// Convert $[0, 1] ∈ F$ to $\\\{0, …, M\\\} ∈ F$ where $M=$`F::RANGE_MAX`.
+#[inline]
+fn scale_uint<F, U>(v : F) -> U
+where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
+      U : Unsigned {
+    (v*F::cast_from(U::RANGE_MAX)).as_()
+}
+
+/// Convert $[a, b] ∈ F$ to $\\\{0, …, M\\\} ∈ F$ where $M=$`F::RANGE_MAX`.
+#[replace_float_literals(F::cast_from(literal))]
+#[inline]
+fn scale_range_uint<F, U>(v : F, &Bounds(a, b) : &Bounds<F>) -> U
+where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
+      U : Unsigned {
+    debug_assert!(a < b);
+    scale_uint(((v - a)/(b - a)).max(0.0).min(1.0))
+}
+
+
+/// Sample a mapping on a grid.
+///
+/// Returns a vector of values as well as upper and lower bounds of the values.
+fn rawdata_and_range<F, T>(grid : &LinGrid<F, 2>, g :&T) -> (Vec<F>, Bounds<F>)
+where F : Float,
+      T : RealMapping<F, 2> {
+    let rawdata : Vec<F> = grid.into_iter().map(|x| g.apply(&x)).collect();
+    let range = rawdata.iter()
+                        .map(|&v| Bounds(v, v))
+                        .reduce(|b1, b2| b1.common(&b2))
+                        .unwrap();
+    (rawdata, range)
+}
+
+/*fn to_range<'a, F, U>(rawdata : &'a Vec<F>,  range : &'a Bounds<F>)
+-> std::iter::Map<std::slice::Iter<'a, F>, impl FnMut(&'a F) -> U>
+where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
+      U : Unsigned {
+    rawdata.iter().map(move |&v| scale_range_uint(v, range))
+}*/
+
+/// Convert a scalar value to an RGB triplet.
+///
+/// Converts the value `v` supposed to be within the range `[a, b]` to an rgb value according
+/// to the given `ramp` of equally-spaced rgb interpolation points.
+///
+/// Panics if `ramp` is empty (`ramp.len() - 1` underflows).
+#[replace_float_literals(F::cast_from(literal))]
+fn one_to_ramp<F, U>(
+    &Bounds(a, b) : &Bounds<F>,
+    ramp : &Vec<Loc<F, 3>>,
+    v : F,
+) -> Rgb<U>
+where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
+      U : Unsigned {
+
+    let n = ramp.len() - 1;
+    let m = F::cast_from(U::RANGE_MAX);
+    // Clamp a floating ramp index into the valid index range {0, …, n}.
+    // (`.max(0)` is redundant on usize but harmless.)
+    let ramprange = move |v : F| {let m : usize = v.as_(); m.min(n).max(0) };
+
+    let w = F::cast_from(n) * (v - a) / (b - a);  // convert [0, 1] to [0, n]
+    let (l, u) = (w.floor(), w.ceil());           // Find closest integers
+    let (rl, ru) = (ramprange(l), ramprange(u));
+    let (cl, cu) = (ramp[rl], ramp[ru]);          // Get corresponding colours
+    let λ = match rl==ru {                        // Interpolation factor
+        true => 0.0,
+        false => (u - w) / (u - l),
+    };
+    let Loc(rgb) = cl * λ + cu * (1.0 - λ);       // Interpolate
+
+    // Scale each channel from [0, 1] to {0, …, U::RANGE_MAX}, saturating at both ends.
+    Rgb(rgb.map(|v| (v * m).round().min(m).max(0.0).as_()))
+}
+
+/// Convert an iterator over scalar values to an iterator over RGB triplets.
+///
+/// The conversion is that performed by [`one_to_ramp`].
+#[replace_float_literals(F::cast_from(literal))]
+fn to_ramp<'a, F, U, I>(
+    bounds : &'a Bounds<F>,
+    ramp : &'a Vec<Loc<F, 3>>,
+    iter : I,
+) -> std::iter::Map<I, impl FnMut(F) -> Rgb<U> + 'a>
+where F : Float + CastFrom<U> + num_traits::cast::AsPrimitive<U>,
+      U : Unsigned,
+      I : Iterator<Item = F> + 'a {
+    iter.map(move |v| one_to_ramp(bounds, ramp, v))
+}
+
+/// Convert a [`colorbrewer`] specification to a ramp of rgb triplets.
+///
+/// Each 8-bit channel is normalised to $[0, 1]$.
+/// Panics if `(palette, nb)` is not a valid colorbrewer ramp.
+fn get_ramp<F : Float>((palette, nb) : (CbPalette, u32)) -> Vec<Loc<F, 3>> {
+    let m = F::cast_from(u8::MAX);
+    colorbrewer::get_color_ramp(palette, nb)
+                 .expect("Invalid colorbrewer ramp")
+                 .into_iter()
+                 .map(|rgb::RGB{r, g, b}| {
+                    [r, g, b].map(|c| F::cast_from(c) / m).into()
+                 }).collect()
+}
+
+/// Perform hue shifting of an RGB value.
+///
+/// The hue `ω` is in radians.
+#[replace_float_literals(F::cast_from(literal))]
+fn hueshift<F, U>(ω : F, Rgb([r_in, g_in, b_in]) : Rgb<U>) -> Rgb<U>
+where F : Float + CastFrom<U>,
+      U : Unsigned {
+    // Normalise each channel to [0, 1].
+    let m = F::cast_from(U::RANGE_MAX);
+    let r = F::cast_from(r_in) / m;
+    let g = F::cast_from(g_in) / m;
+    let b = F::cast_from(b_in) / m;
+    let u = ω.cos();
+    let w = ω.sin();
+
+    // Rotation matrix about the luma axis; 0.299/0.587/0.114 are the usual
+    // (Rec. 601) luma weights, which are preserved while chroma rotates by ω.
+    let nr = (0.299 + 0.701*u + 0.168*w) * r
+              + (0.587 - 0.587*u + 0.330*w) * g
+              + (0.114 - 0.114*u - 0.497*w) * b;
+    let ng = (0.299 - 0.299*u - 0.328*w) * r
+              + (0.587 + 0.413*u + 0.035*w) * g
+              + (0.114 - 0.114*u + 0.292*w) *b;
+    let nb = (0.299 - 0.3*u + 1.25*w) * r
+              + (0.587 - 0.588*u - 1.05*w) * g
+              + (0.114 + 0.886*u - 0.203*w) * b;
+
+    // NOTE(review): nr/ng/nb may fall slightly outside [0, 1] after rotation,
+    // and scale_uint does not clamp — confirm this is acceptable.
+    Rgb([nr, ng, nb].map(scale_uint))
+}
+
+
+impl Plotting<2> for PlotLookup {
+    // 2D variant: renders `g` as a colour-mapped PNG, hue-shifts by the pointwise
+    // difference to `ω0` (when given), and overlays the spikes of `μ` as bright pixels.
+    // The `_explanation` and `_bnd0` parameters are unused in 2D.
+    #[replace_float_literals(F::cast_from(literal))]
+    fn plot_into_file_spikes<
+        F : Float,
+        T1 : RealMapping<F, 2>,
+        T2 : RealMapping<F, 2>
+    > (
+        _g_explanation : String,
+        g : &T1,
+        _ω_explanation : String,
+        ω0 : Option<&T2>,
+        grid : LinGrid<F, 2>,
+        _bnd0 : Option<Bounds<F>>,
+        μ : &DiscreteMeasure<Loc<F, 2>, F>,
+        filename : String,
+    ) {
+        let [w, h] = grid.count;
+        let (rawdata_g, range_g) = rawdata_and_range(&grid, g);
+        // Without ω0, compare against zeros so the hue shift below reflects |v|.
+        let (rawdata_ω, range) = match ω0 {
+            Some(ω) => {
+                let (rawdata_ω, range_ω) = rawdata_and_range(&grid, ω);
+                (rawdata_ω, range_g.common(&range_ω))
+            },
+            None => {
+                let mut zeros = Vec::new();
+                zeros.resize(rawdata_g.len(), 0.0);
+                (zeros, range_g)
+            }
+        };
+        let ramp = get_ramp(RAMP);
+        let base_im_iter = to_ramp::<F, u16, _>(&range_g, &ramp, rawdata_g.iter().cloned());
+        // NOTE(review): assumes range.upper() ≠ 0; all-zero data would divide by
+        // zero here — confirm inputs rule this out.
+        let im_iter = izip!(base_im_iter, rawdata_g.iter(), rawdata_ω.iter())
+            .map(|(rgb, &v, &w)| {
+                hueshift(2.0 * F::PI * (v - w).abs() / range.upper(), rgb)
+            });
+        let mut img = ImageBuffer::new(w as u32, h as u32);
+        img.pixels_mut()
+           .zip(im_iter)
+           .for_each(|(p, v)| *p = v);
+
+        // Add spikes
+        let m = μ.iter_masses().fold(F::ZERO, |m, α| m.max(α));
+        let μ_range = Bounds(F::ZERO, m);
+        for &DeltaMeasure{ ref x, α } in μ.iter_spikes() {
+            // Map the spike location into pixel coordinates on the grid.
+            let [a, b] = map4(x, &grid.start, &grid.end, &grid.count, |&ξ, &a, &b, &n| {
+                ((ξ-a)/(b-a)*F::cast_from(n)).as_()
+            });
+            if a < w.as_() && b < h.as_() {
+                // Encode the spike mass (relative to the largest) in the blue channel.
+                let sc : u16 = scale_range_uint(α, &μ_range);
+                // TODO: use max of points that map to this pixel.
+                img[(a, b)] = Rgb([u16::MAX, u16::MAX, sc/2]);
+            }
+        }
+
+        img.save_with_format(filename + ".png", ImageFormat::Png)
+           .expect("Image save error");
+    }
+
+    // Renders `g` as a colour-mapped PNG and also dumps the raw grid samples
+    // as CSV (filename + ".txt"). The `_explanation` parameter is unused in 2D.
+    fn plot_into_file<
+        F : Float,
+        T1 : RealMapping<F, 2>,
+    > (
+        g : &T1,
+        grid : LinGrid<F, 2>,
+        filename : String,
+        _explanation : String
+    ) {
+        let [w, h] = grid.count;
+        let (rawdata, range) = rawdata_and_range(&grid, g);
+        let ramp = get_ramp(RAMP);
+        let im_iter = to_ramp::<F, u16, _>(&range, &ramp, rawdata.iter().cloned());
+        let mut img = ImageBuffer::new(w as u32, h as u32);
+        img.pixels_mut()
+           .zip(im_iter)
+           .for_each(|(p, v)| *p = v);
+        img.save_with_format(filename.clone() + ".png", ImageFormat::Png)
+           .expect("Image save error");
+        
+        let csv_iter = grid.into_iter().zip(rawdata.iter()).map(|(Loc(x), &v)| (x, v));
+        let csv_f = filename + ".txt";
+        write_csv(csv_iter, csv_f).expect("CSV save error");
+    }
+
+}
+
+/// A helper structure for plotting a sequence of images.
+///
+/// Plots are numbered sequentially and written under `prefix`; no more than
+/// `max_plots` are produced.
+#[derive(Clone,Debug)]
+pub struct SeqPlotter<F : Float, const N : usize> {
+    /// File name prefix
+    prefix : String,
+    /// Maximum number of plots to perform
+    max_plots : usize,
+    /// Sampling grid
+    grid : LinGrid<F, N>,
+    /// Current plot count
+    plot_count : usize,
+}
+
+impl<F : Float, const N : usize> SeqPlotter<F, N>
+where PlotLookup : Plotting<N> {
+    /// Creates a new sequence plotter instance
+    pub fn new(prefix : String, max_plots : usize, grid : LinGrid<F, N>) -> Self {
+        SeqPlotter { prefix, max_plots, grid, plot_count : 0 }
+    }
+
+    /// This calls [`PlotLookup::plot_into_file_spikes`] with a sequentially numbered file name.
+    ///
+    /// Does nothing once `max_plots` plots have been produced. The plot directory
+    /// is created lazily on the first call.
+    pub fn plot_spikes<T1, T2>(
+        &mut self,
+        g_explanation : String,
+        g : &T1,
+        ω_explanation : String,
+        ω : Option<&T2>,
+        tol : Option<Bounds<F>>,
+        μ : &DiscreteMeasure<Loc<F, N>, F>,
+    ) where T1 : RealMapping<F, N>,
+            T2 : RealMapping<F, N>
+    {
+        // Create the output directory only if at least one plot will be made.
+        if self.plot_count == 0 && self.max_plots > 0 {
+            std::fs::create_dir_all(&self.prefix).expect("Unable to create plot directory");
+        }
+        if self.plot_count < self.max_plots {
+            PlotLookup::plot_into_file_spikes(
+                g_explanation, g,
+                ω_explanation, ω,
+                self.grid,
+                tol,
+                μ,
+                format!("{}out{:03}", self.prefix, self.plot_count)
+            );
+            self.plot_count += 1;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/rand_distr.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,98 @@
+//! Random distribution wrappers and implementations
+
+use numeric_literals::replace_float_literals;
+use rand::Rng;
+use rand_distr::{Distribution, Normal, StandardNormal, NormalError};
+use serde::{Serialize, Deserialize};
+use serde::ser::{Serializer, SerializeStruct};
+use alg_tools::types::*;
+
+/// Wrapper for [`Normal`] that can be serialized by serde.
+///
+/// Only [`Serialize`] is implemented below; deserialization is not provided.
+pub struct SerializableNormal<T : Float>(Normal<T>)
+where StandardNormal : Distribution<T>;
+
+// Delegate sampling to the wrapped [`Normal`] distribution.
+impl<T : Float> Distribution<T> for SerializableNormal<T>
+where StandardNormal : Distribution<T> {
+    fn sample<R>(&self, rng: &mut R) -> T
+    where
+        R : Rng + ?Sized
+    { self.0.sample(rng) }
+}
+
+impl<T : Float> SerializableNormal<T>
+where StandardNormal : Distribution<T> {
+    /// Creates a normal distribution with the given `mean` and `std_dev`.
+    ///
+    /// Returns a [`NormalError`] if the parameters are invalid (e.g. a negative
+    /// or non-finite standard deviation, as determined by [`Normal::new`]).
+    pub fn new(mean : T, std_dev : T) -> Result<SerializableNormal<T>, NormalError> {
+        Ok(SerializableNormal(Normal::new(mean, std_dev)?))
+    }
+}
+
+// Manual Serialize implementation: [`Normal`] itself does not derive serde
+// support, so serialize its defining parameters as a two-field struct.
+impl<F> Serialize for SerializableNormal<F>
+where
+    StandardNormal : Distribution<F>,
+    F: Float + Serialize,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut s = serializer.serialize_struct("Normal", 2)?;
+        s.serialize_field("mean", &self.0.mean())?;
+        s.serialize_field("std_dev", &self.0.std_dev())?;
+        s.end()
+    }
+}
+
+/// Salt-and-pepper noise distribution
+///
+/// This is the distribution that outputs each $\\{0, m, -m\\}$ with the corresponding
+/// probabilities $\\{1-p, p/2, p/2\\}$.
+#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
+pub struct SaltAndPepper<T : Float>{
+    /// The magnitude parameter $m$
+    magnitude : T,
+    /// The probability parameter $p$
+    probability : T
+}
+
+/// Error for [`SaltAndPepper`].
+#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
+pub enum SaltAndPepperError {
+    /// The probability parameter $p$ is not in the range [0, 1].
+    InvalidProbability,
+}
+impl std::error::Error for SaltAndPepperError {}
+
+impl std::fmt::Display for SaltAndPepperError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // NOTE(review): the message below starts with a stray leading space —
+        // confirm whether that is intentional.
+        f.write_str(match self {
+            SaltAndPepperError::InvalidProbability =>
+                " The probability parameter is not in the range [0, 1].",
+        })
+    }
+}
+
+#[replace_float_literals(T::cast_from(literal))]
+impl<T : Float> SaltAndPepper<T> {
+    /// Creates a salt-and-pepper distribution with the given `magnitude` $m$
+    /// and `probability` $p$.
+    ///
+    /// Returns [`SaltAndPepperError::InvalidProbability`] if $p ∉ [0, 1]$.
+    pub fn new(magnitude : T, probability : T) -> Result<SaltAndPepper<T>, SaltAndPepperError> {
+        if probability > 1.0 || probability < 0.0 {
+            Err(SaltAndPepperError::InvalidProbability)
+        } else {
+            Ok(SaltAndPepper { magnitude, probability })
+        }
+    }
+}
+
+#[replace_float_literals(T::cast_from(literal))]
+impl<T : Float> Distribution<T> for SaltAndPepper<T> {
+    fn sample<R>(&self, rng: &mut R) -> T
+    where
+        R : Rng + ?Sized
+    {
+        // Draw a uniform p ∈ [0, 1) and an independent fair sign bit in one go.
+        let (p, sign) : (float, bool) = rng.gen();
+        // With probability 1-p output 0; otherwise ±magnitude with equal odds.
+        match (p < self.probability.as_(), sign) {
+            (false, _)      =>  0.0,
+            (true, true)    =>  self.magnitude,
+            (true, false)   => -self.magnitude,
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/run.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,602 @@
+/*!
+This module provides [`RunnableExperiment`] for running chosen algorithms on a chosen experiment.
+*/
+
+use numeric_literals::replace_float_literals;
+use colored::Colorize;
+use serde::{Serialize, Deserialize};
+use serde_json;
+use nalgebra::base::DVector;
+use std::hash::Hash;
+use chrono::{DateTime, Utc};
+use cpu_time::ProcessTime;
+use clap::ValueEnum;
+use std::collections::HashMap;
+use std::time::Instant;
+
+use rand::prelude::{
+    StdRng,
+    SeedableRng
+};
+use rand_distr::Distribution;
+
+use alg_tools::bisection_tree::*;
+use alg_tools::iterate::{
+    Timed,
+    AlgIteratorOptions,
+    Verbose,
+    AlgIteratorFactory,
+};
+use alg_tools::logger::Logger;
+use alg_tools::error::DynError;
+use alg_tools::tabledump::TableDump;
+use alg_tools::sets::Cube;
+use alg_tools::mapping::RealMapping;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::norms::{Norm, L1};
+use alg_tools::lingrid::lingrid;
+use alg_tools::sets::SetOrd;
+
+use crate::kernels::*;
+use crate::types::*;
+use crate::measures::*;
+use crate::measures::merging::SpikeMerging;
+use crate::forward_model::*;
+use crate::fb::{
+    FBConfig,
+    pointsource_fb,
+    FBMetaAlgorithm, FBGenericConfig,
+};
+use crate::pdps::{
+    PDPSConfig,
+    L2Squared,
+    pointsource_pdps,
+};
+use crate::frank_wolfe::{
+    FWConfig,
+    FWVariant,
+    pointsource_fw,
+    prepare_optimise_weights,
+    optimise_weights,
+};
+use crate::subproblem::InnerSettings;
+use crate::seminorms::*;
+use crate::plot::*;
+use crate::AlgorithmOverrides;
+
+/// Available algorithms and their configurations
+#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
+pub enum AlgorithmConfig<F : Float> {
+    /// Forward-backward type methods; see [`FBConfig`].
+    FB(FBConfig<F>),
+    /// Frank–Wolfe (conditional gradient) methods; see [`FWConfig`].
+    FW(FWConfig<F>),
+    /// Primal-dual proximal splitting; see [`PDPSConfig`].
+    PDPS(PDPSConfig<F>),
+}
+
+impl<F : ClapFloat> AlgorithmConfig<F> {
+    /// Override supported parameters based on the command line.
+    ///
+    /// Only options present in `cli` replace the corresponding fields; all other
+    /// settings are kept from `self`.
+    pub fn cli_override(self, cli : &AlgorithmOverrides<F>) -> Self {
+        // Shared overrides for the generic FB-style insertion configuration,
+        // used by both FB and PDPS below.
+        let override_fb_generic = |g : FBGenericConfig<F>| {
+            FBGenericConfig {
+                bootstrap_insertions : cli.bootstrap_insertions
+                                          .as_ref()
+                                          .map_or(g.bootstrap_insertions,
+                                                  |n| Some((n[0], n[1]))),
+                merge_every : cli.merge_every.unwrap_or(g.merge_every),
+                merging : cli.merging.clone().unwrap_or(g.merging),
+                final_merging : cli.final_merging.clone().unwrap_or(g.final_merging),
+                .. g
+            }
+        };
+
+        use AlgorithmConfig::*;
+        match self {
+            FB(fb) => FB(FBConfig {
+                τ0 : cli.tau0.unwrap_or(fb.τ0),
+                insertion : override_fb_generic(fb.insertion),
+                .. fb
+            }),
+            PDPS(pdps) => PDPS(PDPSConfig {
+                τ0 : cli.tau0.unwrap_or(pdps.τ0),
+                σ0 : cli.sigma0.unwrap_or(pdps.σ0),
+                acceleration : cli.acceleration.unwrap_or(pdps.acceleration),
+                insertion : override_fb_generic(pdps.insertion),
+                .. pdps
+            }),
+            FW(fw) => FW(FWConfig {
+                merging : cli.merging.clone().unwrap_or(fw.merging),
+                .. fw
+            })
+        }
+    }
+}
+
+/// Helper struct for tagging an [`AlgorithmConfig`] or [`Experiment`] with a name.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct Named<Data> {
+    pub name : String,
+    // Serialize `data`'s fields inline alongside `name` rather than nested.
+    #[serde(flatten)]
+    pub data : Data,
+}
+
+/// Shorthand algorithm configurations, to be used with the command line parser
+#[derive(ValueEnum, Debug, Copy, Clone, Eq, PartialEq, Hash)]
+pub enum DefaultAlgorithm {
+    /// The μFB forward-backward method
+    #[clap(name = "fb")]
+    FB,
+    /// The μFISTA inertial forward-backward method
+    #[clap(name = "fista")]
+    FISTA,
+    /// The “fully corrective” conditional gradient method
+    #[clap(name = "fw")]
+    FW,
+    /// The “relaxed” conditional gradient method
+    #[clap(name = "fwrelax")]
+    FWRelax,
+    /// The μPDPS primal-dual proximal splitting method
+    #[clap(name = "pdps")]
+    PDPS,
+}
+
+impl DefaultAlgorithm {
+    /// Returns the algorithm configuration corresponding to the algorithm shorthand
+    pub fn default_config<F : Float>(&self) -> AlgorithmConfig<F> {
+        use DefaultAlgorithm::*;
+        match *self {
+            FB => AlgorithmConfig::FB(Default::default()),
+            FISTA => AlgorithmConfig::FB(FBConfig{
+                meta : FBMetaAlgorithm::InertiaFISTA,
+                .. Default::default()
+            }),
+            FW => AlgorithmConfig::FW(Default::default()),
+            FWRelax => AlgorithmConfig::FW(FWConfig{
+                variant : FWVariant::Relaxed,
+                .. Default::default()
+            }),
+            PDPS => AlgorithmConfig::PDPS(Default::default()),
+        }
+    }
+
+    /// Returns the [`Named`] algorithm corresponding to the algorithm shorthand
+    pub fn get_named<F : Float>(&self) -> Named<AlgorithmConfig<F>> {
+        self.to_named(self.default_config())
+    }
+
+    /// Wraps the given configuration `alg` in a [`Named`] using this shorthand's
+    /// clap name (e.g. `"fb"`, `"pdps"`) as the tag.
+    pub fn to_named<F : Float>(self, alg : AlgorithmConfig<F>) -> Named<AlgorithmConfig<F>> {
+        let name = self.to_possible_value().unwrap().get_name().to_string();
+        Named{ name , data : alg }
+    }
+}
+
+
+// // Floats cannot be hashed directly, so just hash the debug formatting
+// // for use as file identifier.
+// impl<F : Float> Hash for AlgorithmConfig<F> {
+//     fn hash<H: Hasher>(&self, state: &mut H) {
+//         format!("{:?}", self).hash(state);
+//     }
+// }
+
+/// Plotting level configuration
+///
+/// The variants are ordered (`None < Data < Iter`), which the run logic relies on
+/// via the derived `Ord`.
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, ValueEnum, Debug)]
+pub enum PlotLevel {
+    /// Plot nothing
+    #[clap(name = "none")]
+    None,
+    /// Plot problem data
+    #[clap(name = "data")]
+    Data,
+    /// Plot iterationwise state
+    #[clap(name = "iter")]
+    Iter,
+}
+
+/// Algorithm and iterator config for the experiments
+
+// NOTE(review): `#[serde(default)]` has no effect without a `Deserialize`
+// derive — confirm whether deserialization was intended here.
+#[derive(Clone, Debug, Serialize)]
+#[serde(default)]
+pub struct Configuration<F : Float> {
+    /// Algorithms to run
+    pub algorithms : Vec<Named<AlgorithmConfig<F>>>,
+    /// Options for algorithm step iteration (verbosity, etc.)
+    pub iterator_options : AlgIteratorOptions,
+    /// Plotting level
+    pub plot : PlotLevel,
+    /// Directory where to save results
+    pub outdir : String,
+    /// Bisection tree depth
+    pub bt_depth : DynamicDepth,
+}
+
+/// Default bisection tree type used by the operators below.
+type DefaultBT<F, const N : usize> = BT<
+    DynamicDepth,
+    F,
+    usize,
+    Bounds<F>,
+    N
+>;
+/// Default convolution (seminorm) operator $𝒟$ backed by [`DefaultBT`].
+type DefaultSeminormOp<F, K, const N : usize> = ConvolutionOp<F, K, DefaultBT<F, N>, N>;
+/// Default sensor-grid forward operator $𝒜$ backed by [`DefaultBT`].
+type DefaultSG<F, Sensor, Spread, const N : usize> = SensorGrid::<
+    F,
+    Sensor,
+    Spread,
+    DefaultBT<F, N>,
+    N
+>;
+
+/// This is a dirty workaround to rust-csv not supporting struct flattening etc.
+///
+/// One row of the per-iteration log written to `<algorithm>_log.txt`.
+#[derive(Serialize)]
+struct CSVLog<F> {
+    // Iteration number
+    iter : usize,
+    // Cumulative CPU time in seconds
+    cpu_time : f64,
+    // Functional value at this iteration
+    value : F,
+    // Functional value after weight-optimisation postprocessing (or `value`)
+    post_value : F,
+    // Number of spikes in the measure
+    n_spikes : usize,
+    // Cumulative inner-subproblem iterations
+    inner_iters : usize,
+    // Number of merged spikes
+    merged : usize,
+    // Number of pruned spikes
+    pruned : usize,
+    // Iterations performed since the previous logged row
+    this_iters : usize,
+}
+
+/// Collected experiment statistics
+#[derive(Clone, Debug, Serialize)]
+struct ExperimentStats<F : Float> {
+    /// Signal-to-noise ratio in decibels
+    ssnr : F,
+    /// Proportion of noise in the signal as a number in $[0, 1]$.
+    noise_ratio : F,
+    /// When the experiment was run (UTC)
+    when : DateTime<Utc>,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> ExperimentStats<F> {
+    /// Calculate [`ExperimentStats`] based on a noisy `signal` and the separated `noise` signal.
+    fn new<E : Euclidean<F>>(signal : &E, noise : &E) -> Self {
+        // s and n are squared norms, i.e. powers, so 10·log₁₀ gives decibels.
+        let s = signal.norm2_squared();
+        let n = noise.norm2_squared();
+        let noise_ratio = (n / s).sqrt();
+        let ssnr = 10.0 * (s /  n).log10();
+        ExperimentStats {
+            ssnr,
+            noise_ratio,
+            when : Utc::now(),
+        }
+    }
+}
+/// Collected algorithm statistics
+#[derive(Clone, Debug, Serialize)]
+struct AlgorithmStats<F : Float> {
+    /// Overall CPU time spent
+    cpu_time : F,
+    /// Real time spent
+    elapsed : F
+}
+
+
+/// A wrapper for [`serde_json::to_writer_pretty`] that takes a filename as input
+/// and outputs a [`DynError`].
+///
+/// The file is created (or truncated) before the pretty-printed JSON is written.
+fn write_json<T : Serialize>(filename : String, data : &T) -> DynError {
+    let file = std::fs::File::create(filename)?;
+    serde_json::to_writer_pretty(file, data)?;
+    Ok(())
+}
+
+
+/// Struct for experiment configurations
+///
+/// Describes one reconstruction problem instance: domain, forward operator
+/// components, noise model, ground truth, and regularisation.
+#[derive(Debug, Clone, Serialize)]
+pub struct Experiment<F, NoiseDistr, S, K, P, const N : usize>
+where F : Float,
+      [usize; N] : Serialize,
+      NoiseDistr : Distribution<F>,
+      S : Sensor<F, N>,
+      P : Spread<F, N>,
+      K : SimpleConvolutionKernel<F, N>,
+{
+    /// Domain $Ω$.
+    pub domain : Cube<F, N>,
+    /// Number of sensors along each dimension
+    pub sensor_count : [usize; N],
+    /// Noise distribution
+    pub noise_distr : NoiseDistr,
+    /// Seed for random noise generation (for repeatable experiments)
+    pub noise_seed : u64,
+    /// Sensor $θ$; $θ * ψ$ forms the forward operator $𝒜$.
+    pub sensor : S,
+    /// Spread $ψ$; $θ * ψ$ forms the forward operator $𝒜$.
+    pub spread : P,
+    /// Kernel $ρ$ of $𝒟$.
+    pub kernel : K,
+    /// True point sources
+    pub μ_hat : DiscreteMeasure<Loc<F, N>, F>,
+    /// Regularisation parameter
+    pub α : F,
+    /// For plotting : how wide should the kernels be plotted
+    pub kernel_plot_width : F,
+    /// Data term
+    pub dataterm : DataTerm,
+    /// A map of default configurations for algorithms
+    #[serde(skip)]
+    pub algorithm_defaults : HashMap<DefaultAlgorithm, AlgorithmConfig<F>>,
+}
+
+/// Trait for runnable experiments
+pub trait RunnableExperiment<F : ClapFloat> {
+    /// Run all algorithms of the [`Configuration`] `config` on the experiment.
+    fn runall(&self, config : Configuration<F>) -> DynError;
+
+    /// Returns the default configuration
+    fn default_config(&self) -> Configuration<F>;
+
+    /// Return algorithm default config, with command line overrides from `cli` applied.
+    fn algorithm_defaults(&self, alg : DefaultAlgorithm, cli : &AlgorithmOverrides<F>)
+    -> Named<AlgorithmConfig<F>>;
+}
+
+impl<F, NoiseDistr, S, K, P, const N : usize> RunnableExperiment<F> for
+Named<Experiment<F, NoiseDistr, S, K, P, N>>
+where F : ClapFloat + nalgebra::RealField + ToNalgebraRealField<MixedType=F>,
+      [usize; N] : Serialize,
+      S : Sensor<F, N> + Copy + Serialize,
+      P : Spread<F, N> + Copy + Serialize,
+      Convolution<S, P>: Spread<F, N> + Bounded<F> + LocalAnalysis<F, Bounds<F>, N> + Copy,
+      AutoConvolution<P> : BoundedBy<F, K>,
+      K : SimpleConvolutionKernel<F, N> + LocalAnalysis<F, Bounds<F>, N> + Copy + Serialize,
+      Cube<F, N>: P2Minimise<Loc<F, N>, F> + SetOrd,
+      PlotLookup : Plotting<N>,
+      DefaultBT<F, N> : SensorGridBT<F, S, P, N, Depth=DynamicDepth> + BTSearch<F, N>,
+      BTNodeLookup: BTNode<F, usize, Bounds<F>, N>,
+      DiscreteMeasure<Loc<F, N>, F> : SpikeMerging<F>,
+      NoiseDistr : Distribution<F> + Serialize {
+
+    // Look up the stored default for `alg`, falling back to the built-in
+    // default, then apply command line overrides.
+    fn algorithm_defaults(&self, alg : DefaultAlgorithm, cli : &AlgorithmOverrides<F>)
+    -> Named<AlgorithmConfig<F>> {
+        alg.to_named(
+            self.data
+                .algorithm_defaults
+                .get(&alg)
+                .map_or_else(|| alg.default_config(),
+                            |config| config.clone())
+                .cli_override(cli)
+        )
+    }
+
+    // Default configuration: the algorithm is chosen by the data term
+    // (FB for L2², PDPS for L1).
+    fn default_config(&self) -> Configuration<F> {
+        let default_alg = match self.data.dataterm {
+            DataTerm::L2Squared => DefaultAlgorithm::FB.get_named(),
+            DataTerm::L1 => DefaultAlgorithm::PDPS.get_named(),
+        };
+
+        Configuration{
+            algorithms : vec![default_alg],
+            iterator_options : AlgIteratorOptions{
+                max_iter : 2000,
+                verbose_iter : Verbose::Logarithmic(10),
+                quiet : false,
+            },
+            plot : PlotLevel::Data,
+            outdir : "out".to_string(),
+            bt_depth : DynamicDepth(8),
+        }
+    }
+
+    // Generate data, run each configured algorithm, and write all results
+    // (JSON configs/stats, CSV logs, reconstructions, plots) under
+    // `{outdir}/{experiment_name}/`.
+    fn runall(&self, config : Configuration<F>) -> DynError {
+        let &Named {
+            name : ref experiment_name,
+            data : Experiment {
+                domain, sensor_count, ref noise_distr, sensor, spread, kernel,
+                ref μ_hat, α, kernel_plot_width, dataterm, noise_seed,
+                ..
+            }
+        } = self;
+
+        // Set path
+        let prefix = format!("{}/{}/", config.outdir, experiment_name);
+
+        // Set up operators
+        let depth = config.bt_depth;
+        let opA = DefaultSG::new(domain, sensor_count, sensor, spread, depth);
+        let op𝒟 = DefaultSeminormOp::new(depth, domain, kernel);
+
+        // Set up random number generator.
+        let mut rng = StdRng::seed_from_u64(noise_seed);
+
+        // Generate the data and calculate SSNR statistic
+        let b_hat = opA.apply(μ_hat);
+        let noise = DVector::from_distribution(b_hat.len(), &noise_distr, &mut rng);
+        let b = &b_hat + &noise;
+        // Need to wrap calc_ssnr into a function to hide ultra-lame nalgebra::RealField
+        // overloading log10 and conflicting with standard NumTraits one.
+        let stats = ExperimentStats::new(&b, &noise);
+
+        // Save experiment configuration and statistics
+        let mkname_e = |t| format!("{prefix}{t}.json", prefix = prefix, t = t);
+        std::fs::create_dir_all(&prefix)?;
+        write_json(mkname_e("experiment"), self)?;
+        write_json(mkname_e("config"), &config)?;
+        write_json(mkname_e("stats"), &stats)?;
+
+        plotall(&config, &prefix, &domain, &sensor, &kernel, &spread,
+                &μ_hat, &op𝒟, &opA, &b_hat, &b, kernel_plot_width)?;
+
+        // Run the algorithm(s)
+        for named @ Named { name : alg_name, data : alg } in config.algorithms.iter() {
+            let this_prefix = format!("{}{}/", prefix, alg_name);
+
+            let running = || {
+                println!("{}\n{}\n{}",
+                        format!("Running {} on experiment {}…", alg_name, experiment_name).cyan(),
+                        format!("{:?}", config.iterator_options).bright_black(),
+                        format!("{:?}", alg).bright_black());
+            };
+
+            // Create Logger and IteratorFactory
+            let mut logger = Logger::new();
+            let findim_data = prepare_optimise_weights(&opA);
+            let inner_config : InnerSettings<F> = Default::default();
+            let inner_it = inner_config.iterator_options;
+            // Map each timed iteration state to a CSV row. For the L2² data term,
+            // `post_value` re-optimises the spike weights before evaluation.
+            let logmap = |iter, Timed { cpu_time, data }| {
+                let IterInfo {
+                    value,
+                    n_spikes,
+                    inner_iters,
+                    merged,
+                    pruned,
+                    postprocessing,
+                    this_iters,
+                    ..
+                } = data;
+                let post_value = match postprocessing {
+                    None => value,
+                    Some(mut μ) => {
+                        match dataterm {
+                            DataTerm::L2Squared => {
+                                optimise_weights(
+                                    &mut μ, &opA, &b, α, &findim_data, &inner_config,
+                                    inner_it
+                                );
+                                dataterm.value_at_residual(opA.apply(&μ) - &b) + α * μ.norm(Radon)
+                            },
+                            _ => value,
+                        }
+                    }
+                };
+                CSVLog {
+                    iter,
+                    value,
+                    post_value,
+                    n_spikes,
+                    cpu_time : cpu_time.as_secs_f64(),
+                    inner_iters,
+                    merged,
+                    pruned,
+                    this_iters
+                }
+            };
+            let iterator = config.iterator_options
+                                 .instantiate()
+                                 .timed()
+                                 .mapped(logmap)
+                                 .into_log(&mut logger);
+            let plotgrid = lingrid(&domain, &[if N==1 { 1000 } else { 100 }; N]);
+
+            // Create plotter and directory if needed.
+            let plot_count = if config.plot >= PlotLevel::Iter { 2000 } else { 0 };
+            let plotter = SeqPlotter::new(this_prefix, plot_count, plotgrid);
+
+            // Run the algorithm
+            let start = Instant::now();
+            let start_cpu = ProcessTime::now();
+            let μ : DiscreteMeasure<Loc<F, N>, F> = match (alg, dataterm) {
+                (AlgorithmConfig::FB(ref algconfig), DataTerm::L2Squared) => {
+                    running();
+                    pointsource_fb(&opA, &b, α, &op𝒟, &algconfig, iterator, plotter)
+                },
+                (AlgorithmConfig::FW(ref algconfig), DataTerm::L2Squared) => {
+                    running();
+                    pointsource_fw(&opA, &b, α, &algconfig, iterator, plotter)
+                },
+                (AlgorithmConfig::PDPS(ref algconfig), DataTerm::L2Squared) => {
+                    running();
+                    pointsource_pdps(&opA, &b, α, &op𝒟, &algconfig, iterator, plotter, L2Squared)
+                },
+                (AlgorithmConfig::PDPS(ref algconfig), DataTerm::L1) => {
+                    running();
+                    pointsource_pdps(&opA, &b, α, &op𝒟, &algconfig, iterator, plotter, L1)
+                },
+                // Skip algorithm/data-term combinations with no implementation.
+                _ =>  {
+                    let msg = format!("Algorithm “{}” not implemented for dataterm {:?}. Skipping.",
+                                      alg_name, dataterm).red();
+                    eprintln!("{}", msg);
+                    continue
+                }
+            };
+            let elapsed = start.elapsed().as_secs_f64();
+            let cpu_time = start_cpu.elapsed().as_secs_f64();
+
+            println!("{}", format!("Elapsed {elapsed}s (CPU time {cpu_time}s)… ").yellow());
+
+            // Save results
+            println!("{}", "Saving results…".green());
+
+            let mkname = |
+            t| format!("{p}{n}_{t}", p = prefix, n = alg_name, t = t);
+
+            write_json(mkname("config.json"), &named)?;
+            write_json(mkname("stats.json"), &AlgorithmStats { cpu_time, elapsed })?;
+            μ.write_csv(mkname("reco.txt"))?;
+            logger.write_csv(mkname("log.txt"))?;
+        }
+
+        Ok(())
+    }
+}
+
+/// Plot experiment setup
+///
+/// Writes plots of the sensor, kernel, spread, base sensor, $ω̂ = 𝒟μ̂$, the
+/// preadjoint images of the data, and CSV dumps of the true solution and
+/// observables under `prefix`. Does nothing when plotting is disabled.
+#[replace_float_literals(F::cast_from(literal))]
+fn plotall<F, Sensor, Kernel, Spread, 𝒟, A, const N : usize>(
+    config : &Configuration<F>,
+    prefix : &String,
+    domain : &Cube<F, N>,
+    sensor : &Sensor,
+    kernel : &Kernel,
+    spread : &Spread,
+    μ_hat : &DiscreteMeasure<Loc<F, N>, F>,
+    op𝒟 : &𝒟,
+    opA : &A,
+    b_hat : &A::Observable,
+    b : &A::Observable,
+    kernel_plot_width : F,
+) -> DynError
+where F : Float + ToNalgebraRealField,
+      Sensor : RealMapping<F, N> + Support<F, N> + Clone,
+      Spread : RealMapping<F, N> + Support<F, N> + Clone,
+      Kernel : RealMapping<F, N> + Support<F, N>,
+      Convolution<Sensor, Spread> : RealMapping<F, N> + Support<F, N>,
+      𝒟 : DiscreteMeasureOp<Loc<F, N>, F>,
+      𝒟::Codomain : RealMapping<F, N>,
+      A : ForwardModel<Loc<F, N>, F>,
+      A::PreadjointCodomain : RealMapping<F, N> + Bounded<F>,
+      PlotLookup : Plotting<N>,
+      Cube<F, N> : SetOrd {
+
+    // Nothing to do below the Data plot level.
+    if config.plot < PlotLevel::Data {
+        return Ok(())
+    }
+
+    let base = Convolution(sensor.clone(), spread.clone());
+
+    let resolution = if N==1 { 100 } else { 40 };
+    let pfx = |n| format!("{}{}", prefix, n);
+    // Kernel-type functions are plotted on a symmetric window around the origin.
+    let plotgrid = lingrid(&[[-kernel_plot_width, kernel_plot_width]; N].into(), &[resolution; N]);
+
+    PlotLookup::plot_into_file(sensor, plotgrid, pfx("sensor"), "sensor".to_string());
+    PlotLookup::plot_into_file(kernel, plotgrid, pfx("kernel"), "kernel".to_string());
+    PlotLookup::plot_into_file(spread, plotgrid, pfx("spread"), "spread".to_string());
+    PlotLookup::plot_into_file(&base, plotgrid, pfx("base_sensor"), "base_sensor".to_string());
+
+    // Domain-wide quantities are plotted on the experiment domain itself.
+    let plotgrid2 = lingrid(&domain, &[resolution; N]);
+
+    let ω_hat = op𝒟.apply(μ_hat);
+    let noise =  opA.preadjoint().apply(opA.apply(μ_hat) - b);
+    PlotLookup::plot_into_file(&ω_hat, plotgrid2, pfx("omega_hat"), "ω̂".to_string());
+    PlotLookup::plot_into_file(&noise, plotgrid2, pfx("omega_noise"),
+                               "noise Aᵀ(Aμ̂ - b)".to_string());
+
+    let preadj_b =  opA.preadjoint().apply(b);
+    let preadj_b_hat =  opA.preadjoint().apply(b_hat);
+    //let bounds = preadj_b.bounds().common(&preadj_b_hat.bounds());
+    PlotLookup::plot_into_file_spikes(
+        "Aᵀb".to_string(), &preadj_b,
+        "Aᵀb̂".to_string(), Some(&preadj_b_hat),
+        plotgrid2, None, &μ_hat,
+        pfx("omega_b")
+    );
+
+    // Save true solution and observables
+    // NOTE(review): this re-definition of `pfx` is identical to the one above
+    // and appears redundant — confirm.
+    let pfx = |n| format!("{}{}", prefix, n);
+    μ_hat.write_csv(pfx("orig.txt"))?;
+    opA.write_observable(&b_hat, pfx("b_hat"))?;
+    opA.write_observable(&b, pfx("b_noisy"))
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/seminorms.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,378 @@
+/*! This module implements the convolution operators $𝒟$.
+
+The principal data type of the module is [`ConvolutionOp`] and the main abstraction
+the trait [`DiscreteMeasureOp`].
+*/
+
+use std::iter::Zip;
+use std::ops::RangeFrom;
+use alg_tools::types::*;
+use alg_tools::loc::Loc;
+use alg_tools::sets::Cube;
+use alg_tools::bisection_tree::*;
+use alg_tools::mapping::RealMapping;
+use alg_tools::iter::{Mappable, FilterMapX};
+use alg_tools::linops::{Apply, Linear, BoundedLinear};
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+use crate::measures::{DiscreteMeasure, DeltaMeasure, SpikeIter};
+use nalgebra::DMatrix;
+use std::marker::PhantomData;
+use itertools::Itertools;
+
+/// Abstraction for operators $𝒟 ∈ 𝕃(𝒵(Ω); C_c(Ω))$.
+///
+/// Here $𝒵(Ω) ⊂ ℳ(Ω)$ is the space of sums of delta measures, presented by [`DiscreteMeasure`].
+pub trait DiscreteMeasureOp<Domain, F> : BoundedLinear<DiscreteMeasure<Domain, F>, FloatType=F>
+where F : Float + ToNalgebraRealField,
+      Domain : 'static {
+    /// The output type of [`Self::preapply`].
+    type PreCodomain;
+
+    /// Creates a finite-dimensional presentation of the operator restricted to a fixed support.
+    ///
+    /// <p>
+    /// This returns the matrix $C_*𝒟C$, where $C ∈ 𝕃(ℝ^n; 𝒵(Ω))$, $Ca = ∑_{i=1}^n α_i δ_{x_i}$
+    /// for a $x_1, …, x_n$ the coordinates given by the iterator `I`, and $a=(α_1,…,α_n)$.
+    /// Here $C_* ∈ 𝕃(C_c(Ω); ℝ^n) $ stands for the preadjoint.
+    /// </p>
+    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
+    where I : ExactSizeIterator<Item=&'a Domain> + Clone;
+
+    /// [`Apply::apply`] that typically returns an uninitialised [`PreBTFN`]
+    /// instead of a full [`BTFN`].
+    fn preapply(&self, μ : DiscreteMeasure<Domain, F>) -> Self::PreCodomain;
+}
+
+// Blanket implementation of a measure as a linear functional over a predual
+// (that by assumption is a linear functional over a measure).
+/*impl<F, Domain, Predual> Linear<Predual>
+for DiscreteMeasure<Domain, F>
+where F : Float + ToNalgebraRealField,
+      Predual : Linear<DiscreteMeasure<Domain, F>, Codomain=F> {
+    type Codomain = F;
+
+    #[inline]
+    fn apply(&self, ω : &Predual) -> F {
+        ω.apply(self)
+    }
+}*/
+
+//
+// Convolutions for discrete measures
+//
+
+/// A trait alias for simple convolution kernels.
+pub trait SimpleConvolutionKernel<F : Float, const N : usize>
+: RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}
+
+impl<T, F : Float, const N : usize> SimpleConvolutionKernel<F, N> for T
+where T : RealMapping<F, N> + Support<F, N> + Bounded<F> + Clone + 'static {}
+
+/// [`SupportGenerator`] for [`ConvolutionOp`].
+#[derive(Clone,Debug)]
+pub struct ConvolutionSupportGenerator<F : Float, K, const N : usize>
+where K : SimpleConvolutionKernel<F, N> {
+    kernel : K,
+    centres : DiscreteMeasure<Loc<F, N>, F>,
+}
+
+impl<F : Float, K, const N : usize> ConvolutionSupportGenerator<F, K, N>
+where K : SimpleConvolutionKernel<F, N> {
+
+    /// Construct the convolution kernel corresponding to `δ`, i.e., one centered at `δ.x` and
+    /// weighted by `δ.α`.
+    #[inline]
+    fn construct_kernel<'a>(&'a self, δ : &'a DeltaMeasure<Loc<F, N>, F>)
+    -> Weighted<Shift<K, F, N>, F> {
+        self.kernel.clone().shift(δ.x).weigh(δ.α)
+    }
+
+    /// This is a helper method for the implementation of [`ConvolutionSupportGenerator::all_data`].
+    /// It filters out `δ` with zero weight, and otherwise returns the corresponding convolution
+    /// kernel. The `id` is passed through as-is.
+    #[inline]
+    fn construct_kernel_and_id_filtered<'a>(
+        &'a self,
+        (id, δ) : (usize, &'a DeltaMeasure<Loc<F, N>, F>)
+    ) -> Option<(usize, Weighted<Shift<K, F, N>, F>)> {
+        (δ.α != F::ZERO).then(|| (id.into(), self.construct_kernel(δ)))
+    }
+}
+
+impl<F : Float, K, const N : usize> SupportGenerator<F, N>
+for ConvolutionSupportGenerator<F, K, N>
+where K : SimpleConvolutionKernel<F, N> {
+    type Id = usize;
+    type SupportType = Weighted<Shift<K, F, N>, F>;
+    type AllDataIter<'a> = FilterMapX<'a, Zip<RangeFrom<usize>, SpikeIter<'a, Loc<F, N>, F>>,
+                                      Self, (Self::Id, Self::SupportType)>;
+
+    #[inline]
+    fn support_for(&self, d : Self::Id) -> Self::SupportType {
+        self.construct_kernel(&self.centres[d])
+    }
+
+    #[inline]
+    fn support_count(&self) -> usize {
+        self.centres.len()
+    }
+
+    #[inline]
+    fn all_data(&self) -> Self::AllDataIter<'_> {
+        (0..).zip(self.centres.iter_spikes())
+             .filter_mapX(self, Self::construct_kernel_and_id_filtered)
+    }
+}
+
+/// Representation of a convolution operator $𝒟$.
+#[derive(Clone,Debug)]
+pub struct ConvolutionOp<F, K, BT, const N : usize>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+    /// Depth of the [`BT`] bisection tree for the outputs [`Apply::apply`].
+    depth : BT::Depth,
+    /// Domain of the [`BT`] bisection tree for the outputs [`Apply::apply`].
+    domain : Cube<F, N>,
+    /// The convolution kernel
+    kernel : K,
+    _phantoms : PhantomData<(F,BT)>,
+}
+
+impl<F, K, BT, const N : usize> ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+
+    /// Creates a new convolution operator $𝒟$ with `kernel` on `domain`.
+    ///
+    /// The output of [`Apply::apply`] is a [`BT`] of given `depth`.
+    pub fn new(depth : BT::Depth, domain : Cube<F, N>, kernel : K) -> Self {
+        ConvolutionOp {
+            depth : depth,
+            domain : domain,
+            kernel : kernel,
+            _phantoms : PhantomData
+        }
+    }
+
+    /// Returns the support generator for this convolution operator.
+    fn support_generator(&self, μ : DiscreteMeasure<Loc<F, N>, F>)
+    -> ConvolutionSupportGenerator<F, K, N> {
+
+        // TODO: can we avoid cloning μ?
+        ConvolutionSupportGenerator {
+            kernel : self.kernel.clone(),
+            centres : μ
+        }
+    }
+
+    /// Returns a reference to the kernel of this convolution operator.
+    pub fn kernel(&self) -> &K {
+        &self.kernel
+    }
+}
+
+impl<F, K, BT, const N : usize> Apply<DiscreteMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
+
+    fn apply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
+        let g = self.support_generator(μ);
+        BTFN::construct(self.domain.clone(), self.depth, g)
+    }
+}
+
+impl<'a, F, K, BT, const N : usize> Apply<&'a DiscreteMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type Output = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
+
+    fn apply(&self, μ : &'a DiscreteMeasure<Loc<F, N>, F>) -> Self::Output {
+        self.apply(μ.clone())
+    }
+}
+
+/// [`ConvolutionOp`]s as linear operators over [`DiscreteMeasure`]s.
+impl<F, K, BT, const N : usize> Linear<DiscreteMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+    type Codomain = BTFN<F, ConvolutionSupportGenerator<F, K, N>, BT, N>;
+}
+
+impl<F, K, BT, const N : usize> Apply<DeltaMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+
+    type Output = Weighted<Shift<K, F, N>, F>;
+
+    #[inline]
+    fn apply(&self, δ : DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
+        self.kernel.clone().shift(δ.x).weigh(δ.α)
+    }
+}
+
+impl<'a, F, K, BT, const N : usize> Apply<&'a DeltaMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+
+    type Output = Weighted<Shift<K, F, N>, F>;
+
+    #[inline]
+    fn apply(&self, δ : &'a DeltaMeasure<Loc<F, N>, F>) -> Self::Output {
+        self.kernel.clone().shift(δ.x).weigh(δ.α)
+    }
+}
+
+/// [`ConvolutionOp`]s as linear operators over [`DeltaMeasure`]s.
+///
+/// The codomain is different from the implementation for [`DiscreteMeasure`].
+impl<F, K, BT, const N : usize> Linear<DeltaMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N> {
+    type Codomain = Weighted<Shift<K, F, N>, F>;
+}
+
+impl<F, K, BT, const N : usize> BoundedLinear<DiscreteMeasure<Loc<F, N>, F>>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+
+    type FloatType = F;
+
+    fn opnorm_bound(&self) -> F {
+        // With μ = ∑_i α_i δ_{x_i}, we have
+        // |𝒟μ|_∞
+        // = sup_z |∑_i α_i φ(z - x_i)|
+        // ≤ sup_z ∑_i |α_i| |φ(z - x_i)|
+        // ≤ ∑_i |α_i| |φ|_∞
+        // = |μ|_ℳ |φ|_∞
+        self.kernel.bounds().uniform()
+    }
+}
+
+
+impl<F, K, BT, const N : usize> DiscreteMeasureOp<Loc<F, N>, F>
+for ConvolutionOp<F, K, BT, N>
+where F : Float + ToNalgebraRealField,
+      BT : BTImpl<F, N, Data=usize>,
+      K : SimpleConvolutionKernel<F, N>,
+      Weighted<Shift<K, F, N>, F> : LocalAnalysis<F, BT::Agg, N> {
+    type PreCodomain = PreBTFN<F, ConvolutionSupportGenerator<F, K, N>, N>;
+
+    fn findim_matrix<'a, I>(&self, points : I) -> DMatrix<F::MixedType>
+    where I : ExactSizeIterator<Item=&'a Loc<F,N>> + Clone {
+        // TODO: Preliminary implementation. It would be best to use sparse matrices or
+        // possibly explicit operators without matrices
+        let n = points.len();
+        let points_clone = points.clone();
+        let pairs = points.cartesian_product(points_clone);
+        let kernel = &self.kernel;
+        let values = pairs.map(|(x, y)| kernel.apply(y-x).to_nalgebra_mixed());
+        DMatrix::from_iterator(n, n, values)
+    }
+
+    /// A version of [`Apply::apply`] that does not instantiate the [`BTFN`] codomain with
+    /// a bisection tree, instead returning a [`PreBTFN`]. This can improve performance when
+    /// the output is to be added as the right-hand-side operand to a proper BTFN.
+    fn preapply(&self, μ : DiscreteMeasure<Loc<F, N>, F>) -> Self::PreCodomain {
+        BTFN::new_pre(self.support_generator(μ))
+    }
+}
+
+/// Generates a scalar operation (e.g. [`std::ops::Mul`], [`std::ops::Div`])
+/// for [`ConvolutionSupportGenerator`].
+macro_rules! make_convolutionsupportgenerator_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait_assign<F>
+        for ConvolutionSupportGenerator<F, K, N> {
+            fn $fn_assign(&mut self, t : F) {
+                self.centres.$fn_assign(t);
+            }
+        }
+
+        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait<F>
+        for ConvolutionSupportGenerator<F, K, N> {
+            type Output = ConvolutionSupportGenerator<F, K, N>;
+            fn $fn(mut self, t : F) -> Self::Output {
+                std::ops::$trait_assign::$fn_assign(&mut self.centres, t);
+                self
+            }
+        }
+        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait<F>
+        for &'a ConvolutionSupportGenerator<F, K, N> {
+            type Output = ConvolutionSupportGenerator<F, K, N>;
+            fn $fn(self, t : F) -> Self::Output {
+                ConvolutionSupportGenerator{
+                    kernel : self.kernel.clone(),
+                    centres : (&self.centres).$fn(t),
+                }
+            }
+        }
+    }
+}
+
+make_convolutionsupportgenerator_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_convolutionsupportgenerator_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+
+/// Generates a unary operation (e.g. [`std::ops::Neg`]) for [`ConvolutionSupportGenerator`].
+macro_rules! make_convolutionsupportgenerator_unaryop {
+    ($trait:ident, $fn:ident) => {
+        impl<F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait
+        for ConvolutionSupportGenerator<F, K, N> {
+            type Output = ConvolutionSupportGenerator<F, K, N>;
+            fn $fn(mut self) -> Self::Output {
+                self.centres = self.centres.$fn();
+                self
+            }
+        }
+
+        impl<'a, F : Float, K : SimpleConvolutionKernel<F, N>, const N : usize>
+        std::ops::$trait
+        for &'a ConvolutionSupportGenerator<F, K, N> {
+            type Output = ConvolutionSupportGenerator<F, K, N>;
+            fn $fn(self) -> Self::Output {
+                ConvolutionSupportGenerator{
+                    kernel : self.kernel.clone(),
+                    centres : (&self.centres).$fn(),
+                }
+            }
+        }
+    }
+}
+
+make_convolutionsupportgenerator_unaryop!(Neg, neg);
+
+/// Trait for indicating that `Self` is Lipschitz with respect to the seminorm `D`.
+pub trait Lipschitz<D> {
+    /// The type of floats
+    type FloatType : Float;
+
+    /// Returns the Lipschitz factor of `self` with respect to the seminorm `D`.
+    fn lipschitz_factor(&self, seminorm : &D) -> Option<Self::FloatType>;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/subproblem.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,373 @@
+//! Iterative algorithms for solving finite-dimensional subproblems.
+
+use serde::{Serialize, Deserialize};
+use nalgebra::{DVector, DMatrix};
+use numeric_literals::replace_float_literals;
+use itertools::{izip, Itertools};
+use colored::Colorize;
+
+use alg_tools::iter::Mappable;
+use alg_tools::error::NumericalError;
+use alg_tools::iterate::{
+    AlgIteratorFactory,
+    AlgIteratorState,
+    AlgIteratorOptions,
+    Verbose,
+    Step,
+};
+use alg_tools::linops::GEMV;
+use alg_tools::nalgebra_support::ToNalgebraRealField;
+
+use crate::types::*;
+
+/// Method for solving finite-dimensional subproblems
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[allow(dead_code)]
+pub enum InnerMethod {
+    /// Forward-backward
+    FB,
+    /// Semismooth Newton
+    SSN,
+}
+
+/// Settings for the solution of finite-dimensional subproblems
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+pub struct InnerSettings<F : Float> {
+    /// Method
+    pub method : InnerMethod,
+    /// Proportional step length (∈ [0, 1) for `InnerMethod::FB`).
+    pub τ0 : F,
+    /// Fraction of `tolerance` given to inner algorithm
+    pub tolerance_mult : F,
+    /// Iterator options
+    #[serde(flatten)]
+    pub iterator_options : AlgIteratorOptions,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for InnerSettings<F> {
+    fn default() -> Self {
+        InnerSettings {
+            τ0 : 0.99,
+            iterator_options : AlgIteratorOptions {
+                // max_iter cannot be very small, as initially FB needs many iterations, although
+                // on later invocations even one or two tends to be enough
+                max_iter : 2000,
+                // verbose_iter affects testing of sufficient convergence, so we set it to
+                // a small value…
+                verbose_iter : Verbose::Every(1),
+                // … but don't print out anything
+                quiet : true,
+                .. Default::default()
+            },
+            method : InnerMethod::FB,
+            tolerance_mult : 0.01,
+        }
+    }
+}
+
+/// Compute the proximal operator of $x \mapsto x + \delta\_{[0, \infty)}$, i.e.,
+/// the non-negativity constrained soft-thresholding operator.
+#[inline]
+#[replace_float_literals(F::cast_from(literal))]
+fn nonneg_soft_thresholding<F : Float>(v : F, λ : F) -> F {
+    (v - λ).max(0.0)
+}
+
+/// Forward-backward splitting implementation of [`quadratic_nonneg`].
+/// For detailed documentation of the inputs and outputs, refer to there.
+///
+/// The `λ` component of the model is handled in the proximal step instead of the gradient step
+/// for potential performance improvements.
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn quadratic_nonneg_fb<F, I>(
+    mA : &DMatrix<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    //c_ : F,
+    λ_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let mut xprev = x.clone();
+    //let c = c_.to_nalgebra_mixed();
+    let λ = λ_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let τλ = τ * λ;
+    let mut v = DVector::zeros(x.len());
+    let mut iters = 0;
+
+    iterator.iterate(|state| {
+        // Replace `x` with $x - τ[Ax-g]= [x + τg]- τAx$
+        v.copy_from(g);             // v = g
+        v.axpy(1.0, x, τ);          // v = x + τ*g
+        v.sygemv(-τ, mA, x, 1.0);   // v = [x + τg]- τAx
+        let backup = state.if_verbose(|| {
+            xprev.copy_from(x)
+        });
+        // Calculate the proximal map
+        x.iter_mut().zip(v.iter()).for_each(|(x_i, &v_i)| {
+            *x_i = nonneg_soft_thresholding(v_i, τλ);
+        });
+
+        iters +=1;
+
+        backup.map(|_| {
+            // The subdifferential of the objective is $Ax - g + λ + ∂ δ_{≥ 0}(x)$.
+            // We return the minimal ∞-norm over all subderivatives.
+            v.copy_from(g);                  // d = g
+            mA.gemv(&mut v, 1.0, x, -1.0);   // d =  Ax - g
+            let mut val = 0.0;
+            for (&v_i, &x_i) in izip!(v.iter(), x.iter()) {
+                let d = v_i + λ;
+                if x_i > 0.0 || d < 0.0 {
+                    val = val.max(d.abs());
+                }
+            }
+            F::from_nalgebra_mixed(val)
+        })
+    });
+
+    iters
+}
+
+/// Semismooth Newton implementation of [`quadratic_nonneg`].
+///
+/// For detailed documentation of the inputs, refer to there.
+/// This function returns the number of iterations taken if there was no inversion failure.
+///
+/// ## Method derivation
+///
+/// **The below may look like garbage. Sorry, but rustdoc is obsolete rubbish
+/// that doesn't directly support by-now standard-in-markdown LaTeX math. Instead it
+/// forces one into unreliable KaTeX autorender postprocessing and escape hell and that
+/// it doesn't even process correctly.**
+///
+/// <p>
+/// For the objective
+/// $$
+///     J(x) = \frac{1}{2} x^⊤Ax - g^⊤ x + λ{\vec 1}^⊤ x + c + δ_{≥ 0}(x),
+/// $$
+/// we have the optimality condition
+/// $$
+///     x - \mathop{\mathrm{prox}}_{τλ{\vec 1}^⊤ + δ_{≥ 0}}(x - τ[Ax-g^⊤]) = 0,
+/// $$
+/// which we write as
+/// $$
+///     x - [G ∘ F](x)=0
+/// $$
+/// for
+/// $$
+///     G(x) = \mathop{\mathrm{prox}}_{λ{\vec 1}^⊤ + δ_{≥ 0}}
+///     \quad\text{and}\quad
+///     F(x) = x - τ Ax + τ g^⊤
+/// $$
+/// We can use Newton derivative chain rule to compute
+/// $D_N[G ∘ F](x) = D_N G(F(x)) D_N F(x)$, where
+/// $D_N F(x) = \mathop{\mathrm{Id}} - τ A$,
+/// and $[D_N G(F(x))]_i = 1$ for inactive coordinates and $=0$ for active coordinates.
+/// </p>
+///
+/// <p>
+/// The method itself involves solving $D_N[Id - G ∘ F](x^k) s^k = - [Id - G ∘ F](x^k)$ and
+/// updating $x^{k+1} = x^k + s^k$. Consequently
+/// $$
+///     s^k - D_N G(F(x^k)) [s^k - τ As^k] = - x^k + [G ∘ F](x^k)
+/// $$
+/// For $𝒜$ the set of active coordinates and $ℐ$ the set of inactive coordinates, this
+/// expands as
+/// $$
+///     [τ A_{ℐ × ℐ}]s^k_ℐ = - x^k_ℐ + [G ∘ F](x^k)_ℐ - [τ A_{ℐ × 𝒜}]s^k_𝒜
+/// $$
+/// and
+/// $$
+///     s^k_𝒜 = - x^k_𝒜 + [G ∘ F](x^k)_𝒜.
+/// $$
+/// Thus on $𝒜$ the update $[x^k + s^k]_𝒜 = [G ∘ F](x^k)_𝒜$ is just the forward-backward update.
+/// </p>
+///
+/// <p>
+/// We need to detect stopping by a subdifferential and return $x$ satisfying $x ≥ 0$,
+/// which is in general not true for the SSN. We therefore use that $[G ∘ F](x^k)$ is a valid
+/// forward-backward step.
+/// </p>
+#[replace_float_literals(F::cast_from(literal).to_nalgebra_mixed())]
+pub fn quadratic_nonneg_ssn<F, I>(
+    mA : &DMatrix<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    //c_ : F,
+    λ_ : F,
+    x : &mut DVector<F::MixedType>,
+    τ_ : F,
+    iterator : I
+) -> Result<usize, NumericalError>
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    let n = x.len();
+    let mut xprev = x.clone();
+    let mut v = DVector::zeros(n);
+    //let c = c_.to_nalgebra_mixed();
+    let λ = λ_.to_nalgebra_mixed();
+    let τ = τ_.to_nalgebra_mixed();
+    let τλ = τ * λ;
+    let mut inact : Vec<bool> = Vec::from_iter(std::iter::repeat(false).take(n));
+    let mut s = DVector::zeros(0);
+    let mut decomp = nalgebra::linalg::LU::new(DMatrix::zeros(0, 0));
+    let mut iters = 0;
+
+    let res = iterator.iterate_fallible(|state| {
+        // 1. Perform delayed SSN-update based on previously computed step on active
+        // coordinates. The step is delayed to the beginning of the loop because
+        // the SSN step may violate constraints, so we arrange `x` to contain at the
+        // end of the loop the valid FB step that forms part of the SSN step
+        let mut si = s.iter();
+        for (&ast, x_i, xprev_i) in izip!(inact.iter(), x.iter_mut(), xprev.iter_mut()) {
+            if ast {
+                *x_i = *xprev_i + *si.next().unwrap()
+            }
+            *xprev_i = *x_i;
+        }
+
+        //xprev.copy_from(x);
+
+        // 2. Calculate FB step.
+        // 2.1. Replace `x` with $x⁻ - τ[Ax⁻-g]= [x⁻ + τg]- τAx⁻$
+        x.axpy(τ, g, 1.0);               // x = x⁻ + τ*g
+        x.sygemv(-τ, mA, &xprev, 1.0);   // x = [x⁻ + τg]- τAx⁻
+        // 2.2. Calculate prox and set of active coordinates at the same time
+        let mut act_changed = false;
+        let mut n_inact = 0;
+        for (x_i, ast) in izip!(x.iter_mut(), inact.iter_mut()) {
+            if *x_i > τλ {
+                *x_i -= τλ;
+                if !*ast {
+                    act_changed = true;
+                    *ast = true;
+                }
+                n_inact += 1;
+            } else {
+                *x_i = 0.0;
+                if *ast {
+                    act_changed = true;
+                    *ast = false;
+                }
+            }
+        }
+
+        // *** x now contains forward-backward step ***
+
+        // 3. Solve SSN step `s`.
+        // 3.1 Construct [τ A_{ℐ × ℐ}] if the set of inactive coordinates has changed.
+        if act_changed {
+            let decomp_iter = inact.iter().cartesian_product(inact.iter()).zip(mA.iter());
+            let decomp_constr = decomp_iter.filter_map(|((&i_inact, &j_inact), &mAij)| {
+                //(i_inact && j_inact).then_some(mAij * τ)
+                (i_inact && j_inact).then_some(mAij) // 🔺 below matches removal of τ
+            });
+            let mat = DMatrix::from_iterator(n_inact, n_inact, decomp_constr);
+            decomp = nalgebra::linalg::LU::new(mat);
+        }
+
+        // 3.2 Solve `s` = $s_ℐ^k$ from
+        // $[τ A_{ℐ × ℐ}]s^k_ℐ = - x^k_ℐ + [G ∘ F](x^k)_ℐ - [τ A_{ℐ × 𝒜}]s^k_𝒜$.
+        // With current variable setup we have $[G ∘ F](x^k) = $`x` and $x^k = x⁻$ = `xprev`,
+        // so the system to solve is $[τ A_{ℐ × ℐ}]s^k_ℐ = (x-x⁻)_ℐ  - [τ A_{ℐ × 𝒜}](x-x⁻)_𝒜$
+        // The matrix $[τ A_{ℐ × ℐ}]$ we have already LU-decomposed above into `decomp`.
+        s = if n_inact > 0 {
+            // 3.2.1 Construct `rhs` = $(x-x⁻)_ℐ  - [τ A_{ℐ × 𝒜}](x-x⁻)_𝒜$
+            let inactfilt = inact.iter().copied();
+            let rhs_iter = izip!(x.iter(), xprev.iter(), mA.row_iter()).filter_zip(inactfilt);
+            let rhs_constr = rhs_iter.map(|(&x_i, &xprev_i, mAi)| {
+                // Calculate row i of [τ A_{ℐ × 𝒜}]s^k_𝒜 = [τ A_{ℐ × 𝒜}](x-xprev)_𝒜
+                let actfilt = inact.iter().copied().map(std::ops::Not::not);
+                let actit = izip!(x.iter(), xprev.iter(), mAi.iter()).filter_zip(actfilt);
+                let actpart = actit.map(|(&x_j, &xprev_j, &mAij)| {
+                    mAij * (x_j - xprev_j)
+                }).sum();
+                // Subtract it from [x-prev]_i
+                //x_i - xprev_i - τ * actpart
+                (x_i - xprev_i) / τ - actpart // 🔺 change matches removal of τ above
+            });
+            let mut rhs = DVector::from_iterator(n_inact, rhs_constr);
+            assert_eq!(rhs.len(), n_inact);
+            // Solve the system
+            if !decomp.solve_mut(&mut rhs) {
+                return Step::Failure(NumericalError(
+                    "Failed to solve linear system for subproblem SSN."
+                ))
+            }
+            rhs
+        } else {
+            DVector::zeros(0)
+        };
+
+        iters += 1;
+
+        // 4. Report solution quality
+        state.if_verbose(|| {
+        // Calculate subdifferential at the FB step `x` that hasn't yet had `s` added.
+            // The subdifferential of the objective is $Ax - g + λ + ∂ δ_{≥ 0}(x)$.
+            // We return the minimal ∞-norm over all subderivatives.
+            v.copy_from(g);                  // d = g
+            mA.gemv(&mut v, 1.0, x, -1.0);   // d =  Ax - g
+            let mut val = 0.0;
+            for (&v_i, &x_i) in izip!(v.iter(), x.iter()) {
+                let d = v_i + λ;
+                if x_i > 0.0 || d < 0.0 {
+                    val = val.max(d.abs());
+                }
+            }
+            F::from_nalgebra_mixed(val)
+        })
+    });
+
+    res.map(|_| iters)
+}
+
+/// This function applies an iterative method for the solution of the quadratic non-negativity
+/// constrained problem
+/// <div>$$
+///     \min_{x ∈ ℝ^n} \frac{1}{2} x^⊤Ax - g^⊤ x + λ{\vec 1}^⊤ x + c + δ_{≥ 0}(x).
+/// $$</div>
+/// Semismooth Newton or forward-backward are supported based on the setting in `method`.
+/// The parameter `mA` is matrix $A$, and `g` and `λ` are as in the mathematical formulation.
+/// The constant $c$ does not need to be provided. The step length parameter is `τ` while
+/// `x` contains the initial iterate and on return the final one. The `iterator` controls
+/// stopping. The “verbose” value output by all methods is the $ℓ\_∞$ distance of some
+/// subdifferential of the objective to zero.
+///
+/// Interior point methods could offer a further alternative, for example, the one in:
+///
+///  * Valkonen T. - _A method for weighted projections to the positive definite
+///    cone_, <https://doi.org/10.1080/02331934.2014.929680>.
+///
+/// This function returns the number of iterations taken.
+pub fn quadratic_nonneg<F, I>(
+    method : InnerMethod,
+    mA : &DMatrix<F::MixedType>,
+    g : &DVector<F::MixedType>,
+    //c_ : F,
+    λ : F,
+    x : &mut DVector<F::MixedType>,
+    τ : F,
+    iterator : I
+) -> usize
+where F : Float + ToNalgebraRealField,
+      I : AlgIteratorFactory<F>
+{
+    
+    match method {
+        InnerMethod::FB =>
+            quadratic_nonneg_fb(mA, g, λ, x, τ, iterator),
+        InnerMethod::SSN =>
+            quadratic_nonneg_ssn(mA, g, λ, x, τ, iterator).unwrap_or_else(|e| {
+                println!("{}", format!("{e}. Using FB fallback.").red());
+                let ins = InnerSettings::<F>::default();
+                quadratic_nonneg_fb(mA, g, λ, x, τ, ins.iterator_options)
+            })
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/tolerance.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,89 @@
+//! Tolerance update schemes for subproblem solution quality
+use serde::{Serialize, Deserialize};
+use numeric_literals::replace_float_literals;
+use crate::types::*;
+
+/// Update style for optimality system solution tolerance
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[allow(dead_code)]
+pub enum Tolerance<F : Float> {
+    /// $ε_k = εθ^k$ for the `factor` $θ$ and initial tolerance $ε$.
+    Exponential{ factor : F, initial : F },
+    /// $ε_k = ε/(1+θk)^p$ for the `factor` $θ$, `exponent` $p$, and initial tolerance $ε$.
+    Power{ factor : F, exponent : F, initial : F},
+    /// $ε_k = εθ^{⌊k^p⌋}$ for the `factor` $θ$, initial tolerance $ε$, and exponent $p$.
+    SlowExp{ factor : F, exponent : F, initial : F }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for Tolerance<F> {
+    fn default() -> Self {
+        Tolerance::Power {
+            initial : 0.5,
+            factor : 0.2,
+            exponent : 1.4  // 1.5 works but is already slower in practice on our examples.
+        }
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Tolerance<F> {
+    /// Get the initial tolerance
+    pub fn initial(&self) -> F {
+        match self {
+            &Tolerance::Exponential { initial, .. } => initial,
+            &Tolerance::Power { initial, .. } => initial,
+            &Tolerance::SlowExp { initial, .. } => initial,
+        }
+    }
+
+    /// Get mutable reference to the initial tolerance
+    fn initial_mut(&mut self) -> &mut F {
+        match self {
+            Tolerance::Exponential { ref mut initial, .. } => initial,
+            Tolerance::Power { ref mut initial, .. } => initial,
+            Tolerance::SlowExp { ref mut initial, .. } => initial,
+        }
+    }
+
+    /// Set the initial tolerance
+    pub fn set_initial(&mut self, set : F)  {
+        *self.initial_mut() = set;
+    }
+
+    /// Update `tolerance` for iteration `iter`.
+    /// `tolerance` may or may not be used depending on the specific
+    /// update scheme.
+    pub fn update(&self, tolerance : F, iter : usize) -> F {
+        match self {
+            &Tolerance::Exponential { factor, .. } => {
+                tolerance * factor
+            },
+            &Tolerance::Power { factor, exponent, initial } => {
+                initial /(1.0 + factor * F::cast_from(iter)).powf(exponent)
+            },
+            &Tolerance::SlowExp { factor, exponent, initial } => {
+                // let m = (speed
+                //          * factor.powi(-(iter as i32))
+                //          * F::cast_from(iter).powf(-exponent)
+                //         ).floor().as_();
+                let m = F::cast_from(iter).powf(exponent).floor().as_();
+                initial * factor.powi(m)
+            },
+        }
+    }
+}
+
+impl<F: Float> std::ops::MulAssign<F> for Tolerance<F> {
+    fn mul_assign(&mut self, factor : F) {
+        *self.initial_mut() *= factor;
+    }
+}
+
+impl<F: Float> std::ops::Mul<F> for Tolerance<F> {
+    type Output = Tolerance<F>;
+    fn mul(mut self, factor : F) -> Self::Output {
+        *self.initial_mut() *= factor;
+        self
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/types.rs	Thu Dec 01 23:07:35 2022 +0200
@@ -0,0 +1,105 @@
+//! Type definitions and re-exports
+
+use numeric_literals::replace_float_literals;
+
+use colored::ColoredString;
+use serde::{Serialize, Deserialize};
+use clap::ValueEnum;
+use alg_tools::iterate::LogRepr;
+use alg_tools::euclidean::Euclidean;
+use alg_tools::norms::{Norm, L1};
+
+pub use alg_tools::types::*;
+pub use alg_tools::loc::Loc;
+pub use alg_tools::sets::Cube;
+
+use crate::measures::DiscreteMeasure;
+
+/// [`Float`] with extra display and string conversion traits such that [`clap`] doesn't choke up.
+pub trait ClapFloat : Float
+                      + std::str::FromStr<Err=std::num::ParseFloatError>
+                      + std::fmt::Display {}
+impl ClapFloat for f32 {}
+impl ClapFloat for f64 {}
+
+/// Structure for storing iteration statistics
+#[derive(Debug, Clone, Serialize)]
+pub struct IterInfo<F : Float, const N : usize> {
+    /// Function value
+    pub value : F,
+    /// Number of spikes
+    pub n_spikes : usize,
+    /// Number of iterations this statistic covers
+    pub this_iters : usize,
+    /// Number of spikes removed by merging since last IterInfo statistic
+    pub merged : usize,
+    /// Number of spikes removed by pruning since last IterInfo statistic
+    pub pruned : usize,
+    /// Number of inner iterations since last IterInfo statistic
+    pub inner_iters : usize,
+    /// Current tolerance
+    pub ε : F,
+    /// Strict tolerance update if one was used
+    pub maybe_ε1 : Option<F>,
+    /// Solve the finite-dimensional problem for this measure to get the optimal `value`.
+    pub postprocessing : Option<DiscreteMeasure<Loc<F, N>, F>>,
+}
+
+impl<F, const N : usize> LogRepr for IterInfo<F, N> where F : LogRepr + Float {
+    fn logrepr(&self) -> ColoredString {
+        let eqsign = match self.maybe_ε1 {
+            Some(ε1) if ε1 < self.ε => '≛',
+            _ => '=',
+        };
+        format!("{}\t| N = {}, ε {} {:.8}, inner_iters_mean = {}, merged+pruned_mean = {}+{}",
+                self.value.logrepr(),
+                self.n_spikes,
+                eqsign,
+                self.ε,
+                self.inner_iters as float / self.this_iters as float,
+                self.merged as float / self.this_iters as float,
+                self.pruned as float / self.this_iters as float,
+        ).as_str().into()
+    }
+}
+
+/// Branch and bound refinement settings
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]
+#[serde(default)]
+pub struct RefinementSettings<F : Float> {
+    /// Function value tolerance multiplier for bisection tree refinement in
+    /// [`alg_tools::bisection_tree::BTFN::maximise`] and related functions.
+    pub tolerance_mult : F,
+    /// Maximum branch and bound steps
+    pub max_steps : usize,
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> Default for RefinementSettings<F> {
+    fn default() -> Self {
+        RefinementSettings {
+            tolerance_mult : 0.1,
+            max_steps : 50000,
+        }
+    }
+}
+
+/// Data term type
+#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug, ValueEnum)]
+pub enum DataTerm {
+    /// $\\|z\\|\_2^2/2$
+    L2Squared,
+    /// $\\|z\\|\_1$
+    L1,
+}
+
+impl DataTerm {
+    /// Calculate the data term value at residual $z=Aμ - b$.
+    pub fn value_at_residual<F : Float, E : Euclidean<F> + Norm<F, L1>>(&self, z : E) -> F {
+        match self {
+            Self::L2Squared => z.norm2_squared_div2(),
+            Self::L1 => z.norm(L1),
+        }
+    }
+}
+

mercurial