Initialise new clean repository

author      Tuomo Valkonen <tuomov@iki.fi>
date        Sat, 22 Oct 2022 13:47:15 +0300
changeset 0 9f27689eb130
child 1     df3901ec2f5d

Cargo.lock
Cargo.toml
LICENSE
README.md
misc/doc_alias.sh
misc/katex-header.html
rust-toolchain.toml
src/bisection_tree.rs
src/bisection_tree/aggregator.rs
src/bisection_tree/bt.rs
src/bisection_tree/btfn.rs
src/bisection_tree/either.rs
src/bisection_tree/refine.rs
src/bisection_tree/support.rs
src/bisection_tree/supportid.rs
src/coefficients.rs
src/error.rs
src/fe_model.rs
src/fe_model/base.rs
src/fe_model/p2_local_model.rs
src/iter.rs
src/iterate.rs
src/lib.rs
src/lingrid.rs
src/linops.rs
src/linsolve.rs
src/loc.rs
src/logger.rs
src/mapping.rs
src/maputil.rs
src/nalgebra_support.rs
src/nanleast.rs
src/norms.rs
src/sets.rs
src/sets/cube.rs
src/tabledump.rs
src/tuple.rs
src/types.rs
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Cargo.lock	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,444 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "alg_tools"
+version = "0.1.0"
+dependencies = [
+ "colored",
+ "cpu-time",
+ "csv",
+ "itertools",
+ "nalgebra",
+ "num",
+ "num-traits",
+ "numeric_literals",
+ "serde",
+ "serde_json",
+ "trait-set",
+]
+
+[[package]]
+name = "approx"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bstr"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
+dependencies = [
+ "lazy_static",
+ "memchr",
+ "regex-automata",
+ "serde",
+]
+
+[[package]]
+name = "bytemuck"
+version = "1.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da"
+
+[[package]]
+name = "colored"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd"
+dependencies = [
+ "atty",
+ "lazy_static",
+ "winapi",
+]
+
+[[package]]
+name = "cpu-time"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9e393a7668fe1fad3075085b86c781883000b4ede868f43627b34a87c8b7ded"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "csv"
+version = "1.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
+dependencies = [
+ "bstr",
+ "csv-core",
+ "itoa 0.4.8",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "either"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
+
+[[package]]
+name = "itoa"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.134"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "329c933548736bc49fd575ee68c89e8be4d260064184389a5b77517cddd99ffb"
+
+[[package]]
+name = "matrixmultiply"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84"
+dependencies = [
+ "rawpointer",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "nalgebra"
+version = "0.31.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9e0a04ce089f9401aac565c740ed30c46291260f27d4911fdbaa6ca65fa3044"
+dependencies = [
+ "approx",
+ "matrixmultiply",
+ "nalgebra-macros",
+ "num-complex",
+ "num-rational",
+ "num-traits",
+ "simba",
+ "typenum",
+]
+
+[[package]]
+name = "nalgebra-macros"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "num"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606"
+dependencies = [
+ "num-bigint",
+ "num-complex",
+ "num-integer",
+ "num-iter",
+ "num-rational",
+ "num-traits",
+]
+
+[[package]]
+name = "num-bigint"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-complex"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
+dependencies = [
+ "autocfg",
+ "num-traits",
+]
+
+[[package]]
+name = "num-iter"
+version = "0.1.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-rational"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0"
+dependencies = [
+ "autocfg",
+ "num-bigint",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "numeric_literals"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "095aa67b0b9f2081746998f4f17106bdb51d56dc8c211afca5531b92b83bf98a"
+dependencies = [
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "paste"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rawpointer"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
+
+[[package]]
+name = "regex-automata"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+
+[[package]]
+name = "ryu"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
+
+[[package]]
+name = "safe_arch"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "794821e4ccb0d9f979512f9c1973480123f9bd62a90d74ab0f9426fcf8f4a529"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.145"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.145"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.85"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44"
+dependencies = [
+ "itoa 1.0.3",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "simba"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c48e45e5961033db030b56ad67aef22e9c908c493a6e8348c0a0f6b93433cd77"
+dependencies = [
+ "approx",
+ "num-complex",
+ "num-traits",
+ "paste",
+ "wide",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.101"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e90cde112c4b9690b8cbe810cba9ddd8bc1d7472e2cae317b69e9438c1cba7d2"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "trait-set"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "875c4c873cc824e362fa9a9419ffa59807244824275a44ad06fec9684fff08f2"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "typenum"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd"
+
+[[package]]
+name = "wide"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3aba2d1dac31ac7cae82847ac5b8be822aee8f99a4e100f279605016b185c5f"
+dependencies = [
+ "bytemuck",
+ "safe_arch",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Cargo.toml	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,25 @@
+[package]
+name = "alg_tools"
+version = "0.1.0"
+edition = "2021"
+authors = ["Tuomo Valkonen <tuomov@iki.fi>"]
+
+[dependencies]
+serde = { version = "1.0", features = ["derive"] }
+csv = "~1.1.6"
+nalgebra = "~0.31.0"
+num-traits = { version = "~0.2.14", features = ["std"] }
+colored = "~2.0.0"
+trait-set = "~0.2.0"
+num = "~0.4.0"
+itertools = "~0.10.3"
+numeric_literals = "~0.2.0"
+cpu-time = "~1.0.0"
+serde_json = "~1.0.85"
+
+[package.metadata.docs.rs]
+rustdoc-args = [ "--html-in-header", "katex-header.html" ]
+
+
+[profile.release]
+debug = true
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LICENSE	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,43 @@
+
+# Anti-abuse license
+
+## Rationale
+
+The purpose of this license is to give end-users and developers maximal
+freedom to use this software while preventing the authors from being
+abused by powerful middle-men that repackage software for convenient
+installation by users. Such potentially abusive middle-men include in
+particular Linux distributions and similar centralising software
+distribution schemes developed for other operating systems.
+The ethos of this license is *bollocks to copyright and distributions!*
+
+## Rules
+
+This software is distributed without any warranty whatsoever.
+
+If you redistribute modified versions of this software to the public,
+you must clearly mark them as modified.
+
+If you redistribute this software to the public as part of a large
+collection of software with the purpose of providing end-users with
+a convenient installation method, you must do one of the following:
+
+(a) Always redistribute the **unmodified** and **latest** version
+provided by the authors. If the lead author releases a new version (on a
+specific branch, such as 'stable' or 'development'), you must promptly
+make that new version the default version offered to your users (on
+that specific branch).
+
+(b) Rename the software, and make it obvious that your modified or obsolete
+software is in no way connected to the authors of the original software.
+The users of your version should under no circumstances be under the 
+illusion that they can contact the lead author or any of the authors
+of the original software if they have any complaints or queries.
+
+(c) Do not in any way directly expose this software to your users. 
+
+Otherwise, do whatever you want with this software. In particular, you may
+freely use the software as part of other projects, and redistribute to
+the public archival copies of the software (as long as your archive cannot
+be considered a “convenient installation method” that will be governed by
+the rules above).
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/README.md	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,24 @@
+
+# AlgTools
+
+Author: Tuomo Valkonen <tuomov@iki.fi>
+
+**This crate is very much incomplete.**
+
+This crate contains some general utility routines and tools for implementing
+iterative algorithms and numerical computing in Rust. Earlier versions of the
+package were written for Julia; they are no longer maintained. Included are:
+
+  * [Linear operator](crate::linops) abstraction, with support for matrices via `nalgebra`
+    or `ndarray` if this crate is compiled with the relevant features.
+  * [Iteration mechanism](crate::iterate) for generically implementing intermittent verbosity and stopping rules in algorithms.
+  * The iterators can employ a [logger](crate::logger) to store the intermittent results
+    (e.g. function values) in memory to later write into a `csv`-file.
+  * [Bisection trees](crate::bisection_tree) for branch-and-bound optimisation.
+
+<!--The code is used by [ImageTools][] and both, for example, by :
+-->
+
+## Installation
+
+???
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/doc_alias.sh	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,5 @@
+# Source this file. Use `cargo rustdoc` or `cargo d --no-deps` to build the documentation.
+echo 'Creating cargo-d alias'
+alias cargo-d='RUSTDOCFLAGS="--html-in-header misc/katex-header.html" BROWSER=/Applications/Firefox.app/Contents/MacOS/firefox-bin cargo d --no-deps'
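+# Usage, after sourcing this file in the current shell:
+#   cargo-d        # build the documentation with the KaTeX header injected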
+
+ 
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/katex-header.html	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,15 @@
+<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/katex.min.css" integrity="sha384-Xi8rHCmBmhbuyyhbI88391ZKP2dmfnOl4rT9ZfRI7mLTdk1wblIUnrIq35nqwEvC" crossorigin="anonymous">
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/katex.min.js" integrity="sha384-X/XCfMm41VSsqRNQgDerQczD69XqmjOOOwYQvr/uuC+j4OPoNhVgjdGFwhvN02Ja" crossorigin="anonymous"></script>
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/contrib/auto-render.min.js" integrity="sha384-+XBljXPPiv+OzfbB3cVmLHf4hdUFHlWNZN5spNQ7rmHTXpd7WvJum6fIACpNNfIR" crossorigin="anonymous" onload="renderMathInElement(document.body);"></script>
+<script>
+    document.addEventListener("DOMContentLoaded", function() {
+        renderMathInElement(document.body, {
+            delimiters: [
+                {left: "$$", right: "$$", display: true},
+                {left: "\\(", right: "\\)", display: false},
+                {left: "$", right: "$", display: false},
+                {left: "\\[", right: "\\]", display: true}
+            ]
+        });
+    });
+</script>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust-toolchain.toml	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,2 @@
+[toolchain]
+channel = "nightly"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/bisection_tree.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,18 @@
+//! Geometrical bisection trees.
+
+mod supportid;
+pub use supportid::*;
+mod aggregator;
+pub use aggregator::*;
+mod either;
+pub use either::*;
+mod support;
+pub use support::*;
+mod bt;
+pub use bt::*;
+mod refine;
+pub use refine::*;
+mod btfn;
+pub use btfn::*;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/bisection_tree/aggregator.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,154 @@
+use crate::types::*;
+use crate::sets::Set;
+
+/// Trait for aggregating information about a branch of a [`super::BT`] bisection tree.
+pub trait Aggregator : Clone + std::fmt::Debug {
+    /// Aggregate new leaf data into the current state.
+    fn aggregate<I>(&mut self, aggregates : I)
+    where I : Iterator<Item=Self>;
+
+    /// Summarise several other aggregators, resetting the current state.
+    fn summarise<'a, I>(&'a mut self, aggregates : I)
+    where I : Iterator<Item=&'a Self>;
+
+    /// Initialisation of aggregate data for an empty lower node.
+    fn new() -> Self;
+}
+
+/// An [`Aggregator`] that doesn't aggregate anything.
+#[derive(Clone,Debug)]
+pub struct NullAggregator;
+
+impl Aggregator for NullAggregator {
+    // TODO: these should probably also take a Cube as an argument to
+    // allow integration etc.
+    fn aggregate<I>(&mut self, _aggregates : I)
+    where I : Iterator<Item=Self> {}
+
+    fn summarise<'a, I>(&'a mut self, _aggregates : I)
+    where I : Iterator<Item=&'a Self> {}
+
+    fn new() -> Self { NullAggregator }
+}
+
+/// Upper and lower bounds on an `F`-valued function.
+/// Returned by [`super::support::Bounded::bounds`].
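+///
+/// For illustration (a sketch, assuming `bisection_tree` is publicly re-exported from
+/// the crate root):
+///
+/// ```ignore
+/// use alg_tools::bisection_tree::Bounds;
+///
+/// let b1 = Bounds(-1.0, 2.0);
+/// let b2 = Bounds(0.5, 3.0);
+/// assert_eq!(b1.uniform(), 2.0);                   // max(|-1.0|, |2.0|)
+/// let c = b1.common(&b2);                          // smallest bound containing both
+/// assert_eq!((c.lower(), c.upper()), (-1.0, 3.0));
+/// ```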
+#[derive(Copy,Clone,Debug)]
+pub struct Bounds<F>(pub F, pub F);
+
+impl<F : Num> Bounds<F> {
+    /// Returns the lower bound
+    #[inline]
+    pub fn lower(&self) -> F { self.0 }
+
+    /// Returns the upper bound
+    #[inline]
+    pub fn upper(&self) -> F { self.1 }
+}
+
+impl<F : Float> Bounds<F> {
+    /// Returns a uniform bound on the function (maximum over the absolute values of the
+    /// upper and lower bound).
+    #[inline]
+    pub fn uniform(&self) -> F {
+        let &Bounds(lower, upper) = self;
+        lower.abs().max(upper.abs())
+    }
+}
+
+impl<'a, F : Float> std::ops::Add<Self> for Bounds<F> {
+    type Output = Self;
+    #[inline]
+    fn add(self, Bounds(l2, u2) : Self) -> Self::Output {
+        let Bounds(l1, u1) = self;
+        debug_assert!(l1 <= u1 && l2 <= u2);
+        Bounds(l1 + l2, u1 + u2)
+    }
+}
+
+impl<'a, F : Float> std::ops::Mul<Self> for Bounds<F> {
+    type Output = Self;
+    #[inline]
+    fn mul(self, Bounds(l2, u2) : Self) -> Self::Output {
+        let Bounds(l1, u1) = self;
+        debug_assert!(l1 <= u1 && l2 <= u2);
+        // With signed bounds the extremes of the product can come from any of the
+        // four pairwise products, so take the min/max over all of them.
+        let (a, b, c, d) = (l1 * l2, l1 * u2, u1 * l2, u1 * u2);
+        Bounds(a.min(b).min(c.min(d)), a.max(b).max(c.max(d)))
+    }
+}
+
+impl<F : Float> std::iter::Product for Bounds<F> {
+    #[inline]
+    fn product<I>(mut iter: I) -> Self
+    where I: Iterator<Item = Self> {
+        match iter.next() {
+            None => Bounds(F::ZERO, F::ZERO),
+            Some(init) => iter.fold(init, |a, b| a*b)
+        }
+    }
+}
+
+impl<F : Float> Set<F> for Bounds<F> {
+    fn contains(&self, item : &F) -> bool {
+        let &Bounds(l, u) = self;
+        debug_assert!(l <= u);
+        l <= *item && *item <= u
+    }
+}
+
+impl<F : Float> Bounds<F> {
+    /// Calculate the smallest common bound containing both argument bounds
+    /// (pairwise minimum of the lower bounds and maximum of the upper bounds).
+    #[inline]
+    pub fn common(&self, &Bounds(l2, u2) : &Self) -> Self {
+        let &Bounds(l1, u1) = self;
+        debug_assert!(l1 <= u1 && l2 <= u2);
+        Bounds(l1.min(l2), u1.max(u2))
+    }
+
+    #[inline]
+    pub fn superset(&self, &Bounds(l2, u2) : &Self) -> bool {
+        let &Bounds(l1, u1) = self;
+        debug_assert!(l1 <= u1 && l2 <= u2);
+        l1 <= l2 && u2 <= u1
+    }
+
+    /// Return the greatest bound contained by both argument bounds, if one exists.
+    #[inline]
+    pub fn glb(&self, &Bounds(l2, u2) : &Self) -> Option<Self> {
+        let &Bounds(l1, u1) = self;
+        debug_assert!(l1 <= u1 && l2 <= u2);
+        let l = l1.max(l2);
+        let u = u1.min(u2);
+        if l < u {
+            Some(Bounds(l, u))
+        } else {
+            None
+        }
+    }
+}
+
+impl<F : Float> Aggregator for Bounds<F> {
+    #[inline]
+    fn aggregate<I>(&mut self, aggregates : I)
+    where I : Iterator<Item=Self> {
+        *self = aggregates.fold(*self, |a, b| a + b);
+    }
+
+    #[inline]
+    fn summarise<'a, I>(&'a mut self, mut aggregates : I)
+    where I : Iterator<Item=&'a Self> {
+        *self = match aggregates.next() {
+            None => Bounds(F::ZERO, F::ZERO), // No parts in this cube; the function is zero
+            Some(&bounds) => {
+                aggregates.fold(bounds, |a, b| a.common(b))
+            }
+        }
+    }
+
+    #[inline]
+    fn new() -> Self {
+        Bounds(F::ZERO, F::ZERO)
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/bisection_tree/bt.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,588 @@
+
+use std::slice::{Iter,IterMut};
+use std::iter::once;
+use std::rc::Rc;
+use serde::{Serialize, Deserialize};
+pub use nalgebra::Const;
+use itertools::izip;
+
+use crate::iter::{MapF,Mappable};
+use crate::types::{Float, Num};
+use crate::coefficients::pow;
+use crate::maputil::{
+    array_init,
+    map2,
+    map2_indexed,
+    collect_into_array_unchecked
+};
+pub use crate::sets::Cube;
+pub use crate::loc::Loc;
+use super::support::*;
+use super::aggregator::*;
+
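+/// Contents of a [`Node`]: either uninitialised, a leaf holding data of type `D`,
+/// or a link to further [`Branches`].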
+#[derive(Clone,Debug)]
+pub enum NodeOption<F : Num, D, A : Aggregator, const N : usize, const P : usize> {
+    // TODO: Could optimise Uninitialised away by simply treating a Leaf with an empty
+    // Vec as something that can still be replaced with Branches.
+    Uninitialised,
+    // TODO: replace with QuickVec: fast and without allocations for single elements.
+    Leaf(Rc<Vec<D>>),
+    Branches(Rc<Branches<F, D, A, N, P>>),
+}
+
+/// Node of a [`BT`] bisection tree.
+#[derive(Clone,Debug)]
+pub struct Node<F : Num, D, A : Aggregator, const N : usize, const P : usize> {
+    pub(super) data : NodeOption<F, D, A, N, P>,
+    /// Aggregator for `data`.
+    pub(super) aggregator : A,
+}
+
+/// Branch information of a [`Node`] of a [`BT`] bisection tree.
+#[derive(Clone,Debug)]
+pub struct Branches<F : Num, D, A : Aggregator, const N : usize, const P : usize> {
+    /// Point for subdivision of the (unstored) [`Cube`] corresponding to the node.
+    pub(super) branch_at : Loc<F, N>,
+    /// Subnodes
+    pub(super) nodes : [Node<F, D, A, N, P>; P],
+}
+
+/// Dirty workaround to broken Rust drop, see [https://github.com/rust-lang/rust/issues/58068]().
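+/// (The default recursive drop would recurse through the entire tree; the implementation
+/// below instead tears the branches down iteratively through an explicit queue.)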
+impl<F : Num, D, A : Aggregator, const N : usize, const P : usize>
+Drop for Node<F, D, A, N, P> {
+    fn drop(&mut self) {
+        use NodeOption as NO;
+
+        let process = |brc : Rc<Branches<F, D, A, N, P>>,
+                       to_drop : &mut Vec<Rc<Branches<F, D, A, N, P>>>| {
+            // We only drop Branches if we have the only strong reference.
+            Rc::try_unwrap(brc).ok().map(|branches| branches.nodes.map(|mut node| {
+                if let NO::Branches(brc2) = std::mem::replace(&mut node.data, NO::Uninitialised) {
+                    to_drop.push(brc2)
+                }
+            }));
+        };
+
+        // We mark Self as NodeOption::Uninitialised, extracting the real contents.
+        // If it contains child branches, we need to process them.
+        if let NO::Branches(brc1) = std::mem::replace(&mut self.data, NO::Uninitialised) {
+            // We store a queue of Rc<Branches> to drop into a vector
+            let mut to_drop = Vec::new();
+            process(brc1, &mut to_drop);
+
+            // While there are any Branches in the drop queue vector, we continue the process,
+            // pushing all internal branching nodes into the queue.
+            while let Some(brc) = to_drop.pop() {
+                process(brc, &mut to_drop)
+            }
+        }
+    }
+}
+
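+/// Tree depth specification: [`Self::lower`] returns the next lower depth, or `None`
+/// once the bottom has been reached, while [`Self::lower_or`] saturates at the bottom.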
+pub trait Depth : 'static + Copy + std::fmt::Debug {
+    type Lower : Depth;
+    fn lower(&self) -> Option<Self::Lower>;
+    fn lower_or(&self) -> Self::Lower;
+}
+
+#[derive(Copy,Clone,Debug,Serialize,Deserialize)]
+pub struct DynamicDepth(pub u8);
+impl Depth for DynamicDepth {
+    type Lower = Self;
+    #[inline]
+    fn lower(&self) -> Option<Self> {
+        if self.0>0 {
+            Some(DynamicDepth(self.0-1))
+        } else {
+            None
+        }
+    }
+    #[inline]
+    fn lower_or(&self) -> Self {
+        DynamicDepth(if self.0>0 { self.0 - 1 } else { 0 })
+    }
+}
+
+impl Depth for Const<0> {
+    type Lower = Self;
+    fn lower(&self) -> Option<Self::Lower> { None }
+    fn lower_or(&self) -> Self::Lower { Const }
+}
+
+macro_rules! impl_constdepth {
+    ($($n:literal)*) => { $(
+        impl Depth for Const<$n> {
+            type Lower = Const<{$n-1}>;
+            fn lower(&self) -> Option<Self::Lower> { Some(Const) }
+            fn lower_or(&self) -> Self::Lower { Const }
+        }
+    )* };
+}
+impl_constdepth!(1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32);
+
+
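+/// Marker trait: `Const<P>` implements `BranchCount<N>` exactly when `P = 2^N`,
+/// the number of child nodes of an `N`-dimensional bisection.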
+pub trait BranchCount<const N : usize> {}
+macro_rules! impl_branchcount {
+    ($($n:literal)*) => { $(
+        impl BranchCount<$n> for Const<{pow(2, $n)}>{}
+    )* }
+}
+impl_branchcount!(1 2 3 4 5 6 7 8);
+
+impl<F : Float, D, A, const N : usize, const P : usize> Branches<F,D,A,N,P>
+where Const<P> : BranchCount<N>,
+      A : Aggregator
+{
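+    /// Compute the index of the child node containing the point `x`: bit `i` of the
+    /// index is set exactly when `x[i]` lies above the branching point in dimension `i`.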
+    fn get_node_index(&self, x : &Loc<F, N>) -> usize {
+        izip!(0..P, x.iter(), self.branch_at.iter()).map(|(i, x_i, branch_i)|
+            if x_i > branch_i { 1<<i } else { 0 }
+        ).sum()
+    }
+
+    #[inline]
+    pub fn get_node(&self, x : &Loc<F,N>) -> &Node<F,D,A,N,P> {
+         &self.nodes[self.get_node_index(x)]
+    }
+}
+
+
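+/// Iterator over the data in a single leaf of a [`BT`] bisection tree,
+/// as returned by [`BTImpl::iter_at`].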
+pub struct BTIter<'a, D> {
+    iter : Iter<'a, D>,
+}
+
+pub struct SubcubeIter<'b, F : Float, const N : usize, const P : usize> {
+    domain : &'b Cube<F, N>,
+    branch_at : Loc<F, N>,
+    index : usize,
+}
+
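+/// Compute subcube `i` of `domain` subdivided at `branch_at`: in each dimension `j`,
+/// take the upper half `[branch_at[j], end]` if bit `j` of `i` is set, and the lower
+/// half `[start, branch_at[j]]` otherwise.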
+#[inline]
+fn get_subcube<F : Float, const N : usize>(branch_at : &Loc<F, N>, domain : &Cube<F, N>, i : usize) -> Cube<F, N> {
+    map2_indexed(branch_at, domain, move |j, &branch, &[start, end]| {
+        if i & (1 << j) != 0 {
+            [branch, end]
+        } else {
+            [start, branch]
+        }
+    }).into()
+}
+
+impl<'a, 'b, F : Float, const N : usize, const P : usize> Iterator
+for SubcubeIter<'b, F, N, P> {
+    type Item = Cube<F, N>;
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.index < P {
+            let i = self.index;
+            self.index += 1;
+            Some(get_subcube(&self.branch_at, self.domain, i))
+        } else {
+            None
+        }
+    }
+}
+
+impl<F : Float, D : Copy, A, const N : usize, const P : usize>
+Branches<F,D,A,N,P>
+where Const<P> : BranchCount<N>,
+      A : Aggregator {
+
+    pub fn new_with<S : LocalAnalysis <F, A, N>>(
+        domain : &Cube<F,N>,
+        support : &S
+    ) -> Self {
+        let hint = support.bisection_hint(domain);
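+        // Branch at the support's bisection hint where available, otherwise at the
+        // midpoint of the cube edge; in either case clamp the point into the cube.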
+        let branch_at = map2(&hint, domain, |h, r| {
+            h.unwrap_or_else(|| (r[0]+r[1])/F::TWO).max(r[0]).min(r[1])
+        }).into();
+        Branches{
+            branch_at : branch_at,
+            nodes : array_init(|| Node::new()),
+        }
+    }
+
+    /// Get an iterator over the aggregators of the nodes of this branch head.
+    #[inline]
+    pub fn aggregators(&self) -> MapF<Iter<'_, Node<F,D,A,N,P>>, &'_ A> {
+        self.nodes.iter().mapF(Node::get_aggregator)
+    }
+
+    #[inline]
+    pub fn iter_subcubes<'b>(&self, domain : &'b Cube<F, N>)
+    -> SubcubeIter<'b, F, N, P> {
+        SubcubeIter {
+            domain : domain,
+            branch_at : self.branch_at,
+            index : 0,
+        }
+    }
+
+    /// Iterate over all nodes and corresponding subcubes of self.
+    #[inline]
+    pub fn nodes_and_cubes<'a, 'b>(&'a self, domain : &'b Cube<F, N>)
+    -> std::iter::Zip<Iter<'a, Node<F,D,A,N,P>>, SubcubeIter<'b, F, N, P>> {
+        self.nodes.iter().zip(self.iter_subcubes(domain))
+    }
+
+    /// Mutably iterate over all nodes and corresponding subcubes of self.
+    #[inline]
+    pub fn nodes_and_cubes_mut<'a, 'b>(&'a mut self, domain : &'b Cube<F, N>)
+    -> std::iter::Zip<IterMut<'a, Node<F,D,A,N,P>>, SubcubeIter<'b, F, N, P>> {
+        let subcube_iter = self.iter_subcubes(domain);
+        self.nodes.iter_mut().zip(subcube_iter)
+    }
+
+    /// Insert data into the branch.
+    pub fn insert<M : Depth, S : LocalAnalysis<F, A, N>>(
+        &mut self,
+        domain : &Cube<F,N>,
+        d : D,
+        new_leaf_depth : M,
+        support : &S
+    ) {
+        let support_hint = support.support_hint();
+        for (node, subcube) in self.nodes_and_cubes_mut(&domain) {
+            if support_hint.intersects(&subcube) {
+                node.insert(
+                    &subcube,
+                    d,
+                    new_leaf_depth,
+                    support
+                );
+            }
+        }
+    }
+
+    /// Construct a new instance for a different aggregator
+    pub fn convert_aggregator<ANew, G>(
+        self,
+        generator : &G,
+        domain : &Cube<F, N>
+    ) -> Branches<F,D,ANew,N,P>
+    where ANew : Aggregator,
+          G : SupportGenerator<F, N, Id=D>,
+          G::SupportType : LocalAnalysis<F, ANew, N> {
+        let branch_at = self.branch_at;
+        let subcube_iter = self.iter_subcubes(domain);
+        let new_nodes = self.nodes.into_iter().zip(subcube_iter).map(|(node, subcube)| {
+            // TODO: avoid clone
+            node.convert_aggregator(generator, &subcube)
+        });
+        Branches {
+            branch_at : branch_at,
+            nodes : collect_into_array_unchecked(new_nodes),
+        }
+    }
+
+    /// Recalculate aggregator after changes to generator
+    pub fn refresh_aggregator<G>(
+        &mut self,
+        generator : &G,
+        domain : &Cube<F, N>
+    ) where G : SupportGenerator<F, N, Id=D>,
+            G::SupportType : LocalAnalysis<F, A, N> {
+        for (node, subcube) in self.nodes_and_cubes_mut(domain) {
+            node.refresh_aggregator(generator, &subcube)
+        }
+    }
+}
+
+impl<F : Float, D : Copy, A, const N : usize, const P : usize>
+Node<F,D,A,N,P>
+where Const<P> : BranchCount<N>,
+      A : Aggregator {
+
+    #[inline]
+    pub fn new() -> Self {
+        Node {
+            data : NodeOption::Uninitialised,
+            aggregator : A::new(),
+        }
+    }
+
+    /// Get leaf data
+    #[inline]
+    pub fn get_leaf_data(&self, x : &Loc<F, N>) -> Option<&Vec<D>> {
+        match self.data {
+            NodeOption::Uninitialised => None,
+            NodeOption::Leaf(ref data) => Some(data),
+            NodeOption::Branches(ref b) => b.get_node(x).get_leaf_data(x),
+        }
+    }
+
+    /// Returns a reference to the aggregator of this node
+    #[inline]
+    pub fn get_aggregator(&self) -> &A {
+        &self.aggregator
+    }
+
+    /// Insert `d` into the tree. If an `Uninitialised` node is encountered, a new
+    /// leaf is created at a minimum depth of `new_leaf_depth`.
+    pub fn insert<M : Depth, S : LocalAnalysis <F, A, N>>(
+        &mut self,
+        domain : &Cube<F,N>,
+        d : D,
+        new_leaf_depth : M,
+        support : &S
+    ) {
+        match &mut self.data {
+            NodeOption::Uninitialised => {
+                // Replace uninitialised node with a leaf or a branch
+                self.data = match new_leaf_depth.lower() {
+                    None => {
+                        let a = support.local_analysis(&domain);
+                        self.aggregator.aggregate(once(a));
+                        // TODO: this is currently a dirty hard-coded heuristic;
+                        // should add capacity as a parameter
+                        let mut vec = Vec::with_capacity(2*P+1);
+                        vec.push(d);
+                        NodeOption::Leaf(Rc::new(vec))
+                    },
+                    Some(lower) => {
+                        let b = Rc::new({
+                            let mut b0 = Branches::new_with(domain, support);
+                            b0.insert(domain, d, lower, support);
+                            b0
+                        });
+                        self.aggregator.summarise(b.aggregators());
+                        NodeOption::Branches(b)
+                    }
+                }
+            },
+            NodeOption::Leaf(leaf) => {
+                Rc::make_mut(leaf).push(d);
+                let a = support.local_analysis(&domain);
+                self.aggregator.aggregate(once(a));
+            },
+            NodeOption::Branches(b) => {
+                Rc::make_mut(b).insert(domain, d, new_leaf_depth.lower_or(), support);
+                self.aggregator.summarise(b.aggregators());
+            },
+        }
+    }
+
+    /// Construct a new instance for a different aggregator
+    pub fn convert_aggregator<ANew, G>(
+        mut self,
+        generator : &G,
+        domain : &Cube<F, N>
+    ) -> Node<F,D,ANew,N,P>
+    where ANew : Aggregator,
+          G : SupportGenerator<F, N, Id=D>,
+          G::SupportType : LocalAnalysis<F, ANew, N> {
+
+        // The mem::replace is needed to extract self.data: the [`Drop`] implementation
+        // prevents simply moving fields out of `self`.
+        match std::mem::replace(&mut self.data, NodeOption::Uninitialised) {
+            NodeOption::Uninitialised => Node {
+                data : NodeOption::Uninitialised,
+                aggregator : ANew::new(),
+            },
+            NodeOption::Leaf(v) => {
+                let mut anew = ANew::new();
+                anew.aggregate(v.iter().map(|d| {
+                    let support = generator.support_for(*d);
+                    support.local_analysis(&domain)
+                }));
+
+                Node {
+                    data : NodeOption::Leaf(v),
+                    aggregator : anew,
+                }
+            },
+            NodeOption::Branches(b) => {
+                // TODO: now with Rc, convert_aggregator should be reference-based.
+                let bnew = Rc::new(Rc::unwrap_or_clone(b).convert_aggregator(generator, domain));
+                let mut anew = ANew::new();
+                anew.summarise(bnew.aggregators());
+                Node {
+                    data : NodeOption::Branches(bnew),
+                    aggregator : anew,
+                }
+            }
+        }
+    }
+
+    /// Refresh aggregator after changes to generator
+    pub fn refresh_aggregator<G>(
+        &mut self,
+        generator : &G,
+        domain : &Cube<F, N>
+    ) where G : SupportGenerator<F, N, Id=D>,
+            G::SupportType : LocalAnalysis<F, A, N> {
+        match &mut self.data {
+            NodeOption::Uninitialised => { },
+            NodeOption::Leaf(v) => {
+                self.aggregator = A::new();
+                self.aggregator.aggregate(v.iter().map(|d| {
+                    generator.support_for(*d)
+                             .local_analysis(&domain)
+                }));
+            },
+            NodeOption::Branches(ref mut b) => {
+                // TODO: now with Rc, convert_aggregator should be reference-based.
+                Rc::make_mut(b).refresh_aggregator(generator, domain);
+                self.aggregator.summarise(b.aggregators());
+            }
+        }
+    }
+}
+
+impl<'a, D> Iterator for BTIter<'a,D> {
+    type Item = &'a D;
+    #[inline]
+    fn next(&mut self) -> Option<&'a D> {
+        self.iter.next()
+    }
+}
+
+
+//
+// BT
+//
+
+/// Internal structure to hide the `const P : usize` parameter of [`Node`] until
+/// const generics are flexible enough to fix `P=pow(2, N)`.
+pub trait BTNode<F, D, A, const N : usize>
+where F : Float,
+        D : 'static + Copy,
+        A : Aggregator {
+    type Node : Clone + std::fmt::Debug;
+}
+
+#[derive(Debug)]
+pub struct BTNodeLookup;
+
+/// Interface to a [`BT`] bisection tree.
+pub trait BTImpl<F : Float, const N : usize> : std::fmt::Debug + Clone + GlobalAnalysis<F, Self::Agg> {
+    type Data : 'static + Copy;
+    type Depth : Depth;
+    type Agg : Aggregator;
+    type Converted<ANew> : BTImpl<F, N, Data=Self::Data, Agg=ANew> where ANew : Aggregator;
+
+    /// Insert d into the `BisectionTree`.
+    fn insert<S : LocalAnalysis<F, Self::Agg, N>>(
+        &mut self,
+        d : Self::Data,
+        support : &S
+    );
+
+    /// Construct a new instance for a different aggregator
+    fn convert_aggregator<ANew, G>(self, generator : &G)
+    -> Self::Converted<ANew>
+    where ANew : Aggregator,
+          G : SupportGenerator<F, N, Id=Self::Data>,
+          G::SupportType : LocalAnalysis<F, ANew, N>;
+
+
+    /// Refresh aggregator after changes to generator
+    fn refresh_aggregator<G>(&mut self, generator : &G)
+    where G : SupportGenerator<F, N, Id=Self::Data>,
+          G::SupportType : LocalAnalysis<F, Self::Agg, N>;
+
+    /// Iterate over the data items at `x`.
+    fn iter_at<'a>(&'a self, x : &'a Loc<F,N>) -> BTIter<'a, Self::Data>;
+
+    /// Create a new instance
+    fn new(domain : Cube<F, N>, depth : Self::Depth) -> Self;
+}
+
+/// The main bisection tree structure. The interface operations are via [`BTImpl`]
+/// to hide the `const P : usize` parameter until const generics are flexible enough
+/// to fix `P=pow(2, N)`.
+#[derive(Clone,Debug)]
+pub struct BT<
+    M : Depth,
+    F : Float,
+    D : 'static + Copy,
+    A : Aggregator,
+    const N : usize,
+> where BTNodeLookup : BTNode<F, D, A, N> {
+    pub(super) depth : M,
+    pub(super) domain : Cube<F, N>,
+    pub(super) topnode : <BTNodeLookup as BTNode<F, D, A, N>>::Node,
+}
+
+macro_rules! impl_bt {
+    ($($n:literal)*) => { $(
+        impl<F, D, A> BTNode<F, D, A, $n> for BTNodeLookup
+        where F : Float,
+              D : 'static + Copy + std::fmt::Debug,
+              A : Aggregator {
+            type Node = Node<F,D,A,$n,{pow(2, $n)}>;
+        }
+
+        impl<M,F,D,A> BTImpl<F,$n> for BT<M,F,D,A,$n>
+        where M : Depth,
+              F : Float,
+              D : 'static + Copy + std::fmt::Debug,
+              A : Aggregator {
+            type Data = D;
+            type Depth = M;
+            type Agg = A;
+            type Converted<ANew> = BT<M,F,D,ANew,$n> where ANew : Aggregator;
+
+            /// Insert `d` into the tree.
+            fn insert<S: LocalAnalysis<F, A, $n>>(
+                &mut self,
+                d : D,
+                support : &S
+            ) {
+                self.topnode.insert(
+                    &self.domain,
+                    d,
+                    self.depth,
+                    support
+                );
+            }
+
+            /// Construct a new instance for a different aggregator
+            fn convert_aggregator<ANew, G>(self, generator : &G) -> Self::Converted<ANew>
+            where ANew : Aggregator,
+                  G : SupportGenerator<F, $n, Id=D>,
+                  G::SupportType : LocalAnalysis<F, ANew, $n> {
+                let topnode = self.topnode.convert_aggregator(generator, &self.domain);
+                BT {
+                    depth : self.depth,
+                    domain : self.domain,
+                    topnode
+                }
+            }
+
+            /// Refresh aggregator after changes to generator
+            fn refresh_aggregator<G>(&mut self, generator : &G)
+            where G : SupportGenerator<F, $n, Id=Self::Data>,
+                G::SupportType : LocalAnalysis<F, Self::Agg, $n> {
+                self.topnode.refresh_aggregator(generator, &self.domain);
+            }
+
+            /// Iterate elements at `x`.
+            fn iter_at<'a>(&'a self, x : &'a Loc<F,$n>) -> BTIter<'a,D> {
+                match self.topnode.get_leaf_data(x) {
+                    Some(data) => BTIter { iter : data.iter() },
+                    None => BTIter { iter : [].iter() }
+                }
+            }
+
+            fn new(domain : Cube<F, $n>, depth : M) -> Self {
+                BT {
+                    depth : depth,
+                    domain : domain,
+                    topnode : Node::new(),
+                }
+            }
+        }
+
+        impl<M,F,D,A> GlobalAnalysis<F,A> for BT<M,F,D,A,$n>
+        where M : Depth,
+              F : Float,
+              D : 'static + Copy + std::fmt::Debug,
+              A : Aggregator {
+            fn global_analysis(&self) -> A {
+                self.topnode.get_aggregator().clone()
+            }
+        }
+    )* }
+}
+
+impl_bt!(1 2 3 4);
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/bisection_tree/btfn.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,689 @@
+
+use numeric_literals::replace_float_literals;
+use std::iter::Sum;
+use std::marker::PhantomData;
+pub use nalgebra::Const;
+use crate::types::Float;
+use crate::mapping::Mapping;
+use crate::linops::Linear;
+use crate::sets::Set;
+pub use crate::sets::Cube;
+pub use crate::loc::Loc;
+use super::support::*;
+use super::bt::*;
+use super::refine::*;
+use super::aggregator::*;
+use super::either::*;
+use crate::fe_model::base::RealLocalModel;
+use crate::fe_model::p2_local_model::*;
+
+/// Representation of functions constructed as a sum of components with limited support.
+/// Lookup of the components relevant at a specific point of a [`Loc<F,N>`] domain is done
+/// with a bisection tree (BT). We do not require the `BT` parameter to be an instance of
+/// [`BTImpl`]; this allows performance improvements via “pre-BTFNs” that do not
+/// construct a BT until being added to another function. Addition and subtraction always
+/// use a copy-on-write clone of the BT of the left-hand-side operand, discarding
+/// the BT of the right-hand-side operand.
+#[derive(Clone,Debug)]
+pub struct BTFN<
+    F : Float,
+    G : SupportGenerator<F, N>,
+    BT /*: BTImpl<F, N>*/,
+    const N : usize
+> /*where G::SupportType : LocalAnalysis<F, A, N>*/ {
+    bt : BT,
+    generator : G,
+    _phantoms : PhantomData<F>,
+}
+
+impl<F : Float, G, BT, const N : usize>
+BTFN<F, G, BT, N>
+where G : SupportGenerator<F, N, Id=BT::Data>,
+      G::SupportType : LocalAnalysis<F, BT::Agg, N>,
+      BT : BTImpl<F, N> {
+
+    /// Returns the [`Support`] corresponding to the identifier `d`.
+    /// Simply wraps [`SupportGenerator::support_for`].
+    #[inline]
+    fn support_for(&self, d : G::Id) -> G::SupportType {
+        self.generator.support_for(d)
+    }
+
+    /// Create a new BTFN. The bisection tree `bt` should be pre-initialised to
+    /// correspond to the `generator`. Use [`Self::construct`] if no preinitialised tree
+    /// is available. Use [`Self::new_refresh`] when the aggregator may need updates.
+    pub fn new(bt : BT, generator : G) -> Self {
+        BTFN {
+            bt : bt,
+            generator : generator,
+            _phantoms : std::marker::PhantomData,
+        }
+    }
+
+    /// Create a new BTFN. The bisection tree `bt` should be pre-initialised to
+    /// correspond to the `generator`, but the aggregator may be out of date.
+    pub fn new_refresh(bt : &BT, generator : G) -> Self {
+        // clone().refresh_aggregator(…), as opposed to convert_aggregator,
+        // ensures that the type is maintained. Due to Rc-pointer copy-on-write,
+        // the effort is not significantly different.
+        let mut btnew = bt.clone();
+        btnew.refresh_aggregator(&generator);
+        BTFN::new(btnew, generator)
+    }
+
+    /// Create a new BTFN. Unlike [`Self::new`], this constructs the bisection tree based on
+    /// the generator.
+    pub fn construct(domain : Cube<F, N>, depth : BT::Depth, generator : G) -> Self {
+        let mut bt = BT::new(domain, depth);
+        for (d, support) in generator.all_data() {
+            bt.insert(d, &support);
+        }
+        Self::new(bt, generator)
+    }
+
+    /// Construct a new instance for a different aggregator
+    pub fn convert_aggregator<ANew>(self) -> BTFN<F, G, BT::Converted<ANew>, N>
+    where ANew : Aggregator,
+          G : SupportGenerator<F, N, Id=BT::Data>,
+          G::SupportType : LocalAnalysis<F, ANew, N> {
+        BTFN::new(self.bt.convert_aggregator(&self.generator), self.generator)
+    }
+
+    /// Change the [`BTImpl`].
+    pub fn instantiate<
+        BTNew : BTImpl<F, N, Data=BT::Data>,
+    > (self, domain : Cube<F, N>, depth : BTNew::Depth) -> BTFN<F, G, BTNew, N>
+    where G::SupportType : LocalAnalysis<F, BTNew::Agg, N>  {
+        BTFN::construct(domain, depth, self.generator)
+    }
+
+    /// Change the generator (after, e.g., a scaling of the latter).
+    pub fn new_generator(&self, generator : G) -> Self {
+        BTFN::new_refresh(&self.bt, generator)
+    }
+
+    /// Refresh aggregator after updates to generator
+    fn refresh_aggregator(&mut self) {
+        self.bt.refresh_aggregator(&self.generator);
+    }
+
+}
+
+/// A “pre-BTFN” with no bisection tree.
+pub type PreBTFN<F, G, const N : usize> = BTFN<F, G, (), N>;
+
+impl<F : Float, G, const N : usize> PreBTFN<F, G, N> where G : SupportGenerator<F, N> {
+
+    /// Create a new “pre-BTFN” with no bisection tree. This can feasibly only be
+    /// multiplied by a scalar or added to or subtracted from a proper [`BTFN`].
+    pub fn new_pre(generator : G) -> Self {
+        BTFN {
+            bt : (),
+            generator : generator,
+            _phantoms : std::marker::PhantomData,
+        }
+    }
+}
+
+impl<F : Float, G, BT, const N : usize>
+BTFN<F, G, BT, N>
+where G : SupportGenerator<F, N, Id=usize>,
+      G::SupportType : LocalAnalysis<F, BT::Agg, N>,
+      BT : BTImpl<F, N, Data=usize> {
+
+    /// Helper function for implementing [`std::ops::Add`].
+    fn add_another<G2>(self, other : G2) -> BTFN<F, BothGenerators<G, G2>, BT, N>
+    where G2 : SupportGenerator<F, N, Id=usize>,
+          G2::SupportType : LocalAnalysis<F, BT::Agg, N> {
+
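+        // Reuse the left-hand tree (copy-on-write via Rc) and only insert the supports
+        // coming from the right-hand generator, as enumerated by
+        // `BothGenerators::all_right_data`.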
+        let mut bt = self.bt;
+        let both = BothGenerators(self.generator, other);
+
+        for (d, support) in both.all_right_data() {
+            bt.insert(d, &support);
+        }
+
+        BTFN {
+            bt : bt,
+            generator : both,
+            _phantoms : std::marker::PhantomData,
+        }
+    }
+}
+
+macro_rules! make_btfn_add {
+    ($lhs:ty, $preprocess:path, $($extra_trait:ident)?) => {
+        impl<'a, F : Float, G1, G2, BT1, BT2, const N : usize>
+        std::ops::Add<BTFN<F, G2, BT2, N>> for
+        $lhs
+        where BT1 : BTImpl<F, N, Data=usize>,
+              G1 : SupportGenerator<F, N, Id=usize> + $($extra_trait)?,
+              G2 : SupportGenerator<F, N, Id=usize>,
+              G1::SupportType : LocalAnalysis<F, BT1::Agg, N>,
+              G2::SupportType : LocalAnalysis<F, BT1::Agg, N> {
+            type Output = BTFN<F, BothGenerators<G1, G2>, BT1, N>;
+            #[inline]
+            fn add(self, other : BTFN<F, G2, BT2, N>) -> Self::Output {
+                $preprocess(self).add_another(other.generator)
+            }
+        }
+
+        impl<'a, 'b, F : Float, G1, G2,  BT1, BT2, const N : usize>
+        std::ops::Add<&'b BTFN<F, G2, BT2, N>> for
+        $lhs
+        where BT1 : BTImpl<F, N, Data=usize>,
+              G1 : SupportGenerator<F, N, Id=usize> + $($extra_trait)?,
+              G2 : SupportGenerator<F, N, Id=usize> + Clone,
+              G1::SupportType : LocalAnalysis<F, BT1::Agg, N>,
+              G2::SupportType : LocalAnalysis<F, BT1::Agg, N> {
+
+            type Output = BTFN<F, BothGenerators<G1, G2>, BT1, N>;
+            #[inline]
+            fn add(self, other : &'b BTFN<F, G2, BT2, N>) -> Self::Output {
+                $preprocess(self).add_another(other.generator.clone())
+            }
+        }
+    }
+}
+
+make_btfn_add!(BTFN<F, G1, BT1, N>, std::convert::identity, );
+make_btfn_add!(&'a BTFN<F, G1, BT1, N>, Clone::clone, Clone);
+
+macro_rules! make_btfn_sub {
+    ($lhs:ty, $preprocess:path, $($extra_trait:ident)?) => {
+        impl<'a, F : Float, G1, G2, BT1, BT2, const N : usize>
+        std::ops::Sub<BTFN<F, G2, BT2, N>> for
+        $lhs
+        where BT1 : BTImpl<F, N, Data=usize>,
+              G1 : SupportGenerator<F, N, Id=usize> + $($extra_trait)?,
+              G2 : SupportGenerator<F, N, Id=usize>,
+              G1::SupportType : LocalAnalysis<F, BT1::Agg, N>,
+              G2::SupportType : LocalAnalysis<F, BT1::Agg, N> {
+            type Output = BTFN<F, BothGenerators<G1, G2>, BT1, N>;
+            #[inline]
+            fn sub(self, other : BTFN<F, G2, BT2, N>) -> Self::Output {
+                $preprocess(self).add_another(other.generator.neg())
+            }
+        }
+
+        impl<'a, 'b, F : Float, G1, G2,  BT1, BT2, const N : usize>
+        std::ops::Sub<&'b BTFN<F, G2, BT2, N>> for
+        $lhs
+        where BT1 : BTImpl<F, N, Data=usize>,
+              G1 : SupportGenerator<F, N, Id=usize> + $($extra_trait)?,
+              G2 : SupportGenerator<F, N, Id=usize> + Clone,
+              G1::SupportType : LocalAnalysis<F, BT1::Agg, N>,
+              G2::SupportType : LocalAnalysis<F, BT1::Agg, N>,
+              /*&'b G2 : std::ops::Neg<Output=G2>*/ {
+
+            type Output = BTFN<F, BothGenerators<G1, G2>, BT1, N>;
+            #[inline]
+            fn sub(self, other : &'b BTFN<F, G2, BT2, N>) -> Self::Output {
+                // FIXME: the compiler crashes in an overflow on this.
+                //$preprocess(self).add_another(std::ops::Neg::neg(&other.generator))
+                $preprocess(self).add_another(other.generator.clone().neg())
+            }
+        }
+    }
+}
+
+make_btfn_sub!(BTFN<F, G1, BT1, N>, std::convert::identity, );
+make_btfn_sub!(&'a BTFN<F, G1, BT1, N>, Clone::clone, Clone);
+
+macro_rules! make_btfn_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F : Float, G, BT, const N : usize>
+        std::ops::$trait_assign<F>
+        for BTFN<F, G, BT, N>
+        where BT : BTImpl<F, N>,
+              G : SupportGenerator<F, N, Id=BT::Data>,
+              G::SupportType : LocalAnalysis<F, BT::Agg, N> {
+            #[inline]
+            fn $fn_assign(&mut self, t : F) {
+                self.generator.$fn_assign(t);
+                self.refresh_aggregator();
+            }
+        }
+
+        impl<F : Float, G, BT, const N : usize>
+        std::ops::$trait<F>
+        for BTFN<F, G, BT, N>
+        where BT : BTImpl<F, N>,
+              G : SupportGenerator<F, N, Id=BT::Data>,
+              G::SupportType : LocalAnalysis<F, BT::Agg, N> {
+            type Output = Self;
+            #[inline]
+            fn $fn(mut self, t : F) -> Self::Output {
+                self.generator.$fn_assign(t);
+                self.refresh_aggregator();
+                self
+            }
+        }
+
+        impl<'a, F : Float, G, BT, const N : usize>
+        std::ops::$trait<F>
+        for &'a BTFN<F, G, BT, N>
+        where BT : BTImpl<F, N>,
+              G : SupportGenerator<F, N, Id=BT::Data>,
+              G::SupportType : LocalAnalysis<F, BT::Agg, N>,
+              &'a G : std::ops::$trait<F,Output=G> {
+            type Output = BTFN<F, G, BT, N>;
+            #[inline]
+            fn $fn(self, t : F) -> Self::Output {
+                self.new_generator(self.generator.$fn(t))
+            }
+        }
+    }
+}
+
+make_btfn_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_btfn_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+macro_rules! make_btfn_scalarop_lhs {
+    ($trait:ident, $fn:ident, $fn_assign:ident, $($f:ident)+) => { $(
+        impl<G, BT, const N : usize>
+        std::ops::$trait<BTFN<$f, G, BT, N>>
+        for $f
+        where BT : BTImpl<$f, N>,
+              G : SupportGenerator<$f, N, Id=BT::Data>,
+              G::SupportType : LocalAnalysis<$f, BT::Agg, N> {
+            type Output = BTFN<$f, G, BT, N>;
+            #[inline]
+            fn $fn(self, mut a : BTFN<$f, G, BT, N>) -> Self::Output {
+                a.generator.$fn_assign(self);
+                a.refresh_aggregator();
+                a
+            }
+        }
+
+        impl<'a, G, BT, const N : usize>
+        std::ops::$trait<&'a BTFN<$f, G, BT, N>>
+        for $f
+        where BT : BTImpl<$f, N>,
+              G : SupportGenerator<$f, N, Id=BT::Data> + Clone,
+              G::SupportType : LocalAnalysis<$f, BT::Agg, N>,
+              // FIXME: This causes compiler overflow
+              /*&'a G : std::ops::$trait<$f,Output=G>*/ {
+            type Output = BTFN<$f, G, BT, N>;
+            #[inline]
+            fn $fn(self, a : &'a BTFN<$f, G, BT, N>) -> Self::Output {
+                let mut tmp = a.generator.clone();
+                tmp.$fn_assign(self);
+                a.new_generator(tmp)
+                // FIXME: Prevented by the compiler overflow above.
+                //a.new_generator(a.generator.$fn(a))
+            }
+        }
+    )+ }
+}
+
+make_btfn_scalarop_lhs!(Mul, mul, mul_assign, f32 f64);
+make_btfn_scalarop_lhs!(Div, div, div_assign, f32 f64);
+
+macro_rules! make_btfn_unaryop {
+    ($trait:ident, $fn:ident) => {
+        impl<F : Float, G, BT, const N : usize>
+        std::ops::$trait
+        for BTFN<F, G, BT, N>
+        where BT : BTImpl<F, N>,
+              G : SupportGenerator<F, N, Id=BT::Data>,
+              G::SupportType : LocalAnalysis<F, BT::Agg, N> {
+            type Output = Self;
+            #[inline]
+            fn $fn(mut self) -> Self::Output {
+                self.generator = self.generator.$fn();
+                self.refresh_aggregator();
+                self
+            }
+        }
+
+        /*impl<'a, F : Float, G, BT, const N : usize>
+        std::ops::$trait
+        for &'a BTFN<F, G, BT, N>
+        where BT : BTImpl<F, N>,
+              G : SupportGenerator<F, N, Id=BT::Data>,
+              G::SupportType : LocalAnalysis<F, BT::Agg, N>,
+              &'a G : std::ops::$trait<Output=G> {
+            type Output = BTFN<F, G, BT, N>;
+            #[inline]
+            fn $fn(self) -> Self::Output {
+                self.new_generator(std::ops::$trait::$fn(&self.generator))
+            }
+        }*/
+    }
+}
+
+make_btfn_unaryop!(Neg, neg);
+
+
+
+//
+// Mapping
+//
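+// The value of a BTFN at a point `x` is the sum of the values of the component
+// functions whose data the bisection tree has stored for the leaf containing `x`,
+// i.e., those produced by `bt.iter_at(x)`.
+//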
+
+impl<'a, F : Float, G, BT, V, const N : usize> Mapping<&'a Loc<F,N>>
+for BTFN<F, G, BT, N>
+where BT : BTImpl<F, N>,
+      G : SupportGenerator<F, N, Id=BT::Data>,
+      G::SupportType : LocalAnalysis<F, BT::Agg, N> + Mapping<&'a Loc<F,N>, Codomain = V>,
+      V : Sum {
+
+    type Codomain = V;
+
+    fn value(&self, x : &'a Loc<F,N>) -> Self::Codomain {
+        self.bt.iter_at(x).map(|&d| self.support_for(d).value(x)).sum()
+    }
+}
+
+impl<F : Float, G, BT, V, const N : usize> Mapping<Loc<F,N>>
+for BTFN<F, G, BT, N>
+where BT : BTImpl<F, N>,
+      G : SupportGenerator<F, N, Id=BT::Data>,
+      G::SupportType : LocalAnalysis<F, BT::Agg, N> + Mapping<Loc<F,N>, Codomain = V>,
+      V : Sum {
+
+    type Codomain = V;
+
+    fn value(&self, x : Loc<F,N>) -> Self::Codomain {
+        self.bt.iter_at(&x).map(|&d| self.support_for(d).value(x)).sum()
+    }
+}
+
+impl<F : Float, G, BT, const N : usize> GlobalAnalysis<F, BT::Agg>
+for BTFN<F, G, BT, N>
+where BT : BTImpl<F, N>,
+      G : SupportGenerator<F, N, Id=BT::Data>,
+      G::SupportType : LocalAnalysis<F, BT::Agg, N> {
+
+    #[inline]
+    fn global_analysis(&self) -> BT::Agg {
+        self.bt.global_analysis()
+    }
+}
+
+//
+// Blanket implementation of BTFN as a linear functional over objects
+// that are linear functionals over BTFN.
+//
+
+impl<X, F : Float, G, BT, const N : usize> Linear<X>
+for BTFN<F, G, BT, N>
+where BT : BTImpl<F, N>,
+      G : SupportGenerator<F, N, Id=BT::Data>,
+      G::SupportType : LocalAnalysis<F, BT::Agg, N>,
+      X : Linear<BTFN<F, G, BT, N>, Codomain=F> {
+    type Codomain = F;
+
+    #[inline]
+    fn apply(&self, x : &X) -> F {
+        x.apply(self)
+    }
+}
+
+/// Helper trait for performing approximate minimisation using P2 elements.
+pub trait P2Minimise<U, F : Float> : Set<U> {
+    fn p2_minimise<G : Fn(&U) -> F>(&self, g : G) -> (U, F);
+
+}
+
+impl<F : Float> P2Minimise<Loc<F, 1>, F> for Cube<F, 1> {
+    fn p2_minimise<G : Fn(&Loc<F, 1>) -> F>(&self, g : G) -> (Loc<F, 1>, F) {
+        let interval = Simplex(self.corners());
+        interval.p2_model(&g).minimise(&interval)
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> P2Minimise<Loc<F, 2>, F> for Cube<F, 2> {
+    fn p2_minimise<G : Fn(&Loc<F, 2>) -> F>(&self, g : G) -> (Loc<F, 2>, F) {
+        if false {
+            // Split into two triangles (simplices) with a separate P2 model in each.
+            // The six nodes of each triangle are its corners and edge midpoints.
+            let [a, b, c, d] = self.corners();
+            let [va, vb, vc, vd] = [g(&a), g(&b), g(&c), g(&d)];
+
+            let ab = midpoint(&a, &b);
+            let bc = midpoint(&b, &c);
+            let ca = midpoint(&c, &a);
+            let cd = midpoint(&c, &d);
+            let da = midpoint(&d, &a);
+            let [vab, vbc, vca, vcd, vda] = [g(&ab), g(&bc), g(&ca), g(&cd), g(&da)];
+
+            let s1 = Simplex([a, b, c]);
+            let m1 = P2LocalModel::<F, 2, 3, 3, 3>::new(
+                &[a, b, c, ab, bc, ca],
+                &[va, vb, vc, vab, vbc, vca]
+            );
+
+            let r1@(_, v1) = m1.minimise(&s1);
+
+            let s2 = Simplex([c, d, a]);
+            let m2 = P2LocalModel::<F, 2, 3, 3, 3>::new(
+                &[c, d, a, cd, da, ca],
+                &[vc, vd, va, vcd, vda, vca]
+            );
+
+            let r2@(_, v2) = m2.minimise(&s2);
+
+            if v1 < v2 { r1 } else { r2 }
+        } else {
+            // Single P2 model for the entire cube.
+            let [a, b, c, d] = self.corners();
+            let [va, vb, vc, vd] = [g(&a), g(&b), g(&c), g(&d)];
+            let [e, f] = match 'r' {
+                 'm' => [(&a + &b + &c) / 3.0,    (&c + &d + &a) / 3.0],
+                 'c' => [midpoint(&a, &b),        midpoint(&a, &d)],
+                 'w' => [(&a + &b * 2.0) / 3.0,   (&a + &d * 2.0) / 3.0],
+                 'r' => {
+                    // Pseudo-randomise edge midpoints
+                    let Loc([x, y]) = a;
+                    let tmp : f64 = (x+y).as_();
+                    match tmp.to_bits() % 4 {
+                        0 => [midpoint(&a, &b),        midpoint(&a, &d)],
+                        1 => [midpoint(&c, &d),        midpoint(&a, &d)],
+                        2 => [midpoint(&a, &b),        midpoint(&b, &c)],
+                        _ => [midpoint(&c, &d),        midpoint(&b, &c)],
+                    }
+                 },
+                 _ => [self.center(),           (&a + &b) / 2.0],
+            };
+            let [ve, vf] = [g(&e), g(&f)];
+
+            let m1 = P2LocalModel::<F, 2, 3, 3, 3>::new(
+                &[a, b, c, d, e, f],
+                &[va, vb, vc, vd, ve, vf],
+            );
+
+            m1.minimise(self)
+        }
+    }
+}
+
+struct RefineMax;
+struct RefineMin;
+
+/// TODO: update doc.
+/// Simple refiner that keeps the difference between upper and lower bounds
+/// of [`Bounded`] functions within the tolerance $ε$. The functions also have to be continuous
+/// for this refiner to ever succeed.
+/// The type parameter `T` should be either [`RefineMax`] or [`RefineMin`].
+struct P2Refiner<F : Float, T> {
+    bound : Option<F>,
+    tolerance : F,
+    max_steps : usize,
+    #[allow(dead_code)] // `how` is just for type system purposes.
+    how : T,
+}
+
+impl<F : Float, G, const N : usize> Refiner<F, Bounds<F>, G, N>
+for P2Refiner<F, RefineMax>
+where Cube<F, N> : P2Minimise<Loc<F, N>, F>,
+      G : SupportGenerator<F, N>,
+      G::SupportType : for<'a> Mapping<&'a Loc<F,N>,Codomain=F>
+                       + LocalAnalysis<F, Bounds<F>, N> {
+    type Result = Option<(Loc<F, N>, F)>;
+    type Sorting = UpperBoundSorting<F>;
+
+    fn refine(
+        &self,
+        aggregator : &Bounds<F>,
+        cube : &Cube<F, N>,
+        data : &Vec<G::Id>,
+        generator : &G,
+        step : usize
+    ) -> RefinerResult<Bounds<F>, Self::Result> {
+        // g gives the negative of the value of the function presented by `data` and `generator`.
+        let g = move |x : &Loc<F,N>| {
+            let f = move |&d| generator.support_for(d).value(x);
+            -data.iter().map(f).sum::<F>()
+        };
+        // … so the negative of the minimum is the maximum we want.
+        let (x, neg_v) = cube.p2_minimise(g);
+        let v = -neg_v;
+
+        if self.bound.map_or(false, |b| aggregator.upper() <= b + self.tolerance) {
+            // The upper bound is below the maximisation threshold. Don't bother with this cube.
+            RefinerResult::Uncertain(*aggregator, None)
+        } else if step < self.max_steps && (aggregator.upper() - v).abs() > self.tolerance {
+            // The function isn't refined enough in `cube`, so indicate that
+            // further subdivision is required.
+            RefinerResult::NeedRefinement
+        } else {
+            // The data is refined enough, so return new hopefully better bounds
+            // and the maximiser.
+            let res = (x, v);
+            let bounds = Bounds(aggregator.lower(), v);
+            RefinerResult::Uncertain(bounds, Some(res))
+        }
+    }
+}
+
+
+impl<F : Float, G, const N : usize> Refiner<F, Bounds<F>, G, N>
+for P2Refiner<F, RefineMin>
+where Cube<F, N> : P2Minimise<Loc<F, N>, F>,
+      G : SupportGenerator<F, N>,
+      G::SupportType : for<'a> Mapping<&'a Loc<F,N>,Codomain=F>
+                       + LocalAnalysis<F, Bounds<F>, N> {
+    type Result = Option<(Loc<F, N>, F)>;
+    type Sorting = LowerBoundSorting<F>;
+
+    fn refine(
+        &self,
+        aggregator : &Bounds<F>,
+        cube : &Cube<F, N>,
+        data : &Vec<G::Id>,
+        generator : &G,
+        step : usize
+    ) -> RefinerResult<Bounds<F>, Self::Result> {
+        // g gives the value of the function presented by `data` and `generator`.
+        let g = move |x : &Loc<F,N>| {
+            let f = move |&d| generator.support_for(d).value(x);
+            data.iter().map(f).sum::<F>()
+        };
+        // Minimise it.
+        let (x, v) = cube.p2_minimise(g);
+
+        if self.bound.map_or(false, |b| aggregator.lower() >= b - self.tolerance) {
+            // The lower bound is above the minimisation threshold. Don't bother with this cube.
+            RefinerResult::Uncertain(*aggregator, None)
+        } else if step < self.max_steps && (aggregator.lower() - v).abs() > self.tolerance {
+            // The function isn't refined enough in `cube`, so indicate that
+            // further subdivision is required.
+            RefinerResult::NeedRefinement
+        } else {
+            // The data is refined enough, so return new hopefully better bounds
+            // and the minimiser.
+            let res = (x, v);
+            let bounds = Bounds(v, aggregator.upper());
+            RefinerResult::Uncertain(bounds, Some(res))
+        }
+    }
+}
+
+
+
+struct BoundRefiner<F : Float, T> {
+    bound : F,
+    tolerance : F,
+    max_steps : usize,
+    #[allow(dead_code)] // `how` is just for type system purposes.
+    how : T,
+}
+
+impl<F : Float, G, const N : usize> Refiner<F, Bounds<F>, G, N>
+for BoundRefiner<F, RefineMax>
+where G : SupportGenerator<F, N> {
+    type Result = bool;
+    type Sorting = UpperBoundSorting<F>;
+
+    fn refine(
+        &self,
+        aggregator : &Bounds<F>,
+        _cube : &Cube<F, N>,
+        _data : &Vec<G::Id>,
+        _generator : &G,
+        step : usize
+    ) -> RefinerResult<Bounds<F>, Self::Result> {
+        if aggregator.upper() <= self.bound + self.tolerance {
+            // Below upper bound within tolerances. Indicate uncertain success.
+            RefinerResult::Uncertain(*aggregator, true)
+        } else if aggregator.lower() >= self.bound - self.tolerance {
+            // Above upper bound within tolerances. Indicate certain failure.
+            RefinerResult::Certain(false)
+        } else if step < self.max_steps {
+            // No decision possible, but within step bounds - further subdivision is required.
+            RefinerResult::NeedRefinement
+        } else {
+            // No decision possible, but past step bounds
+            RefinerResult::Uncertain(*aggregator, false)
+        }
+    }
+}
+
+
+impl<F : Float, G, BT, const N : usize> BTFN<F, G, BT, N>
+where BT : BTSearch<F, N, Agg=Bounds<F>>,
+      G : SupportGenerator<F, N, Id=BT::Data>,
+      G::SupportType : for<'a> Mapping<&'a Loc<F,N>,Codomain=F>
+                       + LocalAnalysis<F, Bounds<F>, N>,
+      Cube<F, N> : P2Minimise<Loc<F, N>, F> {
+
+    /// Try to maximise the BTFN within stated value `tolerance`, taking at most `max_steps`
+    /// refinement steps. Returns the maximiser and the maximum value.
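+    ///
+    /// Illustrative sketch (not a runnable doctest; `btfn` is assumed to have been
+    /// constructed elsewhere):
+    /// ```ignore
+    /// let (x_max, v_max) = btfn.maximise(1e-6, 50);
+    /// ```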
+    pub fn maximise(&mut self, tolerance : F, max_steps : usize) -> (Loc<F, N>, F) {
+        let refiner = P2Refiner{ tolerance, max_steps, how : RefineMax, bound : None };
+        self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.").unwrap()
+    }
+
+    /// Try to maximise the BTFN within stated value `tolerance`, taking at most `max_steps`
+    /// refinement steps. Returns the maximiser and the maximum value when one is found above
+    /// the `bound` threshold, otherwise `None`.
+    pub fn maximise_above(&mut self, bound : F, tolerance : F, max_steps : usize)
+    -> Option<(Loc<F, N>, F)> {
+        let refiner = P2Refiner{ tolerance, max_steps, how : RefineMax, bound : Some(bound) };
+        self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.")
+    }
+
+    /// Try to minimise the BTFN within stated value `tolerance`, taking at most `max_steps`
+    /// refinement steps. Returns the minimiser and the minimum value.
+    pub fn minimise(&mut self, tolerance : F, max_steps : usize) -> (Loc<F, N>, F) {
+        let refiner = P2Refiner{ tolerance, max_steps, how : RefineMin, bound : None };
+        self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.").unwrap()
+    }
+
+    /// Try to minimise the BTFN within stated value `tolerance`, taking at most `max_steps`
+    /// refinement steps. Returns the minimiser and the minimum value when one is found below
+    /// the `bound` threshold, otherwise `None`.
+    pub fn minimise_below(&mut self, bound : F, tolerance : F, max_steps : usize)
+    -> Option<(Loc<F, N>, F)> {
+        let refiner = P2Refiner{ tolerance, max_steps, how : RefineMin, bound : Some(bound) };
+        self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.")
+    }
+
+    /// Verify that the BTFN has the stated upper `bound` within the stated `tolerance`,
+    /// taking at most `max_steps` refinement steps.
+    pub fn has_upper_bound(&mut self, bound : F, tolerance : F, max_steps : usize) -> bool {
+        let refiner = BoundRefiner{ bound, tolerance, max_steps, how : RefineMax };
+        self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.")
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/bisection_tree/either.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,225 @@
+
+use crate::types::*;
+use super::support::*;
+use crate::mapping::Mapping;
+use std::iter::Chain;
+use crate::iter::{Mappable,MapF,MapZ};
+use super::aggregator::*;
+
+// TODO: try to remove F and N
+#[derive(Debug,Clone)]
+pub struct BothGenerators<A, B>(
+    pub(super) A,
+    pub(super) B,
+);
+
+#[derive(Debug,Clone)]
+pub enum EitherSupport<A, B> {
+    Left(A),
+    Right(B),
+}
+
+// We need type alias bounds to access associated types.
+#[allow(type_alias_bounds)]
+type BothAllDataIter<
+    'a, F,
+    G1 : SupportGenerator<F, N>,
+    G2 : SupportGenerator<F, N>,
+    const N : usize
+> = Chain<
+    MapF<G1::AllDataIter<'a>, (usize, EitherSupport<G1::SupportType, G2::SupportType>)>,
+    MapZ<G2::AllDataIter<'a>, usize, (usize, EitherSupport<G1::SupportType, G2::SupportType>)>,
+>;
+
+impl<G1, G2> BothGenerators<G1, G2> {
+    /// Helper for [`all_data`].
+    #[inline]
+    fn map_left<F : Float, const N : usize>((d, support) : (G1::Id, G1::SupportType))
+    -> (usize, EitherSupport<G1::SupportType, G2::SupportType>)
+    where G1 : SupportGenerator<F, N, Id=usize>,
+          G2 : SupportGenerator<F, N, Id=usize> {
+
+        let id : usize = d.into();
+        (id.into(), EitherSupport::Left(support))
+    }
+
+    /// Helper for [`all_data`].
+    #[inline]
+    fn map_right<F : Float, const N : usize>(n0 : &usize, (d, support) : (G2::Id, G2::SupportType))
+    -> (usize, EitherSupport<G1::SupportType, G2::SupportType>)
+    where G1 : SupportGenerator<F, N, Id=usize>,
+          G2 : SupportGenerator<F, N, Id=usize> {
+
+        let id : usize = d.into();
+        ((n0+id).into(), EitherSupport::Right(support))
+    }
+
+    #[inline]
+    pub(super) fn all_left_data<F : Float, const N : usize>(&self)
+    -> MapF<G1::AllDataIter<'_>, (usize, EitherSupport<G1::SupportType, G2::SupportType>)>
+    where G1 : SupportGenerator<F, N, Id=usize>,
+          G2 : SupportGenerator<F, N, Id=usize> {
+        self.0.all_data().mapF(Self::map_left)
+    }
+
+    #[inline]
+    pub(super) fn all_right_data<F : Float, const N : usize>(&self)
+    -> MapZ<G2::AllDataIter<'_>, usize, (usize, EitherSupport<G1::SupportType, G2::SupportType>)>
+    where G1 : SupportGenerator<F, N, Id=usize>,
+          G2 : SupportGenerator<F, N, Id=usize> {
+        let n0 = self.0.support_count();
+        self.1.all_data().mapZ(n0, Self::map_right)
+    }
+}
+
+impl<F : Float, G1, G2, const N : usize>
+SupportGenerator<F, N>
+for BothGenerators<G1, G2>
+where G1 : SupportGenerator<F, N, Id=usize>,
+      G2 : SupportGenerator<F, N, Id=usize> {
+
+    type Id = usize;
+    type SupportType = EitherSupport<G1::SupportType, G2::SupportType>;
+    type AllDataIter<'a> = BothAllDataIter<'a, F, G1, G2, N> where G1 : 'a, G2 : 'a;
+
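+    /// Ids `0..n0`, where `n0 = self.0.support_count()`, refer to the supports of the
+    /// left generator; ids `n0..` are offset by `n0` and refer to the supports of the
+    /// right generator.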
+    #[inline]
+    fn support_for(&self,  id : Self::Id)
+    -> Self::SupportType {
+        let n0 = self.0.support_count();
+        if id < n0 {
+            EitherSupport::Left(self.0.support_for(id.into()))
+        } else {
+            EitherSupport::Right(self.1.support_for((id-n0).into()))
+        }
+    }
+
+    #[inline]
+    fn support_count(&self) -> usize {
+        self.0.support_count() + self.1.support_count()
+    }
+
+    #[inline]
+    fn all_data(&self) -> Self::AllDataIter<'_> {
+        self.all_left_data().chain(self.all_right_data())
+    }
+}
+
+impl<F: Float, S1, S2, const N : usize> Support<F, N> for EitherSupport<S1, S2>
+where S1 : Support<F, N>,
+      S2 : Support<F, N> {
+
+    #[inline]
+    fn support_hint(&self) -> Cube<F,N> {
+        match self {
+            EitherSupport::Left(ref a) => a.support_hint(),
+            EitherSupport::Right(ref b) => b.support_hint(),
+        }
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F,N>) -> bool {
+        match self {
+            EitherSupport::Left(ref a) => a.in_support(x),
+            EitherSupport::Right(ref b) => b.in_support(x),
+        }
+    }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F, N>) -> [Option<F>; N] {
+        match self {
+            EitherSupport::Left(ref a) => a.bisection_hint(cube),
+            EitherSupport::Right(ref b) => b.bisection_hint(cube),
+        }
+    }
+}
+
+impl<F : Float, A, S1, S2, const N : usize> LocalAnalysis<F, A, N> for EitherSupport<S1, S2>
+where A : Aggregator,
+      S1 : LocalAnalysis<F, A, N>,
+      S2 : LocalAnalysis<F, A, N>, {
+
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> A {
+        match self {
+            EitherSupport::Left(ref a) => a.local_analysis(cube),
+            EitherSupport::Right(ref b) => b.local_analysis(cube),
+        }
+    }
+}
+
+impl<F : Float, A, S1, S2> GlobalAnalysis<F, A> for EitherSupport<S1, S2>
+where A : Aggregator,
+      S1 : GlobalAnalysis<F, A>,
+      S2 : GlobalAnalysis<F, A>, {
+
+    #[inline]
+    fn global_analysis(&self) -> A {
+        match self {
+            EitherSupport::Left(ref a) => a.global_analysis(),
+            EitherSupport::Right(ref b) => b.global_analysis(),
+        }
+    }
+}
+
+impl<F, S1, S2, X> Mapping<X> for EitherSupport<S1, S2>
+where S1 : Mapping<X, Codomain=F>,
+      S2 : Mapping<X, Codomain=F> {
+    type Codomain = F;
+    #[inline]
+    fn value(&self, x : X) -> F {
+        match self {
+            EitherSupport::Left(ref a) => a.value(x),
+            EitherSupport::Right(ref b) => b.value(x),
+        }
+    }
+}
+
+macro_rules! make_either_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F : Float, G1, G2>
+        std::ops::$trait_assign<F>
+        for BothGenerators<G1, G2>
+        where G1 : std::ops::$trait_assign<F>,
+              G2 : std::ops::$trait_assign<F>, {
+            #[inline]
+            fn $fn_assign(&mut self, t : F) {
+                self.0.$fn_assign(t);
+                self.1.$fn_assign(t);
+            }
+        }
+
+        impl<'a, F : Float, G1, G2>
+        std::ops::$trait<F>
+        for &'a BothGenerators<G1, G2>
+        where &'a G1 : std::ops::$trait<F,Output=G1>,
+              &'a G2 : std::ops::$trait<F,Output=G2> {
+            type Output = BothGenerators<G1, G2>;
+            #[inline]
+            fn $fn(self, t : F) -> BothGenerators<G1, G2> {
+                BothGenerators(self.0.$fn(t), self.1.$fn(t))
+            }
+        }
+    }
+}
+
+make_either_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_either_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+impl<G1, G2> std::ops::Neg for BothGenerators<G1, G2>
+where G1 : std::ops::Neg, G2 : std::ops::Neg, {
+    type Output = BothGenerators<G1::Output, G2::Output>;
+    #[inline]
+    fn neg(self) -> Self::Output {
+        BothGenerators(self.0.neg(), self.1.neg())
+    }
+}
+/*
+impl<'a, G1, G2> std::ops::Neg for &'a BothGenerators<G1, G2>
+where &'a G1 : std::ops::Neg, &'a G2 : std::ops::Neg, {
+    type Output = BothGenerators<<&'a G1 as std::ops::Neg>::Output,
+                                 <&'a G2 as std::ops::Neg>::Output>;
+    fn neg(self) -> Self::Output {
+        BothGenerators(self.0.neg(), self.1.neg())
+    }
+}
+*/
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/bisection_tree/refine.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,425 @@
+
+use std::collections::BinaryHeap;
+use std::cmp::{PartialOrd,Ord,Ordering,Ordering::*,max};
+use std::rc::Rc;
+use std::marker::PhantomData;
+use crate::types::*;
+use super::support::*;
+use super::bt::*;
+use super::aggregator::*;
+use crate::nanleast::NaNLeast;
+
+/// Trait for sorting [`Aggregator`]s for [`BT`] refinement.
+///
+/// The sorting involves two sorting keys, the “upper” and the “lower” key. Any [`BT`] branches
+/// with upper key less than the lower key of another are discarded from the refinement process.
+pub trait AggregatorSorting {
+    // Priority
+    type Agg : Aggregator;
+    type Sort : Ord + Copy + std::fmt::Debug;
+
+    /// Returns lower sorting key
+    fn sort_lower(aggregator : &Self::Agg) -> Self::Sort;
+
+    /// Returns upper sorting key
+    fn sort_upper(aggregator : &Self::Agg) -> Self::Sort;
+
+    /// Returns bottom sorting key.
+    fn bottom() -> Self::Sort;
+}
+
+/// An [`AggregatorSorting`] for [`Bounds`], using the upper/lower bound as the upper/lower key.
+///
+/// See [`LowerBoundSorting`] for the opposite ordering.
+pub struct UpperBoundSorting<F : Float>(PhantomData<F>);
+
+/// An [`AggregatorSorting`] for [`Bounds`], using the upper/lower bound as the lower/upper key.
+///
+/// See [`UpperBoundSorting`] for the opposite ordering.
+pub struct LowerBoundSorting<F : Float>(PhantomData<F>);
+
+impl<F : Float> AggregatorSorting for UpperBoundSorting<F> {
+    type Agg = Bounds<F>;
+    type Sort = NaNLeast<F>;
+
+    #[inline]
+    fn sort_lower(aggregator : &Bounds<F>) -> Self::Sort { NaNLeast(aggregator.lower()) }
+
+    #[inline]
+    fn sort_upper(aggregator : &Bounds<F>) -> Self::Sort { NaNLeast(aggregator.upper()) }
+
+    #[inline]
+    fn bottom() -> Self::Sort { NaNLeast(F::NEG_INFINITY) }
+}
+
+
+impl<F : Float> AggregatorSorting for LowerBoundSorting<F> {
+    type Agg = Bounds<F>;
+    type Sort = NaNLeast<F>;
+
+    #[inline]
+    fn sort_upper(aggregator : &Bounds<F>) -> Self::Sort { NaNLeast(-aggregator.lower()) }
+
+    #[inline]
+    fn sort_lower(aggregator : &Bounds<F>) -> Self::Sort { NaNLeast(-aggregator.upper()) }
+
+    #[inline]
+    fn bottom() -> Self::Sort { NaNLeast(F::NEG_INFINITY) }
+}
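+
+// A small sanity sketch of the two orderings on `Bounds`: `UpperBoundSorting`
+// uses the bounds directly, while `LowerBoundSorting` negates them so that a
+// smaller lower bound sorts as “better”.
+#[cfg(test)]
+mod sorting_tests {
+    use super::*;
+
+    #[test]
+    fn bound_sorting_keys() {
+        let b = Bounds(-1.0f64, 2.0);
+        let NaNLeast(u) = UpperBoundSorting::<f64>::sort_upper(&b);
+        let NaNLeast(l) = LowerBoundSorting::<f64>::sort_upper(&b);
+        assert_eq!(u, 2.0);
+        assert_eq!(l, 1.0);
+    }
+}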
+
+/// Result type of [`Refiner::refine`] for a refiner producing a result of type `R` acting on
+/// an [`Aggregator`] of type `A`.
+pub enum RefinerResult<A : Aggregator, R> {
+    /// Indicates an insufficiently refined state: the [`BT`] needs to be further refined.
+    NeedRefinement,
+    /// Indicates a certain result `R`, stop refinement immediately.
+    Certain(R),
+    /// Indicates an uncertain result: continue refinement until candidates have been exhausted
+    /// or a certain result found.
+    Uncertain(A, R)
+}
+
+use RefinerResult::*;
+
+/// A `Refiner` is used to determine whether an [`Aggregator`] `A` is sufficiently refined within
+/// a [`Cube`] of a [`BT`], and in such a case, produce a desired result (e.g. a maximum value of
+/// a function).
+pub trait Refiner<F : Float, A, G, const N : usize>
+where F : Num,
+      A : Aggregator,
+      G : SupportGenerator<F, N> {
+
+    type Result : std::fmt::Debug;
+    type Sorting : AggregatorSorting<Agg = A>;
+
+    /// Determines whether `aggregator` is sufficiently refined within `domain`.
+    /// Should return a possibly refined version of the `aggregator` and an arbitrary value of
+    /// the result type of the refiner.
+    fn refine(
+        &self,
+        aggregator : &A,
+        domain : &Cube<F, N>,
+        data : &Vec<G::Id>,
+        generator : &G,
+        step : usize,
+    ) -> RefinerResult<A, Self::Result>;
+}
+
+/// Structure for tracking the refinement process in a [`BinaryHeap`].
+struct RefinementInfo<'a, F, D, A, S, RResult, const N : usize, const P : usize>
+where F : Float,
+      D : 'static,
+      A : Aggregator,
+      S : AggregatorSorting<Agg = A> {
+    cube : Cube<F, N>,
+    node : &'a mut Node<F, D, A, N, P>,
+    refiner_info : Option<(A, RResult)>,
+    sorting : PhantomData<S>,
+}
+
+impl<'a, F, D, A, S, RResult, const N : usize, const P : usize>
+RefinementInfo<'a, F, D, A, S, RResult, N, P>
+where F : Float,
+      D : 'static,
+      A : Aggregator,
+      S : AggregatorSorting<Agg = A> {
+
+    #[inline]
+    fn aggregator(&self) -> &A {
+        match self.refiner_info {
+            Some((ref agg, _)) => agg,
+            None => &self.node.aggregator,
+        }
+    }
+
+    #[inline]
+    fn sort_lower(&self) -> S::Sort {
+        S::sort_lower(self.aggregator())
+    }
+
+    #[inline]
+    fn sort_upper(&self) -> S::Sort {
+        S::sort_upper(self.aggregator())
+    }
+}
+
+impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> PartialEq
+for RefinementInfo<'a, F, D, A, S, RResult, N, P>
+where F : Float,
+      D : 'static,
+      A : Aggregator,
+      S : AggregatorSorting<Agg = A> {
+
+    #[inline]
+    fn eq(&self, other : &Self) -> bool { self.cmp(other) == Equal }
+}
+
+impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> PartialOrd
+for RefinementInfo<'a, F, D, A, S, RResult, N, P>
+where F : Float,
+      D : 'static,
+      A : Aggregator,
+      S : AggregatorSorting<Agg = A> {
+
+    #[inline]
+    fn partial_cmp(&self, other : &Self) -> Option<Ordering> { Some(self.cmp(other)) }
+}
+
+impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> Eq
+for RefinementInfo<'a, F, D, A, S, RResult, N, P>
+where F : Float,
+      D : 'static,
+      A : Aggregator,
+      S : AggregatorSorting<Agg = A> {
+}
+
+impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> Ord
+for RefinementInfo<'a, F, D, A, S, RResult, N, P>
+where F : Float,
+      D : 'static,
+      A : Aggregator,
+      S : AggregatorSorting<Agg = A> {
+
+    #[inline]
+    fn cmp(&self, other : &Self) -> Ordering {
+       let agg1 = self.aggregator();
+       let agg2 = other.aggregator();
+       match S::sort_upper(agg1).cmp(&S::sort_upper(agg2)) {
+            Equal => S::sort_lower(agg1).cmp(&S::sort_lower(agg2)),
+            order => order,
+       }
+    }
+}
+
+pub struct HeapContainer<'a, F, D, A, S, RResult, const N : usize, const P : usize>
+where F : Float,
+      D : 'static + Copy,
+      Const<P> : BranchCount<N>,
+      A : Aggregator,
+      S : AggregatorSorting<Agg = A> {
+    heap : BinaryHeap<RefinementInfo<'a, F, D, A, S, RResult, N, P>>,
+    glb : S::Sort,
+    glb_stale_counter : usize,
+    stale_insert_counter : usize,
+}
+
+impl<'a, F, D, A, S, RResult, const N : usize, const P : usize>
+HeapContainer<'a, F, D, A, S, RResult, N, P>
+where F : Float,
+      D : 'static + Copy,
+      Const<P> : BranchCount<N>,
+      A : Aggregator,
+      S : AggregatorSorting<Agg = A> {
+
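+    /// Pushes `ri` into the heap unless its upper sorting key is already below the
+    /// current approximate greatest lower bound `glb`, in which case it cannot contain
+    /// a better candidate. The lower sorting key of inserted nodes is used to update `glb`.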
+    fn push(&mut self, ri : RefinementInfo<'a, F, D, A, S, RResult, N, P>) {
+        if ri.sort_upper() >= self.glb {
+            let l = ri.sort_lower();
+            self.heap.push(ri);
+            self.glb = self.glb.max(l);
+            if self.glb_stale_counter > 0 {
+                self.stale_insert_counter += 1;
+            }
+        }
+    }
+}
+
+impl<F : Float, D : 'static + Copy, A, const N : usize, const P : usize>
+Branches<F,D,A,N,P>
+where Const<P> : BranchCount<N>,
+      A : Aggregator {
+
+    /// Stage all subnodes of `self` into the refinement queue `container`.
+    fn stage_refine<'a, S, RResult>(
+        &'a mut self,
+        domain : Cube<F,N>,
+        container : &mut HeapContainer<'a, F, D, A, S, RResult, N, P>,
+    ) where S : AggregatorSorting<Agg = A> {
+        // Insert all subnodes into the refinement heap.
+        for (node, subcube) in self.nodes_and_cubes_mut(&domain) {
+            container.push(RefinementInfo {
+                cube : subcube,
+                node : node,
+                refiner_info : None,
+                sorting : PhantomData,
+            });
+        }
+    }
+}
+
+
+impl<F : Float, D : 'static + Copy, A, const N : usize, const P : usize>
+Node<F,D,A,N,P>
+where Const<P> : BranchCount<N>,
+      A : Aggregator {
+
+    /// If `self` is a leaf node, uses the `refiner` to determine whether further subdivision
+    /// is required to get a sufficiently refined solution for the problem the refiner is used
+    /// to solve. If the refiner returns [`RefinerResult::Certain`] result, it is returned.
+    /// If [`RefinerResult::Uncertain`] is returned, the leaf is inserted back into the refinement
+    /// queue `container`. If `self` is a branch, its subnodes are staged into `container` using
+    /// [`Branches::stage_refine`].
+    fn search_and_refine<'a, 'b, R, G>(
+        &'a mut self,
+        domain : Cube<F,N>,
+        refiner : &R,
+        generator : &G,
+        container : &'b mut HeapContainer<'a, F, D, A, R::Sorting, R::Result, N, P>,
+        step : usize
+    ) -> Option<R::Result>
+    where R : Refiner<F, A, G, N>,
+          G : SupportGenerator<F, N, Id=D>,
+          G::SupportType : LocalAnalysis<F, A, N> {
+
+        // The “complex” repeated pattern matching here is forced by mutability requirements.
+
+        // Refine a leaf.
+        let res = if let NodeOption::Leaf(ref v) = &mut self.data {
+            let res = refiner.refine(&self.aggregator, &domain, v, generator, step);
+            if let NeedRefinement = res {
+                // The refiner has deemed the leaf insufficiently refined, so subdivide
+                // it and add the new nodes into the refinement priority heap.
+                // We start iterating from the end to mix support_hint a bit.
+                let mut it = v.iter().rev();
+                if let Some(&d) = it.next() {
+                    // Construct new Branches
+                    let support = generator.support_for(d);
+                    let b = Rc::new({
+                        let mut b0 = Branches::new_with(&domain, &support);
+                        b0.insert(&domain, d, Const::<1>, &support);
+                        for &d in it {
+                            let support = generator.support_for(d);
+                            // TODO: can we be smarter than just refining one level?
+                            b0.insert(&domain, d, Const::<1>, &support);
+                        }
+                        b0
+                    });
+                    // Update current node
+                    self.aggregator.summarise(b.aggregators());
+                    self.data = NodeOption::Branches(b);
+                    // The branches will be inserted into the refinement priority queue below.
+                }
+            }
+            res
+        } else {
+            NeedRefinement
+        };
+
+        if let Uncertain(agg, val) = res {
+            // The refiner gave an uncertain result. Push the leaf back into the refinement queue
+            // with the new refined aggregator and custom return value. It will be popped and
+            // returned in the loop of [`BT::search_and_refine`] when there are no unrefined
+            // candidates that could potentially be better according to their basic aggregator.
+            container.push(RefinementInfo {
+                cube : domain,
+                node : self,
+                refiner_info : Some((agg, val)),
+                sorting : PhantomData,
+            });
+            None
+        } else if let Certain(val) = res {
+            // The refiner gave a certain result so return it to allow early termination
+            Some(val)
+        } else if let NodeOption::Branches(ref mut b) = &mut self.data {
+            // Insert branches into refinement priority queue.
+            Rc::make_mut(b).stage_refine(domain, container);
+            None
+        } else {
+            None
+        }
+    }
+}
+
+/// Helper trait for implementing a refining search on a [`BT`].
+pub trait BTSearch<F, const N : usize> : BTImpl<F, N>
+where F : Float {
+
+    /// Perform a refining search on [`Self`], as determined by `refiner`. Nodes are inserted
+    /// in a priority queue and processed in the order determined by the [`AggregatorSorting`]
+    /// [`Refiner::Sorting`]. Leaf nodes are subdivided until the refiner decides that a
+    /// sufficiently refined leaf node has been found, as determined by either the refiner
+    /// returning a [`RefinerResult::Certain`] result, or a previous [`RefinerResult::Uncertain`]
+    /// result is found again at the top of the priority queue.
+    fn search_and_refine<'b, R, G>(
+        &'b mut self,
+        refiner : &R,
+        generator : &G,
+    ) -> Option<R::Result>
+    where R : Refiner<F, Self::Agg, G, N>,
+          G : SupportGenerator<F, N, Id=Self::Data>,
+          G::SupportType : LocalAnalysis<F, Self::Agg, N>;
+}
+
+// Needed to get access to a Node without a trait interface.
+macro_rules! impl_btsearch {
+    ($($n:literal)*) => { $(
+        impl<'a, M, F, D, A>
+        BTSearch<F, $n>
+        for BT<M,F,D,A,$n>
+        where //Self : BTImpl<F,$n,Data=D,Agg=A, Depth=M>,  //  <== automatically deduced to be implemented
+              M : Depth,
+              F : Float,
+              A : 'a + Aggregator,
+              D : 'static + Copy + std::fmt::Debug {
+            fn search_and_refine<'b, R, G>(
+                &'b mut self,
+                refiner : &R,
+                generator : &G,
+            ) -> Option<R::Result>
+            where R : Refiner<F, A, G, $n>,
+                  G : SupportGenerator<F, $n, Id=D>,
+                  G::SupportType : LocalAnalysis<F, A, $n> {
+                let mut container = HeapContainer {
+                    heap : BinaryHeap::new(),
+                    glb : R::Sorting::bottom(),
+                    glb_stale_counter : 0,
+                    stale_insert_counter : 0,
+                };
+                container.push(RefinementInfo {
+                    cube : self.domain,
+                    node : &mut self.topnode,
+                    refiner_info : None,
+                    sorting : PhantomData,
+                });
+                let mut step = 0;
+                while let Some(ri) = container.heap.pop() {
+                    if let Some((_, result)) = ri.refiner_info {
+                        // Terminate based on a “best possible” result.
+                        return Some(result)
+                    }
+
+                    if ri.sort_lower() >= container.glb {
+                        container.glb_stale_counter += 1;
+                        if container.stale_insert_counter + container.glb_stale_counter
+                           > container.heap.len()/2 {
+                            // GLB property no longer correct.
+                            match container.heap.iter().map(|ri| ri.sort_lower()).reduce(max) {
+                                Some(glb) => {
+                                    container.glb = glb;
+                                    container.heap.retain(|ri| ri.sort_upper() >= glb);
+                                },
+                                None => {
+                                    container.glb = R::Sorting::bottom()
+                                }
+                            }
+                            container.glb_stale_counter = 0;
+                            container.stale_insert_counter = 0;
+                        }
+                    }
+
+                    let res = ri.node.search_and_refine(ri.cube, refiner, generator,
+                                                        &mut container, step);
+                    if let Some(_) = res {
+                        // Terminate based on a certain result from the refiner
+                        return res
+                    }
+
+                    step += 1;
+                }
+                None
+            }
+        }
+    )* }
+}
+
+impl_btsearch!(1 2 3 4);
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/bisection_tree/support.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,420 @@
+
+use serde::Serialize;
+use std::ops::{MulAssign,DivAssign,Neg};
+pub use nalgebra::Const;
+use crate::types::{Float,Num};
+use crate::maputil::{array_init,map2};
+use crate::mapping::{Mapping};
+pub use crate::sets::cube::Cube;
+pub use crate::loc::Loc;
+use super::aggregator::Bounds;
+use crate::norms::{Norm, L1, L2, Linfinity};
+
+/// A trait for encoding constant values
+pub trait Constant : Copy + 'static + std::fmt::Debug + Into<Self::Type> {
+    type Type : Float;
+    fn value(&self) -> Self::Type;
+}
+
+impl<F : Float> Constant for F {
+    type Type = F;
+    #[inline]
+    fn value(&self) -> F { *self }
+}
+
+
+/// A trait for working with the supports of [`Mapping`]s.
+/// Mapping is not a super-trait to allow more general use.
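+///
+/// Illustrative sketch (not a runnable doctest): `spike` stands for some concrete
+/// [`Support`] implementation defined elsewhere.
+/// ```ignore
+/// let f = spike.shift(Loc([0.5, 0.5])).weigh(2.0);
+/// let hint : Cube<f64, 2> = f.support_hint();
+/// ```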
+pub trait Support<F : Num, const N : usize> : Sized {
+    /// Return a cube containing the support of the function.
+    /// It may be larger than the actual support, but must contain it.
+    fn support_hint(&self) -> Cube<F,N>;
+
+    /// Indicate whether `x` is in the support of the function.
+    fn in_support(&self, x : &Loc<F,N>) -> bool;
+
+    // Indicate whether `cube` is fully in the support of the function.
+    //fn fully_in_support(&self, cube : &Cube<F,N>) -> bool;
+
+    /// Return an optional hint for bisecting the support. Useful for nonsmooth
+    /// functions and finite element approximations to make the bisections compatible
+    /// with the grid or points of non-differentiability.
+    #[inline]
+    fn bisection_hint(&self, _cube : &Cube<F, N>) -> [Option<F>; N] {
+        array_init(|| None)
+    }
+
+    /// Shift the support by `x`.
+    #[inline]
+    fn shift(self, x : Loc<F, N>) -> Shift<Self, F, N> {
+        Shift { shift : x, base_fn : self }
+    }
+
+    /// Weight any corresponding [`Mapping`] by the constant `a`.
+    #[inline]
+    fn weigh<C : Constant<Type=F>>(self, a : C) -> Weighted<Self, C> {
+        Weighted { weight : a, base_fn : self }
+    }
+}
+
+/// Trait for globally analysing a certain property of a [`Mapping`].
+pub trait GlobalAnalysis<F : Num, A> {
+    fn global_analysis(&self) -> A;
+}
+
+// default impl<F, A, N, L> GlobalAnalysis<F, A, N> for L
+// where L : LocalAnalysis<F, A, N> {
+//     #[inline]
+//     fn global_analysis(&self) -> Bounds<F> {
+//         self.local_analysis(&self.support_hint())
+//     }
+// }
+
+/// Trait for locally analysing a certain property `A` of a [`Support`] or [`Mapping`]
+/// within a [`Cube<F, N>`].
+pub trait LocalAnalysis<F : Num, A, const N : usize> : GlobalAnalysis<F, A> + Support<F, N> {
+    fn local_analysis(&self, cube : &Cube<F, N>) -> A;
+}
+
+/// Trait for determining the upper and lower bounds of a [`Mapping`] on [`Loc<F, N>`].
+/// This is a blanket-implemented alias for [`GlobalAnalysis`]`<F, Bounds<F>>`.
+/// [`Mapping`] is not a supertrait, to allow flexibility in the implementation of either
+/// reference or non-reference arguments.
+pub trait Bounded<F : Float> : GlobalAnalysis<F, Bounds<F>> {
+    /// Return lower and upper bounds for the values of `self`.
+    #[inline]
+    fn bounds(&self) -> Bounds<F> {
+        self.global_analysis()
+    }
+}
+
+impl<F : Float, T : GlobalAnalysis<F, Bounds<F>>> Bounded<F> for T { }
+
+/// Shift of [`Support`] and [`Bounded`].
+#[derive(Copy,Clone,Debug,Serialize)] // Serialize! but not implemented by Loc.
+pub struct Shift<T, F, const N : usize> {
+    shift : Loc<F, N>,
+    base_fn : T,
+}
+
+impl<'a, T, V, F : Float, const N : usize> Mapping<&'a Loc<F, N>> for Shift<T,F,N>
+where T : for<'b> Mapping<&'b Loc<F,N>,Codomain=V> {
+    type Codomain = V;
+    #[inline]
+    fn value(&self, x : &'a Loc<F, N>) -> Self::Codomain {
+        self.base_fn.value(&(x - &self.shift))
+    }
+}
+
+impl<'a, T, V, F : Float, const N : usize> Mapping<Loc<F, N>> for Shift<T,F,N>
+where T : for<'b> Mapping<Loc<F,N>,Codomain=V> {
+    type Codomain = V;
+    #[inline]
+    fn value(&self, x : Loc<F, N>) -> Self::Codomain {
+        self.base_fn.value(x - &self.shift)
+    }
+}
+
+impl<'a, T, F : Float, const N : usize> Support<F,N> for Shift<T,F,N>
+where T : Support<F, N> {
+    #[inline]
+    fn support_hint(&self) -> Cube<F,N> {
+        self.base_fn.support_hint().shift(&self.shift)
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F,N>) -> bool {
+        self.base_fn.in_support(&(x - &self.shift))
+    }
+    
+    // fn fully_in_support(&self, _cube : &Cube<F,N>) -> bool {
+    //     //self.base_fn.fully_in_support(cube.shift(&vectorneg(self.shift)))
+    //     todo!("Not implemented, but not used at the moment")
+    // }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F,N>) -> [Option<F>; N] {
+        let base_hint = self.base_fn.bisection_hint(cube);
+        map2(base_hint, &self.shift, |h, s| h.map(|z| z + *s))
+    }
+
+}
+
+impl<'a, T, F : Float, const N : usize> GlobalAnalysis<F, Bounds<F>> for Shift<T,F,N>
+where T : LocalAnalysis<F, Bounds<F>, N> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        self.base_fn.global_analysis()
+    }
+}
+
+impl<'a, T, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N> for Shift<T,F,N>
+where T : LocalAnalysis<F, Bounds<F>, N> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        self.base_fn.local_analysis(&cube.shift(&(-self.shift)))
+    }
+}
+
+macro_rules! impl_shift_norm {
+    ($($norm:ident)*) => { $(
+        impl<'a, T, F : Float, const N : usize> Norm<F, $norm> for Shift<T,F,N>
+        where T : Norm<F, $norm> {
+            #[inline]
+            fn norm(&self, n : $norm) -> F {
+                self.base_fn.norm(n)
+            }
+        }
+    )* }
+}
+
+impl_shift_norm!(L1 L2 Linfinity);
+
+/// Weighting of [`Support`] and [`Bounded`].
+#[derive(Copy,Clone,Debug,Serialize)]
+pub struct Weighted<T, C : Constant> {
+    pub weight : C,
+    pub base_fn : T,
+}
+
+impl<'a, T, V, F : Float, C, const N : usize> Mapping<&'a Loc<F, N>> for Weighted<T, C>
+where T : for<'b> Mapping<&'b Loc<F,N>,Codomain=V>,
+      V : std::ops::Mul<F,Output=V>,
+      C : Constant<Type=F> {
+    type Codomain = V;
+    #[inline]
+    fn value(&self, x : &'a Loc<F, N>) -> Self::Codomain {
+        self.base_fn.value(x) * self.weight.value()
+    }
+}
+
+impl<'a, T, V, F : Float, C, const N : usize> Mapping<Loc<F, N>> for Weighted<T, C>
+where T : for<'b> Mapping<Loc<F,N>,Codomain=V>,
+      V : std::ops::Mul<F,Output=V>,
+      C : Constant<Type=F> {
+    type Codomain = V;
+    #[inline]
+    fn value(&self, x : Loc<F, N>) -> Self::Codomain {
+        self.base_fn.value(x) * self.weight.value()
+    }
+}
+
+impl<'a, T, F : Float, C, const N : usize> Support<F,N> for Weighted<T, C>
+where T : Support<F, N>,
+      C : Constant<Type=F> {
+
+    #[inline]
+    fn support_hint(&self) -> Cube<F,N> {
+        self.base_fn.support_hint()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F,N>) -> bool {
+        self.base_fn.in_support(x)
+    }
+    
+    // fn fully_in_support(&self, cube : &Cube<F,N>) -> bool {
+    //     self.base_fn.fully_in_support(cube)
+    // }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F,N>) -> [Option<F>; N] {
+        self.base_fn.bisection_hint(cube)
+    }
+}
+
+impl<'a, T, F : Float, C> GlobalAnalysis<F, Bounds<F>> for Weighted<T, C>
+where T : GlobalAnalysis<F, Bounds<F>>,
+      C : Constant<Type=F> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        let Bounds(lower, upper) = self.base_fn.global_analysis();
+        match self.weight.value() {
+            w if w < F::ZERO => Bounds(w * upper, w * lower),
+            w => Bounds(w * lower, w * upper),
+        }
+    }
+}
+
+impl<'a, T, F : Float, C, const N : usize> LocalAnalysis<F, Bounds<F>, N> for Weighted<T, C>
+where T : LocalAnalysis<F, Bounds<F>, N>,
+      C : Constant<Type=F> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        let Bounds(lower, upper) = self.base_fn.local_analysis(cube);
+        match self.weight.value() {
+            w if w < F::ZERO => Bounds(w * upper, w * lower),
+            w => Bounds(w * lower, w * upper),
+        }
+    }
+}
+
+macro_rules! make_weighted_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F : Float, T> std::ops::$trait_assign<F> for Weighted<T, F> {
+            #[inline]
+            fn $fn_assign(&mut self, t : F) {
+                self.weight.$fn_assign(t);
+            }
+        }
+
+        impl<'a, F : Float, T> std::ops::$trait<F> for Weighted<T, F> {
+            type Output = Self;
+            #[inline]
+            fn $fn(mut self, t : F) -> Self {
+                self.weight.$fn_assign(t);
+                self
+            }
+        }
+
+        impl<'a, F : Float, T> std::ops::$trait<F> for &'a Weighted<T, F>
+        where T : Clone {
+            type Output = Weighted<T, F>;
+            #[inline]
+            fn $fn(self, t : F) -> Self::Output {
+                Weighted { weight : self.weight.$fn(t), base_fn : self.base_fn.clone() }
+            }
+        }
+    }
+}
+
+make_weighted_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_weighted_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+macro_rules! impl_weighted_norm {
+    ($($norm:ident)*) => { $(
+        impl<'a, T, F : Float> Norm<F, $norm> for Weighted<T,F>
+        where T : Norm<F, $norm> {
+            #[inline]
+            fn norm(&self, n : $norm) -> F {
+                self.base_fn.norm(n) * self.weight.abs()
+            }
+        }
+    )* }
+}
+
+impl_weighted_norm!(L1 L2 Linfinity);
+
+
+/// Normalisation of [`Support`] and [`Bounded`] to L¹ norm 1.
+/// Currently only scalar-valued functions are supported.
+#[derive(Copy, Clone, Debug, Serialize, PartialEq)]
+pub struct Normalised<T>(pub T);
+
+impl<'a, T, F : Float, const N : usize> Mapping<&'a Loc<F, N>> for Normalised<T>
+where T : Norm<F, L1> + for<'b> Mapping<&'b Loc<F,N>, Codomain=F> {
+    type Codomain = F;
+    #[inline]
+    fn value(&self, x : &'a Loc<F, N>) -> Self::Codomain {
+        let w = self.0.norm(L1);
+        if w == F::ZERO { F::ZERO } else { self.0.value(x) / w }
+    }
+}
+
+impl<'a, T, F : Float, const N : usize> Mapping<Loc<F, N>> for Normalised<T>
+where T : Norm<F, L1> + for<'b> Mapping<Loc<F,N>, Codomain=F> {
+    type Codomain = F;
+    #[inline]
+    fn value(&self, x : Loc<F, N>) -> Self::Codomain {
+        let w = self.0.norm(L1);
+        if w == F::ZERO { F::ZERO } else { self.0.value(x) / w }
+    }
+}
+
+impl<'a, T, F : Float, const N : usize> Support<F,N> for Normalised<T>
+where T : Norm<F, L1> + Support<F, N> {
+
+    #[inline]
+    fn support_hint(&self) -> Cube<F,N> {
+        self.0.support_hint()
+    }
+
+    #[inline]
+    fn in_support(&self, x : &Loc<F,N>) -> bool {
+        self.0.in_support(x)
+    }
+    
+    // fn fully_in_support(&self, cube : &Cube<F,N>) -> bool {
+    //     self.0.fully_in_support(cube)
+    // }
+
+    #[inline]
+    fn bisection_hint(&self, cube : &Cube<F,N>) -> [Option<F>; N] {
+        self.0.bisection_hint(cube)
+    }
+}
+
+impl<'a, T, F : Float> GlobalAnalysis<F, Bounds<F>> for Normalised<T>
+where T : Norm<F, L1> + GlobalAnalysis<F, Bounds<F>> {
+    #[inline]
+    fn global_analysis(&self) -> Bounds<F> {
+        let Bounds(lower, upper) = self.0.global_analysis();
+        let w = self.0.norm(L1);
+        debug_assert!(w >= F::ZERO);
+        Bounds(w * lower, w * upper)
+    }
+}
+
+impl<'a, T, F : Float, const N : usize> LocalAnalysis<F, Bounds<F>, N> for Normalised<T>
+where T : Norm<F, L1> + LocalAnalysis<F, Bounds<F>, N> {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        let Bounds(lower, upper) = self.0.local_analysis(cube);
+        let w = self.0.norm(L1);
+        debug_assert!(w >= F::ZERO);
+        Bounds(w * lower, w * upper)
+    }
+}
+
+impl<'a, T, F : Float> Norm<F, L1> for Normalised<T>
+where T : Norm<F, L1> {
+    #[inline]
+    fn norm(&self, _ : L1) -> F {
+        let w = self.0.norm(L1);
+        if w == F::ZERO { F::ZERO } else { F::ONE }
+    }
+}
+
+macro_rules! impl_normalised_norm {
+    ($($norm:ident)*) => { $(
+        impl<'a, T, F : Float> Norm<F, $norm> for Normalised<T>
+        where T : Norm<F, $norm> + Norm<F, L1> {
+            #[inline]
+            fn norm(&self, n : $norm) -> F {
+                let w = self.0.norm(L1);
+                if w == F::ZERO { F::ZERO } else { self.0.norm(n) / w }
+            }
+        }
+    )* }
+}
+
+impl_normalised_norm!(L2 Linfinity);
+
+/*
+impl<F : Num, S : Support<F, N>, const N : usize> LocalAnalysis<F, NullAggregator, N> for S {
+    fn local_analysis(&self, _cube : &Cube<F, N>) -> NullAggregator { NullAggregator }
+}
+
+impl<F : Float, S : Bounded<F>, const N : usize> LocalAnalysis<F, Bounds<F>, N> for S {
+    #[inline]
+    fn local_analysis(&self, cube : &Cube<F, N>) -> Bounds<F> {
+        self.bounds(cube)
+    }
+}*/
+
+pub trait SupportGenerator<F : Float, const N : usize>
+: MulAssign<F> + DivAssign<F> + Neg<Output=Self> {
+    type Id : 'static + Copy;
+    type SupportType : 'static + Support<F, N>;
+    type AllDataIter<'a> : Iterator<Item=(Self::Id, Self::SupportType)> where Self : 'a;
+
+    /// Gives the [`Support`] for component function identified by `d`.
+    fn support_for(&self, d : Self::Id) -> Self::SupportType;
+    
+    /// Returns the number of different component functions in this generator.
+    fn support_count(&self) -> usize;
+
+    /// Iterator over all pairs of (id, support).
+    fn all_data(&self) -> Self::AllDataIter<'_>;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/bisection_tree/supportid.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,39 @@
+use std::marker::PhantomData;
+
+#[derive(Debug)]
+pub struct SupportId<S> {
+    pub id : usize,
+    _phantom : PhantomData<S>
+}
+
+// derive fails so need to do Copy and Clone manually
+
+impl<S> Clone for SupportId<S> {
+    fn clone(&self) -> Self {
+        SupportId { id : self.id, _phantom : PhantomData }
+    }
+}
+
+impl<S> Copy for SupportId<S> {}
+
+impl<S> From<u32> for SupportId<S> {
+    #[inline]
+    fn from(id : u32) -> SupportId<S> {
+        SupportId { id : id as usize, _phantom : PhantomData }
+    }
+}
+
+impl<S> From<usize> for SupportId<S> {
+    #[inline]
+    fn from(id : usize) -> SupportId<S> {
+        SupportId { id : id, _phantom : PhantomData }
+    }
+}
+
+impl<S> Into<usize> for SupportId<S> {
+    #[inline]
+    fn into(self) -> usize {
+        self.id
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/coefficients.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,26 @@
+//! This module implements some arithmetic functions that are valid in a “const context”, i.e.,
+//! can be computed at compile-time.
+
+/// Calculate $n!$.
+/// (The version in the crate `num_integer` doesn't currently work in a `const` context.)
+pub const fn factorial(n : usize) -> usize {
+    const fn f(i : usize, a : usize) -> usize {
+        if i==0 { a } else { f(i-1, i*a) }
+    }
+    f(n, 1)
+}
+
+/// Calculate $\nchoosek{n}{k}$.
+/// (The version in the crate `num_integer` doesn't currently work in a `const` context.)
+pub const fn binomial(n : usize, k : usize) -> usize {
+    factorial(n)/(factorial(n-k)*factorial(k))
+}
+
+/// Calculate $n^k$.
+/// (The version in the crate `num_integer` doesn't currently work in a `const` context.)
+pub const fn pow(n : usize, k : usize) -> usize {
+    const fn pow_accum(n : usize, k : usize, accum : usize) -> usize {
+        if k==0 { accum } else { pow_accum(n, k-1, accum*n) }
+    }
+    pow_accum(n, k, 1)
+}
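+
+// A small sanity sketch: the functions above can be evaluated in `const` position.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn const_arithmetic() {
+        const F5 : usize = factorial(5);
+        const B52 : usize = binomial(5, 2);
+        const P210 : usize = pow(2, 10);
+        assert_eq!((F5, B52, P210), (120, 10, 1024));
+    }
+}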
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/error.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,8 @@
+
+use std::error::Error;
+
+pub type DynResult<T> = Result<T, Box<dyn Error>>;
+pub type DynError = DynResult<()>;
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/fe_model.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,4 @@
+//! Simple planar (1D and 2D) finite element discretisation on a box.
+
+pub mod base;
+pub mod p2_local_model;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/fe_model/base.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,16 @@
+//! Base types for simple (finite) element models.
+
+/// A local model can be evaluated for value and differential
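+///
+/// Illustrative sketch (not a runnable doctest): a hypothetical affine 1D model
+/// `a·x + b`, with imports elided and `Affine1` defined only for exposition.
+/// ```ignore
+/// struct Affine1 { a : f64, b : f64 }
+/// impl LocalModel<Loc<f64, 1>, f64> for Affine1 {
+///     fn value(&self, &Loc([x]) : &Loc<f64, 1>) -> f64 { self.a * x + self.b }
+///     fn differential(&self, _ : &Loc<f64, 1>) -> Loc<f64, 1> { [self.a].into() }
+/// }
+/// ```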
+pub trait LocalModel<Domain, Codomain> {
+    /// Get the value of the model at `x`
+    fn value(&self, x : &Domain) -> Codomain;
+    /// Get the differential of the model at `x`
+    fn differential(&self, x : &Domain) -> Domain;
+}
+
+/// A real local model is a minimisable [`LocalModel`].
+pub trait RealLocalModel<S, Domain, Codomain> : LocalModel<Domain, Codomain> {
+    /// Find a (minimiser, minimum) pair for the model within `el`, which is
+    /// typically a simplex subset of `Domain`.
+    fn minimise(&self, el : &S) -> (Domain, Codomain);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/fe_model/p2_local_model.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,446 @@
+use crate::types::*;
+use crate::loc::Loc;
+use crate::sets::{Set,NPolygon,SpannedHalfspace};
+use crate::linsolve::*;
+use crate::norms::Dot;
+use super::base::{LocalModel,RealLocalModel};
+use crate::sets::Cube;
+use numeric_literals::replace_float_literals;
+
+pub struct Simplex<F : Float, const N : usize, const D : usize>(pub [Loc<F, N>; D]);
+pub type PlanarSimplex<F> = Simplex<F,2,3>;
+pub type RealInterval<F> = Simplex<F,1,2>;
+
+#[inline]
+#[replace_float_literals(F::cast_from(literal))]
+pub(crate) fn midpoint<F : Float, const N : usize>(a : &Loc<F,N>, b : &Loc<F,N>) -> Loc<F, N> {
+    (a+b)/2.0
+}
+
+impl<'a, F : Float> Set<Loc<F,1>> for RealInterval<F> {
+    #[inline]
+    fn contains(&self, &Loc([x]) : &Loc<F,1>) -> bool {
+        let &[Loc([x0]), Loc([x1])] = &self.0;
+        (x0 < x && x < x1) || (x1 < x && x < x0)
+    }
+}
+
+impl<'a, F : Float> Set<Loc<F,2>> for PlanarSimplex<F> {
+    #[inline]
+    fn contains(&self, x : &Loc<F,2>) -> bool {
+        let &[x0, x1, x2] = &self.0;
+        NPolygon([[x0, x1].spanned_halfspace(),
+                  [x1, x2].spanned_halfspace(),
+                  [x2, x0].spanned_halfspace()]).contains(x)
+    }
+}
+
+trait P2Powers {
+    type Output;
+    type Diff;
+    type Full;
+    fn p2powers(&self) -> Self::Output;
+    fn p2powers_full(&self) -> Self::Full;
+    fn p2powers_diff(&self) -> Self::Diff;
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> P2Powers for Loc<F, 1> {
+    type Output = Loc<F, 1>;
+    type Full = Loc<F, 3>;
+    type Diff = Loc<Loc<F, 1>, 1>;
+
+    #[inline]
+    fn p2powers(&self) -> Self::Output {
+        let &Loc([x0]) = self;
+        [x0*x0].into()
+    }
+
+    #[inline]
+    fn p2powers_full(&self) -> Self::Full {
+        let &Loc([x0]) = self;
+        [1.0, x0, x0*x0].into()
+    }
+
+    #[inline]
+    fn p2powers_diff(&self) -> Self::Diff {
+        let &Loc([x0]) = self;
+        [[x0+x0].into()].into()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> P2Powers for Loc<F, 2> {
+    type Output = Loc<F, 3>;
+    type Full = Loc<F, 6>;
+    type Diff = Loc<Loc<F, 3>, 2>;
+
+    #[inline]
+    fn p2powers(&self) -> Self::Output {
+        let &Loc([x0, x1]) = self;
+        [x0*x0, x0*x1, x1*x1].into()
+    }
+
+    #[inline]
+    fn p2powers_full(&self) -> Self::Full {
+        let &Loc([x0, x1]) = self;
+        [1.0, x0, x1, x0*x0, x0*x1, x1*x1].into()
+    }
+
+    #[inline]
+    fn p2powers_diff(&self) -> Self::Diff {
+        let &Loc([x0, x1]) = self;
+        [[x0+x0, x1, 0.0].into(), [0.0, x0, x1+x1].into()].into()
+    }
+}
+
+pub trait P2Model<F : Num, const N : usize> {
+    type Model : LocalModel<Loc<F,N>,F>;
+    fn p2_model<G : Fn(&Loc<F, N>) -> F>(&self, g : G) -> Self::Model;
+}
+
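+/// A second-degree local polynomial model: the value at `x` is
+/// `a0 + a1·x + a2·p2powers(x)`, with `a0` the constant, `a1` the linear, and `a2` the
+/// quadratic-monomial coefficients.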
+pub struct P2LocalModel<F : Num, const N : usize, const E : usize, const V : usize, const Q : usize> {
+    a0 : F,
+    a1 : Loc<F, N>,
+    a2 :  Loc<F, E>,
+    //node_values : Loc<F, V>,
+    //edge_values : Loc<F, Q>,
+}
+
+//
+// 1D planar model construction
+//
+
+impl<F : Float> RealInterval<F> {
+    #[inline]
+    fn midpoints(&self) -> [Loc<F, 1>; 1] {
+        let [ref n0, ref n1] = &self.0;
+        let n01 = midpoint(n0, n1);
+        [n01]
+    }
+}
+
+impl<F : Float> P2LocalModel<F, 1, 1, 2, 0> {
+    #[inline]
+    pub fn new(
+        &[n0, n1, n01] : &[Loc<F, 1>; 3],
+        &[v0, v1, v01] : &[F; 3],
+    ) -> Self {
+        let p = move |x : &Loc<F, 1>, v : F| {
+            let Loc([c, d, e]) = x.p2powers_full();
+            [c, d, e, v]
+        };
+        let [a0, a1, a11] = linsolve([
+            p(&n0, v0),
+            p(&n1, v1),
+            p(&n01, v01)
+        ]);
+        P2LocalModel {
+            a0 : a0,
+            a1 : [a1].into(),
+            a2 : [a11].into(),
+            //node_values : [v0, v1].into(),
+            //edge_values: [].into(),
+        }
+    }
+}
+
+impl<F : Float> P2Model<F,1> for RealInterval<F> {
+    type Model = P2LocalModel<F, 1, 1, 2, 0>;
+
+    #[inline]
+    fn p2_model<G : Fn(&Loc<F, 1>) -> F>(&self, g : G) -> Self::Model {
+        let [n01] =  self.midpoints();
+        let [n0, n1] = self.0;
+        let vals = [g(&n0), g(&n1), g(&n01)];
+        let nodes = [n0, n1, n01];
+        Self::Model::new(&nodes, &vals)
+    }
+}
+
+//
+// 2D planar model construction
+//
+
+impl<F : Float> PlanarSimplex<F> {
+    #[inline]
+    fn midpoints(&self) -> [Loc<F, 2>; 3] {
+        let [ref n0, ref n1, ref n2] = &self.0;
+        let n01 = midpoint(n0, n1);
+        let n12 = midpoint(n1, n2);
+        let n20 = midpoint(n2, n0);
+        [n01, n12, n20]
+    }
+}
+
+impl<F : Float> P2LocalModel<F, 2, 3, 3, 3> {
+    #[inline]
+    pub fn new(
+        &[n0, n1, n2, n01, n12, n20] : &[Loc<F, 2>; 6],
+        &[v0, v1, v2, v01, v12, v20] : &[F; 6],
+    ) -> Self {
+        let p = move |x : &Loc<F,2>, v :F| {
+            let Loc([c, d, e, f, g, h]) = x.p2powers_full();
+            [c, d, e, f, g, h, v]
+        };
+        let [a0, a1, a2, a11, a12, a22] = linsolve([
+            p(&n0, v0),
+            p(&n1, v1),
+            p(&n2, v2),
+            p(&n01, v01),
+            p(&n12, v12),
+            p(&n20, v20),
+        ]);
+        P2LocalModel {
+            a0 : a0,
+            a1 : [a1, a2].into(),
+            a2 : [a11, a12, a22].into(),
+            //node_values : [v0, v1, v2].into(),
+            //edge_values: [v01, v12, v20].into(),
+        }
+    }
+}
+
+impl<F : Float> P2Model<F,2> for PlanarSimplex<F> {
+    type Model = P2LocalModel<F, 2, 3, 3, 3>;
+
+    #[inline]
+    fn p2_model<G : Fn(&Loc<F, 2>) -> F>(&self, g : G) -> Self::Model {
+        let midpoints = self.midpoints();
+        let [ref n0, ref n1, ref n2] = self.0;
+        let [ref n01, ref n12, ref n20] = midpoints;
+        let vals = [g(n0), g(n1), g(n2), g(n01), g(n12), g(n20)];
+        let nodes = [*n0, *n1, *n2, *n01, *n12, *n20];
+        Self::Model::new(&nodes, &vals)
+    }
+}
+
+macro_rules! impl_local_model {
+    ($n:literal, $e:literal, $v:literal, $q:literal) => {
+        impl<F : Float> LocalModel<Loc<F, $n>, F> for P2LocalModel<F, $n, $e, $v, $q> {
+            #[inline]
+            fn value(&self, x : &Loc<F,$n>) -> F {
+                self.a0 + x.dot(&self.a1) + x.p2powers().dot(&self.a2)
+            }
+
+            #[inline]
+            fn differential(&self, x : &Loc<F,$n>) -> Loc<F,$n> {
+                self.a1 + x.p2powers_diff().map(|di| di.dot(&self.a2))
+            }
+        }
+    }
+}
+
+impl_local_model!(1, 1, 2, 0);
+impl_local_model!(2, 3, 3, 3);
+
+
+//
+// Minimisation
+//
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> P2LocalModel<F, 1, 1, 2, 0> {
+    #[inline]
+    fn minimise_edge(&self, x0 : Loc<F, 1>, x1 : Loc<F,1>) -> (Loc<F,1>, F) {
+        let &P2LocalModel{
+            a1 : Loc([a1]),
+            a2 : Loc([a11]),
+            //node_values : Loc([v0, v1]),
+            ..
+         } = self;
+        // We do this in cases, first trying for an interior solution, then edges.
+        // For interior solution, first check determinant; no point trying if non-positive
+        if a11 > 0.0 {
+            // An interior solution x[1] has to satisfy
+            //   2a₁₁*x[1] + a₁ =0
+            // This gives
+            let t = -a1/(2.0*a11);
+            let (Loc([t0]), Loc([t1])) = (x0, x1);
+            if (t0 <= t && t <= t1) || (t1 <= t && t <= t0) {
+                let x = [t].into();
+                let v = self.value(&x);
+                return (x, v)
+            }
+        }
+
+        let v0 = self.value(&x0);
+        let v1 = self.value(&x1);
+        if v0 < v1 { (x0, v0) } else { (x1, v1) }
+    }
+}
+
+impl<'a, F : Float> RealLocalModel<RealInterval<F>,Loc<F,1>,F>
+for P2LocalModel<F, 1, 1, 2, 0> {
+    #[inline]
+    fn minimise(&self, &Simplex([x0, x1]) : &RealInterval<F>) -> (Loc<F,1>, F) {
+        self.minimise_edge(x0, x1)
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Float> P2LocalModel<F, 2, 3, 3, 3> {
+    /// Minimise the 2D model on the edge {x0 + t(x1 - x0) | t ∈ [0, 1] }
+    #[inline]
+    fn minimise_edge(&self, x0 : &Loc<F,2>, x1 : &Loc<F,2>/*, v0 : F, v1 : F*/) -> (Loc<F,2>, F) {
+        let &P2LocalModel {
+            a0,
+            a1 : Loc([a1, a2]),
+            a2 : Loc([a11, a12, a22]),
+            ..
+         } = self;
+        let &Loc([x00, x01]) = x0;
+        let d@Loc([d0, d1]) = x1 - x0;
+        let b0 = a0 + a1*x00 + a2*x01 + a11*x00*x00 + a12*x00*x01 + a22*x01*x01;
+        let b1 = a1*d0 + a2*d1 + 2.0*a11*d0*x00 + a12*(d0*x01 + d1*x00) + 2.0*a22*d1*x01;
+        let b11 = a11*d0*d0 + a12*d0*d1 + a22*d1*d1;
+        let edge_1d_model = P2LocalModel {
+            a0 : b0,
+            a1 : Loc([b1]),
+            a2 : Loc([b11]),
+            //node_values : Loc([v0, v1]),
+        };
+        let (Loc([t]), v) = edge_1d_model.minimise_edge(0.0.into(), 1.0.into());
+        (x0 + d*t, v)
+    }
+}
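+
+// Derivation of the edge restriction above (added commentary): substituting
+// x = x0 + t·d with d = x1 − x0 into the quadratic model
+//     q(x) = a0 + a1·x[0] + a2·x[1] + a11·x[0]² + a12·x[0]·x[1] + a22·x[1]²
+// and collecting powers of t yields the one-dimensional model b0 + b1·t + b11·t²
+// with exactly the coefficients computed in `minimise_edge`, which is then minimised
+// over t ∈ [0, 1].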
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float> RealLocalModel<PlanarSimplex<F>,Loc<F,2>,F>
+for P2LocalModel<F, 2, 3, 3, 3> {
+    #[inline]
+    fn minimise(&self, el : &PlanarSimplex<F>) -> (Loc<F,2>, F) {
+        let &P2LocalModel {
+            a1 : Loc([a1, a2]),
+            a2 : Loc([a11, a12, a22]),
+            //node_values : Loc([v0, v1, v2]),
+            ..
+         } = self;
+
+        // We do this in cases, first trying for an interior solution, then edges.
+        // For interior solution, first check determinant; no point trying if non-positive
+        let r = 2.0*(a11*a22-a12*a12);
+        if r > 0.0 {
+            // An interior solution (x[1], x[2]) has to satisfy
+            //   2a₁₁*x[1] + 2a₁₂*x[2]+a₁ =0 and 2a₂₂*x[1] + 2a₁₂*x[1]+a₂=0
+            // This gives
+            let x = [(a22*a1-a12*a2)/r, (a12*a1-a11*a2)/r].into();
+            if el.contains(&x) {
+                return (x, self.value(&x))
+            }
+        }
+
+        let &[ref x0, ref x1, ref x2] = &el.0;
+        let mut min_edge = self.minimise_edge(x0, x1);
+        let more_edge = [self.minimise_edge(x1, x2),
+                         self.minimise_edge(x2, x0)];
+        
+        for edge in more_edge {
+            if edge.1 < min_edge.1 {
+                min_edge = edge;
+            }
+        }
+
+        min_edge
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<'a, F : Float> RealLocalModel<Cube<F, 2>,Loc<F,2>,F>
+for P2LocalModel<F, 2, 3, 3, 3> {
+    #[inline]
+    fn minimise(&self, el : &Cube<F, 2>) -> (Loc<F,2>, F) {
+        let &P2LocalModel {
+            a1 : Loc([a1, a2]),
+            a2 : Loc([a11, a12, a22]),
+            //node_values : Loc([v0, v1, v2]),
+            ..
+         } = self;
+
+        // We do this in cases, first trying for an interior solution, then edges.
+        // For interior solution, first check determinant; no point trying if non-positive
+        let r = 2.0*(a11*a22-a12*a12);
+        if r > 0.0 {
+            // An interior solution (x[1], x[2]) has to satisfy
+            //   2a₁₁*x[1] + 2a₁₂*x[2]+a₁ =0 and 2a₂₂*x[1] + 2a₁₂*x[1]+a₂=0
+            // This gives
+            let x = [(a22*a1-a12*a2)/r, (a12*a1-a11*a2)/r].into();
+            if el.contains(&x) {
+                return (x, self.value(&x))
+            }
+        }
+
+        let [x0, x1, x2, x3] = el.corners();
+        let mut min_edge = self.minimise_edge(&x0, &x1);
+        let more_edge = [self.minimise_edge(&x1, &x2),
+                         self.minimise_edge(&x2, &x3),
+                         self.minimise_edge(&x3, &x0)];
+
+        
+        for edge in more_edge {
+            if edge.1 < min_edge.1 {
+                min_edge = edge;
+            }
+        }
+
+        min_edge
+    }
+}
+
+// impl<F : Float> P2Model<F, 1> for Cube<F, 1> {
+//     type Model = CubeP2LocalModel<F,1,1,1>;
+
+//     fn p2_model<G : FnMut(&Loc<F, 1>) -> F>(&self, fun : &G) -> Self::Model {
+//         CubeP2LocalModel([Simplex(self.corners()).p2_model(fun)])
+//     }
+// }
+
+// impl<F : Float> P2Model<F, 2> for Cube<F, 2> {
+//     type Model = CubeP2LocalModel<F,2,3,2>;
+
+//     fn p2_model<G : FnMut(&Loc<F, 2>) -> F>(&self, fun : &G) -> Self::Model {
+//         let [a, b, c, d] = self.corners();
+//         CubeP2LocalModel([Simplex([a, b, c]).p2_model(fun),
+//                           Simplex([b, c, d]).p2_model(fun)])
+//     }
+// }
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn p2_model_1d_test() {
+        let vertices = [Loc([0.0]), Loc([1.0])];
+        let domain = Simplex(vertices);
+        // A simple quadratic function for which the approximation is exact on reals,
+        // and appears exact on f64 as well.
+        let f = |&Loc([x]) : &Loc<f64, 1>| x*x + x + 1.0;
+        let model = domain.p2_model(f);
+        let xs = [Loc([0.5]), Loc([0.25])];
+
+        for x in vertices.iter().chain(xs.iter()) {
+            assert_eq!(model.value(&x), f(&x));
+        }
+
+        assert_eq!(model.minimise(&domain), (Loc([0.0]), 1.0));
+    }
+
+    #[test]
+    fn p2_model_2d_test() {
+        let vertices = [Loc([0.0, 0.0]), Loc([1.0, 0.0]), Loc([1.0, 1.0])];
+        let domain = Simplex(vertices);
+        // A simple quadratic function for which the approximation is exact on reals,
+        // and appears exact on f64 as well.
+        let f = |&Loc([x, y]) : &Loc<f64, 2>| x*x + x*y + x - y + 1.0;
+        let model = domain.p2_model(f);
+        let xs = [Loc([0.5, 0.5]), Loc([0.25, 0.25])];
+
+        for x in vertices.iter().chain(xs.iter()) {
+            assert_eq!(model.value(&x), f(&x));
+        }
+
+        assert_eq!(model.minimise(&domain), (Loc([1.0, 1.0]), 3.0));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/iter.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,473 @@
+//! Iteration utilities; [stateful][StatefulIterator] and
+//! [restartable][RestartableIterator] iterators.
+
+use crate::types::Integer;
+use std::marker::PhantomData;
+
+/// Trait for iterators whose current state can be queried without advancing.
+pub trait StatefulIterator : Iterator {
+    /// Return current value of iterator. If the iterator has not yet started,
+    /// `None` should be returned.
+    fn current(&self) -> Option<Self::Item>;
+}
+
+/// Iterators that can be restarted, and whose current state can be queried without
+/// advancing to the next state.
+pub trait RestartableIterator : StatefulIterator {
+    /// Restart the iterator. Return the first item.
+    fn restart(&mut self) -> Option<Self::Item>;
+}
+
+/// A variant of `std::iter::Map` that works for methods instead of closures,
+/// so can be returned from functions without `dyn`.
+/// Use [`Mappable::mapF`] to create this iterator.
+pub struct MapF<I,J> where I : Iterator {
+    iter : I,
+    func : fn(I::Item) -> J,
+}
+
+impl<I,J> Iterator for MapF<I,J> where I : Iterator {
+    type Item = J;
+    fn next(&mut self) -> Option<J> {
+        self.iter.next().map(|item| (self.func)(item))
+    }
+}
+
+/// A variant of [`MapF`] with extra data for the function stored as a reference.
+/// See also [`MapZ`] and use [`Mappable::mapX`] to create this iterator.
+pub struct MapX<'a,I,T,J> where I : Iterator {
+    iter : I,
+    data : &'a T,
+    func : fn(&'a T, I::Item) -> J,
+}
+
+impl<'a,I,T,J> Iterator for MapX<'a,I,T,J> where I : Iterator {
+    type Item = J;
+    fn next(&mut self) -> Option<J> {
+        self.iter.next().map(|item| (self.func)(self.data, item))
+    }
+}
+
+/// A variant of [`MapF`] and [`MapX`] with extra data for the function stored in the struct.
+/// Use [`Mappable::mapZ`] to create this iterator.
+pub struct MapZ<I,T,J> where I : Iterator {
+    iter : I,
+    data : T,
+    func : fn(&T, I::Item) -> J,
+}
+
+impl<'a,I,T,J> Iterator for MapZ<I,T,J> where I : Iterator {
+    type Item = J;
+    fn next(&mut self) -> Option<J> {
+        self.iter.next().map(|item| (self.func)(&self.data, item))
+    }
+}
+
+/// A variant of `std::iter::FilterMap` that works for methods instead of closures,
+/// so can be returned from functions without `dyn`.
+pub struct FilterMapX<'a,I,T,J> where I : Iterator {
+    iter : I,
+    data : &'a T,
+    func : fn(&'a T, I::Item) -> Option<J>,
+}
+
+impl<'a,I,T,J> Iterator for FilterMapX<'a,I,T,J> where I : Iterator {
+    type Item = J;
+    fn next(&mut self) -> Option<J> {
+        while let Some(item) = self.iter.next() {
+            let v = (self.func)(self.data, item);
+            if v.is_some() {
+                return v
+            }
+        }
+        None
+    }
+}
+
+/// Helper for [`Mappable::filter_zip`].
+pub struct FilterZip<I, J>
+where I : Iterator,
+      J : Iterator<Item=bool> {
+    iter : I,
+    filter : J,
+}
+
+impl<I,J> Iterator for FilterZip<I, J>
+where I : Iterator,
+      J : Iterator<Item=bool> {
+    type Item = I::Item;
+    #[inline]
+    fn next(&mut self) -> Option<I::Item> {
+        while let (v@Some(..), Some(filt)) = (self.iter.next(), self.filter.next()) {
+            if filt {
+                return v
+            }
+        }
+        None
+    }
+}
+
+/// This trait implements several mapping methods on iterators.
+pub trait Mappable : Iterator + Sized {
+    /// Apply `func` to the output of the iterator.
+    #[allow(non_snake_case)]
+    fn mapF<J>(self, func : fn(Self::Item) -> J) -> MapF<Self,J>;
+    /// Apply `func` to the output of the iterator, passing also `d` to it.
+    #[allow(non_snake_case)]
+    fn mapX<'a,T,J>(self, d : &'a T, func : fn(&'a T, Self::Item) -> J) -> MapX<'a,Self,T,J>;
+    /// Apply `func` to the output of the iterator, passing also `d` to it.
+    #[allow(non_snake_case)]
+    fn mapZ<T,J>(self, d : T, func : fn(&T, Self::Item) -> J) -> MapZ<Self,T,J>;
+    /// Apply `func` to the output of the iterator, filtering based on the result.
+    /// The data `d` is passed to `func`.
+    #[allow(non_snake_case)]
+    fn filter_mapX<'a,T,J>(self, d : &'a T, func : fn(&'a T, Self::Item) -> Option<J>) -> FilterMapX<'a,Self,T,J>;
+
+    /// Filter `self` based on another iterator `iter` with `bool` items.
+    fn filter_zip<J>(self, filter : J) -> FilterZip<Self, J> where J : Iterator<Item=bool>;
+}
+
+impl<I> Mappable for I where I : Iterator + Sized {
+    #[inline]
+    fn mapF<J>(self, func : fn(Self::Item) -> J) -> MapF<Self,J> {
+        MapF{ iter : self, func : func }
+    }
+    #[inline]
+    fn mapX<'a,T,J>(self, d : &'a T, func : fn(&'a T, Self::Item) -> J) -> MapX<'a,Self,T,J> {
+        MapX{ iter : self, data : d, func : func }
+    }
+    #[inline]
+    fn mapZ<T,J>(self, d : T, func : fn(&T, Self::Item) -> J) -> MapZ<Self,T,J> {
+        MapZ{ iter : self, data : d, func : func }
+    }
+    #[inline]
+    fn filter_mapX<'a,T,J>(self, d : &'a T, func : fn(&'a T, Self::Item) -> Option<J>)
+    -> FilterMapX<'a,Self,T,J> {
+        FilterMapX{ iter : self, data : d, func : func }
+    }
+
+    #[inline]
+    fn filter_zip<J>(self, filter : J) -> FilterZip<Self, J>
+    where J : Iterator<Item=bool> {
+        FilterZip{ iter : self, filter : filter }
+    }
+}
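+
+// A minimal usage sketch (added commentary): since the combinators above take function
+// pointers rather than closures, they can appear in return types without `dyn`, e.g.
+//
+//     fn doubled(v : &[usize]) -> MapF<std::slice::Iter<'_, usize>, usize> {
+//         v.iter().mapF(|&x| x * 2)
+//     }
+//
+//     // Keep only the elements at odd positions: yields [1, 3, 5].
+//     let odd : Vec<_> = (0..6).filter_zip((0..).map(|i| i % 2 == 1)).collect();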
+
+/*
+/// This is a helper struct returning iterators with a lifetime from trait functions.
+/// `T` will typically be `&'a Self` or some other reference that can only be
+/// specified in the trait function declaration without having to parametrise the entire
+/// trait with `'a`. The parameter `ImplData` is supposed to be a unique type that identifies
+/// the iterator for the implementation of the trait. Then `Iterator` is to be implemented
+/// for `ProxyIterator<T, ImplState>`. For example:
+/// TODO: not updated.
+/// ```
+/// trait Test {
+///    type ImplData;
+///    fn returns_iterator<'a>(&'a self) -> ProxyIterator<&'a Self, Self::ImplData>;
+/// }
+/// struct MyIndex{
+///     index : usize,
+/// }
+/// impl<const N : usize> Test for [f64; N] {
+///     type ImplData = MyIndex;
+///    fn returns_iterator<'a>(&'a self) -> ProxyIterator<&'a Self, MyIndex> {
+///        TraitIterator::new(self, MyIndex{ index : 0 })
+///    }
+/// }
+/// impl<'a, const N : usize> Iterator for `TraitIterator<&'a [f64; N], MyIndex> {
+///     type Item = f64;
+///     fn next(&mut self) -> Option<f64> {
+///         if self.state.index < N {
+///             let v = self.data[self.state.index];
+///             self.state.index += 1;
+///             Some(v)
+///         } else {
+///             None
+///         }
+///     }
+/// }
+/// ```
+*/
+
+macro_rules! impl_trait_iterator {
+    ($name:ident, $proxyname:ident, &'a $($mut:ident)?, $($itemref:tt)*) => {
+        pub trait $name {
+            type Item : 'static;
+            type Data : ?Sized;
+            fn next_with<'a>(&mut self, data : &'a $($mut)? Self::Data) -> Option<$($itemref)* Self::Item>;
+        }
+
+        pub struct $proxyname<'a, Impl>
+        where Impl : $name {
+            pub data :  &'a $($mut)? Impl::Data,
+            pub iter : Impl,
+        }
+
+        impl<'a, Impl> $proxyname<'a, Impl>
+        where Impl : $name {
+            #[inline]
+            pub fn new(data : &'a $($mut)? Impl::Data, iter : Impl) -> $proxyname<'a, Impl> {
+                $proxyname{ data : data, iter : iter}
+            }
+        }
+
+        impl<'a, Impl> Iterator for $proxyname<'a, Impl>
+        where Impl : $name {
+            type Item = $($itemref)* Impl::Item;
+            #[inline]
+            fn next(&mut self) -> Option<Self::Item> {
+                self.iter.next_with(self.data)
+            }
+        }
+    }
+}
+
+impl_trait_iterator!(TraitIterator,    ProxyIterator,    &'a,     );
+impl_trait_iterator!(TraitIteratorRef, ProxyIteratorRef, &'a,     &'a);
+// TODO: Lifetime errors. Probably due to potential overlapping mutable borrows.
+// So should use some unsafe pointer hacks or so.
+//impl_trait_iterator!(TraitIteratorMut, ProxyIteratorMut, &'a mut, &'a mut);
+
+/*
+pub struct FlattenTI<SI>
+where SI : TraitIterator,
+      SI::Item : Iterator {
+    iter_iter : SI,
+    element_iter : Option<SI::Item>,
+}
+
+impl<SI> FlattenTI<SI>
+where SI : TraitIterator,
+      SI::Item : Iterator {
+    fn new(iter : SI) -> Self {
+        FlattenTI{ iter_iter : iter, element_iter : None  }
+    }
+}
+
+impl<SI : TraitIterator> TraitIterator for FlattenTI<SI>
+where SI::Item : Iterator {
+    type Item = <SI::Item as Iterator>::Item;
+    type Data = SI::Data;
+
+    fn next_with<'a>(&mut self, data : &'a $($mut)? SI::Data) -> Option<Self::Item> {
+        loop {
+            if let Some(ei) = self.element_iter {
+                if let n @ Some(_) = ei.next() {
+                    return n
+                }
+            } else {
+                self.element_iter = self.iter_iter.next_with(data);
+                if let None = self.element_iter {
+                    return None
+                }
+            }
+        }
+    }
+}
+*/
+
+/*
+pub enum EitherIterator<T, L : Iterator<Item=T>, R : Iterator<Item=T>> {
+    Left(L),
+    Right(R)
+}
+
+impl<T, L : Iterator<Item=T>, R : Iterator<Item=T>> Iterator for EitherIterator<T, L, R> {
+    type Item = T;
+    #[inline]
+    fn next(&mut self) -> Option<T> {
+        match self {
+            EitherIterator::Left(ref l)  => { l.next() }
+            EitherIterator::Right(ref r) => { r.next() }
+        }
+    }
+}
+*/
+
+/// A [`RestartableIterator`] over the range `[start, end)`.
+#[derive(Clone,Copy,Debug)]
+pub struct RangeIter<T> {
+    start : T,
+    end : T,
+    value : Option<T>,
+}
+
+pub trait StepNext : Ord + Copy {
+    fn step_next(&self) -> Self;
+    fn dist_to(&self, other : &Self) -> usize;
+}
+
+impl<T : Integer + Into<usize>> StepNext for T {
+    #[inline]
+    fn step_next(&self) -> Self { *self + Self::ONE }
+    
+    #[inline]
+    fn dist_to(&self, other : &Self) -> usize {
+        if *other >= *self {
+             (*other - *self).into()
+        } else {
+            0
+        }
+    }
+}
+
+impl<T : StepNext> RangeIter<T> {
+    /// Create a new [`RangeIter`].
+    pub fn new(start : T, end : T) -> Self {
+        RangeIter{ start : start.min(end), end : end, value : None }
+    }
+}
+
+impl<T : StepNext> Iterator for RangeIter<T> {
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.value {
+            None => {
+                if self.start < self.end {
+                    self.value = Some(self.start);
+                    self.value
+                } else {
+                    None
+                }
+            }
+            Some(v) => {
+                let vn = v.step_next();
+                if vn < self.end {
+                    self.value = Some(vn);
+                    self.value
+                } else {
+                    None
+                }
+            }
+        }
+    }
+}
+
+impl<T : StepNext> ExactSizeIterator for RangeIter<T> {
+    #[inline]
+    fn len(&self) -> usize { self.start.dist_to(&self.end) }
+}
+
+impl<T : StepNext> StatefulIterator for RangeIter<T> {
+    #[inline]
+    fn current(&self) -> Option<Self::Item> {
+        self.value
+    }
+}
+
+impl<T : StepNext> RestartableIterator for RangeIter<T> {
+    fn restart(&mut self) -> Option<Self::Item> {
+        if self.start < self.end {
+            self.value = Some(self.start);
+            self.value
+        } else {
+            None
+        }
+    }
+}
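+
+// A minimal usage sketch (added commentary):
+//
+//     let mut it = RangeIter::new(0usize, 3);
+//     assert_eq!(it.current(), None);     // not started yet
+//     assert_eq!(it.next(), Some(0));
+//     assert_eq!(it.next(), Some(1));
+//     assert_eq!(it.restart(), Some(0));  // back to the first item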
+
+
+pub struct RestartableIter<'a, T> {
+    inner : RangeIter<*const T>,
+    _phantom : PhantomData<&'a[T]>
+}
+
+impl<T> StepNext for *const T {
+    #[inline]
+    fn step_next(&self) -> Self { unsafe { self.add(1) } }
+    
+    #[inline]
+    fn dist_to(&self, other : &Self) -> usize {
+        unsafe { other.offset_from(*self).max(0) as usize }
+    }
+}
+
+impl<'a, T : 'a> RestartableIter<'a, T> {
+    fn map_result(result : Option<*const T>) -> Option<&'a T> {
+        match result {
+            None        => { None }
+            Some(ptr)   => { Some(unsafe{ &*ptr }) }
+        }
+    }
+
+    pub fn new(slice : &'a [T]) -> Self {
+        let ptr_range = slice.as_ptr_range();
+        let inner = RangeIter::new(ptr_range.start, ptr_range.end);
+        RestartableIter{ inner : inner, _phantom : PhantomData }
+    }
+}
+
+impl<'a, T : 'a> Iterator for RestartableIter<'a, T> {
+    type Item = &'a T;
+    #[inline]
+    fn next(&mut self) -> Option<&'a T> {
+        Self::map_result(self.inner.next())
+    }
+}
+
+impl<'a, T : 'a> ExactSizeIterator for RestartableIter<'a, T> {
+    #[inline]
+    fn len(&self) -> usize { self.inner.len() }
+}
+
+impl<'a, T : 'a> StatefulIterator for RestartableIter<'a, T> {
+    #[inline]
+    fn current(&self) -> Option<Self::Item> {
+        Self::map_result(self.inner.current())
+    }
+}
+
+impl<'a, T : 'a> RestartableIterator for RestartableIter<'a, T> {
+    fn restart(&mut self) -> Option<Self::Item> {
+        Self::map_result(self.inner.restart())
+    }
+}
+
+pub struct RestartableCloned<I : Iterator> {
+    inner : I,
+}
+
+impl<'a, T : Clone + 'a , I : Iterator<Item=&'a T>> RestartableCloned<I> {
+    fn map_result(result : Option<I::Item>) -> Option<T> {
+        match result {
+            None    => { None }
+            Some(v) => { Some(v.clone()) }
+        }
+    }
+
+    pub fn new(inner : I) -> Self {
+        RestartableCloned{ inner : inner }
+    }
+}
+
+impl<'a, T : Clone + 'a , I : Iterator<Item=&'a T>>  Iterator
+for RestartableCloned<I> {
+    type Item = T;
+    fn next(&mut self) -> Option<Self::Item> {
+        Self::map_result(self.inner.next())
+    }
+}
+
+impl<'a, T : Clone + 'a , I : ExactSizeIterator<Item=&'a T>> ExactSizeIterator
+for RestartableCloned<I> {
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<'a, T : Clone + 'a , I : StatefulIterator<Item=&'a T>>  StatefulIterator
+for RestartableCloned<I> {
+    fn current(&self)  -> Option<Self::Item> {
+        Self::map_result(self.inner.current())
+    }
+}
+
+impl<'a, T : Clone + 'a , I : RestartableIterator<Item=&'a T>> RestartableIterator
+for RestartableCloned<I> {
+    fn restart(&mut self) -> Option<Self::Item> {
+        Self::map_result(self.inner.restart())
+    }
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/iterate.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,916 @@
+/*!
+
+This module provides tools for separating the computational steps of iterative
+algorithms from visualisation and other reporting routines. The approach is heavily
+indebted to functional programming. The computational step is to be implemented as a
+closure. That closure gets passed a `state` parameter that implements an
+[`if_verbose`][AlgIteratorState::if_verbose] method that is to be called to determine
+whether function values or other potentially costly things need to be calculated on that
+iteration. The parameter of [`if_verbose`][AlgIteratorState::if_verbose] is another
+closure that does the necessary computation.
+
+## Simple example
+
+```rust
+use alg_tools::types::*;
+use alg_tools::iterate::*;
+let mut iter = AlgIteratorOptions{ max_iter : 100,
+                                   verbose_iter : Verbose::Every(10),
+                                   .. Default::default() };
+let mut x = 1 as float;
+iter.iterate(|state|{
+    // This is our computational step
+    x = x + x.sqrt();
+    state.if_verbose(||{
+        // return current value when requested
+        return x
+     })
+})
+```
+There is no semicolon after the `state.if_verbose` call, because its value needs to be
+returned from the closure. If more work has to happen after that call, store its result
+in a variable and return it at the end of the computational step; see the sketch after
+the output below.
+
+This will print out
+```output
+10/100 J=31.051164
+20/100 J=108.493699
+30/100 J=234.690039
+40/100 J=410.056327
+50/100 J=634.799262
+60/100 J=909.042928
+70/100 J=1232.870172
+80/100 J=1606.340254
+90/100 J=2029.497673
+100/100 J=2502.377071
+```
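+
+A minimal sketch of storing the result of `if_verbose` and returning it later (the hidden
+setup mirrors the example above):
+
+```rust
+# use alg_tools::types::*;
+# use alg_tools::iterate::*;
+# let iter = AlgIteratorOptions::default();
+# let mut x = 1 as float;
+iter.iterate(|state| {
+    x = x + x.sqrt();
+    let report = state.if_verbose(|| x);
+    // … further work that has to run on every iteration …
+    report
+})
+```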
+*/
+
+use colored::{Colorize,ColoredString};
+//use num_traits::Num;
+use core::fmt::Debug;
+//use core::fmt::Display;
+//use std::sync::mpsc::Receiver;
+use serde::{Serialize, Deserialize};
+use cpu_time::ProcessTime;
+use std::marker::PhantomData;
+use std::time::Duration;
+use crate::types::*;
+use crate::logger::*;
+
+/// Create the displayed presentation for log items.
+/// Override this for a nice (condensed) presentation of rich log items;
+/// the default falls back to the `Debug` representation.
+pub trait LogRepr : Debug {
+    fn logrepr(&self) -> ColoredString { format!("« {:?} »", self).as_str().into() }
+}
+
+impl LogRepr for str {
+    fn logrepr(&self) -> ColoredString { self.into() }
+}
+
+impl LogRepr for String {
+    fn logrepr(&self) -> ColoredString { self.as_str().into() }
+}
+
+impl<T> LogRepr for T where T : Num {
+    fn logrepr(&self) -> ColoredString { format!("J={}", self).as_str().into() }
+}
+
+impl<V> LogRepr for Option<V> where V : LogRepr {
+    fn logrepr(&self) -> ColoredString {
+        match self {
+            None    => { "===missing value===".red() }
+            Some(v) => { v.logrepr() }
+        }
+    }
+}
+
+#[derive(Debug,Clone)]
+pub struct Annotated<F>(pub F, pub String);
+
+impl<V> LogRepr for Annotated<V> where V : LogRepr {
+    fn logrepr(&self) -> ColoredString {
+        format!("{}\t| {}", self.0.logrepr(), self.1).as_str().into()
+    }
+}
+
+
+/// Basic log item.
+#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)]
+pub struct LogItem<V> {
+    pub iter : usize,
+    // This causes [`csv`] to crash.
+    //#[serde(flatten)]
+    pub data : V
+}
+
+fn make_logitem<V>(iter : usize, data : V) -> LogItem<V> {
+    LogItem{ iter, data }
+}
+
+/// State of an [`AlgIterator`]
+pub trait AlgIteratorState {
+    fn if_verbose<A>(&self, calc_objective : impl FnMut() -> A) -> Option<A>;
+
+    fn iteration(&self) -> usize;
+}
+
+/// Result of a step of an [`AlgIterator`]
+#[derive(Debug, Serialize)]
+pub enum Step<V> {
+    /// Iteration should be terminated
+    Terminated,
+    /// No result this iteration (due to verbosity settings)
+    Quiet,
+    /// Result of this iteration (due to verbosity settings)
+    Result(V),
+}
+
+/// An iterator for algorithms, produced by [`AlgIteratorFactory::prepare`].
+/// For usage instructions see the help for [`AlgIteratorFactory`].
+pub trait AlgIterator<V> : Sized {
+    type State : AlgIteratorState;
+    type Output;
+
+    /// Advance the iterator.
+    fn step(&mut self) -> Step<Self::Output>;
+
+    /// Return current iteration count.
+    fn iteration(&self) -> usize {
+        self.state().iteration()
+    }
+
+    /// Return current iteration stats.
+    fn state(&self) -> Self::State;
+
+    /// Iterate the function `step`. It should accept one `state` ([`AlgIteratorState`]) parameter,
+    /// and return the output of `state.if_verbose`.
+    /// Typically called through [`AlgIteratorFactory::iterate`].
+    #[inline]
+    fn iterate(&mut self) {
+        loop {
+            if let Step::Terminated = self.step() {
+                break
+            }
+        }
+    }
+
+    /// Converts the `AlgIterator` into a plain [`Iterator`], discarding [`Step::Quiet`] results.
+    /// (This is to avoid having to manually implement [`IntoIterator`] or [`Iterator`]
+    /// for every single [`AlgIterator`].)
+    fn downcast(self) -> AlgIteratorI<Self, V> {
+        AlgIteratorI(self, PhantomData)
+    }
+}
+
+/// Conversion of an `AlgIterator` into a plain [`Iterator`].
+/// It discards [`Step::Quiet`] results.
+pub struct AlgIteratorI<A, V>(A, PhantomData<V>);
+
+impl<A, V> Iterator for AlgIteratorI<A, V>
+where A : AlgIterator<V> {
+    type Item = A::Output;
+
+    fn next(&mut self) -> Option<A::Output> {
+        loop {
+            match self.0.step() {
+                Step::Result(v) => return Some(v),
+                Step::Terminated => return None,
+                Step::Quiet => continue,
+            }
+        }
+    }
+}
+
+/// A factory for producing an [`AlgIterator`]. To use one:
+///
+/// ```rust
+/// use alg_tools::iterate::*;
+/// let mut iter = AlgIteratorOptions::default();
+/// iter.iterate(|state|{
+///     // perform iterations
+///     state.if_verbose(||{
+///         // calculate and return function value or other displayed data v
+///         return 0
+///     })
+/// })
+/// ```
+pub trait AlgIteratorFactory<V> : Sized {
+    type Iter<F> : AlgIterator<V, State = Self::State, Output = Self::Output>
+        where F : FnMut(&Self::State) -> Step<V>;
+    type State : AlgIteratorState;
+    type Output;
+
+    /// Prepare an [`AlgIterator`], consuming the factory.
+    /// The function `step_fn` should accept a `state` ([`AlgIteratorState`]) parameter, and return
+    /// a [`Step`].
+    fn prepare<F>(self, step_fn : F) -> Self::Iter<F>
+    where F : FnMut(&Self::State) -> Step<V>;
+
+    /// Iterate the function `step`. It should accept one `state` ([`AlgIteratorState`]) parameter,
+    /// and return the output of `state.if_verbose`. For usage instructions see the
+    /// [module documentation][self].
+    ///
+    /// This method is equivalent to [`Self::prepare`] followed by [`AlgIterator::iterate`].
+    #[inline]
+    fn iterate<F>(self, mut step : F) -> () where F : FnMut(&Self::State) -> Option<V> {
+        self.prepare(move |state| {
+            match step(state) {
+                None => Step::Quiet,
+                Some(v) => Step::Result(v),
+            }
+        }).iterate()
+    }
+
+    /// Iterate the function `step`. It should accept a `state` ([`AlgIteratorState`]) parameter
+    /// and a data item taken from `datasource`, and return the output of `state.if_verbose`.
+    /// For usage instructions see the [module documentation][self].
+    #[inline]
+    fn iterate_data<F, D, I>(self, mut datasource : I, mut step : F)
+    where F : FnMut(&Self::State, D) -> Option<V>,
+          I : Iterator<Item = D> {
+        self.prepare(move |state| {
+            match datasource.next() {
+                None => Step::Terminated,
+                Some(d) => match step(state, d) {
+                    None => Step::Quiet,
+                    Some(v) => Step::Result(v),
+                }
+            }
+        }).iterate()
+    }
+
+    // fn make_iterate<'own>(self)
+    // -> Box<dyn (FnMut(dyn FnMut(&Self::State) -> Option<V>) -> ()) + 'own> {
+    //     Box::new(move |step| self.iterate(step))
+    // }
+
+    // fn make_iterate_data<'own, D, I>(self, datasource : I)
+    // -> Box<dyn (FnMut(dyn FnMut(&Self::State, D) -> Option<V>) -> ()) + 'own>
+    // where I : Iterator<Item = D> + 'own {
+    //     Box::new(move |step| self.iterate_data(step, datasource))
+    // }
+
+    /// Add logging to the iterator produced by the factory.
+    fn into_log<'log>(self, logger : &'log mut Logger<Self::Output>)
+    -> LoggingIteratorFactory<'log, Self::Output, Self>
+    where Self : Sized {
+        LoggingIteratorFactory {
+            base_options : self,
+            logger,
+        }
+    }
+
+    /// Map the output of the iterator produced by the factory.
+    fn mapped<U, G>(self, map : G)
+    -> MappingIteratorFactory<G, Self>
+    where Self : Sized,
+          G : Fn(usize, Self::Output) -> U {
+        MappingIteratorFactory {
+            base_options : self,
+            map
+        }
+    }
+
+    /// Adds iteration number to the output. Typically followed by [`Self::into_log`].
+    fn with_iteration_number(self)
+    -> MappingIteratorFactory<fn(usize, Self::Output) -> LogItem<Self::Output>, Self>
+    where Self : Sized {
+        self.mapped(make_logitem)
+    }
+
+    /// Add timing to the iterator produced by the factory.
+    fn timed(self) -> TimingIteratorFactory<Self>
+    where Self : Sized {
+        TimingIteratorFactory(self)
+    }
+
+    /// Add a value stopping threshold to the iterator produced by the factory.
+    fn stop_target(self, target : Self::Output) -> ValueIteratorFactory<Self::Output, Self>
+    where Self : Sized,
+          Self::Output : Num {
+        ValueIteratorFactory { base_options : self, target : target }
+    }
+
+    /// Add stall-based stopping to the iterator produced by the factory.
+    fn stop_stall(self, stall : Self::Output) -> StallIteratorFactory<Self::Output, Self>
+    where Self : Sized,
+          Self::Output : Num {
+        StallIteratorFactory { base_options : self, stall : stall }
+    }
+
+    /// Is the iterator quiet, i.e., non-verbose?
+    fn is_quiet(&self) -> bool { false }
+}
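+
+// A minimal sketch (added commentary) of chaining the combinators above:
+//
+//     AlgIteratorOptions::default()
+//         .instantiate::<float>()   // fix the step function's return type
+//         .stop_target(1e-10)       // terminate once the returned value reaches the target
+//         .timed()                  // attach CPU time to verbose results
+//         .iterate(|state| {
+//             // … computational step …
+//             state.if_verbose(|| /* current function value */ 0.0)
+//         });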
+
+/// Options for [`BasicAlgIteratorFactory`]. Use as:
+/// ```
+/// # use alg_tools::iterate::*;
+/// let iter = AlgIteratorOptions{ max_iter : 10000, .. Default::default() };
+/// ```
+#[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, PartialEq)]
+pub struct AlgIteratorOptions {
+    /// Maximum number of iterations
+    pub max_iter : usize,
+    /// Number of iterations between verbose iterations that display state.
+    pub verbose_iter : Verbose,
+    /// Whether verbose iterations are displayed, or just passed onwards to a containing
+    /// `AlgIterator`.
+    pub quiet : bool,
+}
+
+#[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, PartialEq)]
+pub enum Verbose {
+    /// Be verbose every $n$ iterations.
+    Every(usize),
+    /// Be verbose every $n$ iterations and initial $m$ iterations.
+    EveryAndInitial{ every : usize, initial : usize },
+    /// Be verbose if the iteration number $n$ is divisible by $b^{\text{floor}(\log_b(n))}$,
+    /// where $b$ is the indicated logarithmic base. So, with $b=10$,
+    /// * every iteration for the first 10 iterations,
+    /// * every 10 iterations from there on until 100 iterations,
+    /// * every 100 iterations from there on until 1000 iterations, etc.
+    Logarithmic(usize),
+}
+
+impl Verbose {
+    /// Indicates whether given iteration number is verbose
+    pub fn is_verbose(&self, iter : usize) -> bool {
+        match self {
+            &Verbose::Every(every) => {
+                every != 0 && iter % every == 0
+            },
+            &Verbose::EveryAndInitial{ every, initial } => {
+                iter <= initial || (every != 0 && iter % every == 0)
+            },
+            &Verbose::Logarithmic(base) => {
+                let every = base.pow((iter as float).log(base as float).floor() as u32);
+                iter % every == 0
+            }
+        }
+    }
+}
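+
+// Worked example (added commentary): with `Verbose::Logarithmic(10)`, iteration 7 gives
+// every = 10⁰ = 1 (verbose), iteration 30 gives every = 10¹ = 10 (verbose, as 30 % 10 == 0),
+// and iteration 35 is quiet (35 % 10 != 0).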
+
+impl Default for AlgIteratorOptions {
+    fn default() -> AlgIteratorOptions {
+        AlgIteratorOptions{
+            max_iter : 1000,
+            verbose_iter : Verbose::EveryAndInitial { every : 100, initial : 10 },
+            quiet : false
+        }
+    }
+}
+
+/// State of a `BasicAlgIterator`
+#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)]
+pub struct BasicState {
+    iter : usize,
+    verbose : bool,
+    calc : bool,
+}
+
+/// [`AlgIteratorFactory`] for [`BasicAlgIterator`]
+#[derive(Clone,Debug)]
+pub struct BasicAlgIteratorFactory<V> {
+    options : AlgIteratorOptions,
+    phantoms : PhantomData<V>,
+}
+
+/// The simplest [`AlgIterator`], created by [`BasicAlgIteratorFactory`].
+#[derive(Clone,Debug)]
+pub struct BasicAlgIterator<F> {
+    options : AlgIteratorOptions,
+    iter : usize,
+    step_fn : F,
+}
+
+impl AlgIteratorOptions {
+    /// [`AlgIteratorOptions`] is directly a factory for [`BasicAlgIterator`];
+    /// however, due to type inference issues, it can be convenient to instantiate
+    /// it for a specific return type of the inner step function. This method does that.
+    pub fn instantiate<V>(&self) -> BasicAlgIteratorFactory<V> {
+        BasicAlgIteratorFactory {
+            options : self.clone(),
+            phantoms : PhantomData
+        }
+    }
+}
+
+impl<V> AlgIteratorFactory<V> for AlgIteratorOptions
+where V : LogRepr {
+    type State = BasicState;
+    type Iter<F> = BasicAlgIterator<F> where F : FnMut(&Self::State) -> Step<V>;
+    type Output = V;
+
+    fn prepare<F>(self, step_fn : F) -> Self::Iter<F>
+    where F : FnMut(&Self::State) -> Step<V> {
+        BasicAlgIterator{ options : self, iter : 0, step_fn }
+    }
+
+    #[inline]
+    fn is_quiet(&self) -> bool {
+        self.quiet
+    }
+}
+
+impl<V> AlgIteratorFactory<V> for BasicAlgIteratorFactory<V>
+where V : LogRepr {
+    type State = BasicState;
+    type Iter<F> = BasicAlgIterator<F> where F : FnMut(&Self::State) -> Step<V>;
+    type Output = V;
+
+    fn prepare<F>(self, step_fn : F) -> Self::Iter<F>
+    where F : FnMut(&Self::State) -> Step<V> {
+        BasicAlgIterator{ options : self.options, iter : 0, step_fn }
+    }
+
+    #[inline]
+    fn is_quiet(&self) -> bool {
+        self.options.quiet
+    }
+}
+
+impl<V, F> AlgIterator<V> for BasicAlgIterator<F>
+where V : LogRepr,
+      F : FnMut(&BasicState) -> Step<V> {
+    type State = BasicState;
+    type Output = V;
+
+    #[inline]
+    fn step(&mut self) -> Step<V> {
+        if self.iter >= self.options.max_iter {
+            Step::Terminated
+        } else {
+            self.iter += 1;
+            let state = self.state();
+            let res = (self.step_fn)(&state);
+            if let Step::Result(ref val) = res {
+                if state.verbose && !self.options.quiet {
+                    println!("{}{}/{} {}{}", "".dimmed(),
+                            state.iter,
+                            self.options.max_iter,
+                            val.logrepr(),
+                            "".clear());
+                }
+            }
+            res
+        }
+    }
+
+    #[inline]
+    fn iteration(&self) -> usize {
+        self.iter
+    }
+
+    #[inline]
+    fn state(&self) -> BasicState {
+        let iter = self.iter;
+        let verbose = self.options.verbose_iter.is_verbose(iter);
+        BasicState{
+            iter : iter,
+            verbose : verbose,
+            calc : verbose,
+        }
+    }
+}
+
+impl AlgIteratorState for BasicState {
+    #[inline]
+    fn if_verbose<A>(&self, mut calc_objective : impl FnMut() -> A) -> Option<A> {
+        if self.calc {
+            Some(calc_objective())
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    fn iteration(&self) -> usize {
+        return self.iter;
+    }
+}
+
+//
+// Stall detecting iteration function.
+//
+
+/// An [`AlgIteratorFactory`] for an [`AlgIterator`] that detects “stall”
+/// $|v_{k+n}-v_k|/|v_k| ≤ θ$, where $n$ is the distance between [`Step::Result`] iterations.
+#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)]
+pub struct StallIteratorFactory<U : Num, BaseFactory> {
+    /// Basic options
+    pub base_options : BaseFactory,
+    /// Stalling threshold $θ$.
+    pub stall : U,
+}
+
+/// Iterator produced by [`StallIteratorFactory`].
+pub struct StallIterator<U : Num, BaseIterator> {
+    base_iterator : BaseIterator,
+    stall : U,
+    previous_value : Option<U>,
+}
+
+impl<V, U, BaseFactory> AlgIteratorFactory<V>
+for StallIteratorFactory<U, BaseFactory>
+where BaseFactory : AlgIteratorFactory<V, Output=U>,
+      U : SignedNum + PartialOrd {
+    type Iter<F> = StallIterator<U, BaseFactory::Iter<F>>
+        where F : FnMut(&Self::State) -> Step<V>;
+    type State = BaseFactory::State;
+    type Output = BaseFactory::Output;
+
+    fn prepare<F>(self, step_fn : F) -> Self::Iter<F>
+    where F : FnMut(&Self::State) -> Step<V> {
+        StallIterator {
+            base_iterator : self.base_options.prepare(step_fn),
+            stall : self.stall,
+            previous_value : None,
+        }
+    }
+
+    fn is_quiet(&self) -> bool {
+        self.base_options.is_quiet()
+    }
+}
+
+impl<V, U, BaseIterator> AlgIterator<V>
+for StallIterator<U, BaseIterator>
+where BaseIterator : AlgIterator<V, Output=U>,
+      U : SignedNum + PartialOrd {
+    type State = BaseIterator::State;
+    type Output = BaseIterator::Output;
+
+    #[inline]
+    fn step(&mut self) -> Step<U> {
+        match self.base_iterator.step() {
+            Step::Result(nv) => {
+                let previous_v = self.previous_value;
+                self.previous_value = Some(nv);
+                match previous_v {
+                    Some(pv) if (nv - pv).abs() <= self.stall * pv.abs() => Step::Terminated,
+                    _ => Step::Result(nv),
+                }
+            },
+            val => val,
+        }
+    }
+
+    #[inline]
+    fn iteration(&self) -> usize {
+        self.base_iterator.iteration()
+    }
+
+    #[inline]
+    fn state(&self) -> Self::State {
+        self.base_iterator.state()
+    }
+}
+
+/// An [`AlgIteratorFactory`] for an [`AlgIterator`] that detects whether the step function's
+/// return value is less than or equal to `target`, and terminates if it is.
+#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)]
+pub struct ValueIteratorFactory<U : Num, BaseFactory> {
+    /// Basic options
+    pub base_options : BaseFactory,
+    /// Stopping target value.
+    pub target : U,
+}
+
+/// Iterator produced by [`ValueIteratorFactory`].
+pub struct ValueIterator<U : Num, BaseIterator> {
+    base_iterator : BaseIterator,
+    target : U,
+}
+
+impl<V, U, BaseFactory> AlgIteratorFactory<V>
+for ValueIteratorFactory<U, BaseFactory>
+where BaseFactory : AlgIteratorFactory<V, Output=U>,
+      U : SignedNum + PartialOrd {
+    type Iter<F> = ValueIterator<U, BaseFactory::Iter<F>>
+        where F : FnMut(&Self::State) -> Step<V>;
+    type State = BaseFactory::State;
+    type Output = BaseFactory::Output;
+
+    fn prepare<F>(self, step_fn : F) -> Self::Iter<F>
+    where F : FnMut(&Self::State) -> Step<V> {
+        ValueIterator {
+            base_iterator : self.base_options.prepare(step_fn),
+            target : self.target
+        }
+    }
+
+    fn is_quiet(&self) -> bool {
+        self.base_options.is_quiet()
+    }
+}
+
+impl<V, U, BaseIterator> AlgIterator<V>
+for ValueIterator<U, BaseIterator>
+where BaseIterator : AlgIterator<V, Output=U>,
+      U : SignedNum + PartialOrd {
+    type State = BaseIterator::State;
+    type Output = BaseIterator::Output;
+
+    #[inline]
+    fn step(&mut self) -> Step<U> {
+        match self.base_iterator.step() {
+            Step::Result(v) => {
+                 if v <= self.target {
+                    Step::Terminated
+                 } else {
+                    Step::Result(v)
+                 }
+            },
+            val => val,
+        }
+    }
+
+    #[inline]
+    fn iteration(&self) -> usize {
+        self.base_iterator.iteration()
+    }
+
+    #[inline]
+    fn state(&self) -> Self::State {
+        self.base_iterator.state()
+    }
+}
+
+//
+// Logging iterator
+//
+
+/// [`AlgIteratorFactory`] for a logging [`AlgIterator`].
+/// The `Output` of the corresponding [`LoggingIterator`] is `()`:
+/// the data is only inserted into the log, and not passed onwards.
+/// Use as:
+/// ```rust
+/// use alg_tools::iterate::*;
+/// use alg_tools::logger::*;
+/// let iter = AlgIteratorOptions::default();
+/// let mut log = Logger::new();
+/// iter.into_log(&mut log).iterate(|state|{
+///     // perform iterations
+///     state.if_verbose(||{
+///         // calculate and return function value or other displayed data v
+///         return 0
+///     })
+/// })
+/// ```
+#[derive(Debug)]
+pub struct LoggingIteratorFactory<'log, U, BaseFactory> {
+    pub base_options : BaseFactory,
+    pub logger : &'log mut Logger<U>,
+}
+
+/// Iterator produced by `LoggingIteratorFactory`.
+pub struct LoggingIterator<'log, U, BaseIterator> {
+    base_iterator : BaseIterator,
+    logger : &'log mut Logger<U>,
+}
+
+
+impl<'log, V, BaseFactory> AlgIteratorFactory<V>
+for LoggingIteratorFactory<'log, BaseFactory::Output, BaseFactory>
+where BaseFactory : AlgIteratorFactory<V>,
+      BaseFactory::Output : 'log {
+    type State = BaseFactory::State;
+    type Iter<F> = LoggingIterator<'log, BaseFactory::Output, BaseFactory::Iter<F>>
+        where F : FnMut(&Self::State) -> Step<V>;
+    type Output = ();
+
+    fn prepare<F>(self, step_fn : F) -> Self::Iter<F>
+    where F : FnMut(&Self::State) -> Step<V> {
+        LoggingIterator {
+            base_iterator : self.base_options.prepare(step_fn),
+            logger : self.logger,
+        }
+    }
+
+    #[inline]
+    fn is_quiet(&self) -> bool {
+        self.base_options.is_quiet()
+    }
+}
+
+impl<'log, V, BaseIterator> AlgIterator<V>
+for LoggingIterator<'log, BaseIterator::Output, BaseIterator>
+where BaseIterator : AlgIterator<V>,
+      BaseIterator::Output : 'log {
+    type State = BaseIterator::State;
+    type Output = ();
+
+    #[inline]
+    fn step(&mut self) -> Step<Self::Output> {
+        match self.base_iterator.step() {
+            Step::Result(v) => {
+                self.logger.log(v);
+                Step::Quiet
+            },
+            Step::Quiet => Step::Quiet,
+            Step::Terminated => Step::Terminated,
+        }
+    }
+
+    #[inline]
+    fn iteration(&self) -> usize {
+        self.base_iterator.iteration()
+    }
+
+    #[inline]
+    fn state(&self) -> Self::State {
+        self.base_iterator.state()
+    }
+}
+
+/// This [`AlgIteratorFactory`] allows output mapping.
+#[derive(Debug)]
+pub struct MappingIteratorFactory<G, BaseFactory> {
+    pub base_options : BaseFactory,
+    pub map : G,
+}
+
+/// Iterator produced by `MappingIteratorFactory`.
+pub struct MappingIterator<G, BaseIterator> {
+    base_iterator : BaseIterator,
+    map : G,
+}
+
+
+impl<V, U, G, BaseFactory> AlgIteratorFactory<V>
+for MappingIteratorFactory<G, BaseFactory>
+where BaseFactory : AlgIteratorFactory<V>,
+      G : Fn(usize, BaseFactory::Output) -> U {
+    type State = BaseFactory::State;
+    type Iter<F> = MappingIterator<G, BaseFactory::Iter<F>>
+        where F : FnMut(&Self::State) -> Step<V>;
+    type Output = U;
+
+    fn prepare<F>(self, step_fn : F) -> Self::Iter<F>
+    where F : FnMut(&Self::State) -> Step<V> {
+        MappingIterator {
+            base_iterator : self.base_options.prepare(step_fn),
+            map : self.map
+        }
+    }
+
+    #[inline]
+    fn is_quiet(&self) -> bool {
+        self.base_options.is_quiet()
+    }
+}
+
+impl<V, U, G, BaseIterator> AlgIterator<V>
+for MappingIterator<G, BaseIterator>
+where BaseIterator : AlgIterator<V>,
+      G : Fn(usize, BaseIterator::Output) -> U {
+    type State = BaseIterator::State;
+    type Output = U;
+
+    #[inline]
+    fn step(&mut self) -> Step<Self::Output> {
+        match self.base_iterator.step() {
+            Step::Result(v) => Step::Result((self.map)(self.iteration(), v)),
+            Step::Quiet => Step::Quiet,
+            Step::Terminated => Step::Terminated,
+        }
+    }
+
+    #[inline]
+    fn iteration(&self) -> usize {
+        self.base_iterator.iteration()
+    }
+
+    #[inline]
+    fn state(&self) -> Self::State {
+        self.base_iterator.state()
+    }
+}
+
+//
+// Timing iterator
+//
+
+/// An [`AlgIteratorFactory`] for an [`AlgIterator`] that adds CPU time spent to verbose events.
+#[derive(Debug)]
+pub struct TimingIteratorFactory<BaseFactory>(pub BaseFactory);
+
+/// Iterator produced by [`TimingIteratorFactory`]
+#[derive(Debug)]
+pub struct TimingIterator<BaseIterator> {
+    base_iterator : BaseIterator,
+    start_time : ProcessTime,
+}
+
+/// Data `U` with production time attached
+#[derive(Copy, Clone, Debug, Serialize)]
+pub struct Timed<U> {
+    pub cpu_time : Duration,
+    //#[serde(flatten)]
+    pub data : U
+}
+
+impl<T> LogRepr for Timed<T> where T : LogRepr {
+    fn logrepr(&self) -> ColoredString {
+        format!("[{:.3}s] {}", self.cpu_time.as_secs_f64(), self.data.logrepr()).as_str().into()
+    }
+}
+
+
+impl<V, BaseFactory> AlgIteratorFactory<V>
+for TimingIteratorFactory<BaseFactory>
+where BaseFactory : AlgIteratorFactory<V> {
+    type State = BaseFactory::State;
+    type Iter<F> = TimingIterator<BaseFactory::Iter<F>>
+        where F : FnMut(&Self::State) -> Step<V>;
+    type Output = Timed<BaseFactory::Output>;
+
+    fn prepare<F>(self, step_fn : F) -> Self::Iter<F>
+    where F : FnMut(&Self::State) -> Step<V> {
+        TimingIterator {
+            base_iterator : self.0.prepare(step_fn),
+            start_time : ProcessTime::now()
+        }
+    }
+
+    #[inline]
+    fn is_quiet(&self) -> bool {
+        self.0.is_quiet()
+    }
+}
+
+impl<V, BaseIterator> AlgIterator<V>
+for TimingIterator<BaseIterator>
+where BaseIterator : AlgIterator<V> {
+    type State = BaseIterator::State;
+    type Output = Timed<BaseIterator::Output>;
+
+    #[inline]
+    fn step(&mut self) -> Step<Self::Output> {
+        match self.base_iterator.step() {
+            Step::Result(data) => {
+                Step::Result(Timed{
+                    cpu_time : self.start_time.elapsed(),
+                    data
+                })
+            },
+            Step::Quiet => Step::Quiet,
+            Step::Terminated => Step::Terminated,
+        }
+    }
+
+    #[inline]
+    fn iteration(&self) -> usize {
+        self.base_iterator.iteration()
+    }
+
+    #[inline]
+    fn state(&self) -> Self::State {
+        self.base_iterator.state()
+    }
+}
+
+//
+// Tests
+//
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::logger::Logger;
+    #[test]
+    fn iteration() {
+        let options = AlgIteratorOptions{
+            max_iter : 10,
+            verbose_iter : Verbose::Every(3),
+            .. Default::default()
+        };
+
+        {
+            let mut start = 1 as int;
+            options.iterate(|state| {
+                start = start * 2;
+                state.if_verbose(|| start)
+            });
+            assert_eq!(start, (2 as int).pow(10));
+        }
+
+        {
+            let mut start = 1 as int;
+            let mut log = Logger::new();
+            let factory = options.instantiate()
+                                 .with_iteration_number()
+                                 .into_log(&mut log);
+            factory.iterate(|state| {
+                start = start * 2;
+                state.if_verbose(|| start)
+            });
+            assert_eq!(start, (2 as int).pow(10));
+            assert_eq!(log.data()
+                          .iter()
+                          .map(|LogItem{ data : v, iter : _ }| v.clone())
+                          .collect::<Vec<int>>(),
+                      (0..10).step_by(3)
+                             .map(|i| (2 as int).pow(i+1))
+                             .collect::<Vec<int>>())
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/lib.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,49 @@
+// The main documentation is in the README.
+#![doc = include_str!("../README.md")]
+
+// We use unicode. We would like to use much more of it than Rust allows.
+// Live with it. Embrace it.
+#![allow(uncommon_codepoints)]
+#![allow(mixed_script_confusables)]
+#![allow(confusable_idents)]
+
+// We need BinaryHeap::retain from nightly
+#![feature(binary_heap_retain)]
+
+#![feature(maybe_uninit_uninit_array,maybe_uninit_array_assume_init,maybe_uninit_slice)]
+#![feature(try_trait_v2_residual,try_trait_v2)]
+
+#![feature(array_methods)]
+
+#![feature(arc_unwrap_or_clone)]
+
+#![feature(float_minimum_maximum)]
+
+// They don't work:
+//#![feature(negative_impls)]
+//#![feature(specialization)]
+
+pub mod types;
+pub mod nanleast;
+pub mod error;
+pub mod maputil;
+pub mod tuple;
+pub mod norms;
+#[macro_use]
+pub mod loc;
+pub mod iter;
+pub mod linops;
+pub mod iterate;
+pub mod tabledump;
+pub mod logger;
+pub mod linsolve;
+pub mod lingrid;
+pub mod sets;
+pub mod mapping;
+pub mod coefficients;
+pub mod fe_model;
+pub mod bisection_tree;
+pub mod nalgebra_support;
+
+
+pub use types::*;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/lingrid.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,203 @@
+//! Production of equally spaced nodes within intervals.
+
+use crate::types::*;
+use crate::loc::Loc;
+use crate::sets::Cube;
+use crate::iter::{RestartableIterator, StatefulIterator};
+use crate::maputil::{map2, map4};
+use serde::Serialize;
+
+// TODO: rewrite this using crate::sets::Cube.
+
+/// The interval `[start, end]` divided into `count` nodes.
+/// Implements `IntoIterator` to iterate over the nodes of the `LinSpace`.
+/// It is similar to [`num::range_step_inclusive`], but [restartable][RestartableIterator] and
+/// parametrised by the number of nodes instead of a step. This way it can be ensured that the
+/// last item produced is equal to `end`. To use other types than `float` and `f32`, create
+/// the `LinSpace` struct directly.
+#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)]
+pub struct LinSpace<F, I> {
+    pub start : F,
+    pub end : F,
+    pub count : I,
+}
+
+#[allow(type_alias_bounds)] // Need it to access F::CompatibleSize.
+pub type LinGrid<F : Float, const N : usize> = LinSpace<Loc<F, N>, [usize; N]>;
+
+/// Creates a [`LinSpace`] on the real line.
+pub fn linspace<F : Float>(start : F, end : F, count : usize) -> LinSpace<F, usize> {
+    LinSpace{ start, end, count }
+}
+
+/// Create a multi-dimensional linear grid.
+///
+/// The first and last point in each dimension are the boundaries of the corresponding
+/// dimensions of `cube`, and there are `count` nodes along each dimension.
+pub fn lingrid<F : Float, const N : usize>(
+    cube : &Cube<F, N>,
+    count : &[usize; N]
+) -> LinSpace<Loc<F, N>, [usize; N]> {
+    LinSpace{ start : cube.span_start(), end : cube.span_end(), count : *count }
+}
+
+/// Create a multi-dimensional linear grid with centered nodes.
+///
+/// There are `count` nodes along each dimension, and each node has an equally-sized subcube surrounding it
+/// inside `cube`. Thus, if $w_i$ is the width of the cube along dimension $i$, and $n_i$ the number
+/// of nodes, the width of the subcube along this dimension is $h_i = w_i/(n_i+1)$, and the first
+/// and last nodes are at a distance $h_i/2$ from the closest boundary.
+pub fn lingrid_centered<F : Float, const N : usize>(
+    cube : &Cube<F, N>,
+    count : &[usize; N]
+) -> LinSpace<Loc<F, N>, [usize; N]> {
+    let h_div_2 = map2(cube.width(), count, |w, &n| w / F::cast_from(2 * (n + 1)));
+    let span_start = map2(cube.span_start(), &h_div_2, |a, &t| a + t).into();
+    let span_end   = map2(cube.span_end(),   &h_div_2, |b, &t| b - t).into();
+    LinSpace{ start : span_start, end : span_end, count : *count }
+}
+
+
+/// Iterator over a `LinSpace`.
+#[derive(Clone, Debug)]
+pub struct LinSpaceIterator<F, I> {
+    lingrid : LinSpace<F,I>,
+    current : Option<I>,
+}
+
+pub trait Grid<F, I> {
+    fn entry_linear_unchecked(&self, i : usize) -> F;
+    //fn entry(&self, i : I) -> Option<F>;
+    fn entry_unchecked(&self, i : &I) -> F;
+}
+
+pub trait GridIteration<F, I> {
+    fn next_index(&mut self) -> Option<I>;
+}
+
+impl<F : Float + CastFrom<I>, I : Unsigned> Grid<F, I> for LinSpace<F, I> {
+    /*fn entry(&self, i : I) -> Option<F> {
+         if i < self.count {
+            Some(self.entry_unchecked(i))
+         } else {
+            None
+         }
+    }*/
+
+    #[inline]
+    fn entry_linear_unchecked(&self, i : usize) -> F {
+        self.entry_unchecked(&I::cast_from(i))
+    }
+
+    #[inline]
+    fn entry_unchecked(&self, i : &I) -> F {
+        let idx = F::cast_from(*i);
+        let scale = F::cast_from(self.count-I::ONE);
+        self.start + (self.end-self.start)*idx/scale
+    }
+}
+
+impl<F : Float + CastFrom<I>, I : Unsigned> GridIteration<F, I>
+for LinSpaceIterator<F, I> {
+    #[inline]
+    fn next_index(&mut self) -> Option<I> {
+        match self.current {
+            None if I::ZERO < self.lingrid.count     => { self.current = Some(I::ZERO); self.current }
+            Some(v) if v+I::ONE < self.lingrid.count => { self.current = Some(v+I::ONE); self.current }
+            _                                        => { None }
+        }
+    }
+}
+
+impl<F : Float + CastFrom<I>, I : Unsigned, const N : usize> Grid<Loc<F,N>, [I; N]>
+for LinSpace<Loc<F,N>, [I; N]> {
+    #[inline]
+    fn entry_linear_unchecked(&self, i_ : usize) -> Loc<F, N> {
+        let mut i = I::cast_from(i_);
+        let mut tmp = [I::ZERO; N];
+        for k in 0..N {
+            tmp[k] = i % self.count[k];
+            i /= self.count[k];
+        }
+        self.entry_unchecked(&tmp)
+    }
+
+    #[inline]
+    fn entry_unchecked(&self, i : &[I; N]) -> Loc<F, N> {
+        let LinSpace{ start, end, count } = self;
+        map4(i, start, end, count, |&ik, &sk, &ek, &ck| {
+            let idx = F::cast_from(ik);
+            let scale = F::cast_from(ck-I::ONE);
+            sk + (ek - sk) * idx / scale
+        }).into()
+    }
+}
+
+impl<F : Float + CastFrom<I>, I : Unsigned, const N : usize> GridIteration<Loc<F,N>, [I; N]>
+for LinSpaceIterator<Loc<F,N>, [I; N]> {
+
+    #[inline]
+    fn next_index(&mut self) -> Option<[I; N]> {
+        match self.current {
+            None if self.lingrid.count.iter().all(|v| I::ZERO < *v)  => {
+                self.current = Some([I::ZERO; N]);
+                self.current
+            },
+            Some(ref mut v) => {
+                for k in 0..N {
+                    let a = v[k] + I::ONE;
+                    if a < self.lingrid.count[k] {
+                        v[k] = a;
+                        return self.current
+                    } else {
+                        v[k] = I::ZERO;
+                    }
+                }
+                None
+            },
+            _ => None
+        }
+    }
+}
+
+impl<F, I> IntoIterator for LinSpace<F,I>
+where LinSpace<F, I> : Grid<F, I>,
+      LinSpaceIterator<F, I> : GridIteration<F, I> {
+    type Item = F;
+    type IntoIter = LinSpaceIterator<F,I>;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        LinSpaceIterator { lingrid : self, current : None }
+    }
+}
+
+impl<F, I> Iterator for LinSpaceIterator<F,I>
+where LinSpace<F, I> : Grid<F, I>,
+      LinSpaceIterator<F, I> : GridIteration<F, I> {
+    type Item = F;
+    #[inline]
+    fn next(&mut self) -> Option<F> {
+        self.next_index().map(|v| self.lingrid.entry_unchecked(&v))
+    }
+}
+
+impl<F, I> StatefulIterator for LinSpaceIterator<F,I>
+where LinSpace<F, I> : Grid<F, I>,
+      LinSpaceIterator<F, I> : GridIteration<F, I> {
+    #[inline]
+    fn current(&self) -> Option<F> {
+        self.current.as_ref().map(|c| self.lingrid.entry_unchecked(c))
+    }
+}
+
+
+impl<F, I> RestartableIterator for LinSpaceIterator<F,I>
+where LinSpace<F, I> : Grid<F, I>,
+      LinSpaceIterator<F, I> : GridIteration<F, I> {
+    #[inline]
+    fn restart(&mut self) -> Option<F> {
+        self.current = None;
+        self.next()
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/linops.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,139 @@
+/*!
+Abstract linear operators.
+*/
+
+use numeric_literals::replace_float_literals;
+use std::marker::PhantomData;
+use crate::types::*;
+use serde::Serialize;
+
+/// Trait for linear operators on `X`.
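+///
+/// A minimal implementation sketch (an illustrative scaling operator on `f64`;
+/// not part of the library's own operators):
+/// ```
+/// use alg_tools::linops::Linear;
+///
+/// struct Scale(f64);
+///
+/// impl Linear<f64> for Scale {
+///     type Codomain = f64;
+///     fn apply(&self, x : &f64) -> f64 { self.0 * x }
+/// }
+///
+/// assert_eq!(Scale(3.0).apply(&2.0), 6.0);
+/// ```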
+pub trait Linear<X> {
+    /// The range space of the operator.
+    type Codomain;
+    /// Apply the linear operator to `x`.
+    fn apply(&self, x : &X) -> Self::Codomain;
+}
+
+/// Efficient in-place summation.
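+///
+/// A small sketch of the contract, using `Loc` from [`crate::loc`] (assuming `f64`
+/// implements the crate's `Num`):
+/// ```
+/// use alg_tools::loc::Loc;
+/// use alg_tools::linops::AXPY;
+///
+/// let mut y = Loc([1.0, 2.0]);
+/// y.axpy(2.0, &Loc([3.0, 4.0]), 0.5); // y ← 0.5·y + 2.0·x
+/// assert_eq!(y, Loc([6.5, 9.0]));
+/// ```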
+#[replace_float_literals(F::cast_from(literal))]
+pub trait AXPY<F : Num, X = Self> {
+    /// Computes  `y = βy + αx`, where `y` is `Self`.
+    fn axpy(&mut self, α : F, x : &X, β : F);
+
+    /// Copies `x` to `self`.
+    fn copy_from(&mut self, x : &X) {
+        self.axpy(1.0, x, 0.0)
+    }
+
+     /// Computes  `y = αx`, where `y` is `Self`.
+    fn scale_from(&mut self, α : F, x : &X) {
+        self.axpy(α, x, 0.0)
+    }
+}
+
+/// Efficient in-place application for [`Linear`] operators.
+#[replace_float_literals(F::cast_from(literal))]
+pub trait GEMV<F : Num, X, Y = <Self as Linear<X>>::Codomain> : Linear<X> {
+    /// Computes `y = αAx + βy`, where `A` is `Self`.
+    fn gemv(&self, y : &mut Y, α : F, x : &X, β : F);
+
+    /// Computes `y = Ax`, where `A` is `Self`.
+    fn apply_mut(&self, y : &mut Y, x : &X){
+        self.gemv(y, 1.0, x, 0.0)
+    }
+
+    /// Computes `y += Ax`, where `A` is `Self`.
+    fn apply_add(&self, y : &mut Y, x : &X){
+        self.gemv(y, 1.0, x, 1.0)
+    }
+}
+
+
+/// Bounded linear operators.
+pub trait BoundedLinear<X> : Linear<X> {
+    type FloatType : Float;
+    /// A bound on the operator norm $\|A\|$ for the linear operator $A$=`self`.
+    /// This is not expected to be the norm, just any bound on it that can be
+    /// reasonably implemented.
+    fn opnorm_bound(&self) -> Self::FloatType;
+}
+
+// Linear operator application into mutable target. The [`AsRef`] bound
+// is used to guarantee compatibility with `Yʹ` and `Self::Codomain`;
+// the former is assumed to be e.g. a view into the latter.
+
+/*impl<X,Y,T> Fn(&X) -> Y for T where T : Linear<X,Codomain=Y> {
+    fn call(&self, x : &X) -> Y {
+        self.apply(x)
+    }
+}*/
+
+/// Trait for forming the adjoint operator of an operator $A$=`Self`.
+pub trait Adjointable<X,Yʹ> : Linear<X> {
+    type AdjointCodomain;
+    type Adjoint<'a> : Linear<Yʹ, Codomain=Self::AdjointCodomain> where Self : 'a;
+
+    /// Form the adjoint operator of `self`.
+    fn adjoint(&self) -> Self::Adjoint<'_>;
+
+    /*fn adjoint_apply(&self, y : &Yʹ) -> Self::AdjointCodomain {
+        self.adjoint().apply(y)
+    }*/
+}
+
+/// Trait for forming a preadjoint of an operator $A$, i.e., an operator $A_*$
+/// such that its adjoint $(A_*)^*=A$. The space `X` is the domain of the `Self`
+/// operator. The space `Ypre` is the predual of its codomain, and should be the
+/// domain of the adjointed operator. `Self::Preadjoint<'a>` should be
+/// [`Adjointable`]`<Ypre, X>`.
+pub trait Preadjointable<X,Ypre> : Linear<X> {
+    type PreadjointCodomain;
+    type Preadjoint<'a> : Adjointable<Ypre, X, Codomain=Self::PreadjointCodomain> where Self : 'a;
+
+    /// Form the preadjoint operator of `self`.
+    fn preadjoint(&self) -> Self::Preadjoint<'_>;
+}
+
+/// Adjointable operators $A: X → Y$ between reflexive spaces $X$ and $Y$.
+pub trait SimplyAdjointable<X> : Adjointable<X,<Self as Linear<X>>::Codomain> {}
+impl<'a,X,T> SimplyAdjointable<X> for T where T : Adjointable<X,<Self as Linear<X>>::Codomain> {}
+
+/// The identity operator
+#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)]
+pub struct IdOp<X : Clone> (PhantomData<X>);
+
+impl<X> IdOp<X> where X : Clone {
+    fn new() -> IdOp<X> { IdOp(PhantomData) }
+}
+
+impl<X> Linear<X> for IdOp<X> where X : Clone {
+    type Codomain = X;
+    fn apply(&self, x : &X) -> X {
+        x.clone()
+    }
+}
+
+#[replace_float_literals(F::cast_from(literal))]
+impl<F : Num, X, Y> GEMV<F, X, Y> for IdOp<X> where Y : AXPY<F, X>, X : Clone {
+    // Computes  `y = αAx + βy`, where `A` is `Self`.
+    fn gemv(&self, y : &mut Y, α : F, x : &X, β : F) {
+        y.axpy(α, x, β)
+    }
+
+    fn apply_mut(&self, y : &mut Y, x : &X){
+        y.copy_from(x);
+    }
+}
+
+impl<X> BoundedLinear<X> for IdOp<X> where X : Clone {
+    type FloatType = float;
+    fn opnorm_bound(&self) -> float { 1.0 }
+}
+
+impl<X> Adjointable<X,X> for IdOp<X> where X : Clone {
+    type AdjointCodomain=X;
+    type Adjoint<'a> = IdOp<X> where X : 'a;
+    fn adjoint(&self) -> Self::Adjoint<'_> { IdOp::new() }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/linsolve.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,90 @@
+/*!
+Linear solvers for small problems.
+*/
+
+use crate::types::Float;
+use std::mem::MaybeUninit;
+
+/// Gaussian elimination for $AX=B$, where $A$ and $B$ are both stored in `ab`,
+/// $A \in \mathbb{R}^{M \times M}$ and $X, B \in \mathbb{R}^{M \times K}$.
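+///
+/// A small sketch of the augmented layout `[A | B]` (here $M = 2$, $K = 1$, $N = 3$):
+/// ```
+/// use alg_tools::linsolve::linsolve0;
+///
+/// // Solve x + y = 3, x - y = 1, i.e., x = 2, y = 1.
+/// let ab = [[1.0,  1.0, 3.0],
+///           [1.0, -1.0, 1.0]];
+/// let x : [[f64; 1]; 2] = linsolve0(ab);
+/// assert_eq!(x, [[2.0], [1.0]]);
+/// ```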
+pub fn linsolve0<F : Float, const M : usize, const N : usize, const K : usize>(
+    mut ab : [[F; N]; M]
+) -> [[F; K]; M] {
+    assert_eq!(M + K , N);
+
+    let mut k = 0;
+
+    // Convert to row-echelon form
+    for h in 0..(M-1) {
+        // Find pivotable column (has some non-zero entries in rows ≥ h)
+        'find_pivot: while k < N {
+            let (mut î, mut v) = (h, ab[h][k].abs());
+            // Find row ≥ h of maximum absolute value in this column
+            for i in (h+1)..M {
+                let ṽ = ab[i][k].abs();
+                if ṽ > v  {
+                    î = i;
+                    v = ṽ;
+                }
+            }
+            if v > F::ZERO {
+                ab.swap(h, î);
+                for i in (h+1)..M {
+                    let f = ab[i][k] / ab[h][k];
+                    ab[i][k] = F::ZERO;
+                    for j in (k+1)..N {
+                        ab[i][j] -= ab[h][j]*f;
+                    }
+                }
+                k += 1;
+                break 'find_pivot;
+            }
+            k += 1
+        }
+    }
+
+    // Solve UAX = UB for X, where U represents the transformations performed above,
+    // so that UA is an upper triangular matrix.
+    // This use of MaybeUninit assumes F : Copy. Otherwise undefined behaviour may occur.
+    let mut x : [[MaybeUninit<F>; K]; M] = core::array::from_fn(|_| MaybeUninit::uninit_array::<K>() );
+    //unsafe { std::mem::MaybeUninit::uninit().assume_init() };
+    for i in (0..M).rev() {
+        for 𝓁 in 0..K {
+            let mut tmp  = ab[i][M+𝓁];
+            for j in (i+1)..M {
+                tmp -= ab[i][j] * unsafe { *(x[j][𝓁].assume_init_ref()) };
+            }
+            tmp /= ab[i][i];
+            x[i][𝓁].write(tmp);
+        }
+    }
+    //unsafe { MaybeUninit::array_assume_init(x) };
+    let xinit = unsafe {
+        //core::intrinsics::assert_inhabited::<[[F; K]; M]>();
+        (&x as *const _ as *const [[F; K]; M]).read()
+    };
+
+    std::mem::forget(x);
+    xinit
+}
+
+/// Gaussian elimination for $Ax=b$, where $A$ and $b$ are both stored in `ab`,
+/// $A \in \mathbb{R}^{M \times M}$ and $x, b \in \mathbb{R}^M$.
+#[inline]
+pub fn linsolve<F : Float, const M : usize, const N : usize>(ab : [[F; N]; M]) -> [F; M] {
+    let x : [[F; 1]; M] = linsolve0(ab);
+    unsafe { *((&x as *const [F; 1]) as *const [F; M] ) }
+}
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn linsolve_test() {
+        let ab1 = [[1.0, 2.0, 3.0], [2.0, 1.0, 6.0]];
+        assert_eq!(linsolve(ab1), [3.0, 0.0]);
+        let ab2 = [[1.0, 2.0, 0.0, 1.0], [4.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]];
+        assert_eq!(linsolve(ab2), [0.0, 0.5, 0.0]);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/loc.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,530 @@
+/*!
+Array containers with vectorspace operations on floats.
+For working with vectors in $ℝ^2$ or $ℝ^3$.
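+
+A minimal usage sketch (the arithmetic, norms, and projections are defined below; `f64` is
+assumed to implement the crate's `Float`):
+```
+use alg_tools::loc::Loc;
+use alg_tools::norms::{Norm, L1, Euclidean};
+
+let a = Loc([1.0, 2.0]);
+let b = Loc([3.0, 4.0]);
+assert_eq!(&a + &b, Loc([4.0, 6.0]));
+assert_eq!(a.norm(L1), 3.0);
+assert_eq!(b.norm2(), 5.0);
+```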
+*/
+
+use std::ops::{Add,Sub,AddAssign,SubAssign,Mul,Div,MulAssign,DivAssign,Neg,Index,IndexMut};
+use std::slice::{Iter,IterMut};
+use crate::types::{Float,Num,SignedNum};
+use crate::maputil::{FixedLength,FixedLengthMut,map1,map2,map1_mut,map2_mut};
+use crate::norms::*;
+use crate::linops::AXPY;
+use serde::ser::{Serialize, Serializer, SerializeSeq};
+
+#[derive(Copy,Clone,Debug,PartialEq,Eq)]
+pub struct Loc<F, const N : usize>(pub [F; N]);
+
+// Need to manually implement as [F; N] serialisation is provided only for some N.
+impl<F, const N : usize> Serialize for Loc<F, N>
+where
+    F: Serialize,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut seq = serializer.serialize_seq(Some(N))?;
+        for e in self.iter() {
+            seq.serialize_element(e)?;
+        }
+        seq.end()
+    }
+}
+
+impl<F, const N : usize> Loc<F, N> {
+    #[inline]
+    pub fn new(arr : [F; N]) -> Self {
+        Loc(arr)
+    }
+    
+    #[inline]
+    pub fn iter(&self) -> Iter<'_, F> {
+        self.0.iter()
+    }
+
+    #[inline]
+    pub fn iter_mut(&mut self) -> IterMut<'_, F> {
+        self.0.iter_mut()
+    }
+}
+
+impl<F : Copy, const N : usize> Loc<F, N> {
+    #[inline]
+    pub fn map<H, G : Fn(F) -> H>(&self, g : G) -> Loc<H, N> {
+        Loc::new(map1(self, |u| g(*u)))
+    }
+
+    #[inline]
+    pub fn map2<H, G : Fn(F, F) -> H>(&self, other : &Self, g : G) -> Loc<H, N> {
+        Loc::new(map2(self, other, |u, v| g(*u, *v)))
+    }
+
+    #[inline]
+    pub fn map_mut<G : Fn(&mut F)>(&mut self, g : G) {
+        map1_mut(self, g)
+    }
+
+    #[inline]
+    pub fn map2_mut<G : Fn(&mut F, F)>(&mut self, other : &Self, g : G) {
+        map2_mut(self, other, |u, v| g(u, *v))
+    }
+
+    #[inline]
+    pub fn product_map<G : Fn(F) -> A, A : Num>(&self, f : G) -> A {
+        match N {
+            1 => f(unsafe { *self.0.get_unchecked(0) }),
+            2 => f(unsafe { *self.0.get_unchecked(0) }) *
+                 f(unsafe { *self.0.get_unchecked(1) }),
+            3 => f(unsafe { *self.0.get_unchecked(0) }) *
+                 f(unsafe { *self.0.get_unchecked(1) }) *
+                 f(unsafe { *self.0.get_unchecked(2) }),
+            _ => self.iter().fold(A::ONE, |m, &x| m*f(x))
+        }
+    }
+}
+
+#[macro_export]
+macro_rules! loc {
+    ($($x:expr),+ $(,)?) => { Loc::new([$($x),+]) }
+}
+
+// Conversions
+
+impl<F, const N : usize> From<[F; N]> for Loc<F, N> {
+    #[inline]
+    fn from(other: [F; N]) -> Loc<F, N> {
+        Loc(other)
+    }
+}
+
+/*impl<F : Copy, const N : usize> From<&[F; N]> for Loc<F, N> {
+    #[inline]
+    fn from(other: &[F; N]) -> Loc<F, N> {
+        Loc(*other)
+    }
+}*/
+
+impl<F> From<F> for Loc<F, 1> {
+    #[inline]
+    fn from(other: F) -> Loc<F, 1> {
+        Loc([other])
+    }
+}
+
+impl<F, const N : usize> From<Loc<F, N>> for [F; N] {
+    #[inline]
+    fn from(other : Loc<F, N>) -> [F; N] {
+        other.0
+    }
+}
+
+/*impl<F : Copy, const N : usize> From<&Loc<F, N>> for [F; N] {
+    #[inline]
+    fn from(other : &Loc<F, N>) -> [F; N] {
+        other.0
+    }
+}*/
+
+
+impl<F, const N : usize> IntoIterator for Loc<F, N> {
+    type Item = <[F; N] as IntoIterator>::Item;
+    type IntoIter = <[F; N] as IntoIterator>::IntoIter;
+
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.into_iter()
+    }
+}
+
+// Indexing
+
+impl<F, Ix, const N : usize> Index<Ix> for Loc<F,N>
+where [F; N] : Index<Ix> {
+    type Output = <[F; N] as Index<Ix>>::Output;
+
+    #[inline]
+    fn index(&self, ix : Ix) -> &Self::Output {
+        self.0.index(ix)
+    }
+}
+
+impl<F, Ix, const N : usize> IndexMut<Ix> for Loc<F,N>
+where [F; N] : IndexMut<Ix> {
+    #[inline]
+    fn index_mut(&mut self, ix : Ix) -> &mut Self::Output {
+        self.0.index_mut(ix)
+    }
+}
+
+// Arithmetic
+
+macro_rules! make_binop {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F : Num, const N : usize> $trait<Loc<F,N>> for Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(mut self, other : Loc<F, N>) -> Self::Output {
+                self.$fn_assign(other);
+                self
+            }
+        }
+
+        impl<'a, F : Num, const N : usize> $trait<&'a Loc<F,N>> for Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(mut self, other : &'a Loc<F, N>) -> Self::Output {
+                self.$fn_assign(other);
+                self
+            }
+        }
+
+        impl<'b, F : Num, const N : usize> $trait<Loc<F,N>> for &'b Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(self, other : Loc<F, N>) -> Self::Output {
+                self.map2(&other, |a, b| a.$fn(b))
+            }
+        }
+
+        impl<'a, 'b, F : Num, const N : usize> $trait<&'a Loc<F,N>> for &'b Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(self, other : &'a Loc<F, N>) -> Self::Output {
+                self.map2(other, |a, b| a.$fn(b))
+            }
+        }
+
+       impl<F : Num, const N : usize> $trait_assign<Loc<F,N>> for Loc<F, N> {
+            #[inline]
+            fn $fn_assign(&mut self, other : Loc<F, N>) {
+                self.map2_mut(&other, |a, b| a.$fn_assign(b))
+            }
+        }
+
+        impl<'a, F : Num, const N : usize> $trait_assign<&'a Loc<F,N>> for Loc<F, N> {
+            #[inline]
+            fn $fn_assign(&mut self, other : &'a Loc<F, N>) {
+                self.map2_mut(other, |a, b| a.$fn_assign(b))
+            }
+        }
+    }
+}
+
+make_binop!(Add, add, AddAssign, add_assign);
+make_binop!(Sub, sub, SubAssign, sub_assign);
+
+macro_rules! make_scalarop_rhs {
+    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+        impl<F : Num, const N : usize> $trait<F> for Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(self, b : F) -> Self::Output {
+                self.map(|a| a.$fn(b))
+            }
+        }
+
+        impl<'a, F : Num, const N : usize> $trait<&'a F> for Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(self, b : &'a F) -> Self::Output {
+                self.map(|a| a.$fn(*b))
+            }
+        }
+
+        impl<'b, F : Num, const N : usize> $trait<F> for &'b Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(self, b : F) -> Self::Output {
+                self.map(|a| a.$fn(b))
+            }
+        }
+
+        impl<'a, 'b, F : Float, const N : usize> $trait<&'a F> for &'b Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(self, b : &'a F) -> Self::Output {
+                self.map(|a| a.$fn(*b))
+            }
+        }
+
+        impl<F : Num, const N : usize> $trait_assign<F> for Loc<F, N> {
+            #[inline]
+            fn $fn_assign(&mut self, b : F) {
+                self.map_mut(|a| a.$fn_assign(b));
+            }
+        }
+
+        impl<'a, F : Num, const N : usize> $trait_assign<&'a F> for Loc<F, N> {
+            #[inline]
+            fn $fn_assign(&mut self, b : &'a F) {
+                self.map_mut(|a| a.$fn_assign(*b));
+            }
+        }
+    }
+}
+
+
+make_scalarop_rhs!(Mul, mul, MulAssign, mul_assign);
+make_scalarop_rhs!(Div, div, DivAssign, div_assign);
+
+macro_rules! make_unaryop {
+    ($trait:ident, $fn:ident) => {
+        impl<F : SignedNum, const N : usize> $trait for Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(mut self) -> Self::Output {
+                self.map_mut(|a| *a = (*a).$fn());
+                self
+            }
+        }
+
+        impl<'a, F : SignedNum, const N : usize> $trait for &'a Loc<F, N> {
+            type Output = Loc<F, N>;
+            #[inline]
+            fn $fn(self) -> Self::Output {
+                self.map(|a| a.$fn())
+            }
+        }
+    }
+}
+
+make_unaryop!(Neg, neg);
+
+macro_rules! make_scalarop_lhs {
+    ($trait:ident, $fn:ident; $($f:ident)+) => { $(
+        impl<const N : usize> $trait<Loc<$f,N>> for $f {
+            type Output = Loc<$f, N>;
+            #[inline]
+            fn $fn(self, v : Loc<$f,N>) -> Self::Output {
+                v.map(|b| self.$fn(b))
+            }
+        }
+
+        impl<'a, const N : usize> $trait<&'a Loc<$f,N>> for $f {
+            type Output = Loc<$f, N>;
+            #[inline]
+            fn $fn(self, v : &'a Loc<$f,N>) -> Self::Output {
+                v.map(|b| self.$fn(b))
+            }
+        }
+
+        impl<'b, const N : usize> $trait<Loc<$f,N>> for &'b $f {
+            type Output = Loc<$f, N>;
+            #[inline]
+            fn $fn(self, v : Loc<$f,N>) -> Self::Output {
+                v.map(|b| self.$fn(b))
+            }
+        }
+
+        impl<'a, 'b, const N : usize> $trait<&'a Loc<$f,N>> for &'b $f {
+            type Output = Loc<$f, N>;
+            #[inline]
+            fn $fn(self, v : &'a Loc<$f, N>) -> Self::Output {
+                v.map(|b| self.$fn(b))
+            }
+        }
+    )+ }
+}
+
+make_scalarop_lhs!(Mul, mul; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
+make_scalarop_lhs!(Div, div; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize);
+
+// Norms
+
+macro_rules! domination {
+    ($norm:ident, $dominates:ident) => {
+        impl<F : Float, const N : usize> Dominated<F, $dominates, Loc<F, N>> for $norm {
+            #[inline]
+            fn norm_factor(&self, _p : $dominates) -> F {
+                F::ONE
+            }
+            #[inline]
+            fn from_norm(&self, p_norm : F, _p : $dominates) -> F {
+                p_norm
+            }
+        }
+    };
+    ($norm:ident, $dominates:ident, $fn:path) => {
+        impl<F : Float, const N : usize> Dominated<F, $dominates, Loc<F, N>> for $norm {
+            #[inline]
+            fn norm_factor(&self, _p : $dominates) -> F {
+                $fn(F::cast_from(N))
+            }
+        }
+    };
+}
+
+domination!(L1, L1);
+domination!(L2, L2);
+domination!(Linfinity, Linfinity);
+
+domination!(L1, L2, F::sqrt);
+domination!(L2, Linfinity, F::sqrt);
+domination!(L1, Linfinity, std::convert::identity);
+
+domination!(Linfinity, L1);
+domination!(Linfinity, L2);
+domination!(L2, L1);
+
+impl<F : Num,const N : usize> Dot<Loc<F, N>,F> for Loc<F, N> {
+    /// This implementation is not stabilised as it's meant to be used for very small vectors.
+    /// Use [`nalgebra`] for larger vectors.
+    #[inline]
+    fn dot(&self, other : &Loc<F, N>) -> F {
+        self.0.iter()
+              .zip(other.0.iter())
+              .fold(F::ZERO, |m, (&v, &w)| m + v * w)
+    }
+}
+
+impl<F : Float,const N : usize> Euclidean<F> for Loc<F, N> {
+    type Output = Self;
+
+    #[inline]
+    fn similar_origin(&self) -> Self {
+        Self::ORIGIN
+    }
+
+    /// This implementation is not stabilised as it's meant to be used for very small vectors.
+    /// Use [`nalgebra`] for larger vectors.
+    #[inline]
+    fn norm2_squared(&self) -> F {
+        self.iter().fold(F::ZERO, |m, &v| m + v * v)
+    }
+
+    fn dist2_squared(&self, other : &Self) -> F {
+        self.iter()
+            .zip(other.iter())
+            .fold(F::ZERO, |m, (&v, &w)| { let d = v - w; m + d * d })
+    }
+
+    #[inline]
+    fn norm2(&self) -> F {
+        // Optimisation for N==1 that avoids squaring and square rooting.
+        if N==1 {
+            unsafe { self.0.get_unchecked(0) }.abs()
+        } else {
+            self.norm2_squared().sqrt()
+        }
+    }
+
+    #[inline]
+    fn dist2(&self, other : &Self) -> F {
+        // Optimisation for N==1 that avoids squaring and square rooting.
+        if N==1 {
+            unsafe { *self.0.get_unchecked(0) - *other.0.get_unchecked(0) }.abs()
+        } else {
+            self.dist2_squared(other).sqrt()
+        }
+    }
+}
+
+impl<F : Num, const N : usize> Loc<F, N> {
+    pub const ORIGIN : Self = Loc([F::ZERO; N]);
+}
+
+impl<F : Float,const N : usize> StaticEuclidean<F> for Loc<F, N> {
+    #[inline]
+    fn origin() -> Self {
+        Self::ORIGIN
+    }
+}
+
+impl<F : Float, const N : usize> Norm<F, L2> for Loc<F, N> {
+    #[inline]
+    fn norm(&self, _ : L2) -> F { self.norm2() }
+}
+
+impl<F : Float, const N : usize> Dist<F, L2> for Loc<F, N> {
+    #[inline]
+    fn dist(&self, other : &Self, _ : L2) -> F { self.dist2(other) }
+}
+
+impl<F : Float, const N : usize> Norm<F, L1> for Loc<F, N> {
+    /// This implementation is not stabilised as it's meant to be used for very small vectors.
+    /// Use [`nalgebra`] for larger vectors.
+    #[inline]
+    fn norm(&self, _ : L1) -> F {
+        self.iter().fold(F::ZERO, |m, v| m + v.abs())
+    }
+}
+
+impl<F : Float, const N : usize> Dist<F, L1> for Loc<F, N> {
+    #[inline]
+    fn dist(&self, other : &Self, _ : L1) -> F {
+        self.iter()
+            .zip(other.iter())
+            .fold(F::ZERO, |m, (&v, &w)| m + (v-w).abs() )
+    }
+}
+
+impl<F : Float, const N : usize> Projection<F, Linfinity> for Loc<F, N> {
+    #[inline]
+    fn proj_ball_mut(&mut self, ρ : F, _ : Linfinity) {
+        self.iter_mut().for_each(|v| *v = num_traits::clamp(*v, -ρ, ρ))
+    }
+}
+
+impl<F : Float, const N : usize> Norm<F, Linfinity> for Loc<F, N> {
+    /// This implementation is not stabilised as it's meant to be used for very small vectors.
+    /// Use [`nalgebra`] for larger vectors.
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> F {
+        self.iter().fold(F::ZERO, |m, v| m.max(v.abs()))
+    }
+}
+
+impl<F : Float, const N : usize> Dist<F, Linfinity> for Loc<F, N> {
+    #[inline]
+    fn dist(&self, other : &Self, _ : Linfinity) -> F {
+        self.iter()
+            .zip(other.iter())
+            .fold(F::ZERO, |m, (&v, &w)| m.max((v-w).abs()))
+    }
+}
+
+
+// Misc.
+
+impl<A, const N : usize> FixedLength<N> for Loc<A,N> {
+    type Iter = std::array::IntoIter<A, N>;
+    type Elem = A;
+    #[inline]
+    fn fl_iter(self) -> Self::Iter {
+        self.into_iter()
+    }
+}
+
+impl<A, const N : usize> FixedLengthMut<N> for Loc<A,N> {
+    type IterMut<'a> = std::slice::IterMut<'a, A> where A : 'a;
+    #[inline]
+    fn fl_iter_mut(&mut self) -> Self::IterMut<'_> {
+        self.iter_mut()
+    }
+}
+
+impl<'a, A, const N : usize> FixedLength<N> for &'a Loc<A,N> {
+    type Iter = std::slice::Iter<'a, A>;
+    type Elem = &'a A;
+    #[inline]
+    fn fl_iter(self) -> Self::Iter {
+        self.iter()
+    }
+}
+
+impl<F : Num, const N : usize> AXPY<F, Loc<F, N>> for Loc<F, N> {
+
+    #[inline]
+    fn axpy(&mut self, α : F, x : &Loc<F, N>, β : F) {
+        if β == F::ZERO {
+            map2_mut(self, x, |yi, xi| { *yi = α * (*xi) })
+        } else {
+            map2_mut(self, x, |yi, xi| { *yi = β * (*yi) + α * (*xi) })
+        }
+    }
+
+    #[inline]
+    fn copy_from(&mut self, x : &Loc<F, N>) {
+        map2_mut(self, x, |yi, xi| *yi = *xi )
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/logger.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,39 @@
+/*!
+Logging routines for intermediate computational results.
+*/
+
+use serde::Serialize;
+use crate::tabledump::TableDump;
+
+/// A log of items of type `V`.
+///
+/// The constructor takes no arguments; create the log with [`Logger::new`].
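+///
+/// A minimal sketch:
+/// ```
+/// use alg_tools::logger::Logger;
+///
+/// let mut log = Logger::new();
+/// log.log(1.0);
+/// log.log(2.0);
+/// assert_eq!(log.data().len(), 2);
+/// ```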
+#[derive(Debug, Clone)]
+pub struct Logger<V> {
+    data : Vec<V>,
+}
+
+impl<V> Logger<V> {
+    /// Create new log
+    pub fn new() -> Logger<V> {
+        Logger{ data : Vec::new() }
+    }
+
+    /// Append the value `v` to the end of the log.
+    pub fn log(&mut self, v : V) {
+        self.data.push(v);
+    }
+
+    /// Get logged data.
+    pub fn data(&self) -> &Vec<V> {
+        &self.data
+    }
+}
+
+impl<'a, V : Serialize + 'a> TableDump<'a> for Logger<V> {
+    type Iter = std::slice::Iter<'a, V>;
+
+    fn tabledump_entries(&'a self) -> Self::Iter {
+        self.data.iter()
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/mapping.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,64 @@
+/*!
+Traits for (mathematical) functions.
+*/
+
+use std::marker::PhantomData;
+use crate::types::{Float};
+use serde::Serialize;
+use crate::loc::Loc;
+
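+/// A mapping (function) from `Domain` to an associated codomain type.
+///
+/// A minimal implementation sketch (an illustrative squared-norm functional on `Loc`;
+/// not part of the library itself):
+/// ```
+/// use alg_tools::mapping::Mapping;
+/// use alg_tools::loc::Loc;
+/// use alg_tools::norms::Euclidean;
+///
+/// struct NormSquared;
+///
+/// impl<'a> Mapping<&'a Loc<f64, 2>> for NormSquared {
+///     type Codomain = f64;
+///     fn value(&self, x : &'a Loc<f64, 2>) -> f64 { x.norm2_squared() }
+/// }
+///
+/// assert_eq!(NormSquared.value(&Loc([3.0, 4.0])), 25.0);
+/// ```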
+pub trait Mapping<Domain> {
+    type Codomain;
+
+    fn value(&self, x : Domain) -> Self::Codomain;
+}
+
+pub trait RealRefMapping<F : Float, const N : usize>
+: for<'a> Mapping<&'a Loc<F, N>, Codomain=F> {}
+
+impl<F : Float, T, const N : usize> RealRefMapping<F, N> for T
+where T : for<'a> Mapping<&'a Loc<F, N>, Codomain=F> {}
+
+pub trait DifferentiableMapping<Domain> : Mapping<Domain> {
+    type Differential;
+
+    fn differential(&self, x : Domain) -> Self::Differential;
+}
+
+pub trait RealMapping<Domain> : Mapping<Domain> where Self::Codomain : Float {
+    fn minimise(&self, tolerance : Self::Codomain) -> (Domain, Self::Codomain);
+    fn maximise(&self, tolerance : Self::Codomain) -> (Domain, Self::Codomain);
+}
+
+//
+// Sum
+//
+
+#[derive(Serialize, Debug, Clone)]
+pub struct Sum<Domain, M : Mapping<Domain>> {
+    components : Vec<M>,
+    _domain : PhantomData<Domain>,
+}
+
+impl<Domain, M> Mapping<Domain> for Sum<Domain,M>
+where M : Mapping<Domain>,
+      M :: Codomain : std::iter::Sum,
+      Domain : Copy {
+
+    type Codomain = M::Codomain;
+
+    fn value(&self, x : Domain) -> Self::Codomain {
+        self.components.iter().map(|c| c.value(x)).sum()
+    }
+}
+
+impl<Domain, M> DifferentiableMapping<Domain> for Sum<Domain,M>
+where M : DifferentiableMapping<Domain>,
+      M :: Codomain : std::iter::Sum,
+      M :: Differential : std::iter::Sum,
+      Domain : Copy {
+
+    type Differential = M::Differential;
+
+    fn differential(&self, x : Domain) -> Self::Differential {
+        self.components.iter().map(|c| c.differential(x)).sum()
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/maputil.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,369 @@
+/*!
+Mapping utilities.
+*/
+
+use std::mem::MaybeUninit;
+use itertools::izip;
+
+pub trait FixedLength<const N : usize> {
+    type Elem;
+    type Iter : Iterator<Item=Self::Elem>;
+    fn fl_iter(self) -> Self::Iter;
+}
+
+pub trait FixedLengthMut<const N : usize> : FixedLength<N> {
+    type IterMut<'a> : Iterator<Item=&'a mut Self::Elem> where Self : 'a;
+    fn fl_iter_mut(&mut self) -> Self::IterMut<'_>;
+}
+
+impl<A, const N : usize> FixedLength<N> for [A; N] {
+    type Elem = A;
+    type Iter = std::array::IntoIter<A, N>;
+    #[inline]
+    fn fl_iter(self) -> Self::Iter {
+        self.into_iter()
+    }
+}
+
+impl<A, const N : usize> FixedLengthMut<N> for [A; N] {
+    type IterMut<'a> = std::slice::IterMut<'a, A> where A : 'a;
+    #[inline]
+    fn fl_iter_mut(&mut self) -> Self::IterMut<'_> {
+        self.iter_mut()
+    }
+}
+
+impl<'a, A, const N : usize> FixedLength<N> for &'a [A; N] {
+    type Elem = &'a A;
+    type Iter = std::slice::Iter<'a, A>;
+    #[inline]
+    fn fl_iter(self) -> Self::Iter {
+        self.iter()
+    }
+}
+
+macro_rules! tuple_or_singleton {
+    ($a:ident,) => { $a };
+    ($($a:ident),+) => { ($($a),+) }
+}
+
+macro_rules! make_mapmany {
+    ($name:ident, $name_indexed:ident, $var0:ident $($var:ident)* ;
+     $etype0:ident $($etype:ident)*, $ctype0:ident $($ctype:ident)*) => {
+        /// Map over multiple [`FixedLength`] containers, returning an array.
+        #[inline]
+        pub fn $name<
+            $etype0,
+            $($etype,)*
+            $ctype0 : FixedLength<N,Elem=$etype0>,
+            $($ctype : FixedLength<N,Elem=$etype>,)*
+            Res,
+            const N : usize
+        >(
+            $var0 : $ctype0,
+            $($var : $ctype,)*
+            f : impl Fn($etype0, $($etype),*) -> Res
+        ) -> [Res; N] {
+            let zipit = izip!($var0.fl_iter(), $($var.fl_iter()),*);
+            let map = zipit.map(|tuple_or_singleton!($var0, $($var),*)| f($var0, $($var),*));
+            collect_into_array_unchecked(map)
+        }
+
+        /// Map over multiple [`FixedLength`] containers, returning an array.
+        /// This version also passes the index to the mapping function.
+        #[inline]
+        pub fn $name_indexed<
+            $etype0,
+            $($etype,)*
+            $ctype0 : FixedLength<N,Elem=$etype0>,
+            $($ctype : FixedLength<N,Elem=$etype>,)*
+            Res,
+            const N : usize
+        >(
+            $var0 : $ctype0,
+            $($var : $ctype,)*
+            f : impl Fn(usize, $etype0, $($etype),*) -> Res
+        ) -> [Res; N] {
+            let zipit = (0..N).zip(izip!($var0.fl_iter(), $($var.fl_iter()),*));
+            let map = zipit.map(|(i, tuple_or_singleton!($var0, $($var),*))| f(i, $var0, $($var),*));
+            collect_into_array_unchecked(map)
+        }
+    }
+}
+
+make_mapmany!(map1, map1_indexed, a;           A,           CA);
+make_mapmany!(map2, map2_indexed, a b;         A B,         CA CB);
+make_mapmany!(map3, map3_indexed, a b c;       A B C,       CA CB CC);
+make_mapmany!(map4, map4_indexed, a b c d;     A B C D,     CA CB CC CD);
+make_mapmany!(map5, map5_indexed, a b c d e;   A B C D E,   CA CB CC CD CE);
+make_mapmany!(map6, map6_indexed, a b c d e f; A B C D E F, CA CB CC CD CE CF);
+
+macro_rules! make_mapmany_mut{
+    ($name:ident, $name_indexed:ident, $var0:ident $($var:ident)* ;
+     $etype0:ident $($etype:ident)*, $ctype0:ident $($ctype:ident)*) => {
+        /// Map over multiple [`FixedLength`] container inserting the result in the first.
+        #[inline]
+        pub fn $name<
+            $etype0,
+            $($etype,)*
+            $ctype0 : FixedLengthMut<N,Elem=$etype0>,
+            $($ctype : FixedLength<N,Elem=$etype>,)*
+            const N : usize
+        > (
+            $var0 : &mut $ctype0,
+            $($var : $ctype,)*
+            f : impl Fn(&mut $etype0, $($etype),*)
+        ) {
+            let zipit = izip!($var0.fl_iter_mut(), $($var.fl_iter()),*);
+            zipit.for_each(|tuple_or_singleton!($var0, $($var),*)| f($var0, $($var),*));
+        }
+
+        /// Map over multiple [`FixedLength`] container inserting the result in the first.
+        /// This version also passes the index to the mapping function.
+        #[inline]
+        pub fn $name_indexed<
+            $etype0,
+            $($etype,)*
+            $ctype0 : FixedLengthMut<N,Elem=$etype0>,
+            $($ctype : FixedLength<N,Elem=$etype>,)*
+            const N : usize
+        > (
+            $var0 : &mut $ctype0,
+            $($var : $ctype,)*
+            f : impl Fn(usize, &mut $etype0, $($etype),*)
+        ) {
+            let zipit = (0..N).zip(izip!($var0.fl_iter_mut(), $($var.fl_iter()),*));
+            zipit.for_each(|(i, tuple_or_singleton!($var0, $($var),*))| f(i, $var0, $($var),*));
+        }
+    }
+}
+
+make_mapmany_mut!(map1_mut, map1_indexed_mut, a;           A,           CA);
+make_mapmany_mut!(map2_mut, map2_indexed_mut, a b;         A B,         CA CB);
+make_mapmany_mut!(map3_mut, map3_indexed_mut, a b c;       A B C,       CA CB CC);
+make_mapmany_mut!(map4_mut, map4_indexed_mut, a b c d;     A B C D,     CA CB CC CD);
+make_mapmany_mut!(map5_mut, map5_indexed_mut, a b c d e;   A B C D E,   CA CB CC CD CE);
+make_mapmany_mut!(map6_mut, map6_indexed_mut, a b c d e f; A B C D E F, CA CB CC CD CE CF);
+
+
+/// Initialise an array of length `N` by calling `f` multiple times.
+#[inline]
+pub fn array_init<A, F :  Fn() -> A, const N : usize>(f : F) -> [A; N] {
+    //[(); N].map(|_| f())
+    core::array::from_fn(|_| f())
+}
+
+// /// Initialise an array of length `N` by calling `f` with the index of each element.
+// #[inline]
+// pub fn array_gen<A, F :  Fn(usize) -> A, const N : usize>(f : F) -> [A; N] {
+//     //[(); N].indexmap(|i, _| f(i))
+//     core::array::from_fn(f)
+// }
+
+
+pub struct FoldMap<I : Iterator<Item=A>, A, B, J : Copy, F : Fn(J, A) -> (J, B)> {
+    iter : I,
+    f : F,
+    j : J,
+}
+
+impl<A, B, I : Iterator<Item=A>, J : Copy, F : Fn(J, A) -> (J, B)> Iterator for FoldMap<I, A, B, J, F> {
+    type Item = B;
+    #[inline]
+    fn next(&mut self) -> Option<B> {
+        self.iter.next().map(|a| {
+            let (jnew, b) = (self.f)(self.j, a);
+            self.j = jnew;
+            b
+        })
+    }
+}
+
+pub struct IndexMap<I : Iterator<Item=A>, A, B, F : Fn(usize, A) -> B> {
+    iter : I,
+    f : F,
+    j : usize,
+}
+
+impl<A, B, I : Iterator<Item=A>, F : Fn(usize, A) -> B> Iterator for IndexMap<I, A, B, F> {
+    type Item = B;
+    #[inline]
+    fn next(&mut self) -> Option<B> {
+        self.iter.next().map(|a| {
+            let b = (self.f)(self.j, a);
+            self.j = self.j+1;
+            b
+        })
+    }
+}
+
+pub trait FoldMappable<A, B, J, F : Fn(J, A) -> (J, B)> : Sized {
+    type Output;
+    /// Fold and map over an array; typically used to carry a counter or other state along
+    /// while mapping.
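+    ///
+    /// For example (a sketch; the folded state here is just a running index):
+    /// ```
+    /// use alg_tools::maputil::FoldMappable;
+    ///
+    /// assert_eq!([10, 20, 30].foldmap(0, |j, a| (j + 1, a + j)), [10, 21, 32]);
+    /// ```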
+    fn foldmap(self, j : J, f : F) -> Self::Output;
+}
+
+pub trait IndexMappable<A, B, F : Fn(usize, A) -> B> : Sized {
+    type Output;
+    fn indexmap(self, f : F) -> Self::Output;
+}
+
+impl<'a, A, B, J : Copy, F : Fn(J, &'a A) -> (J, B)> FoldMappable<&'a A, B, J, F>
+for std::slice::Iter<'a, A> {
+    type Output = FoldMap<Self, &'a A, B, J, F>;
+    #[inline]
+    fn foldmap(self, j : J, f : F) -> Self::Output {
+        FoldMap { iter : self, j, f }
+    }
+}
+
+impl<'a, A, B, F : Fn(usize, &'a A) -> B> IndexMappable<&'a A, B, F>
+for std::slice::Iter<'a, A> {
+    type Output = IndexMap<Self, &'a A, B, F>;
+    #[inline]
+    fn indexmap(self, f : F) -> Self::Output {
+        IndexMap { iter : self, j : 0, f }
+    }
+}
+
+
+impl<A, B, J : Copy, F : Fn(J, A) -> (J, B), const N : usize> FoldMappable<A, B, J, F>
+for std::array::IntoIter<A, N> {
+    type Output = FoldMap<Self, A, B, J, F>;
+    #[inline]
+    fn foldmap(self, j : J, f : F) -> Self::Output {
+        FoldMap { iter : self, j, f }
+    }
+}
+
+impl<'a, A, B, F : Fn(usize, A) -> B, const N : usize> IndexMappable<A, B, F>
+for std::array::IntoIter<A, N> {
+    type Output = IndexMap<Self, A, B, F>;
+    #[inline]
+    fn indexmap(self, f : F) -> Self::Output {
+        IndexMap { iter : self, j : 0, f }
+    }
+}
+
+
+
+
+impl<A, B, J : Copy, F : Fn(J, A) -> (J, B), const N : usize> FoldMappable<A, B, J, F> for [A; N] {
+    type Output = [B; N];
+    #[inline]
+    fn foldmap(self, j : J, f : F) -> [B; N] {
+        // //let mut res : [MaybeUninit<B>; N] = unsafe { MaybeUninit::uninit().assume_init() };
+        // let mut res = MaybeUninit::uninit_array::<N>();
+        // for (a, i) in self.into_iter().zip(0..N) {
+        //     let (jnew, b) = f(j, a);
+        //     unsafe { *(res.get_unchecked_mut(i)) = MaybeUninit::new(b) };
+        //     j = jnew;
+        // }
+        // //unsafe { res.as_mut_ptr().cast::<[B; N]>().read() }
+        // unsafe { MaybeUninit::array_assume_init(res) }
+        let it = self.into_iter().foldmap(j, f);
+        collect_into_array_unchecked(it)
+    }
+}
+
+impl<A, B, F : Fn(usize, A) -> B, const N : usize> IndexMappable<A, B, F> for [A; N] {
+    type Output= [B; N];
+    #[inline]
+    fn indexmap(self, f : F) -> [B; N] {
+        // //let mut res : [MaybeUninit<B>; N] = unsafe { MaybeUninit::uninit().assume_init() };
+        // let mut res = MaybeUninit::uninit_array::<N>();
+        // for (a, i) in self.into_iter().zip(0..N) {
+        //     let b = f(i, a);
+        //     unsafe { *(res.get_unchecked_mut(i)) = MaybeUninit::new(b) };
+        // }
+        // //unsafe { res.as_mut_ptr().cast::<[B; N]>().read() }
+        // unsafe { MaybeUninit::array_assume_init(res) }
+        let it = self.into_iter().indexmap(f);
+        collect_into_array_unchecked(it)
+    }
+}
+
+///
+/// This is taken and simplified from core::array to not involve `ControlFlow`,
+/// `Try` etc. (Pulling everything including `NeverShortCircuit` turned out
+/// too much to maintain here.)
+///
+/// Pulls `N` items from `iter` and returns them as an array. If the iterator
+/// yields fewer than `N` items, this simplified version panics (the original
+/// returns `None`), and all already yielded items are dropped.
+///
+/// Since the iterator is passed as a mutable reference and this function calls
+/// `next` at most `N` times, the iterator can still be used afterwards to
+/// retrieve the remaining items.
+///
+/// If `iter.next()` panics, all items already yielded by the iterator are
+/// dropped.
+#[inline]
+pub(crate) fn collect_into_array_unchecked<T, I : Iterator<Item=T>, const N: usize>(mut iter: I) -> [T; N]
+{
+    if N == 0 {
+        // SAFETY: An empty array is always inhabited and has no validity invariants.
+        return unsafe { core::mem::zeroed() };
+    }
+
+    struct Guard<'a, T, const N: usize> {
+        array_mut: &'a mut [MaybeUninit<T>; N],
+        initialized: usize,
+    }
+
+    impl<T, const N: usize> Drop for Guard<'_, T, N> {
+        #[inline]
+        fn drop(&mut self) {
+            debug_assert!(self.initialized <= N);
+
+            // SAFETY: this slice will contain only initialized objects.
+            unsafe {
+                core::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(
+                    &mut self.array_mut.get_unchecked_mut(..self.initialized),
+                ));
+            }
+        }
+    }
+
+    let mut array = MaybeUninit::uninit_array::<N>();
+    let mut guard = Guard { array_mut: &mut array, initialized: 0 };
+
+    while let Some(item) = iter.next() {
+        // SAFETY: `guard.initialized` starts at 0, is increased by one in the
+        // loop and the loop is aborted once it reaches N (which is
+        // `array.len()`).
+        unsafe {
+            guard.array_mut.get_unchecked_mut(guard.initialized).write(item);
+        }
+        guard.initialized += 1;
+
+        // Check if the whole array was initialized.
+        if guard.initialized == N {
+            core::mem::forget(guard);
+
+            // SAFETY: the condition above asserts that all elements are
+            // initialized.
+            let out = unsafe { MaybeUninit::array_assume_init(array) };
+            return out;
+        }
+    }
+
+    unreachable!("Something went wrong with iterator length")
+}
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn mapx_test() {
+        let a = [0,1,2];
+        let mut b = [2,1,0];
+        assert_eq!(map1(a, |x| x+1), [1,2,3]);
+        assert_eq!(map2(a, b, |x, y| x+y), [2,2,2]);
+        assert_eq!(map1_indexed(a, |i, y| y-i), [0,0,0]);
+        map1_indexed_mut(&mut b, |i, y| *y=i);
+        assert_eq!(b, a);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/nalgebra_support.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,297 @@
+//! Integration with nalgebra.
+
+use nalgebra::{
+    Matrix, Storage, StorageMut, OMatrix, Dim, DefaultAllocator, Scalar,
+    ClosedMul, ClosedAdd, SimdComplexField, Vector, OVector, RealField,
+    LpNorm, UniformNorm
+};
+use nalgebra::Norm as NalgebraNorm;
+use nalgebra::base::constraint::{
+    ShapeConstraint, SameNumberOfRows, SameNumberOfColumns
+};
+use nalgebra::base::dimension::*;
+use nalgebra::base::allocator::Allocator;
+use std::ops::Mul;
+use num_traits::identities::{Zero, One};
+use crate::linops::*;
+use crate::norms::Dot;
+use crate::types::Float;
+use crate::norms::*;
+
+impl<SM,SV,N,M,K,E> Linear<Matrix<E,M,K,SV>> for Matrix<E,N,M,SM>
+where SM: Storage<E,N,M>, SV: Storage<E,M,K>,
+        N : Dim, M : Dim, K : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One,
+        DefaultAllocator : Allocator<E,N,K>,
+        DefaultAllocator : Allocator<E,M,K>,
+        DefaultAllocator : Allocator<E,N,M>,
+        DefaultAllocator : Allocator<E,M,N> {
+    type Codomain = OMatrix<E,N,K>;
+
+    #[inline]
+    fn apply(&self, x : &Matrix<E,M,K,SV>) -> Self::Codomain {
+        self.mul(x)
+    }
+}
+
+impl<SM,SV1,SV2,N,M,K,E> GEMV<E, Matrix<E,M,K,SV1>, Matrix<E,N,K,SV2>> for Matrix<E,N,M,SM>
+where SM: Storage<E,N,M>, SV1: Storage<E,M,K>, SV2: StorageMut<E,N,K>,
+      N : Dim, M : Dim, K : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One + Float,
+      DefaultAllocator : Allocator<E,N,K>,
+      DefaultAllocator : Allocator<E,M,K>,
+      DefaultAllocator : Allocator<E,N,M>,
+      DefaultAllocator : Allocator<E,M,N> {
+
+    #[inline]
+    fn gemv(&self, y : &mut Matrix<E,N,K,SV2>, α : E, x : &Matrix<E,M,K,SV1>, β : E) {
+        Matrix::gemm(y, α, self, x, β)
+    }
+
+    #[inline]
+    fn apply_mut(&self, y : &mut Matrix<E,N,K,SV2>, x : &Matrix<E,M,K,SV1>) {
+        self.mul_to(x, y)
+    }
+}
+
+impl<SM,SV1,M,E> AXPY<E, Vector<E,M,SV1>> for Vector<E,M,SM>
+where SM: StorageMut<E,M>, SV1: Storage<E,M>,
+      M : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One + Float,
+      DefaultAllocator : Allocator<E,M> {
+
+    #[inline]
+    fn axpy(&mut self, α : E, x : &Vector<E,M,SV1>, β : E) {
+        Matrix::axpy(self, α, x, β)
+    }
+
+    #[inline]
+    fn copy_from(&mut self, y : &Vector<E,M,SV1>) {
+        Matrix::copy_from(self, y)
+    }
+}
+
+impl<SM,M,E> Projection<E, Linfinity> for Vector<E,M,SM>
+where SM: StorageMut<E,M>,
+      M : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One + Float + RealField,
+      DefaultAllocator : Allocator<E,M> {
+    #[inline]
+    fn proj_ball_mut(&mut self, ρ : E, _ : Linfinity) {
+        self.iter_mut().for_each(|v| *v = num_traits::clamp(*v, -ρ, ρ))
+    }
+}
+
+impl<'own,SV1,SV2,SM,N,M,K,E> Adjointable<Matrix<E,M,K,SV1>,Matrix<E,N,K,SV2>>
+for Matrix<E,N,M,SM>
+where SM: Storage<E,N,M>, SV1: Storage<E,M,K>, SV2: Storage<E,N,K>,
+      N : Dim, M : Dim, K : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One + SimdComplexField,
+      DefaultAllocator : Allocator<E,N,K>,
+      DefaultAllocator : Allocator<E,M,K>,
+      DefaultAllocator : Allocator<E,N,M>,
+      DefaultAllocator : Allocator<E,M,N> {
+    type AdjointCodomain = OMatrix<E,M,K>;
+    type Adjoint<'a> = OMatrix<E,M,N> where SM : 'a;
+
+    #[inline]
+    fn adjoint(&self) -> Self::Adjoint<'_> {
+        Matrix::adjoint(self)
+    }
+}
+
+impl<E,M,S,Si> Dot<Vector<E,M,Si>,E>
+for Vector<E,M,S>
+where M : Dim,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One,
+      S : Storage<E,M>,
+      Si : Storage<E,M>,
+      DefaultAllocator : Allocator<E,M> {
+
+    #[inline]
+    fn dot(&self, other : &Vector<E,M,Si>) -> E {
+        Vector::<E,M,S>::dot(self, other)
+    }
+}
+
+/// This function is [`nalgebra::EuclideanNorm::metric_distance`] without the `sqrt`.
+#[inline]
+fn metric_distance_squared<T, R1, C1, S1, R2, C2, S2>(
+    /*ed: &EuclideanNorm,*/
+    m1: &Matrix<T, R1, C1, S1>,
+    m2: &Matrix<T, R2, C2, S2>,
+) -> T::SimdRealField
+where
+    T:  SimdComplexField,
+    R1: Dim,
+    C1: Dim,
+    S1: Storage<T, R1, C1>,
+    R2: Dim,
+    C2: Dim,
+    S2: Storage<T, R2, C2>,
+    ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
+{
+    m1.zip_fold(m2, T::SimdRealField::zero(), |acc, a, b| {
+        let diff = a - b;
+        acc + diff.simd_modulus_squared()
+    })
+}
+
+// TODO: should allow different input storages in `Euclidean`.
+
+impl<E,M,S> Euclidean<E>
+for Vector<E,M,S>
+where M : Dim,
+      S : StorageMut<E,M>,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField,
+      DefaultAllocator : Allocator<E,M> {
+
+    type Output = OVector<E, M>;
+    
+    #[inline]
+    fn similar_origin(&self) -> OVector<E, M> {
+        OVector::zeros_generic(M::from_usize(self.len()), Const)
+    }
+
+    #[inline]
+    fn norm2_squared(&self) -> E {
+        Vector::<E,M,S>::norm_squared(self)
+    }
+
+    #[inline]
+    fn dist2_squared(&self, other : &Self) -> E {
+        metric_distance_squared(self, other)
+    }
+}
+
+impl<E,M,S> StaticEuclidean<E>
+for Vector<E,M,S>
+where M : DimName,
+      S : StorageMut<E,M>,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField,
+      DefaultAllocator : Allocator<E,M> {
+
+    #[inline]
+    fn origin() -> OVector<E, M> {
+        OVector::zeros()
+    }
+}
+
+impl<E,M,S> Norm<E, L1>
+for Vector<E,M,S>
+where M : Dim,
+      S : StorageMut<E,M>,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField,
+      DefaultAllocator : Allocator<E,M> {
+
+    #[inline]
+    fn norm(&self, _ : L1) -> E {
+        LpNorm(1).norm(self)
+    }
+}
+
+impl<E,M,S> Dist<E, L1>
+for Vector<E,M,S>
+where M : Dim,
+      S : StorageMut<E,M>,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField,
+      DefaultAllocator : Allocator<E,M> {
+    #[inline]
+    fn dist(&self, other : &Self, _ : L1) -> E {
+        LpNorm(1).metric_distance(self, other)
+    }
+}
+
+impl<E,M,S> Norm<E, L2>
+for Vector<E,M,S>
+where M : Dim,
+      S : StorageMut<E,M>,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField,
+      DefaultAllocator : Allocator<E,M> {
+
+    #[inline]
+    fn norm(&self, _ : L2) -> E {
+        LpNorm(2).norm(self)
+    }
+}
+
+impl<E,M,S> Dist<E, L2>
+for Vector<E,M,S>
+where M : Dim,
+      S : StorageMut<E,M>,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField,
+      DefaultAllocator : Allocator<E,M> {
+    #[inline]
+    fn dist(&self, other : &Self, _ : L2) -> E {
+        LpNorm(2).metric_distance(self, other)
+    }
+}
+
+impl<E,M,S> Norm<E, Linfinity>
+for Vector<E,M,S>
+where M : Dim,
+      S : StorageMut<E,M>,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField,
+      DefaultAllocator : Allocator<E,M> {
+
+    #[inline]
+    fn norm(&self, _ : Linfinity) -> E {
+        UniformNorm.norm(self)
+    }
+}
+
+impl<E,M,S> Dist<E, Linfinity>
+for Vector<E,M,S>
+where M : Dim,
+      S : StorageMut<E,M>,
+      E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField,
+      DefaultAllocator : Allocator<E,M> {
+    #[inline]
+    fn dist(&self, other : &Self, _ : Linfinity) -> E {
+        UniformNorm.metric_distance(self, other)
+    }
+}
+
+/// Helper trait to hide the symbols of `nalgebra::RealField`
+/// while allowing nalgebra to be used in subroutines.
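+///
+/// For `f32` and `f64` the conversions are identities; a small sketch:
+/// ```
+/// use alg_tools::nalgebra_support::ToNalgebraRealField;
+///
+/// let x = 1.5_f64;
+/// assert_eq!(f64::from_nalgebra(x.to_nalgebra()), 1.5);
+/// ```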
+pub trait ToNalgebraRealField : Float {
+    type NalgebraType : RealField;
+    type MixedType : RealField + Float;
+
+    fn to_nalgebra(self) -> Self::NalgebraType;
+    fn to_nalgebra_mixed(self) -> Self::MixedType;
+
+    fn from_nalgebra(t : Self::NalgebraType) -> Self;
+    fn from_nalgebra_mixed(t : Self::MixedType) -> Self;
+}
+
+impl ToNalgebraRealField for f32 {
+    type NalgebraType = f32;
+    type MixedType = f32;
+
+    #[inline]
+    fn to_nalgebra(self) -> Self::NalgebraType { self }
+
+    #[inline]
+    fn to_nalgebra_mixed(self) -> Self::MixedType { self }
+
+    #[inline]
+    fn from_nalgebra(t : Self::NalgebraType) -> Self { t }
+
+    #[inline]
+    fn from_nalgebra_mixed(t : Self::MixedType) -> Self { t }
+
+}
+
+impl ToNalgebraRealField for f64 {
+    type NalgebraType = f64;
+    type MixedType = f64;
+
+    #[inline]
+    fn to_nalgebra(self) -> Self::NalgebraType { self }
+
+    #[inline]
+    fn to_nalgebra_mixed(self) -> Self::MixedType { self }
+
+    #[inline]
+    fn from_nalgebra(t : Self::NalgebraType) -> Self { t }
+
+    #[inline]
+    fn from_nalgebra_mixed(t : Self::MixedType) -> Self { t }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/nanleast.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,36 @@
+use crate::types::Float;
+use std::cmp::{PartialOrd,Ord,Ordering,Ordering::*};
+
+/// A container for floating point numbers, sorting NaN as the least element
+/// (to essentially ignore erroneous computations producing NaNs).
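+///
+/// A small sketch of the resulting ordering (assuming `f64` implements the crate's `Float`):
+/// ```
+/// use alg_tools::nanleast::NaNLeast;
+///
+/// let mut v = [NaNLeast(1.0), NaNLeast(f64::NAN), NaNLeast(0.0)];
+/// v.sort();
+/// assert!(v[0].0.is_nan());
+/// assert_eq!((v[1].0, v[2].0), (0.0, 1.0));
+/// ```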
+#[derive(Debug, Clone, Copy)]
+pub struct NaNLeast<F : Float>(pub F);
+
+/// Compare floating point numbers, ordering NaN as the least element.
+
+impl<F : Float> Ord for NaNLeast<F> {
+    #[inline]
+    fn cmp(&self, NaNLeast(b) : &Self) -> Ordering {
+        let NaNLeast(a) = self;
+        match a.partial_cmp(b) {
+            None => match (a.is_nan(), b.is_nan()) {
+                (true, false) => Less,
+                (false, true) => Greater,
+                _ => Equal // Both are NaN; the case (false, false) cannot occur here.
+            }
+            Some(order) => order
+        }
+    }
+}
+
+impl<F : Float> PartialEq for NaNLeast<F> {
+    #[inline]
+    fn eq(&self, other : &Self) -> bool { self.cmp(other)==Equal }
+}
+
+impl<F : Float> Eq for NaNLeast<F> { }
+
+impl<F : Float> PartialOrd for NaNLeast<F> {
+    #[inline]
+    fn partial_cmp(&self, other : &Self) -> Option<Ordering> { Some(self.cmp(other)) }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/norms.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,233 @@
+/*!
+Norms, projections, etc.
+*/
+
+use crate::types::*;
+use std::ops::{Mul,MulAssign,Div,DivAssign,Add,Sub,AddAssign,SubAssign,Neg};
+use serde::Serialize;
+//use std::iter::Sum;
+
+//
+// Dot products
+//
+
+/// Space with a defined dot product.
+pub trait Dot<U,F> {
+    fn dot(&self, other : &U) -> F;
+}
+
+//self.iter().zip(other.iter()).map(|(&x,&y)| x*y).sum()
+
+//
+// Euclidean spaces
+//
+
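+/// A (real) vector space with Euclidean structure: a dot product, 2-norms, 2-distances,
+/// and projections onto 2-balls.
+///
+/// An illustrative sketch using `Loc` from [`crate::loc`]:
+/// ```
+/// use alg_tools::loc::Loc;
+/// use alg_tools::norms::Euclidean;
+///
+/// let x = Loc([3.0, 4.0]);
+/// assert_eq!(x.norm2(), 5.0);
+/// assert_eq!(x.proj_ball2(2.5), Loc([1.5, 2.0]));
+/// ```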
+pub trait Euclidean<F : Float> : Sized + Dot<Self,F>
+        + Mul<F, Output=<Self as Euclidean<F>>::Output> + MulAssign<F>
+        + Div<F, Output=<Self as Euclidean<F>>::Output> + DivAssign<F>
+        + Add<Self, Output=<Self as Euclidean<F>>::Output>
+        + Sub<Self, Output=<Self as Euclidean<F>>::Output>
+        + for<'b> Add<&'b Self, Output=<Self as Euclidean<F>>::Output>
+        + for<'b> Sub<&'b Self, Output=<Self as Euclidean<F>>::Output>
+        + AddAssign<Self> + for<'b> AddAssign<&'b Self>
+        + SubAssign<Self> + for<'b> SubAssign<&'b Self>
+        + Neg<Output=<Self as Euclidean<F>>::Output> {
+    type Output : Euclidean<F>;
+
+    /// Returns the origin of same dimensions as `self`.
+    fn similar_origin(&self) -> <Self as Euclidean<F>>::Output;
+
+    /// Calculate the square of the 2-norm.
+    #[inline]
+    fn norm2_squared(&self) -> F {
+        self.dot(self)
+    }
+
+    /// Calculate the square of the 2-norm divided by 2.
+    #[inline]
+    fn norm2_squared_div2(&self) -> F {
+        self.norm2_squared()/F::TWO
+    }
+
+    /// Calculate the 2-norm.
+    #[inline]
+    fn norm2(&self) -> F {
+        self.norm2_squared().sqrt()
+    }
+
+    /// Calculate the 2-distance squared.
+    fn dist2_squared(&self, other : &Self) -> F;
+
+    /// Calculate the 2-distance.
+    #[inline]
+    fn dist2(&self, other : &Self) -> F {
+        self.dist2_squared(other).sqrt()
+    }
+
+    /// Project to the 2-ball.
+    #[inline]
+    fn proj_ball2(mut self, ρ : F) -> Self {
+        self.proj_ball2_mut(ρ);
+        self
+    }
+
+    /// Project to the 2-ball in-place.
+    #[inline]
+    fn proj_ball2_mut(&mut self, ρ : F) {
+        let r = self.norm2();
+        if r>ρ {
+            *self *= ρ/r
+        }
+    }
+}
+
+/// Trait for [`Euclidean`] spaces with dimensions known at compile time.
+pub trait StaticEuclidean<F : Float> : Euclidean<F> {
+    /// Returns the origin
+    fn origin() -> <Self as Euclidean<F>>::Output;
+}
+
+//
+// Abstract norms
+//
+
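+/// Exponent type marking the $ℓ^1$-norm.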
+#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)]
+pub struct L1;
+
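+/// Exponent type marking the $ℓ^2$-norm.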
+#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)]
+pub struct L2;
+
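+/// Exponent type marking the $ℓ^∞$-norm.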
+#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)]
+pub struct Linfinity;
+
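+/// Exponent type marking the $ℓ^{2,1}$-norm (sum of the $ℓ^2$-norms of inner vectors).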
+#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)]
+pub struct L21;
+
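+/// Exponent type marking the Huberised $ℓ^1$-norm; the field is the Huber parameter $γ$.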
+#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)]
+pub struct HuberL1<F : Float>(pub F);
+
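+/// Exponent type marking a Huberised $ℓ^{2,1}$-norm; the field is the Huber parameter $γ$.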
+#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)]
+pub struct HuberL21<F : Float>(pub F);
+
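+/// A norm on a space, selected by the exponent (marker) type `Exponent`.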
+pub trait Norm<F, Exponent> {
+    /// Calculate the norm
+    fn norm(&self, _p : Exponent) -> F;
+}
+
+/// Indicates that the norm implemented by `Self` is dominated by the norm indicated by
+/// `Exponent` on the element type `Elem`, over the field `F`.
+pub trait Dominated<F : Num, Exponent, Elem> {
+    /// Indicates the factor $C$ for the inequality $‖x‖ ≤ C ‖x‖_p$.
+    fn norm_factor(&self, p : Exponent) -> F;
+    /// Given a norm-value $‖x‖_p$, calculates $C‖x‖_p$ such that $‖x‖ ≤ C‖x‖_p$
+    #[inline]
+    fn from_norm(&self, p_norm : F, p : Exponent) -> F {
+        p_norm * self.norm_factor(p)
+    }
+}
+
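+/// A norm that can also compute distances between elements.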
+pub trait Dist<F,Exponent> : Norm<F, Exponent> {
+    /// Calculate the distance
+    fn dist(&self, other : &Self, _p : Exponent) -> F;
+}
+
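+/// Norms that admit projection onto their norm-balls.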
+pub trait Projection<F, Exponent> : Norm<F, Exponent> + Euclidean<F> where F : Float {
+    /// Project to the norm-ball.
+    fn proj_ball(mut self, ρ : F, q : Exponent) -> Self {
+        self.proj_ball_mut(ρ, q);
+        self
+    }
+
+    /// Project to the norm-ball in-place.
+    fn proj_ball_mut(&mut self, ρ : F, _q : Exponent);
+}
+
+/*impl<F : Float, E : Euclidean<F>> Norm<F, L2> for E {
+    #[inline]
+    fn norm(&self, _p : L2) -> F { self.norm2() }
+
+    fn dist(&self, other : &Self, _p : L2) -> F { self.dist2(other) }
+}*/
+
+impl<F : Float, E : Euclidean<F> + Norm<F, L2>> Projection<F, L2> for E {
+    #[inline]
+    fn proj_ball(self, ρ : F, _p : L2) -> Self { self.proj_ball2(ρ) }
+
+    #[inline]
+    fn proj_ball_mut(&mut self, ρ : F, _p : L2) { self.proj_ball2_mut(ρ) }
+}
+
+impl<F : Float> HuberL1<F> {
+    fn apply(self, xnsq : F) -> F {
+        let HuberL1(γ) = self;
+        let xn = xnsq.sqrt();
+        if γ == F::ZERO {
+            xn
+        } else if xn > γ {
+            xn - γ/F::TWO
+        } else if xn < (-γ) {
+            -xn - γ/F::TWO
+        } else {
+            xnsq/(F::TWO*γ)
+        }
+    }
+}
+
+impl<F : Float, E : Euclidean<F>> Norm<F, HuberL1<F>> for E {
+    fn norm(&self, huber : HuberL1<F>) -> F {
+        huber.apply(self.norm2_squared())
+    }
+}
+
+impl<F : Float, E : Euclidean<F>> Dist<F, HuberL1<F>> for E {
+    fn dist(&self, other : &Self, huber : HuberL1<F>) -> F {
+        huber.apply(self.dist2_squared(other))
+    }
+}
+
+/*
+#[inline]
+pub fn mean<V>(x : V) -> V::Field where V : ValidArray {
+     x.iter().sum()/x.len()
+}
+
+#[inline]
+pub fn proj_nonneg_mut<V>(x : &mut V) -> &mut V where V : ValidArray {
+    x.iter_mut().for_each(|&mut p| if p < 0 { *p = 0 } );
+    x
+}
+*/
+
+
+//
+// 2,1-norm generic implementation
+//
+
+/*
+pub trait InnerVectors {
+    type Item;
+    type Iter : Iterator<Item=Self::Item>;
+    fn inner_vectors(&self) -> &Self::Iter;
+}
+
+pub trait InnerVectorsMut : InnerVectors {
+    type IterMut : Iterator<Item=Self::Item>;
+    fn inner_vectors_mut(&self) -> &mut Self::Item;
+}
+
+impl<F : Float + Sum, T : InnerVectors> Norm<F, L21> for T where T::Item : Norm<F, L2> {
+    fn norm(&self, _ : L21) -> F {
+        self.inner_vectors().map(|t| t.norm(L2)).sum()
+    }
+}
+
+impl<F : Float + Sum, T : InnerVectorsMut + Euclidean<F>> Projection<F, L21>
+for T where T::ItemMut : Projection<F, L2> {
+    fn proj_ball_mut(&mut self, _ : L21) {
+        self.inner_vectors_mut().for_each(|t| t.proj_ball_mut(L2));
+    }
+}
+*/
+
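
Since the concrete implementors of `Euclidean` (such as the fixed-size `Loc` vectors from src/loc.rs) live elsewhere in this changeset, here is a sketch written purely against the traits above, assuming the crate is used under its package name `alg_tools`. A projected gradient step is a typical consumer of `proj_ball2`:

    use alg_tools::norms::Euclidean;
    use alg_tools::types::Float;

    /// One step of projected gradient descent onto the 2-ball of radius ρ,
    /// generic over any Euclidean space of this crate.
    fn projected_step<F, E>(x: E, gradient: E, τ: F, ρ: F) -> E
    where
        F: Float,
        E: Euclidean<F, Output = E>,
    {
        (x - gradient * τ).proj_ball2(ρ)
    }

Any type satisfying `Euclidean<F, Output = E>` can be passed in; the operator bounds on the trait make the arithmetic in the body available without further constraints.
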
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/sets.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,108 @@
+//! Various types of sets.
+use std::ops::{RangeFull,RangeFrom,Range,RangeInclusive,RangeTo,RangeToInclusive};
+use std::marker::PhantomData;
+use crate::types::*;
+use crate::loc::{Loc};
+use crate::norms::Dot;
+use serde::Serialize;
+
+pub mod cube;
+pub use cube::*;
+
+/// Trait for arbitrary sets. The parameter `U` is the element type.
+pub trait Set<U> where U : ?Sized {
+    /// Check for containment.
+    fn contains(&self, item : &U) -> bool;
+}
+
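+/// Inclusion ordering of sets of the same class, with join (`common`) and
+/// meet (`intersect`) operations.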
+pub trait SetOrd : Sized {
+     /// Returns the smallest set of the same class that contains both parameter sets.
+     fn common(&self, other : &Self) -> Self;
+
+     /// Returns the greatest set of the same class contained in both parameter sets.
+     fn intersect(&self, other : &Self) -> Option<Self>;
+}
+
+impl<U, const N : usize> Set<Loc<U, N>>
+for Cube<U,N>
+where U : Num + PartialOrd + Sized {
+    fn contains(&self, item : &Loc<U, N>) -> bool {
+        self.0.iter().zip(item.iter()).all(|(s, x)| s.contains(x))
+    }
+}
+
+impl<U> Set<U> for RangeFull {
+    fn contains(&self, _item : &U) -> bool { true }
+}
+
+macro_rules! impl_ranges {
+    ($($range:ident),*) => { $(
+        impl<U,Idx> Set<U>
+        for $range<Idx>
+        where Idx : PartialOrd<U>, U : PartialOrd<Idx> + ?Sized, Idx : PartialOrd {
+            #[inline]
+            fn contains(&self, item : &U) -> bool { $range::contains(&self, item) }
+        }
+    )* }
+}
+
+impl_ranges!(RangeFrom,Range,RangeInclusive,RangeTo,RangeToInclusive);
+
+/// Halfspace. The orthogonal (dual) vector type `A` is expected to implement [`Dot`]`<U,F>`
+/// for the element type `U`.
+#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)]
+pub struct Halfspace<A, F, U> where A : Dot<U,F>, F : Float {
+    pub orthogonal : A,
+    pub offset : F,
+    _phantom : PhantomData<U>,
+}
+
+impl<A,F,U> Halfspace<A,F,U> where A : Dot<U,F>, F : Float {
+    #[inline]
+    pub fn new(orthogonal : A, offset : F) -> Self {
+        Halfspace{ orthogonal, offset, _phantom : PhantomData }
+    }
+}
+
+pub trait SpannedHalfspace<F,U> where  F : Float {
+    type A : Dot<U,F>;
+    fn spanned_halfspace(&self) -> Halfspace<Self::A, F, U>;
+}
+
+// TODO: Gram-Schmidt for higher N.
+impl<F : Float> SpannedHalfspace<F,Loc<F, 1>> for [Loc<F, 1>; 2] {
+    type A = Loc<F, 1>;
+    fn spanned_halfspace(&self) -> Halfspace<Self::A, F, Loc<F, 1>> {
+        let (x0, x1) = (self[0], self[1]);
+        let d = x1 - x0;
+        // Offset d⋅x0 places the boundary through x0, consistently with the 2D case below.
+        Halfspace::new(d, x0.dot(&d))
+    }
+}
+
+// TODO: Gram-Schmidt for higher N.
+impl<F : Float> SpannedHalfspace<F,Loc<F, 2>> for [Loc<F, 2>; 2] {
+    type A = Loc<F, 2>;
+    fn spanned_halfspace(&self) -> Halfspace<Self::A, F, Loc<F, 2>> {
+        let (x0, x1) = (&self[0], &self[1]);
+        let d = x1 - x0;
+        let orthog = loc![d[1], -d[0]]; // We don't normalise for efficiency
+        let offset = x0.dot(&orthog);
+        Halfspace::new(orthog, offset)
+    }
+}
+
+impl<A,U,F> Set<U> for Halfspace<A,F,U> where A : Dot<U,F>, F : Float {
+    #[inline]
+    fn contains(&self, item : &U) -> bool {
+        self.orthogonal.dot(item) >= self.offset
+    }
+}
+
+/// Polygons defined by `N` `Halfspace`s.
+#[derive(Clone,Copy,Debug,Eq,PartialEq)]
+pub struct NPolygon<A, F, U, const N : usize>(pub [Halfspace<A,F,U>; N]) where A : Dot<U,F>, F : Float;
+
+impl<A,U,F,const N : usize> Set<U> for NPolygon<A,F,U,N> where A : Dot<U,F>, F : Float {
+    fn contains(&self, item : &U) -> bool {
+        self.0.iter().all(|halfspace| halfspace.contains(item))
+    }
+}
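
As a quick illustration of the `Set` trait (a sketch assuming the crate name `alg_tools`): the blanket implementations above make the standard ranges usable as sets, with their usual half-open behaviour.

    use alg_tools::sets::Set;

    fn main() {
        let interval = 0.0 .. 1.0;
        // Range<f64> implements Set<f64> via the impl_ranges! macro above.
        assert!(Set::contains(&interval, &0.5));
        assert!(!Set::contains(&interval, &1.0)); // half-open: right endpoint excluded
        // RangeFull contains everything.
        assert!(Set::contains(&(..), &42_i32));
    }
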
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/sets/cube.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,314 @@
+//! Presentation of cubes $[a_1, b_1] × ⋯ × [a_n, b_n]$.
+
+use serde::ser::{Serialize, Serializer, SerializeTupleStruct};
+use crate::types::*;
+use crate::loc::Loc;
+use crate::sets::SetOrd;
+use crate::maputil::{
+    FixedLength,
+    FixedLengthMut,
+    map1,
+    map1_indexed,
+    map2,
+};
+
+/// A half-open `N`-cube of elements of type `U`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct Cube<U : Num, const N : usize>(pub(super) [[U; 2]; N]);
+
+// Need to implement manually, as serde provides `[F; N]` serialisation only for some `N`.
+impl<F : Num + Serialize, const N : usize> Serialize for Cube<F, N>
+where
+    F: Serialize,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut ts = serializer.serialize_tuple_struct("Cube", N)?;
+        for e in self.0.iter() {
+            ts.serialize_field(e)?;
+        }
+        ts.end()
+    }
+}
+
+impl<A : Num, const N : usize> FixedLength<N> for Cube<A,N> {
+    type Iter = std::array::IntoIter<[A; 2], N>;
+    type Elem = [A; 2];
+    #[inline]
+    fn fl_iter(self) -> Self::Iter {
+        self.0.into_iter()
+    }
+}
+
+impl<A : Num, const N : usize> FixedLengthMut<N> for Cube<A,N> {
+    type IterMut<'a> = std::slice::IterMut<'a, [A; 2]>;
+    #[inline]
+    fn fl_iter_mut(&mut self) -> Self::IterMut<'_> {
+        self.0.iter_mut()
+    }
+}
+
+impl<'a, A : Num, const N : usize> FixedLength<N> for &'a Cube<A,N> {
+    type Iter = std::slice::Iter<'a, [A; 2]>;
+    type Elem = &'a [A; 2];
+    #[inline]
+    fn fl_iter(self) -> Self::Iter {
+        self.0.iter()
+    }
+}
+
+
+/// Iterator for [`Cube`] corners.
+pub struct CubeCornersIter<'a, U : Num, const N : usize> {
+    index : usize,
+    cube : &'a Cube<U, N>,
+}
+
+impl<'a, U : Num, const N : usize> Iterator for CubeCornersIter<'a, U, N> {
+    type Item = Loc<U, N>;
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.index >= (1 << N) {
+            None
+        } else {
+            let i = self.index;
+            self.index += 1;
+            let arr = self.cube.map_indexed(|k, a, b| if (i>>k)&1 == 0 { a } else { b });
+            Some(arr.into())
+        }
+    }
+}
+
+impl<U : Num, const N : usize> Cube<U, N> {
+    #[inline]
+    pub fn map_indexed<T>(&self, f : impl Fn(usize, U, U) -> T) -> [T; N] {
+        map1_indexed(self, |i, &[a, b]| f(i, a, b))
+    }
+
+    #[inline]
+    pub fn map<T>(&self, f : impl Fn(U, U) -> T) -> [T; N] {
+        map1(self, |&[a, b]| f(a, b))
+    }
+
+    #[inline]
+    pub fn iter_coords(&self) -> std::slice::Iter<'_, [U; 2]> {
+        self.0.iter()
+    }
+
+    #[inline]
+    pub fn start(&self, i : usize) -> U {
+        self.0[i][0]
+    }
+
+    #[inline]
+    pub fn end(&self, i : usize) -> U {
+        self.0[i][1]
+    }
+   
+    #[inline]
+    pub fn span_start(&self) -> Loc<U, N> {
+        Loc::new(self.map(|a, _b| a))
+    }
+
+    #[inline]
+    pub fn span_end(&self) -> Loc<U, N> {
+        Loc::new(self.map(|_a, b| b))
+    }
+
+    #[inline]
+    pub fn iter_corners(&self) -> CubeCornersIter<'_, U, N> {
+        CubeCornersIter{ index : 0, cube : self }
+    }
+
+    #[inline]
+    pub fn width(&self) -> Loc<U, N> {
+        Loc::new(self.map(|a, b| b-a))
+    }
+
+    #[inline]
+    pub fn shift(&self, shift : &Loc<U, N>) -> Self {
+        let mut cube = self.clone();
+        for i in 0..N {
+            cube.0[i][0] += shift[i];
+            cube.0[i][1] += shift[i];
+        }
+        cube
+    }
+
+    #[inline]
+    pub fn new(data : [[U; 2]; N]) -> Self {
+        Cube(data)
+    }
+}
+
+impl<F : Float, const N : usize> Cube<F, N> {
+    /// Returns the centre of the cube
+    pub fn center(&self) -> Loc<F, N> {
+        map1(self, |&[a, b]| (a + b) / F::TWO).into()
+    }
+}
+
+impl<U : Num> Cube<U, 1> {
+    /// Get the corners of the cube.
+    /// TODO: generic implementation once const-generics can be involved in
+    /// calculations.
+    #[inline]
+    pub fn corners(&self) -> [Loc<U, 1>; 2] {
+        let [[a, b]] = self.0;
+        [a.into(), b.into()]
+    }
+}
+
+impl<U : Num> Cube<U, 2> {
+    /// Get the corners of the cube in counter-clockwise order.
+    /// TODO: generic implementation once const-generics can be involved in
+    /// calculations.
+    #[inline]
+    pub fn corners(&self) -> [Loc<U, 2>; 4] {
+        let [[a1, b1], [a2, b2]]=self.0;
+        [[a1, a2].into(),
+         [b1, a2].into(),
+         [b1, b2].into(),
+         [a1, b2].into()]
+    }
+}
+
+impl<U : Num> Cube<U, 3> {
+    /// Get the corners of the cube.
+    /// TODO: generic implementation once const-generics can be involved in
+    /// calculations.
+    #[inline]
+    pub fn corners(&self) -> [Loc<U, 3>; 8] {
+        let [[a1, b1], [a2, b2], [a3, b3]]=self.0;
+        [[a1, a2, a3].into(),
+         [b1, a2, a3].into(),
+         [b1, b2, a3].into(),
+         [a1, b2, a3].into(),
+         [a1, b2, b3].into(),
+         [b1, b2, b3].into(),
+         [b1, a2, b3].into(),
+         [a1, a2, b3].into()]
+    }
+}
+
+// TODO: Implement Add and Sub of Loc to Cube, and Mul and Div by U : Num.
+
+impl<U : Num, const N : usize> From<[[U; 2]; N]> for Cube<U, N> {
+    #[inline]
+    fn from(data : [[U; 2]; N]) -> Self {
+        Cube(data)
+    }
+}
+
+impl<U : Num, const N : usize> From<Cube<U, N>> for [[U; 2]; N] {
+    #[inline]
+    fn from(Cube(data) : Cube<U, N>) -> Self {
+        data
+    }
+}
+
+
+impl<U, const N : usize> Cube<U, N> where U : Num + PartialOrd {
+    /// Checks whether the cube is non-degenerate, i.e., the start coordinate
+    /// of each axis is strictly less than the end coordinate.
+    #[inline]
+    pub fn nondegenerate(&self) -> bool {
+        self.0.iter().all(|range| range[0] < range[1])
+    }
+    
+    /// Checks whether the cube intersects some `other` cube.
+    /// Shared boundary points do not count as intersection, so `U` should ideally be a [`Float`].
+    #[inline]
+    pub fn intersects(&self, other : &Cube<U, N>) -> bool {
+        self.iter_coords().zip(other.iter_coords()).all(|([a1, b1], [a2, b2])| {
+            a1 < b2 && a2 < b1
+        })
+    }
+
+    /// Checks whether the cube contains some `other` cube.
+    pub fn contains_set(&self, other : &Cube<U, N>) -> bool {
+        self.iter_coords().zip(other.iter_coords()).all(|([a1, b1], [a2, b2])| {
+            a1 <= a2 && b1 >= b2
+        })
+    }
+
+    /// Produces the point of minimum $ℓ^p$-norm within the cube `self` for any $p$-norm.
+    /// This is the point where each coordinate is closest to zero.
+    #[inline]
+    pub fn minnorm_point(&self) -> Loc<U, N> {
+        let z = U::ZERO;
+        // As always, we assume that a ≤ b.
+        self.map(|a, b| {
+            debug_assert!(a <= b);
+            match (a < z, z < b) {
+                (false, _)   => a,
+                (_, false)   => b,
+                (true, true) => z
+            }
+        }).into()
+    }
+
+    /// Produces the point of maximum $ℓ^p$-norm within the cube `self` for any $p$-norm.
+    /// This is the point where each coordinate is furthest from zero.
+    #[inline]
+    pub fn maxnorm_point(&self) -> Loc<U, N> {
+        let z = U::ZERO;
+        // As always, we assume that a ≤ b.
+        self.map(|a, b| {
+            debug_assert!(a <= b);
+            match (a < z, z < b) {
+                (false, _)   => b,
+                (_, false)   => a,
+                // At this stage we must have a < 0 (so U must be signed), and we want to check
+                // whether |a| > |b|. We can do this without assuming U to actually implement
+                // `Neg` by comparing whether 0 > a + b.
+                (true, true) => if z > a + b { a } else { b }
+            }
+        }).into()
+    }
+}
+
+macro_rules! impl_common {
+    ($($t:ty)*, $min:ident, $max:ident) => { $(
+        impl<const N : usize> SetOrd for Cube<$t, N> {
+            #[inline]
+            fn common(&self, other : &Self) -> Self {
+                map2(self, other, |&[a1, b1], &[a2, b2]| {
+                    debug_assert!(a1 <= b1 && a2 <= b2);
+                    [a1.$min(a2), b1.$max(b2)]
+                }).into()
+            }
+
+            #[inline]
+            fn intersect(&self, other : &Self) -> Option<Self> {
+                let arr = map2(self, other, |&[a1, b1], &[a2, b2]| {
+                    debug_assert!(a1 <= b1 && a2 <= b2);
+                    [a1.$max(a2), b1.$min(b2)]
+                });
+                arr.iter().all(|&[a, b]| a <= b).then(|| arr.into())
+            }
+        }
+    )* }
+}
+
+impl_common!(u8 u16 u32 u64 u128 usize
+             i8 i16 i32 i64 i128 isize, min, max);
+// Any NaN yields NaN
+impl_common!(f32 f64, minimum, maximum);
+
+impl<U : Num, const N : usize> std::ops::Index<usize> for Cube<U, N> {
+    type Output = [U; 2];
+    #[inline]
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.0[index]
+    }
+}
+
+impl<U : Num, const N : usize> std::ops::IndexMut<usize> for Cube<U, N> {
+    #[inline]
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        &mut self.0[index]
+    }
+}
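
A short sketch of the `Cube` API above (crate name `alg_tools` assumed): constructing axis-aligned boxes, testing overlap, and forming their hull and intersection through `SetOrd`.

    use alg_tools::sets::{Cube, SetOrd};

    fn main() {
        let a = Cube::new([[0.0, 2.0], [0.0, 2.0]]);
        let b = Cube::new([[1.0, 3.0], [1.0, 3.0]]);
        assert!(a.intersects(&b));
        // Smallest cube containing both…
        assert_eq!(a.common(&b), Cube::new([[0.0, 3.0], [0.0, 3.0]]));
        // …and the largest cube contained in both.
        assert_eq!(a.intersect(&b), Some(Cube::new([[1.0, 2.0], [1.0, 2.0]])));
        // A 2-cube has four corners, returned in counter-clockwise order.
        assert_eq!(a.corners().len(), 4);
        assert_eq!(a.center()[0], 1.0);
    }
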
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/tabledump.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,73 @@
+
+use std::slice::Iter;
+use csv;
+use serde_json;
+use serde::Serialize;
+use crate::error::DynError;
+
+/// Write a CSV from an iterator
+pub fn write_csv<I, J>(mut iter : I, f : String) -> DynError
+where I : Iterator<Item=J>,
+      J : Serialize {
+    let wtr = csv::WriterBuilder::new()
+        .has_headers(true)
+        .delimiter(b'\t')
+        .from_path(f);
+    wtr.and_then(|mut w|{
+        //w.write_record(self.tabledump_headers())?;
+        iter.try_for_each(|item| w.serialize(item))
+    })?;
+    Ok(())
+}
+
+/// Write a JSON file from an iterator.
+pub fn write_json<I, J>(iter : I, f : String) -> DynError
+where I : Iterator<Item=J>,
+      J : Serialize {
+    let v : Vec<J> = iter.collect();
+    serde_json::to_writer_pretty(std::fs::File::create(f)?, &v)?;
+    Ok(())
+}
+
+/// Helper trait for dumping data in a CSV or JSON file.
+pub trait TableDump<'a>
+where <Self::Iter as Iterator>::Item : Serialize {
+    /// Iterator over the rows
+    type Iter : Iterator;
+
+    // Return the headers of the CSV file.
+    //fn tabledump_headers(&'a self) -> Vec<String>;
+
+    /// Return an iterator over the rows of the CSV file.
+    fn tabledump_entries(&'a self) -> Self::Iter;
+
+    /// Write a CSV file.
+    fn write_csv(&'a self, f : String) -> DynError {
+        write_csv(self.tabledump_entries(), f)
+    }
+
+    /// Write a mapped CSV file. This is a workaround for rust-csv not supporting struct flattening.
+    fn write_csv_mapped<D, G>(&'a self, f : String, g : G) -> DynError
+    where D : Serialize,
+          G : FnMut(<Self::Iter as Iterator>::Item) -> D {
+        write_csv(self.tabledump_entries().map(g), f)
+    }
+
+    /// Write a JSON file.
+    fn write_json(&'a self, f : String) -> DynError {
+        write_json(self.tabledump_entries(), f)
+    }
+}
+
+impl<'a, T : Serialize + 'a> TableDump<'a> for [T] {
+    type Iter = Iter<'a, T>;
+
+    // fn tabledump_headers(&'a self) -> Vec<String> {
+    //     vec!["value".into()]
+    // }
+
+    fn tabledump_entries(&'a self) -> Self::Iter {
+        self.iter()
+    }
+}
+
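
A minimal dumping sketch (crate name `alg_tools` assumed; `DynError` comes from src/error.rs elsewhere in this changeset and is taken, as its use above suggests, to be a `Result` alias compatible with the `?` operator):

    use alg_tools::tabledump::TableDump;
    use alg_tools::error::DynError;

    fn dump_values() -> DynError {
        let values = [1.0_f64, 2.5, 3.0];
        // Slices of serialisable items implement TableDump directly.
        values.as_slice().write_csv("values.csv".to_string())?;
        values.as_slice().write_json("values.json".to_string())?;
        Ok(())
    }
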
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/tuple.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,69 @@
+//
+// Tuple tools
+//
+
+/// A trait for removing the first item from a tuple and returning the rest.
+/// Only implemented for a few low dimensions.
+pub trait TupleOps : Sized {
+    type Head;
+    type Tail;
+    type Nested;
+
+    /// Remove the first item from a tuple, returning the rest.
+    fn tail(self) -> Self::Tail { self.split_tail().1 }
+    fn split_tail(self) -> (Self::Head, Self::Tail);
+    fn nest(self) -> Self::Nested;
+    fn from_nested(nested : Self::Nested) -> Self;
+    fn from_head_tail(head : Self::Head, tail : Self::Tail) -> Self;
+}
+
+macro_rules! nest {
+    ($t1:ident) => { ($t1,) };
+    ($t1:ident $($ts:ident)+) => { ($t1, nest!($($ts)+)) };
+}
+
+macro_rules! impl_tuple_ops {
+    (@do $type1:ident $($types:ident)*, $var1:ident $($vars:ident)*) => {
+        impl<$type1, $($types),*> TupleOps for ($type1, $($types),*) {
+            type Head = $type1;
+            type Tail = ($($types,)*);
+            type Nested = nest!($type1 $($types)*);
+
+            #[inline]
+            fn split_tail(self) -> (Self::Head, Self::Tail) {
+                let ($var1, $($vars),*) = self;
+                ($var1, ($($vars,)*))
+            }
+
+            #[inline]
+            fn nest(self) -> Self::Nested {
+                let ($var1, $($vars),*) = self;
+                nest!($var1 $($vars)*)
+            }
+
+            #[inline]
+            fn from_nested(nested : Self::Nested) -> Self {
+                let nest!($var1 $($vars)*) = nested;
+                ($var1, $($vars),*)
+            }
+
+            #[inline]
+            fn from_head_tail($var1 : Self::Head, tail : Self::Tail) -> Self {
+                let ($($vars,)*) = tail;
+                ($var1, $($vars),*)
+            }
+        }
+    };
+    ($type1:ident $($types:ident)+, $var1:ident $($vars:ident)+) => {
+        impl_tuple_ops!(@do $type1 $($types)+, $var1 $($vars)+);
+        impl_tuple_ops!($($types)+, $($vars)+);
+    };
+    ($type1:ident, $var1:ident) => {
+        impl_tuple_ops!(@do $type1, $var1);
+    };
+}
+
+impl_tuple_ops!(A B C D E F G H I J K L M N O P Q R S T U V W X Y Z,
+                a b c d e f g h i j k l m n o p q r s t u v w x y z);
+
+
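
For illustration, splitting and nesting a small tuple with the trait above (crate name `alg_tools` assumed):

    use alg_tools::tuple::TupleOps;

    fn main() {
        let t = (1, "two", 3.0);
        let (head, tail) = t.split_tail();
        assert_eq!(head, 1);
        assert_eq!(tail, ("two", 3.0));
        // Right-nested representation of the same tuple.
        assert_eq!(t.nest(), (1, ("two", (3.0,))));
    }
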
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/types.rs	Sat Oct 22 13:47:15 2022 +0300
@@ -0,0 +1,148 @@
+//! Some useful (numerical) types and type traits.
+
+use trait_set::trait_set;
+pub use num_traits::Float as NumTraitsFloat; // needed to re-export functions.
+pub use num_traits::cast::AsPrimitive;
+
+/// Typical integer type
+#[allow(non_camel_case_types)]
+pub type int = i64;
+
+/// Typical unsigned integer type
+#[allow(non_camel_case_types)]
+pub type uint = u64;
+
+/// Typical floating point number type
+#[allow(non_camel_case_types)]
+pub type float = f64;
+
+/// Casts of abstract numerical types to others via the standard `as` keyword.
+pub trait CastFrom<T : 'static + Copy> : num_traits::cast::AsPrimitive<T> {
+    fn cast_from(other : T) -> Self;
+}
+
+macro_rules! impl_casts {
+    ($($type:ty)*) => { $(
+        impl_casts!(@phase2, $type,
+                    u8 u16 u32 u64 u128 usize
+                    i8 i16 i32 i64 i128 isize
+                    f32 f64);
+    )* };
+    (@phase2, $type:ty, $($type2:ty)*) => { $(
+        impl CastFrom<$type2> for $type {
+            #[inline]
+            fn cast_from(other : $type2) -> Self { other as $type }
+        }
+    )* };
+}
+
+impl_casts!(u8 u16 u32 u64 u128 usize
+            i8 i16 i32 i64 i128 isize
+            f32 f64);
+
+/// Trait for numeric types
+pub trait Num : 'static + Copy + num::Num + num_traits::NumAssign
+                 + std::iter::Sum + std::iter::Product
+                 + std::fmt::Debug + std::fmt::Display + serde::Serialize
+                 + CastFrom<u8>   + CastFrom<u16> + CastFrom<u32> + CastFrom<u64>
+                 + CastFrom<u128> + CastFrom<usize>
+                 + CastFrom<i8>   + CastFrom<i16> + CastFrom<i32> + CastFrom<i64>
+                 + CastFrom<i128> + CastFrom<isize>
+                 + CastFrom<f32>  + CastFrom<f64> {
+
+    const ZERO : Self;
+    const ONE : Self;
+    const TWO : Self;
+    /// Generic version of `Self::MAX`.
+    const RANGE_MAX : Self;
+    /// Generic version of `Self::MIN`.
+    const RANGE_MIN : Self;
+}
+
+/// Trait for signed numeric types
+pub trait SignedNum : Num + num::Signed + std::ops::Neg<Output=Self> {}
+impl<U : Num + num::Signed + std::ops::Neg<Output=Self>> SignedNum for U { }
+
+/// Trait for floating point numbers
+pub trait Float : SignedNum + num::Float + From<Self::CompatibleSize> {
+    /// An unsigned integer type that can be used for indexing operations and
+    /// converted to `Self` without loss.
+    type CompatibleSize : CompatibleUnsigned<Self>;
+
+    const PI : Self;
+    const E : Self;
+    const EPSILON : Self;
+    const SQRT_2 : Self;
+    const INFINITY : Self;
+    const NEG_INFINITY : Self;
+    const FRAC_2_SQRT_PI : Self;
+}
+
+/// Trait for integers
+pub trait Integer : Num + num::Integer {}
+
+/// Trait for unsigned integers
+pub trait Unsigned : Num + Integer + num::Unsigned {}
+
+/// Trait for signed integers
+pub trait Signed : SignedNum + Integer {}
+
+macro_rules! impl_num_consts {
+    ($($type:ty)*) => { $(
+        impl Num for $type {
+            const ZERO : Self = 0 as $type;
+            const ONE : Self = 1 as $type;
+            const TWO : Self = 2 as $type;
+            const RANGE_MAX : Self = <$type>::MAX;
+            const RANGE_MIN : Self = <$type>::MIN;
+        }
+    )* }
+}
+
+macro_rules! impl_integers {
+    ($signed:ty : $($type:ty)*) => { $(
+        impl_num_consts!($type);
+        impl Integer for $type {}
+        impl $signed for $type {}
+    )* }
+}
+
+impl_integers!(Signed: i8 i16 i32 i64 i128 isize);
+impl_integers!(Unsigned: u8 u16 u32 u64 u128 usize);
+
+impl_num_consts!(f32 f64);
+
+impl Float for f64 {
+    #[cfg(any(target_pointer_width = "128", target_pointer_width = "64"))]
+    type CompatibleSize = u32;
+    #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
+    type CompatibleSize = usize;
+
+    const PI : Self = std::f64::consts::PI;
+    const E : Self = std::f64::consts::E;
+    const EPSILON : Self = std::f64::EPSILON;
+    const SQRT_2 : Self = std::f64::consts::SQRT_2;
+    const INFINITY : Self = std::f64::INFINITY;
+    const NEG_INFINITY : Self = std::f64::NEG_INFINITY;
+    const FRAC_2_SQRT_PI : Self = std::f64::consts::FRAC_2_SQRT_PI;
+}
+
+impl Float for f32 {
+    #[cfg(any(target_pointer_width = "128", target_pointer_width = "64", target_pointer_width = "32"))]
+    type CompatibleSize = u16;
+    #[cfg(any(target_pointer_width = "16"))]
+    type CompatibleSize = usize;
+
+    const PI : Self = std::f32::consts::PI;
+    const E : Self = std::f32::consts::E;
+    const EPSILON : Self = std::f32::EPSILON;
+    const SQRT_2 : Self = std::f32::consts::SQRT_2;
+    const INFINITY : Self = std::f32::INFINITY;
+    const NEG_INFINITY : Self = std::f32::NEG_INFINITY;
+    const FRAC_2_SQRT_PI : Self = std::f32::consts::FRAC_2_SQRT_PI;
+}
+
+trait_set! {
+    pub trait CompatibleUnsigned<F : Float> = Unsigned + Into<F>;
+    pub trait CompatibleSigned<F : Float> = Signed + Into<F>;
+}
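
A short sketch of how the numeric traits above are meant to be consumed (crate name `alg_tools` assumed): generic code reaches for the associated constants and `CastFrom` instead of hard-coding concrete types and `as` casts.

    use alg_tools::types::{CastFrom, Float};

    // Midpoint written against the crate's Float trait; TWO comes from the
    // Num supertrait, which Float inherits through SignedNum.
    fn midpoint<F: Float>(a: F, b: F) -> F {
        (a + b) / F::TWO
    }

    fn main() {
        assert_eq!(midpoint(1.0_f64, 3.0), 2.0);
        // CastFrom mirrors the `as` keyword for generic numeric code.
        assert_eq!(f64::cast_from(3_u32), 3.0);
    }
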
