# HG changeset patch # User Tuomo Valkonen # Date 1666435635 -10800 # Node ID 9f27689eb1302a2bb02cd0819fba8fc693d38697 Initialise new clean repository diff -r 000000000000 -r 9f27689eb130 Cargo.lock --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Cargo.lock Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,444 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "alg_tools" +version = "0.1.0" +dependencies = [ + "colored", + "cpu-time", + "csv", + "itertools", + "nalgebra", + "num", + "num-traits", + "numeric_literals", + "serde", + "serde_json", + "trait-set", +] + +[[package]] +name = "approx" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" +dependencies = [ + "num-traits", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bstr" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata", + "serde", +] + +[[package]] +name = "bytemuck" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da" + +[[package]] +name = "colored" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd" +dependencies = [ + "atty", + "lazy_static", + "winapi", +] + +[[package]] +name = "cpu-time" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e393a7668fe1fad3075085b86c781883000b4ede868f43627b34a87c8b7ded" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "csv" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +dependencies = [ + "bstr", + "csv-core", + "itoa 0.4.8", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +dependencies = [ + "memchr", +] + +[[package]] +name = "either" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "0.4.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + +[[package]] +name = "itoa" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.134" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "329c933548736bc49fd575ee68c89e8be4d260064184389a5b77517cddd99ffb" + +[[package]] +name = "matrixmultiply" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" +dependencies = [ + "rawpointer", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "nalgebra" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e0a04ce089f9401aac565c740ed30c46291260f27d4911fdbaa6ca65fa3044" +dependencies = [ + "approx", + "matrixmultiply", + "nalgebra-macros", + "num-complex", + "num-rational", + "num-traits", + "simba", + "typenum", +] + +[[package]] +name = "nalgebra-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +dependencies = [ + "autocfg", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "numeric_literals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "095aa67b0b9f2081746998f4f17106bdb51d56dc8c211afca5531b92b83bf98a" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "paste" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" + +[[package]] +name = "proc-macro2" +version = "1.0.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" + +[[package]] +name = "ryu" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" + +[[package]] +name = "safe_arch" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "794821e4ccb0d9f979512f9c1973480123f9bd62a90d74ab0f9426fcf8f4a529" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "serde" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" +dependencies = [ + "itoa 1.0.3", + "ryu", + "serde", +] + +[[package]] +name = "simba" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c48e45e5961033db030b56ad67aef22e9c908c493a6e8348c0a0f6b93433cd77" +dependencies = [ + "approx", + "num-complex", + "num-traits", + "paste", + "wide", +] + +[[package]] +name = "syn" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e90cde112c4b9690b8cbe810cba9ddd8bc1d7472e2cae317b69e9438c1cba7d2" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "trait-set" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "875c4c873cc824e362fa9a9419ffa59807244824275a44ad06fec9684fff08f2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "typenum" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" + +[[package]] +name = "unicode-ident" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd" + +[[package]] +name = "wide" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3aba2d1dac31ac7cae82847ac5b8be822aee8f99a4e100f279605016b185c5f" +dependencies = [ + "bytemuck", + "safe_arch", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff -r 000000000000 -r 9f27689eb130 Cargo.toml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Cargo.toml Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,25 @@ +[package] +name = "alg_tools" +version = "0.1.0" +edition = "2021" +authors = ["Tuomo Valkonen "] + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +csv = "~1.1.6" +nalgebra = "~0.31.0" +num-traits = { version = "~0.2.14", features = ["std"] } +colored = "~2.0.0" +trait-set = "~0.2.0" +num = "~0.4.0" +itertools = "~0.10.3" +numeric_literals = "~0.2.0" +cpu-time = "~1.0.0" +serde_json = "~1.0.85" + +[package.metadata.docs.rs] +rustdoc-args = [ "--html-in-header", "katex-header.html" ] + + +[profile.release] +debug = true diff -r 000000000000 -r 9f27689eb130 LICENSE --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/LICENSE Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,43 @@ + +# Anti-abuse license + +## Rationale + +The purpose of this license is to give end-users and developers maximal +freedom to use this software while preventing the authors from being +abused by powerful middle-men that repackage software for convenient +installation by users. Such potentially abusive middle-men include in +particular Linux distributions and similar centralising software +distribution schemes developed for other operating systems. +The ethos of this license is *bollocks to copyright and distributions!* + +## Rules + +This software is distributed without any warranty whatsoever. + +If you redistribute modified versions of this software to the public, +you must clearly mark them as modified. + +If you redistribute this software to the public as part of a large +collection of software with the purpose of providing end-users with +a convenient installation method, you must do one of the following: + +(a) Always redistribute the **unmodified** and **latest** version +provided by the authors. If the lead author releases a new version (on a +specific branch, such as 'stable' or 'development'), you must promptly +make that new version the default version offered to your users (on +that specific branch). + +(b) Rename the software, and make it obvious that your modified or obsolete +software is in no way connected to the authors of the original software. 
+The users of your version should under no circumstances be under the +illusion that they can contact the lead author or any of the authors +of the original software if they have any complaints or queries. + +(c) Do not in any way directly expose this software to your users. + +Otherwise, do whatever you want with this software. In particular, you may +freely use the software as part of other projects, and redistribute to +the public archival copies of the software (as long as your archive cannot +be considered a “convenient installation method” that will be governed by +the rules above). diff -r 000000000000 -r 9f27689eb130 README.md --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/README.md Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,24 @@ + +# AlgTools + +Author: Tuomo Valkonen + +**This crate is very much incomplete.** + +This crate/repository/package contains some general utility routines and tools for implementing +iterative algorithms and numerical computing in Rust. Former versions of the package were for Julia. +They are no longer maintained. Included are: + + * [Linear operator](crate::linops) abstraction, with support for matrices via `nalgebra` + or `ndarray` if this crate is compiled with the relevant features. + * [Iteration mechanism](crate::iterate) for generically implementing intermittent verbosity and stopping rules in algorithms. + * The iterators can employ a [logger](crate::logger) to store the intermittent results + (e.g. function values) in memory to later write into a `csv`-file. + * [Bisection trees](crate::bisection_tree) for branch-and-bound optimisation. + + + +## Installation + +??? diff -r 000000000000 -r 9f27689eb130 misc/doc_alias.sh --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/misc/doc_alias.sh Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,5 @@ +# source this file. use cargo rustdoc or cargo d --no-deps to build the documentation. +echo 'Creating cargo-d alias' +alias cargo-d='RUSTDOCFLAGS="--html-in-header misc/katex-header.html" BROWSER=/Applications/Firefox.app/Contents/MacOS/firefox-bin cargo d --no-deps' + + \ No newline at end of file diff -r 000000000000 -r 9f27689eb130 misc/katex-header.html --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/misc/katex-header.html Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,15 @@ + + + + diff -r 000000000000 -r 9f27689eb130 rust-toolchain.toml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust-toolchain.toml Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,2 @@ +[toolchain] +channel = "nightly" diff -r 000000000000 -r 9f27689eb130 src/bisection_tree.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/bisection_tree.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,18 @@ +/// +/// Geometrical bisection trees +/// + +mod supportid; +pub use supportid::*; +mod aggregator; +pub use aggregator::*; +mod either; +pub use either::*; +mod support; +pub use support::*; +mod bt; +pub use bt::*; +mod refine; +pub use refine::*; +mod btfn; +pub use btfn::*; diff -r 000000000000 -r 9f27689eb130 src/bisection_tree/aggregator.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/bisection_tree/aggregator.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,154 @@ +use crate::types::*; +use crate::sets::Set; + +/// Trait for aggregating information about a branch of a [`super::BT`] bisection tree. +pub trait Aggregator : Clone + std::fmt::Debug { + // Aggregate a new leaf data point to current state. + fn aggregate(&mut self, aggregates : I) + where I : Iterator; + + // Summarise several other aggregators, resetting current state.
+ fn summarise<'a, I>(&'a mut self, aggregates : I) + where I : Iterator; + + /// Initialisation of aggregate data for an empty lower node. + fn new() -> Self; +} + +/// An [`Aggregator`] that doesn't aggregate anything. +#[derive(Clone,Debug)] +pub struct NullAggregator; + +impl Aggregator for NullAggregator { + // TODO: these should probabably also take a Cube as argument to + // allow integrating etc. + fn aggregate(&mut self, _aggregates : I) + where I : Iterator {} + + fn summarise<'a, I>(&'a mut self, _aggregates : I) + where I : Iterator {} + + fn new() -> Self { NullAggregator } +} + +/// Upper and lower bounds on an `F`-valued function. +/// Returned by [`super::support::Bounded::bounds`]. +#[derive(Copy,Clone,Debug)] +pub struct Bounds(pub F, pub F); + +impl Bounds { + /// Returns the lower bound + #[inline] + pub fn lower(&self) -> F { self.0 } + + /// Returns the upper bound + #[inline] + pub fn upper(&self) -> F { self.1 } +} + +impl Bounds { + /// Returns a uniform bound on the function (maximum over the absolute values of the + /// upper and lower bound). + #[inline] + pub fn uniform(&self) -> F { + let &Bounds(lower, upper) = self; + lower.abs().max(upper.abs()) + } +} + +impl<'a, F : Float> std::ops::Add for Bounds { + type Output = Self; + #[inline] + fn add(self, Bounds(l2, u2) : Self) -> Self::Output { + let Bounds(l1, u1) = self; + debug_assert!(l1 <= u1 && l2 <= u2); + Bounds(l1 + l2, u1 + u2) + } +} + +impl<'a, F : Float> std::ops::Mul for Bounds { + type Output = Self; + #[inline] + fn mul(self, Bounds(l2, u2) : Self) -> Self::Output { + let Bounds(l1, u1) = self; + debug_assert!(l1 <= u1 && l2 <= u2); + let a = l1 * l2; + let b = u1 * u2; + // The order may flip when negative numbers are involved, so need min/max + Bounds(a.min(b), a.max(b)) + } +} + +impl std::iter::Product for Bounds { + #[inline] + fn product(mut iter: I) -> Self + where I: Iterator { + match iter.next() { + None => Bounds(F::ZERO, F::ZERO), + Some(init) => iter.fold(init, |a, b| a*b) + } + } +} + +impl Set for Bounds { + fn contains(&self, item : &F) -> bool { + let &Bounds(l, u) = self; + debug_assert!(l <= u); + l <= *item && *item <= u + } +} + +impl Bounds { + /// Calculate a common bound (glb, lub) for two bounds. + #[inline] + pub fn common(&self, &Bounds(l2, u2) : &Self) -> Self { + let &Bounds(l1, u1) = self; + debug_assert!(l1 <= u1 && l2 <= u2); + Bounds(l1.min(l2), u1.max(u2)) + } + + #[inline] + pub fn superset(&self, &Bounds(l2, u2) : &Self) -> bool { + let &Bounds(l1, u1) = self; + debug_assert!(l1 <= u1 && l2 <= u2); + l1 <= l2 && u2 <= u1 + } + + // Return the greatest bound contained by both argument bounds, if one exists. 
+ #[inline] + pub fn glb(&self, &Bounds(l2, u2) : &Self) -> Option { + let &Bounds(l1, u1) = self; + debug_assert!(l1 <= u1 && l2 <= u2); + let l = l1.max(l2); + let u = u1.min(u2); + if l < u { + Some(Bounds(l, u)) + } else { + None + } + } +} + +impl Aggregator for Bounds { + #[inline] + fn aggregate(&mut self, aggregates : I) + where I : Iterator { + *self = aggregates.fold(*self, |a, b| a + b); + } + + #[inline] + fn summarise<'a, I>(&'a mut self, mut aggregates : I) + where I : Iterator { + *self = match aggregates.next() { + None => Bounds(F::ZERO, F::ZERO), // No parts in this cube; the function is zero + Some(&bounds) => { + aggregates.fold(bounds, |a, b| a.common(b)) + } + } + } + + #[inline] + fn new() -> Self { + Bounds(F::ZERO, F::ZERO) + } +} diff -r 000000000000 -r 9f27689eb130 src/bisection_tree/bt.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/bisection_tree/bt.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,588 @@ + +use std::slice::{Iter,IterMut}; +use std::iter::once; +use std::rc::Rc; +use serde::{Serialize, Deserialize}; +pub use nalgebra::Const; +use itertools::izip; + +use crate::iter::{MapF,Mappable}; +use crate::types::{Float, Num}; +use crate::coefficients::pow; +use crate::maputil::{ + array_init, + map2, + map2_indexed, + collect_into_array_unchecked +}; +pub use crate::sets::Cube; +pub use crate::loc::Loc; +use super::support::*; +use super::aggregator::*; + +#[derive(Clone,Debug)] +pub enum NodeOption { + // TODO: Could optimise Uninitialised away by simply treat Leaf with an empty Vec as + // something that can be still replaced with Branches. + Uninitialised, + // TODO: replace with QuickVec fast and w/o allocs on single elements. + Leaf(Rc>), + Branches(Rc>), +} + +/// Node of a [`BT`] bisection tree. +#[derive(Clone,Debug)] +pub struct Node { + pub(super) data : NodeOption, + /// Aggregator for `data`. + pub(super) aggregator : A, +} + +/// Branch information of a [`Node`] of a [`BT`] bisection tree. +#[derive(Clone,Debug)] +pub struct Branches { + /// Point for subdivision of the (unstored) [`Cube`] corresponding to the node. + pub(super) branch_at : Loc, + /// Subnodes + pub(super) nodes : [Node; P], +} + +/// Dirty workaround to broken Rust drop, see [https://github.com/rust-lang/rust/issues/58068](). +impl +Drop for Node { + fn drop(&mut self) { + use NodeOption as NO; + + let process = |brc : Rc>, + to_drop : &mut Vec>>| { + // We only drop Branches if we have the only strong reference. + Rc::try_unwrap(brc).ok().map(|branches| branches.nodes.map(|mut node| { + if let NO::Branches(brc2) = std::mem::replace(&mut node.data, NO::Uninitialised) { + to_drop.push(brc2) + } + })); + }; + + // We mark Self as NodeOption::Uninitialised, extracting the real contents. + // If we have subprocess, we need to process them. + if let NO::Branches(brc1) = std::mem::replace(&mut self.data, NO::Uninitialised) { + // We store a queue of Rc to drop into a vector + let mut to_drop = Vec::new(); + process(brc1, &mut to_drop); + + // While there are any Branches in the drop queue vector, we continue the process, + // pushing all internal branching nodes into the queue. 
+ while let Some(brc) = to_drop.pop() { + process(brc, &mut to_drop) + } + } + } +} + +pub trait Depth : 'static + Copy + std::fmt::Debug { + type Lower : Depth; + fn lower(&self) -> Option; + fn lower_or(&self) -> Self::Lower; +} + +#[derive(Copy,Clone,Debug,Serialize,Deserialize)] +pub struct DynamicDepth(pub u8); +impl Depth for DynamicDepth { + type Lower = Self; + #[inline] + fn lower(&self) -> Option { + if self.0>0 { + Some(DynamicDepth(self.0-1)) + } else { + None + } + } + #[inline] + fn lower_or(&self) -> Self { + DynamicDepth(if self.0>0 { self.0 - 1 } else { 0 }) + } +} + +impl Depth for Const<0> { + type Lower = Self; + fn lower(&self) -> Option { None } + fn lower_or(&self) -> Self::Lower { Const } +} + +macro_rules! impl_constdepth { + ($($n:literal)*) => { $( + impl Depth for Const<$n> { + type Lower = Const<{$n-1}>; + fn lower(&self) -> Option { Some(Const) } + fn lower_or(&self) -> Self::Lower { Const } + } + )* }; +} +impl_constdepth!(1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32); + + +pub trait BranchCount {} +macro_rules! impl_branchcount { + ($($n:literal)*) => { $( + impl BranchCount<$n> for Const<{pow(2, $n)}>{} + )* } +} +impl_branchcount!(1 2 3 4 5 6 7 8); + +impl Branches +where Const
<P>
: BranchCount, + A : Aggregator +{ + fn get_node_index(&self, x : &Loc) -> usize { + izip!(0..P, x.iter(), self.branch_at.iter()).map(|(i, x_i, branch_i)| + if x_i > branch_i { 1<) -> &Node { + &self.nodes[self.get_node_index(x)] + } +} + + +pub struct BTIter<'a, D> { + iter : Iter<'a, D>, +} + +pub struct SubcubeIter<'b, F : Float, const N : usize, const P : usize> { + domain : &'b Cube, + branch_at : Loc, + index : usize, +} + +#[inline] +fn get_subcube(branch_at : &Loc, domain : &Cube, i : usize) -> Cube { + map2_indexed(branch_at, domain, move |j, &branch, &[start, end]| { + if i & (1 << j) != 0 { + [branch, end] + } else { + [start, branch] + } + }).into() +} + +impl<'a, 'b, F : Float, const N : usize, const P : usize> Iterator +for SubcubeIter<'b, F, N, P> { + type Item = Cube; + #[inline] + fn next(&mut self) -> Option { + if self.index < P { + let i = self.index; + self.index += 1; + Some(get_subcube(&self.branch_at, self.domain, i)) + } else { + None + } + } +} + +impl +Branches +where Const
<P>
: BranchCount, + A : Aggregator { + + pub fn new_with>( + domain : &Cube, + support : &S + ) -> Self { + let hint = support.bisection_hint(domain); + let branch_at = map2(&hint, domain, |h, r| { + h.unwrap_or_else(|| (r[0]+r[1])/F::TWO).max(r[0]).min(r[1]) + }).into(); + Branches{ + branch_at : branch_at, + nodes : array_init(|| Node::new()), + } + } + + /// Get an iterator over the aggregators of the nodes of this branch head. + #[inline] + pub fn aggregators(&self) -> MapF>, &'_ A> { + self.nodes.iter().mapF(Node::get_aggregator) + } + + #[inline] + pub fn iter_subcubes<'b>(&self, domain : &'b Cube) + -> SubcubeIter<'b, F, N, P> { + SubcubeIter { + domain : domain, + branch_at : self.branch_at, + index : 0, + } + } + + /// Iterate over all nodes and corresponding subcubes of self. + #[inline] + pub fn nodes_and_cubes<'a, 'b>(&'a self, domain : &'b Cube) + -> std::iter::Zip>, SubcubeIter<'b, F, N, P>> { + self.nodes.iter().zip(self.iter_subcubes(domain)) + } + + /// Mutably iterate over all nodes and corresponding subcubes of self. + #[inline] + pub fn nodes_and_cubes_mut<'a, 'b>(&'a mut self, domain : &'b Cube) + -> std::iter::Zip>, SubcubeIter<'b, F, N, P>> { + let subcube_iter = self.iter_subcubes(domain); + self.nodes.iter_mut().zip(subcube_iter) + } + + /// Insert data into the branch. + pub fn insert>( + &mut self, + domain : &Cube, + d : D, + new_leaf_depth : M, + support : &S + ) { + let support_hint = support.support_hint(); + for (node, subcube) in self.nodes_and_cubes_mut(&domain) { + if support_hint.intersects(&subcube) { + node.insert( + &subcube, + d, + new_leaf_depth, + support + ); + } + } + } + + /// Construct a new instance for a different aggregator + pub fn convert_aggregator( + self, + generator : &G, + domain : &Cube + ) -> Branches + where ANew : Aggregator, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + let branch_at = self.branch_at; + let subcube_iter = self.iter_subcubes(domain); + let new_nodes = self.nodes.into_iter().zip(subcube_iter).map(|(node, subcube)| { + // TODO: avoid clone + node.convert_aggregator(generator, &subcube) + }); + Branches { + branch_at : branch_at, + nodes : collect_into_array_unchecked(new_nodes), + } + } + + /// Recalculate aggregator after changes to generator + pub fn refresh_aggregator( + &mut self, + generator : &G, + domain : &Cube + ) where G : SupportGenerator, + G::SupportType : LocalAnalysis { + for (node, subcube) in self.nodes_and_cubes_mut(domain) { + node.refresh_aggregator(generator, &subcube) + } + } +} + +impl +Node +where Const
<P>
: BranchCount, + A : Aggregator { + + #[inline] + pub fn new() -> Self { + Node { + data : NodeOption::Uninitialised, + aggregator : A::new(), + } + } + + /// Get leaf data + #[inline] + pub fn get_leaf_data(&self, x : &Loc) -> Option<&Vec> { + match self.data { + NodeOption::Uninitialised => None, + NodeOption::Leaf(ref data) => Some(data), + NodeOption::Branches(ref b) => b.get_node(x).get_leaf_data(x), + } + } + + /// Returns a reference to the aggregator of this node + #[inline] + pub fn get_aggregator(&self) -> &A { + &self.aggregator + } + + /// Insert `d` into the tree. If an `Incomplete` node is encountered, a new + /// leaf is created at a minimum depth of `new_leaf_depth` + pub fn insert>( + &mut self, + domain : &Cube, + d : D, + new_leaf_depth : M, + support : &S + ) { + match &mut self.data { + NodeOption::Uninitialised => { + // Replace uninitialised node with a leaf or a branch + self.data = match new_leaf_depth.lower() { + None => { + let a = support.local_analysis(&domain); + self.aggregator.aggregate(once(a)); + // TODO: this is currently a dirty hard-coded heuristic; + // should add capacity as a parameter + let mut vec = Vec::with_capacity(2*P+1); + vec.push(d); + NodeOption::Leaf(Rc::new(vec)) + }, + Some(lower) => { + let b = Rc::new({ + let mut b0 = Branches::new_with(domain, support); + b0.insert(domain, d, lower, support); + b0 + }); + self.aggregator.summarise(b.aggregators()); + NodeOption::Branches(b) + } + } + }, + NodeOption::Leaf(leaf) => { + Rc::make_mut(leaf).push(d); + let a = support.local_analysis(&domain); + self.aggregator.aggregate(once(a)); + }, + NodeOption::Branches(b) => { + Rc::make_mut(b).insert(domain, d, new_leaf_depth.lower_or(), support); + self.aggregator.summarise(b.aggregators()); + }, + } + } + + /// Construct a new instance for a different aggregator + pub fn convert_aggregator( + mut self, + generator : &G, + domain : &Cube + ) -> Node + where ANew : Aggregator, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + + // The mem::replace is needed due to the [`Drop`] implementation to extract self.data. + match std::mem::replace(&mut self.data, NodeOption::Uninitialised) { + NodeOption::Uninitialised => Node { + data : NodeOption::Uninitialised, + aggregator : ANew::new(), + }, + NodeOption::Leaf(v) => { + let mut anew = ANew::new(); + anew.aggregate(v.iter().map(|d| { + let support = generator.support_for(*d); + support.local_analysis(&domain) + })); + + Node { + data : NodeOption::Leaf(v), + aggregator : anew, + } + }, + NodeOption::Branches(b) => { + // TODO: now with Rc, convert_aggregator should be reference-based. + let bnew = Rc::new(Rc::unwrap_or_clone(b).convert_aggregator(generator, domain)); + let mut anew = ANew::new(); + anew.summarise(bnew.aggregators()); + Node { + data : NodeOption::Branches(bnew), + aggregator : anew, + } + } + } + } + + /// Refresh aggregator after changes to generator + pub fn refresh_aggregator( + &mut self, + generator : &G, + domain : &Cube + ) where G : SupportGenerator, + G::SupportType : LocalAnalysis { + match &mut self.data { + NodeOption::Uninitialised => { }, + NodeOption::Leaf(v) => { + self.aggregator = A::new(); + self.aggregator.aggregate(v.iter().map(|d| { + generator.support_for(*d) + .local_analysis(&domain) + })); + }, + NodeOption::Branches(ref mut b) => { + // TODO: now with Rc, convert_aggregator should be reference-based. 
+ Rc::make_mut(b).refresh_aggregator(generator, domain); + self.aggregator.summarise(b.aggregators()); + } + } + } +} + +impl<'a, D> Iterator for BTIter<'a,D> { + type Item = &'a D; + #[inline] + fn next(&mut self) -> Option<&'a D> { + self.iter.next() + } +} + + +// +// BT +// + +/// Internal structure to hide the `const P : usize` parameter of [`Node`] until +/// const generics are flexible enough to fix `P=pow(2, N)`. +pub trait BTNode +where F : Float, + D : 'static + Copy, + A : Aggregator { + type Node : Clone + std::fmt::Debug; +} + +#[derive(Debug)] +pub struct BTNodeLookup; + +/// Interface to a [`BT`] bisection tree. +pub trait BTImpl : std::fmt::Debug + Clone + GlobalAnalysis { + type Data : 'static + Copy; + type Depth : Depth; + type Agg : Aggregator; + type Converted : BTImpl where ANew : Aggregator; + + /// Insert d into the `BisectionTree`. + fn insert>( + &mut self, + d : Self::Data, + support : &S + ); + + /// Construct a new instance for a different aggregator + fn convert_aggregator(self, generator : &G) + -> Self::Converted + where ANew : Aggregator, + G : SupportGenerator, + G::SupportType : LocalAnalysis; + + + /// Refresh aggregator after changes to generator + fn refresh_aggregator(&mut self, generator : &G) + where G : SupportGenerator, + G::SupportType : LocalAnalysis; + + /// Iterarate items at x + fn iter_at<'a>(&'a self, x : &'a Loc) -> BTIter<'a, Self::Data>; + + /// Create a new instance + fn new(domain : Cube, depth : Self::Depth) -> Self; +} + +/// The main bisection tree structure. The interface operations are via [`BTImpl`] +/// to hide the `const P : usize` parameter until const generics are flexible enough +/// to fix `P=pow(2, N)`. +#[derive(Clone,Debug)] +pub struct BT< + M : Depth, + F : Float, + D : 'static + Copy, + A : Aggregator, + const N : usize, +> where BTNodeLookup : BTNode { + pub(super) depth : M, + pub(super) domain : Cube, + pub(super) topnode : >::Node, +} + +macro_rules! impl_bt { + ($($n:literal)*) => { $( + impl BTNode for BTNodeLookup + where F : Float, + D : 'static + Copy + std::fmt::Debug, + A : Aggregator { + type Node = Node; + } + + impl BTImpl for BT + where M : Depth, + F : Float, + D : 'static + Copy + std::fmt::Debug, + A : Aggregator { + type Data = D; + type Depth = M; + type Agg = A; + type Converted = BT where ANew : Aggregator; + + /// Insert `d` into the tree. + fn insert>( + &mut self, + d : D, + support : &S + ) { + self.topnode.insert( + &self.domain, + d, + self.depth, + support + ); + } + + /// Construct a new instance for a different aggregator + fn convert_aggregator(self, generator : &G) -> Self::Converted + where ANew : Aggregator, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + let topnode = self.topnode.convert_aggregator(generator, &self.domain); + BT { + depth : self.depth, + domain : self.domain, + topnode + } + } + + /// Refresh aggregator after changes to generator + fn refresh_aggregator(&mut self, generator : &G) + where G : SupportGenerator, + G::SupportType : LocalAnalysis { + self.topnode.refresh_aggregator(generator, &self.domain); + } + + /// Iterate elements at `x`. 
+ fn iter_at<'a>(&'a self, x : &'a Loc) -> BTIter<'a,D> { + match self.topnode.get_leaf_data(x) { + Some(data) => BTIter { iter : data.iter() }, + None => BTIter { iter : [].iter() } + } + } + + fn new(domain : Cube, depth : M) -> Self { + BT { + depth : depth, + domain : domain, + topnode : Node::new(), + } + } + } + + impl GlobalAnalysis for BT + where M : Depth, + F : Float, + D : 'static + Copy + std::fmt::Debug, + A : Aggregator { + fn global_analysis(&self) -> A { + self.topnode.get_aggregator().clone() + } + } + )* } +} + +impl_bt!(1 2 3 4); + diff -r 000000000000 -r 9f27689eb130 src/bisection_tree/btfn.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/bisection_tree/btfn.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,689 @@ + +use numeric_literals::replace_float_literals; +use std::iter::Sum; +use std::marker::PhantomData; +pub use nalgebra::Const; +use crate::types::Float; +use crate::mapping::Mapping; +use crate::linops::Linear; +use crate::sets::Set; +pub use crate::sets::Cube; +pub use crate::loc::Loc; +use super::support::*; +use super::bt::*; +use super::refine::*; +use super::aggregator::*; +use super::either::*; +use crate::fe_model::base::RealLocalModel; +use crate::fe_model::p2_local_model::*; + +/// Presentation for functions constructed as a sum of components with limited support. +/// Lookup of relevant components at a specific point of a [`Loc`] domain is done +/// with a bisection tree (BT). We do not impose that the `BT` parameter be an +/// instance of [`BTImpl`], to allow performance improvements via “pre-BTFNs” that do not +/// construct a BT until being added to another function. Addition and subtraction always +/// use a copy-on-write clone of the BT of the left-hand-side operand, discarding +/// the BT of the right-hand-side operand. +#[derive(Clone,Debug)] +pub struct BTFN< + F : Float, + G : SupportGenerator, + BT /*: BTImpl*/, + const N : usize +> /*where G::SupportType : LocalAnalysis*/ { + bt : BT, + generator : G, + _phantoms : PhantomData, +} + +impl +BTFN +where G : SupportGenerator, + G::SupportType : LocalAnalysis, + BT : BTImpl { + + /// Returns the [`Support`] corresponding to the identifier `d`. + /// Simply wraps [`SupportGenerator::support_for`]. + #[inline] + fn support_for(&self, d : G::Id) -> G::SupportType { + self.generator.support_for(d) + } + + /// Create a new BTFN. The bisection tree `bt` should be pre-initialised to + /// correspond to the `generator`. Use [`Self::construct`] if no pre-initialised tree + /// is available. Use [`Self::new_refresh`] when the aggregator may need updates. + pub fn new(bt : BT, generator : G) -> Self { + BTFN { + bt : bt, + generator : generator, + _phantoms : std::marker::PhantomData, + } + } + + /// Create a new BTFN. The bisection tree `bt` should be pre-initialised to + /// correspond to the `generator`, but the aggregator may be out of date. + pub fn new_refresh(bt : &BT, generator : G) -> Self { + // clone().refresh_aggregator(…) as opposed to convert_aggregator + // ensures that the type is maintained. Due to Rc-pointer copy-on-write, + // the effort is not significantly different. + let mut btnew = bt.clone(); + btnew.refresh_aggregator(&generator); + BTFN::new(btnew, generator) + } + + /// Create a new BTFN. Unlike [`Self::new`], this constructs the bisection tree based on + /// the generator.
+ pub fn construct(domain : Cube, depth : BT::Depth, generator : G) -> Self { + let mut bt = BT::new(domain, depth); + for (d, support) in generator.all_data() { + bt.insert(d, &support); + } + Self::new(bt, generator) + } + + /// Construct a new instance for a different aggregator + pub fn convert_aggregator(self) -> BTFN, N> + where ANew : Aggregator, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + BTFN::new(self.bt.convert_aggregator(&self.generator), self.generator) + } + + /// Change the [`BTImpl`]. + pub fn instantiate< + BTNew : BTImpl, + > (self, domain : Cube, depth : BTNew::Depth) -> BTFN + where G::SupportType : LocalAnalysis { + BTFN::construct(domain, depth, self.generator) + } + + /// Change the generator (after, e.g., a scaling of the latter). + pub fn new_generator(&self, generator : G) -> Self { + BTFN::new_refresh(&self.bt, generator) + } + + /// Refresh aggregator after updates to generator + fn refresh_aggregator(&mut self) { + self.bt.refresh_aggregator(&self.generator); + } + +} + +/// A “pre-BTFN” with no bisection tree. +pub type PreBTFN = BTFN; + +impl PreBTFN where G : SupportGenerator { + + /// Create a new “pre-BTFN” with no bisection tree. This can feasibly only be + /// multiplied by a scalar or added to or subtracted from a proper [`BTFN`]. + pub fn new_pre(generator : G) -> Self { + BTFN { + bt : (), + generator : generator, + _phantoms : std::marker::PhantomData, + } + } +} + +impl +BTFN +where G : SupportGenerator, + G::SupportType : LocalAnalysis, + BT : BTImpl { + + /// Helper function for implementing [`std::ops::Add`]. + fn add_another(self, other : G2) -> BTFN, BT, N> + where G2 : SupportGenerator, + G2::SupportType : LocalAnalysis { + + let mut bt = self.bt; + let both = BothGenerators(self.generator, other); + + for (d, support) in both.all_right_data() { + bt.insert(d, &support); + } + + BTFN { + bt : bt, + generator : both, + _phantoms : std::marker::PhantomData, + } + } +} + +macro_rules! make_btfn_add { + ($lhs:ty, $preprocess:path, $($extra_trait:ident)?) => { + impl<'a, F : Float, G1, G2, BT1, BT2, const N : usize> + std::ops::Add> for + $lhs + where BT1 : BTImpl, + G1 : SupportGenerator + $($extra_trait)?, + G2 : SupportGenerator, + G1::SupportType : LocalAnalysis, + G2::SupportType : LocalAnalysis { + type Output = BTFN, BT1, N>; + #[inline] + fn add(self, other : BTFN) -> Self::Output { + $preprocess(self).add_another(other.generator) + } + } + + impl<'a, 'b, F : Float, G1, G2, BT1, BT2, const N : usize> + std::ops::Add<&'b BTFN> for + $lhs + where BT1 : BTImpl, + G1 : SupportGenerator + $($extra_trait)?, + G2 : SupportGenerator + Clone, + G1::SupportType : LocalAnalysis, + G2::SupportType : LocalAnalysis { + + type Output = BTFN, BT1, N>; + #[inline] + fn add(self, other : &'b BTFN) -> Self::Output { + $preprocess(self).add_another(other.generator.clone()) + } + } + } +} + +make_btfn_add!(BTFN, std::convert::identity, ); +make_btfn_add!(&'a BTFN, Clone::clone, Clone); + +macro_rules! make_btfn_sub { + ($lhs:ty, $preprocess:path, $($extra_trait:ident)?) 
=> { + impl<'a, F : Float, G1, G2, BT1, BT2, const N : usize> + std::ops::Sub> for + $lhs + where BT1 : BTImpl, + G1 : SupportGenerator + $($extra_trait)?, + G2 : SupportGenerator, + G1::SupportType : LocalAnalysis, + G2::SupportType : LocalAnalysis { + type Output = BTFN, BT1, N>; + #[inline] + fn sub(self, other : BTFN) -> Self::Output { + $preprocess(self).add_another(other.generator.neg()) + } + } + + impl<'a, 'b, F : Float, G1, G2, BT1, BT2, const N : usize> + std::ops::Sub<&'b BTFN> for + $lhs + where BT1 : BTImpl, + G1 : SupportGenerator + $($extra_trait)?, + G2 : SupportGenerator + Clone, + G1::SupportType : LocalAnalysis, + G2::SupportType : LocalAnalysis, + /*&'b G2 : std::ops::Neg*/ { + + type Output = BTFN, BT1, N>; + #[inline] + fn sub(self, other : &'b BTFN) -> Self::Output { + // FIXME: the compiler crashes in an overflow on this. + //$preprocess(self).add_another(std::ops::Neg::neg(&other.generator)) + $preprocess(self).add_another(other.generator.clone().neg()) + } + } + } +} + +make_btfn_sub!(BTFN, std::convert::identity, ); +make_btfn_sub!(&'a BTFN, Clone::clone, Clone); + +macro_rules! make_btfn_scalarop_rhs { + ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => { + impl + std::ops::$trait_assign + for BTFN + where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + #[inline] + fn $fn_assign(&mut self, t : F) { + self.generator.$fn_assign(t); + self.refresh_aggregator(); + } + } + + impl + std::ops::$trait + for BTFN + where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + type Output = Self; + #[inline] + fn $fn(mut self, t : F) -> Self::Output { + self.generator.$fn_assign(t); + self.refresh_aggregator(); + self + } + } + + impl<'a, F : Float, G, BT, const N : usize> + std::ops::$trait + for &'a BTFN + where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis, + &'a G : std::ops::$trait { + type Output = BTFN; + #[inline] + fn $fn(self, t : F) -> Self::Output { + self.new_generator(self.generator.$fn(t)) + } + } + } +} + +make_btfn_scalarop_rhs!(Mul, mul, MulAssign, mul_assign); +make_btfn_scalarop_rhs!(Div, div, DivAssign, div_assign); + +macro_rules! make_btfn_scalarop_lhs { + ($trait:ident, $fn:ident, $fn_assign:ident, $($f:ident)+) => { $( + impl + std::ops::$trait> + for $f + where BT : BTImpl<$f, N>, + G : SupportGenerator<$f, N, Id=BT::Data>, + G::SupportType : LocalAnalysis<$f, BT::Agg, N> { + type Output = BTFN<$f, G, BT, N>; + #[inline] + fn $fn(self, mut a : BTFN<$f, G, BT, N>) -> Self::Output { + a.generator.$fn_assign(self); + a.refresh_aggregator(); + a + } + } + + impl<'a, G, BT, const N : usize> + std::ops::$trait<&'a BTFN<$f, G, BT, N>> + for $f + where BT : BTImpl<$f, N>, + G : SupportGenerator<$f, N, Id=BT::Data> + Clone, + G::SupportType : LocalAnalysis<$f, BT::Agg, N>, + // FIXME: This causes compiler overflow + /*&'a G : std::ops::$trait<$f,Output=G>*/ { + type Output = BTFN<$f, G, BT, N>; + #[inline] + fn $fn(self, a : &'a BTFN<$f, G, BT, N>) -> Self::Output { + let mut tmp = a.generator.clone(); + tmp.$fn_assign(self); + a.new_generator(tmp) + // FIXME: Prevented by the compiler overflow above. + //a.new_generator(a.generator.$fn(a)) + } + } + )+ } +} + +make_btfn_scalarop_lhs!(Mul, mul, mul_assign, f32 f64); +make_btfn_scalarop_lhs!(Div, div, div_assign, f32 f64); + +macro_rules! 
make_btfn_unaryop { + ($trait:ident, $fn:ident) => { + impl + std::ops::$trait + for BTFN + where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + type Output = Self; + #[inline] + fn $fn(mut self) -> Self::Output { + self.generator = self.generator.$fn(); + self.refresh_aggregator(); + self + } + } + + /*impl<'a, F : Float, G, BT, const N : usize> + std::ops::$trait + for &'a BTFN + where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis, + &'a G : std::ops::$trait { + type Output = BTFN; + #[inline] + fn $fn(self) -> Self::Output { + self.new_generator(std::ops::$trait::$fn(&self.generator)) + } + }*/ + } +} + +make_btfn_unaryop!(Neg, neg); + + + +// +// Mapping +// + +impl<'a, F : Float, G, BT, V, const N : usize> Mapping<&'a Loc> +for BTFN +where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis + Mapping<&'a Loc, Codomain = V>, + V : Sum { + + type Codomain = V; + + fn value(&self, x : &'a Loc) -> Self::Codomain { + self.bt.iter_at(x).map(|&d| self.support_for(d).value(x)).sum() + } +} + +impl Mapping> +for BTFN +where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis + Mapping, Codomain = V>, + V : Sum { + + type Codomain = V; + + fn value(&self, x : Loc) -> Self::Codomain { + self.bt.iter_at(&x).map(|&d| self.support_for(d).value(x)).sum() + } +} + +impl GlobalAnalysis +for BTFN +where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + + #[inline] + fn global_analysis(&self) -> BT::Agg { + self.bt.global_analysis() + } +} + +// +// Blanket implementation of BTFN as a linear functional over objects +// that are linear functionals over BTFN. +// + +impl Linear +for BTFN +where BT : BTImpl, + G : SupportGenerator, + G::SupportType : LocalAnalysis, + X : Linear, Codomain=F> { + type Codomain = F; + + #[inline] + fn apply(&self, x : &X) -> F { + x.apply(self) + } +} + +/// Helper trait for performing approximate minimisation using P2 elements. +pub trait P2Minimise : Set { + fn p2_minimise F>(&self, g : G) -> (U, F); + +} + +impl P2Minimise, F> for Cube { + fn p2_minimise) -> F>(&self, g : G) -> (Loc, F) { + let interval = Simplex(self.corners()); + interval.p2_model(&g).minimise(&interval) + } +} + +#[replace_float_literals(F::cast_from(literal))] +impl P2Minimise, F> for Cube { + fn p2_minimise) -> F>(&self, g : G) -> (Loc, F) { + if false { + // Split into two triangle (simplex) with separate P2 model in each. + // The six nodes of each triangle are the corners and the edges. + let [a, b, c, d] = self.corners(); + let [va, vb, vc, vd] = [g(&a), g(&b), g(&c), g(&d)]; + + let ab = midpoint(&a, &b); + let bc = midpoint(&b, &c); + let ca = midpoint(&c, &a); + let cd = midpoint(&c, &d); + let da = midpoint(&d, &a); + let [vab, vbc, vca, vcd, vda] = [g(&ab), g(&bc), g(&ca), g(&cd), g(&da)]; + + let s1 = Simplex([a, b, c]); + let m1 = P2LocalModel::::new( + &[a, b, c, ab, bc, ca], + &[va, vb, vc, vab, vbc, vca] + ); + + let r1@(_, v1) = m1.minimise(&s1); + + let s2 = Simplex([c, d, a]); + let m2 = P2LocalModel::::new( + &[c, d, a, cd, da, ca], + &[vc, vd, va, vcd, vda, vca] + ); + + let r2@(_, v2) = m2.minimise(&s2); + + if v1 < v2 { r1 } else { r2 } + } else { + // Single P2 model for the entire cube. 
+ let [a, b, c, d] = self.corners(); + let [va, vb, vc, vd] = [g(&a), g(&b), g(&c), g(&d)]; + let [e, f] = match 'r' { + 'm' => [(&a + &b + &c) / 3.0, (&c + &d + &a) / 3.0], + 'c' => [midpoint(&a, &b), midpoint(&a, &d)], + 'w' => [(&a + &b * 2.0) / 3.0, (&a + &d * 2.0) / 3.0], + 'r' => { + // Pseudo-randomise edge midpoints + let Loc([x, y]) = a; + let tmp : f64 = (x+y).as_(); + match tmp.to_bits() % 4 { + 0 => [midpoint(&a, &b), midpoint(&a, &d)], + 1 => [midpoint(&c, &d), midpoint(&a, &d)], + 2 => [midpoint(&a, &b), midpoint(&b, &c)], + _ => [midpoint(&c, &d), midpoint(&b, &c)], + } + }, + _ => [self.center(), (&a + &b) / 2.0], + }; + let [ve, vf] = [g(&e), g(&f)]; + + let m1 = P2LocalModel::::new( + &[a, b, c, d, e, f], + &[va, vb, vc, vd, ve, vf], + ); + + m1.minimise(self) + } + } +} + +struct RefineMax; +struct RefineMin; + +/// TODO: update doc. +/// Simple refiner that keeps the difference between upper and lower bounds +/// of [`Bounded`] functions within $ε$. The functions have to also be continuous +/// for this refiner to ever succeed. +/// The type parameter `T` should be either [`RefineMax`] or [`RefineMin`]. +struct P2Refiner { + bound : Option, + tolerance : F, + max_steps : usize, + #[allow(dead_code)] // `how` is just for type system purposes. + how : T, +} + +impl Refiner, G, N> +for P2Refiner +where Cube : P2Minimise, F>, + G : SupportGenerator, + G::SupportType : for<'a> Mapping<&'a Loc,Codomain=F> + + LocalAnalysis, N> { + type Result = Option<(Loc, F)>; + type Sorting = UpperBoundSorting; + + fn refine( + &self, + aggregator : &Bounds, + cube : &Cube, + data : &Vec, + generator : &G, + step : usize + ) -> RefinerResult, Self::Result> { + // g gives the negative of the value of the function presented by `data` and `generator`. + let g = move |x : &Loc| { + let f = move |&d| generator.support_for(d).value(x); + -data.iter().map(f).sum::() + }; + // … so the negative of the minimum is the maximm we want. + let (x, neg_v) = cube.p2_minimise(g); + let v = -neg_v; + + if self.bound.map_or(false, |b| aggregator.upper() <= b + self.tolerance) { + // The upper bound is below the maximisation threshold. Don't bother with this cube. + RefinerResult::Uncertain(*aggregator, None) + } else if step < self.max_steps && (aggregator.upper() - v).abs() > self.tolerance { + // The function isn't refined enough in `cube`, so return None + // to indicate that further subdivision is required. + RefinerResult::NeedRefinement + } else { + // The data is refined enough, so return new hopefully better bounds + // and the maximiser. + let res = (x, v); + let bounds = Bounds(aggregator.lower(), v); + RefinerResult::Uncertain(bounds, Some(res)) + } + } +} + + +impl Refiner, G, N> +for P2Refiner +where Cube : P2Minimise, F>, + G : SupportGenerator, + G::SupportType : for<'a> Mapping<&'a Loc,Codomain=F> + + LocalAnalysis, N> { + type Result = Option<(Loc, F)>; + type Sorting = LowerBoundSorting; + + fn refine( + &self, + aggregator : &Bounds, + cube : &Cube, + data : &Vec, + generator : &G, + step : usize + ) -> RefinerResult, Self::Result> { + // g gives the value of the function presented by `data` and `generator`. + let g = move |x : &Loc| { + let f = move |&d| generator.support_for(d).value(x); + data.iter().map(f).sum::() + }; + // Minimise it. + let (x, v) = cube.p2_minimise(g); + + if self.bound.map_or(false, |b| aggregator.lower() >= b - self.tolerance) { + // The lower bound is above the minimisation threshold. Don't bother with this cube. 
+ RefinerResult::Uncertain(*aggregator, None) + } else if step < self.max_steps && (aggregator.lower() - v).abs() > self.tolerance { + // The function isn't refined enough in `cube`, so return None + // to indicate that further subdivision is required. + RefinerResult::NeedRefinement + } else { + // The data is refined enough, so return new hopefully better bounds + // and the minimiser. + let res = (x, v); + let bounds = Bounds(v, aggregator.upper()); + RefinerResult::Uncertain(bounds, Some(res)) + } + } +} + + + +struct BoundRefiner { + bound : F, + tolerance : F, + max_steps : usize, + #[allow(dead_code)] // `how` is just for type system purposes. + how : T, +} + +impl Refiner, G, N> +for BoundRefiner +where G : SupportGenerator { + type Result = bool; + type Sorting = UpperBoundSorting; + + fn refine( + &self, + aggregator : &Bounds, + _cube : &Cube, + _data : &Vec, + _generator : &G, + step : usize + ) -> RefinerResult, Self::Result> { + if aggregator.upper() <= self.bound + self.tolerance { + // Below upper bound within tolerances. Indicate uncertain success. + RefinerResult::Uncertain(*aggregator, true) + } else if aggregator.lower() >= self.bound - self.tolerance { + // Above upper bound within tolerances. Indicate certain failure. + RefinerResult::Certain(false) + } else if step < self.max_steps { + // No decision possible, but within step bounds - further subdivision is required. + RefinerResult::NeedRefinement + } else { + // No decision possible, but past step bounds + RefinerResult::Uncertain(*aggregator, false) + } + } +} + + +impl BTFN +where BT : BTSearch>, + G : SupportGenerator, + G::SupportType : for<'a> Mapping<&'a Loc,Codomain=F> + + LocalAnalysis, N>, + Cube : P2Minimise, F> { + + /// Try to maximise the BTFN within stated value `tolerance`, taking at most `max_steps` + /// refinement steps. Returns the maximiser and the maximum value. + pub fn maximise(&mut self, tolerance : F, max_steps : usize) -> (Loc, F) { + let refiner = P2Refiner{ tolerance, max_steps, how : RefineMax, bound : None }; + self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.").unwrap() + } + + /// Try to maximise the BTFN within stated value `tolerance`, taking at most `max_steps` + /// refinement steps. Returns the maximiser and the maximum value when one is found above + /// the `bound` threshold, otherwise `None`. + pub fn maximise_above(&mut self, bound : F, tolerance : F, max_steps : usize) + -> Option<(Loc, F)> { + let refiner = P2Refiner{ tolerance, max_steps, how : RefineMax, bound : Some(bound) }; + self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.") + } + + /// Try to minimise the BTFN within stated value `tolerance`, taking at most `max_steps` + /// refinement steps. Returns the minimiser and the minimum value. + pub fn minimise(&mut self, tolerance : F, max_steps : usize) -> (Loc, F) { + let refiner = P2Refiner{ tolerance, max_steps, how : RefineMin, bound : None }; + self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.").unwrap() + } + + /// Try to minimise the BTFN within stated value `tolerance`, taking at most `max_steps` + /// refinement steps. Returns the minimiser and the minimum value when one is found below + /// the `bound` threshold, otherwise `None`. 
+ pub fn minimise_below(&mut self, bound : F, tolerance : F, max_steps : usize) + -> Option<(Loc, F)> { + let refiner = P2Refiner{ tolerance, max_steps, how : RefineMin, bound : Some(bound) }; + self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.") + } + /// Verify that the BTFN has the stated upper `bound` within the stated `tolerance`, taking at most + /// `max_steps` refinement steps. + pub fn has_upper_bound(&mut self, bound : F, tolerance : F, max_steps : usize) -> bool { + let refiner = BoundRefiner{ bound, tolerance, max_steps, how : RefineMax }; + self.bt.search_and_refine(&refiner, &self.generator).expect("Refiner failure.") + } +} diff -r 000000000000 -r 9f27689eb130 src/bisection_tree/either.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/bisection_tree/either.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,225 @@ + +use crate::types::*; +use super::support::*; +use crate::mapping::Mapping; +use std::iter::Chain; +use crate::iter::{Mappable,MapF,MapZ}; +use super::aggregator::*; + +// TODO: try to remove F and N +#[derive(Debug,Clone)] +pub struct BothGenerators( + pub(super) A, + pub(super) B, +); + +#[derive(Debug,Clone)] +pub enum EitherSupport { + Left(A), + Right(B), +} + +// We need type alias bounds to access associated types. +#[allow(type_alias_bounds)] +type BothAllDataIter< + 'a, F, + G1 : SupportGenerator, + G2 : SupportGenerator, + const N : usize +> = Chain< + MapF, (usize, EitherSupport)>, + MapZ, usize, (usize, EitherSupport)>, +>; + +impl BothGenerators { + /// Helper for [`all_data`]. + #[inline] + fn map_left((d, support) : (G1::Id, G1::SupportType)) + -> (usize, EitherSupport) + where G1 : SupportGenerator, + G2 : SupportGenerator { + + let id : usize = d.into(); + (id.into(), EitherSupport::Left(support)) + } + + /// Helper for [`all_data`].
+ #[inline] + fn map_right(n0 : &usize, (d, support) : (G2::Id, G2::SupportType)) + -> (usize, EitherSupport) + where G1 : SupportGenerator, + G2 : SupportGenerator { + + let id : usize = d.into(); + ((n0+id).into(), EitherSupport::Right(support)) + } + + #[inline] + pub(super) fn all_left_data(&self) + -> MapF, (usize, EitherSupport)> + where G1 : SupportGenerator, + G2 : SupportGenerator { + self.0.all_data().mapF(Self::map_left) + } + + #[inline] + pub(super) fn all_right_data(&self) + -> MapZ, usize, (usize, EitherSupport)> + where G1 : SupportGenerator, + G2 : SupportGenerator { + let n0 = self.0.support_count(); + self.1.all_data().mapZ(n0, Self::map_right) + } +} + +impl +SupportGenerator +for BothGenerators +where G1 : SupportGenerator, + G2 : SupportGenerator { + + type Id = usize; + type SupportType = EitherSupport; + type AllDataIter<'a> = BothAllDataIter<'a, F, G1, G2, N> where G1 : 'a, G2 : 'a; + + #[inline] + fn support_for(&self, id : Self::Id) + -> Self::SupportType { + let n0 = self.0.support_count(); + if id < n0 { + EitherSupport::Left(self.0.support_for(id.into())) + } else { + EitherSupport::Right(self.1.support_for((id-n0).into())) + } + } + + #[inline] + fn support_count(&self) -> usize { + self.0.support_count() + self.1.support_count() + } + + #[inline] + fn all_data(&self) -> Self::AllDataIter<'_> { + self.all_left_data().chain(self.all_right_data()) + } +} + +impl Support for EitherSupport +where S1 : Support, + S2 : Support { + + #[inline] + fn support_hint(&self) -> Cube { + match self { + EitherSupport::Left(ref a) => a.support_hint(), + EitherSupport::Right(ref b) => b.support_hint(), + } + } + + #[inline] + fn in_support(&self, x : &Loc) -> bool { + match self { + EitherSupport::Left(ref a) => a.in_support(x), + EitherSupport::Right(ref b) => b.in_support(x), + } + } + + #[inline] + fn bisection_hint(&self, cube : &Cube) -> [Option; N] { + match self { + EitherSupport::Left(ref a) => a.bisection_hint(cube), + EitherSupport::Right(ref b) => b.bisection_hint(cube), + } + } +} + +impl LocalAnalysis for EitherSupport +where A : Aggregator, + S1 : LocalAnalysis, + S2 : LocalAnalysis, { + + #[inline] + fn local_analysis(&self, cube : &Cube) -> A { + match self { + EitherSupport::Left(ref a) => a.local_analysis(cube), + EitherSupport::Right(ref b) => b.local_analysis(cube), + } + } +} + +impl GlobalAnalysis for EitherSupport +where A : Aggregator, + S1 : GlobalAnalysis, + S2 : GlobalAnalysis, { + + #[inline] + fn global_analysis(&self) -> A { + match self { + EitherSupport::Left(ref a) => a.global_analysis(), + EitherSupport::Right(ref b) => b.global_analysis(), + } + } +} + +impl Mapping for EitherSupport +where S1 : Mapping, + S2 : Mapping { + type Codomain = F; + #[inline] + fn value(&self, x : X) -> F { + match self { + EitherSupport::Left(ref a) => a.value(x), + EitherSupport::Right(ref b) => b.value(x), + } + } +} + +macro_rules! 
make_either_scalarop_rhs { + ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => { + impl + std::ops::$trait_assign + for BothGenerators + where G1 : std::ops::$trait_assign, + G2 : std::ops::$trait_assign, { + #[inline] + fn $fn_assign(&mut self, t : F) { + self.0.$fn_assign(t); + self.1.$fn_assign(t); + } + } + + impl<'a, F : Float, G1, G2> + std::ops::$trait + for &'a BothGenerators + where &'a G1 : std::ops::$trait, + &'a G2 : std::ops::$trait { + type Output = BothGenerators; + #[inline] + fn $fn(self, t : F) -> BothGenerators { + BothGenerators(self.0.$fn(t), self.1.$fn(t)) + } + } + } +} + +make_either_scalarop_rhs!(Mul, mul, MulAssign, mul_assign); +make_either_scalarop_rhs!(Div, div, DivAssign, div_assign); + +impl std::ops::Neg for BothGenerators +where G1 : std::ops::Neg, G2 : std::ops::Neg, { + type Output = BothGenerators; + #[inline] + fn neg(self) -> Self::Output { + BothGenerators(self.0.neg(), self.1.neg()) + } +} +/* +impl<'a, G1, G2> std::ops::Neg for &'a BothGenerators +where &'a G1 : std::ops::Neg, &'a G2 : std::ops::Neg, { + type Output = BothGenerators<<&'a G1 as std::ops::Neg>::Output, + <&'a G2 as std::ops::Neg>::Output>; + fn neg(self) -> Self::Output { + BothGenerators(self.0.neg(), self.1.neg()) + } +} +*/ \ No newline at end of file diff -r 000000000000 -r 9f27689eb130 src/bisection_tree/refine.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/bisection_tree/refine.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,425 @@ + +use std::collections::BinaryHeap; +use std::cmp::{PartialOrd,Ord,Ordering,Ordering::*,max}; +use std::rc::Rc; +use std::marker::PhantomData; +use crate::types::*; +use super::support::*; +use super::bt::*; +use super::aggregator::*; +use crate::nanleast::NaNLeast; + +/// Trait for sorting [`Aggregator`]s for [`BT`] refinement. +/// +/// The sorting involves two sorting keys, the “upper” and the “lower” key. Any [`BT`] branches +/// with upper key less the lower key of another are discarded from the refinement process. +pub trait AggregatorSorting { + // Priority + type Agg : Aggregator; + type Sort : Ord + Copy + std::fmt::Debug; + + /// Returns lower sorting key + fn sort_lower(aggregator : &Self::Agg) -> Self::Sort; + + /// Returns upper sorting key + fn sort_upper(aggregator : &Self::Agg) -> Self::Sort; + + /// Returns bottom sorting key. + fn bottom() -> Self::Sort; +} + +/// An [`AggregatorSorting`] for [`Bounds`], using the upper/lower bound as the upper/lower key. +/// +/// See [`LowerBoundSorting`] for the opposite ordering. +pub struct UpperBoundSorting(PhantomData); + +/// An [`AggregatorSorting`] for [`Bounds`], using the upper/lower bound as the lower/upper key. +/// +/// See [`UpperBoundSorting`] for the opposite ordering. 
+pub struct LowerBoundSorting(PhantomData); + +impl AggregatorSorting for UpperBoundSorting { + type Agg = Bounds; + type Sort = NaNLeast; + + #[inline] + fn sort_lower(aggregator : &Bounds) -> Self::Sort { NaNLeast(aggregator.lower()) } + + #[inline] + fn sort_upper(aggregator : &Bounds) -> Self::Sort { NaNLeast(aggregator.upper()) } + + #[inline] + fn bottom() -> Self::Sort { NaNLeast(F::NEG_INFINITY) } +} + + +impl AggregatorSorting for LowerBoundSorting { + type Agg = Bounds; + type Sort = NaNLeast; + + #[inline] + fn sort_upper(aggregator : &Bounds) -> Self::Sort { NaNLeast(-aggregator.lower()) } + + #[inline] + fn sort_lower(aggregator : &Bounds) -> Self::Sort { NaNLeast(-aggregator.upper()) } + + #[inline] + fn bottom() -> Self::Sort { NaNLeast(F::NEG_INFINITY) } +} + +/// Result type of [`Refiner::refine`] for a refiner producing a result of type `R` acting on +/// an [`Aggregator`] of type `A`. +pub enum RefinerResult { + /// Indicates in insufficiently refined state: the [`BT`] needs to be further refined. + NeedRefinement, + /// Indicates a certain result `R`, stop refinement immediately. + Certain(R), + /// Indicates an uncertain result: continue refinement until candidates have been exhausted + /// or a certain result found. + Uncertain(A, R) +} + +use RefinerResult::*; + +/// A `Refiner` is used to determine whether an [`Aggregator`] `A` is sufficiently refined within +/// a [`Cube`] of a [`BT`], and in such a case, produce a desired result (e.g. a maximum value of +/// a function). +pub trait Refiner +where F : Num, + A : Aggregator, + G : SupportGenerator { + + type Result : std::fmt::Debug; + type Sorting : AggregatorSorting; + + /// Determines whether `aggregator` is sufficiently refined within `cube`. + /// Should return a possibly refined version of the `aggregator` and an arbitrary value of + /// the result type of the refiner. + fn refine( + &self, + aggregator : &A, + domain : &Cube, + data : &Vec, + generator : &G, + step : usize, + ) -> RefinerResult; +} + +/// Structure for tracking the refinement process in a [`BinaryHeap`]. 
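// How the pieces below fit together: `RefinementInfo` wraps a node that is
// pushed to or popped from the refinement queue. When a refiner returns
// `NeedRefinement`, the corresponding leaf is subdivided and its children are
// queued; when it returns `Uncertain(agg, r)`, the node is re-queued with the
// refined aggregator `agg` and candidate result `r`, which is only accepted if
// the node resurfaces at the top of the queue; `Certain(r)` terminates the
// search immediately.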
+struct RefinementInfo<'a, F, D, A, S, RResult, const N : usize, const P : usize> +where F : Float, + D : 'static +, + A : Aggregator, + S : AggregatorSorting { + cube : Cube, + node : &'a mut Node, + refiner_info : Option<(A, RResult)>, + sorting : PhantomData, +} + +impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> +RefinementInfo<'a, F, D, A, S, RResult, N, P> +where F : Float, + D : 'static, + A : Aggregator, + S : AggregatorSorting { + + #[inline] + fn aggregator(&self) -> &A { + match self.refiner_info { + Some((ref agg, _)) => agg, + None => &self.node.aggregator, + } + } + + #[inline] + fn sort_lower(&self) -> S::Sort { + S::sort_lower(self.aggregator()) + } + + #[inline] + fn sort_upper(&self) -> S::Sort { + S::sort_upper(self.aggregator()) + } +} + +impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> PartialEq +for RefinementInfo<'a, F, D, A, S, RResult, N, P> +where F : Float, + D : 'static, + A : Aggregator, + S : AggregatorSorting { + + #[inline] + fn eq(&self, other : &Self) -> bool { self.cmp(other) == Equal } +} + +impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> PartialOrd +for RefinementInfo<'a, F, D, A, S, RResult, N, P> +where F : Float, + D : 'static, + A : Aggregator, + S : AggregatorSorting { + + #[inline] + fn partial_cmp(&self, other : &Self) -> Option { Some(self.cmp(other)) } +} + +impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> Eq +for RefinementInfo<'a, F, D, A, S, RResult, N, P> +where F : Float, + D : 'static, + A : Aggregator, + S : AggregatorSorting { +} + +impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> Ord +for RefinementInfo<'a, F, D, A, S, RResult, N, P> +where F : Float, + D : 'static, + A : Aggregator, + S : AggregatorSorting { + + #[inline] + fn cmp(&self, other : &Self) -> Ordering { + let agg1 = self.aggregator(); + let agg2 = other.aggregator(); + match S::sort_upper(agg1).cmp(&S::sort_upper(agg2)) { + Equal => S::sort_lower(agg1).cmp(&S::sort_lower(agg2)), + order => order, + } + } +} + +pub struct HeapContainer<'a, F, D, A, S, RResult, const N : usize, const P : usize> +where F : Float, + D : 'static + Copy, + Const
<P>
: BranchCount, + A : Aggregator, + S : AggregatorSorting { + heap : BinaryHeap>, + glb : S::Sort, + glb_stale_counter : usize, + stale_insert_counter : usize, +} + +impl<'a, F, D, A, S, RResult, const N : usize, const P : usize> +HeapContainer<'a, F, D, A, S, RResult, N, P> +where F : Float, + D : 'static + Copy, + Const
<P>
: BranchCount, + A : Aggregator, + S : AggregatorSorting { + + fn push(&mut self, ri : RefinementInfo<'a, F, D, A, S, RResult, N, P>) { + if ri.sort_upper() >= self.glb { + let l = ri.sort_lower(); + self.heap.push(ri); + self.glb = self.glb.max(l); + if self.glb_stale_counter > 0 { + self.stale_insert_counter += 1; + } + } + } +} + +impl +Branches +where Const
<P>
: BranchCount, + A : Aggregator { + + /// Stage all subnodes of `self` into the refinement queue [`container`]. + fn stage_refine<'a, S, RResult>( + &'a mut self, + domain : Cube, + container : &mut HeapContainer<'a, F, D, A, S, RResult, N, P>, + ) where S : AggregatorSorting { + // Insert all subnodes into the refinement heap. + for (node, subcube) in self.nodes_and_cubes_mut(&domain) { + container.push(RefinementInfo { + cube : subcube, + node : node, + refiner_info : None, + sorting : PhantomData, + }); + } + } +} + + +impl +Node +where Const
<P>
: BranchCount, + A : Aggregator { + + /// If `self` is a leaf node, uses the `refiner` to determine whether further subdivision + /// is required to get a sufficiently refined solution for the problem the refiner is used + /// to solve. If the refiner returns [`RefinerResult::Certain`] result, it is returned. + /// If [`RefinerResult::Uncertain`] is returned, the leaf is inserted back into the refinement + /// queue `container`. If `self` is a branch, its subnodes are staged into `container` using + /// [`Branches::stage_refine`]. + fn search_and_refine<'a, 'b, R, G>( + &'a mut self, + domain : Cube, + refiner : &R, + generator : &G, + container : &'b mut HeapContainer<'a, F, D, A, R::Sorting, R::Result, N, P>, + step : usize + ) -> Option + where R : Refiner, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + + // The “complex” repeated pattern matching here is forced by mutability requirements. + + // Refine a leaf. + let res = if let NodeOption::Leaf(ref v) = &mut self.data { + let res = refiner.refine(&self.aggregator, &domain, v, generator, step); + if let NeedRefinement = res { + // The refiner has deemed the leaf unsufficiently refined, so subdivide + // it and add the new nodes into the refinement priority heap. + // We start iterating from the end to mix support_hint a bit. + let mut it = v.iter().rev(); + if let Some(&d) = it.next() { + // Construct new Branches + let support = generator.support_for(d); + let b = Rc::new({ + let mut b0 = Branches::new_with(&domain, &support); + b0.insert(&domain, d, Const::<1>, &support); + for &d in it { + let support = generator.support_for(d); + // TODO: can we be smarter than just refining one level? + b0.insert(&domain, d, Const::<1>, &support); + } + b0 + }); + // Update current node + self.aggregator.summarise(b.aggregators()); + self.data = NodeOption::Branches(b); + // The branches will be inserted into the refinement priority queue below. + } + } + res + } else { + NeedRefinement + }; + + if let Uncertain(agg, val) = res { + // The refiner gave an undertain result. Push a leaf back into the refinement queue + // with the new refined aggregator and custom return value. It will be popped and + // returned in the loop of [`BT::search_and_refine`] when there are no unrefined + // candidates that could potentially be better according to their basic aggregator. + container.push(RefinementInfo { + cube : domain, + node : self, + refiner_info : Some((agg, val)), + sorting : PhantomData, + }); + None + } else if let Certain(val) = res { + // The refiner gave a certain result so return it to allow early termination + Some(val) + } else if let NodeOption::Branches(ref mut b) = &mut self.data { + // Insert branches into refinement priority queue. + Rc::make_mut(b).stage_refine(domain, container); + None + } else { + None + } + } +} + +/// Helper trait for implementing a refining search on a [`BT`]. +pub trait BTSearch : BTImpl +where F : Float { + + /// Perform a refining search on [`Self`], as determined by `refiner`. Nodes are inserted + /// in a priority queue and processed in the order determined by the [`AggregatorSorting`] + /// [`Refiner::Sorting`]. Leaf nodes are subdivided until the refiner decides that a + /// sufficiently refined leaf node has been found, as determined by either the refiner + /// returning a [`RefinerResult::Certain`] result, or a previous [`RefinerResult::Uncertain`] + /// result is found again at the top of the priority queue. 
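// The `impl_btsearch!` implementations further below additionally prune the
// refinement queue: `HeapContainer` tracks a greatest lower bound `glb` over
// the queued candidates, refuses to push nodes whose upper sorting key falls
// below it, and once enough stale entries have accumulated (more than half the
// heap), recomputes `glb` and drops all candidates that can no longer win.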
+ fn search_and_refine<'b, R, G>( + &'b mut self, + refiner : &R, + generator : &G, + ) -> Option + where R : Refiner, + G : SupportGenerator, + G::SupportType : LocalAnalysis; +} + +// Needed to get access to a Node without a trait interface. +macro_rules! impl_btsearch { + ($($n:literal)*) => { $( + impl<'a, M, F, D, A> + BTSearch + for BT + where //Self : BTImpl, // <== automatically deduce to be implemented + M : Depth, + F : Float, + A : 'a + Aggregator, + D : 'static + Copy + std::fmt::Debug { + fn search_and_refine<'b, R, G>( + &'b mut self, + refiner : &R, + generator : &G, + ) -> Option + where R : Refiner, + G : SupportGenerator, + G::SupportType : LocalAnalysis { + let mut container = HeapContainer { + heap : BinaryHeap::new(), + glb : R::Sorting::bottom(), + glb_stale_counter : 0, + stale_insert_counter : 0, + }; + container.push(RefinementInfo { + cube : self.domain, + node : &mut self.topnode, + refiner_info : None, + sorting : PhantomData, + }); + let mut step = 0; + while let Some(ri) = container.heap.pop() { + if let Some((_, result)) = ri.refiner_info { + // Terminate based on a “best possible” result. + return Some(result) + } + + if ri.sort_lower() >= container.glb { + container.glb_stale_counter += 1; + if container.stale_insert_counter + container.glb_stale_counter + > container.heap.len()/2 { + // GLB propery no longer correct. + match container.heap.iter().map(|ri| ri.sort_lower()).reduce(max) { + Some(glb) => { + container.glb = glb; + container.heap.retain(|ri| ri.sort_upper() >= glb); + }, + None => { + container.glb = R::Sorting::bottom() + } + } + container.glb_stale_counter = 0; + container.stale_insert_counter = 0; + } + } + + let res = ri.node.search_and_refine(ri.cube, refiner, generator, + &mut container, step); + if let Some(_) = res { + // Terminate based on a certain result from the refiner + return res + } + + step += 1; + } + None + } + } + )* } +} + +impl_btsearch!(1 2 3 4); + diff -r 000000000000 -r 9f27689eb130 src/bisection_tree/support.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/bisection_tree/support.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,420 @@ + +use serde::Serialize; +use std::ops::{MulAssign,DivAssign,Neg}; +pub use nalgebra::Const; +use crate::types::{Float,Num}; +use crate::maputil::{array_init,map2}; +use crate::mapping::{Mapping}; +pub use crate::sets::cube::Cube; +pub use crate::loc::Loc; +use super::aggregator::Bounds; +use crate::norms::{Norm, L1, L2, Linfinity}; + +/// A trait for encoding constant values +pub trait Constant : Copy + 'static + std::fmt::Debug + Into { + type Type : Float; + fn value(&self) -> Self::Type; +} + +impl Constant for F { + type Type = F; + #[inline] + fn value(&self) -> F { *self } +} + + +/// A trait for working with the supports of [`Mapping`]s. +/// Mapping is not a super-trait to allow more general use. +pub trait Support : Sized { + /// Return a cube containing the support of the function. + /// It may be larger than the actual support, but must contain it. + fn support_hint(&self) -> Cube; + + /// Indicate whether `x` is in the support of the function. + fn in_support(&self, x : &Loc) -> bool; + + // Indicate whether `cube` is fully in the support of the function. + //fn fully_in_support(&self, cube : &Cube) -> bool; + + /// Return an optional hint for bisecting the support. Useful for nonsmooth + /// functions and finite element approximations to make the bisections compatible + /// with the grid or points of non-differentiability. 
+ #[inline] + fn bisection_hint(&self, _cube : &Cube) -> [Option; N] { + array_init(|| None) + } + + /// Shift the support by `x`. + #[inline] + fn shift(self, x : Loc) -> Shift { + Shift { shift : x, base_fn : self } + } + + /// Weight any corresponding [`Mapping`] + #[inline] + fn weigh>(self, a : C) -> Weighted { + Weighted { weight : a, base_fn : self } + } +} + +/// Trait for globally analysing a certain property of a [`Mapping`]. +pub trait GlobalAnalysis { + fn global_analysis(&self) -> A; +} + +// default impl GlobalAnalysis for L +// where L : LocalAnalysis { +// #[inline] +// fn global_analysis(&self) -> Bounds { +// self.local_analysis(&self.support_hint()) +// } +// } + +/// Trait for locally analysing a certain property `A` of a [`Support`] or [`Mapping`] +/// within a [`Cube`]. +pub trait LocalAnalysis : GlobalAnalysis + Support { + fn local_analysis(&self, cube : &Cube) -> A; +} + +/// Trait for determining the upper and lower bounds of a [`Mapping`] on [`Loc`]. +/// This is a blanket-implemented alias for [`GlobalAnalysis`]`>` +/// [`Mapping`] is not a supertrait to allow flexibility in the implementation of either +/// reference or non-reference arguments. +pub trait Bounded : GlobalAnalysis> { + /// Return lower and upper bounds for the values of of `self`. + #[inline] + fn bounds(&self) -> Bounds { + self.global_analysis() + } +} + +impl>> Bounded for T { } + +/// Shift of [`Support`] and [`Bounded`]. +#[derive(Copy,Clone,Debug,Serialize)] // Serialize! but not implemented by Loc. +pub struct Shift { + shift : Loc, + base_fn : T, +} + +impl<'a, T, V, F : Float, const N : usize> Mapping<&'a Loc> for Shift +where T : for<'b> Mapping<&'b Loc,Codomain=V> { + type Codomain = V; + #[inline] + fn value(&self, x : &'a Loc) -> Self::Codomain { + self.base_fn.value(&(x - &self.shift)) + } +} + +impl<'a, T, V, F : Float, const N : usize> Mapping> for Shift +where T : for<'b> Mapping,Codomain=V> { + type Codomain = V; + #[inline] + fn value(&self, x : Loc) -> Self::Codomain { + self.base_fn.value(x - &self.shift) + } +} + +impl<'a, T, F : Float, const N : usize> Support for Shift +where T : Support { + #[inline] + fn support_hint(&self) -> Cube { + self.base_fn.support_hint().shift(&self.shift) + } + + #[inline] + fn in_support(&self, x : &Loc) -> bool { + self.base_fn.in_support(&(x - &self.shift)) + } + + // fn fully_in_support(&self, _cube : &Cube) -> bool { + // //self.base_fn.fully_in_support(cube.shift(&vectorneg(self.shift))) + // todo!("Not implemented, but not used at the moment") + // } + + #[inline] + fn bisection_hint(&self, cube : &Cube) -> [Option; N] { + let base_hint = self.base_fn.bisection_hint(cube); + map2(base_hint, &self.shift, |h, s| h.map(|z| z + *s)) + } + +} + +impl<'a, T, F : Float, const N : usize> GlobalAnalysis> for Shift +where T : LocalAnalysis, N> { + #[inline] + fn global_analysis(&self) -> Bounds { + self.base_fn.global_analysis() + } +} + +impl<'a, T, F : Float, const N : usize> LocalAnalysis, N> for Shift +where T : LocalAnalysis, N> { + #[inline] + fn local_analysis(&self, cube : &Cube) -> Bounds { + self.base_fn.local_analysis(&cube.shift(&(-self.shift))) + } +} + +macro_rules! impl_shift_norm { + ($($norm:ident)*) => { $( + impl<'a, T, F : Float, const N : usize> Norm for Shift + where T : Norm { + #[inline] + fn norm(&self, n : $norm) -> F { + self.base_fn.norm(n) + } + } + )* } +} + +impl_shift_norm!(L1 L2 Linfinity); + +/// Weighting of [`Support`] and [`Bounded`]. 
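// To summarise the `Shift` combinator above: values are taken from the base
// function at `x - shift`, local analysis translates the query cube by
// `-shift` before delegating, and the norm implementations delegate unchanged,
// since the L¹/L²/L∞ norms are translation invariant. The `Weighted`
// combinator below follows the same delegation pattern, scaling values and
// bounds by the weight instead.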
+#[derive(Copy,Clone,Debug,Serialize)] +pub struct Weighted { + pub weight : C, + pub base_fn : T, +} + +impl<'a, T, V, F : Float, C, const N : usize> Mapping<&'a Loc> for Weighted +where T : for<'b> Mapping<&'b Loc,Codomain=V>, + V : std::ops::Mul, + C : Constant { + type Codomain = V; + #[inline] + fn value(&self, x : &'a Loc) -> Self::Codomain { + self.base_fn.value(x) * self.weight.value() + } +} + +impl<'a, T, V, F : Float, C, const N : usize> Mapping> for Weighted +where T : for<'b> Mapping,Codomain=V>, + V : std::ops::Mul, + C : Constant { + type Codomain = V; + #[inline] + fn value(&self, x : Loc) -> Self::Codomain { + self.base_fn.value(x) * self.weight.value() + } +} + +impl<'a, T, F : Float, C, const N : usize> Support for Weighted +where T : Support, + C : Constant { + + #[inline] + fn support_hint(&self) -> Cube { + self.base_fn.support_hint() + } + + #[inline] + fn in_support(&self, x : &Loc) -> bool { + self.base_fn.in_support(x) + } + + // fn fully_in_support(&self, cube : &Cube) -> bool { + // self.base_fn.fully_in_support(cube) + // } + + #[inline] + fn bisection_hint(&self, cube : &Cube) -> [Option; N] { + self.base_fn.bisection_hint(cube) + } +} + +impl<'a, T, F : Float, C> GlobalAnalysis> for Weighted +where T : GlobalAnalysis>, + C : Constant { + #[inline] + fn global_analysis(&self) -> Bounds { + let Bounds(lower, upper) = self.base_fn.global_analysis(); + match self.weight.value() { + w if w < F::ZERO => Bounds(w * upper, w * lower), + w => Bounds(w * lower, w * upper), + } + } +} + +impl<'a, T, F : Float, C, const N : usize> LocalAnalysis, N> for Weighted +where T : LocalAnalysis, N>, + C : Constant { + #[inline] + fn local_analysis(&self, cube : &Cube) -> Bounds { + let Bounds(lower, upper) = self.base_fn.local_analysis(cube); + match self.weight.value() { + w if w < F::ZERO => Bounds(w * upper, w * lower), + w => Bounds(w * lower, w * upper), + } + } +} + +macro_rules! make_weighted_scalarop_rhs { + ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => { + impl std::ops::$trait_assign for Weighted { + #[inline] + fn $fn_assign(&mut self, t : F) { + self.weight.$fn_assign(t); + } + } + + impl<'a, F : Float, T> std::ops::$trait for Weighted { + type Output = Self; + #[inline] + fn $fn(mut self, t : F) -> Self { + self.weight.$fn_assign(t); + self + } + } + + impl<'a, F : Float, T> std::ops::$trait for &'a Weighted + where T : Clone { + type Output = Weighted; + #[inline] + fn $fn(self, t : F) -> Self::Output { + Weighted { weight : self.weight.$fn(t), base_fn : self.base_fn.clone() } + } + } + } +} + +make_weighted_scalarop_rhs!(Mul, mul, MulAssign, mul_assign); +make_weighted_scalarop_rhs!(Div, div, DivAssign, div_assign); + +macro_rules! impl_weighted_norm { + ($($norm:ident)*) => { $( + impl<'a, T, F : Float> Norm for Weighted + where T : Norm { + #[inline] + fn norm(&self, n : $norm) -> F { + self.base_fn.norm(n) * self.weight.abs() + } + } + )* } +} + +impl_weighted_norm!(L1 L2 Linfinity); + + +/// Normalisation of [`Support`] and [`Bounded`] to L¹ norm 1. +/// Currently only scalar-valued functions are supported. 
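// Note that the bounds analysis for `Weighted` above distinguishes the sign of
// the weight: a negative weight swaps the lower and upper bound so that the
// returned `Bounds` stays correctly ordered, and the norm is scaled by the
// weight's absolute value. `Normalised` below instead divides by the base
// function's L¹ norm, returning zero whenever that norm is zero to avoid a
// division by zero.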
+#[derive(Copy, Clone, Debug, Serialize, PartialEq)] +pub struct Normalised(pub T); + +impl<'a, T, F : Float, const N : usize> Mapping<&'a Loc> for Normalised +where T : Norm + for<'b> Mapping<&'b Loc, Codomain=F> { + type Codomain = F; + #[inline] + fn value(&self, x : &'a Loc) -> Self::Codomain { + let w = self.0.norm(L1); + if w == F::ZERO { F::ZERO } else { self.0.value(x) / w } + } +} + +impl<'a, T, F : Float, const N : usize> Mapping> for Normalised +where T : Norm + for<'b> Mapping, Codomain=F> { + type Codomain = F; + #[inline] + fn value(&self, x : Loc) -> Self::Codomain { + let w = self.0.norm(L1); + if w == F::ZERO { F::ZERO } else { self.0.value(x) / w } + } +} + +impl<'a, T, F : Float, const N : usize> Support for Normalised +where T : Norm + Support { + + #[inline] + fn support_hint(&self) -> Cube { + self.0.support_hint() + } + + #[inline] + fn in_support(&self, x : &Loc) -> bool { + self.0.in_support(x) + } + + // fn fully_in_support(&self, cube : &Cube) -> bool { + // self.0.fully_in_support(cube) + // } + + #[inline] + fn bisection_hint(&self, cube : &Cube) -> [Option; N] { + self.0.bisection_hint(cube) + } +} + +impl<'a, T, F : Float> GlobalAnalysis> for Normalised +where T : Norm + GlobalAnalysis> { + #[inline] + fn global_analysis(&self) -> Bounds { + let Bounds(lower, upper) = self.0.global_analysis(); + let w = self.0.norm(L1); + debug_assert!(w >= F::ZERO); + Bounds(w * lower, w * upper) + } +} + +impl<'a, T, F : Float, const N : usize> LocalAnalysis, N> for Normalised +where T : Norm + LocalAnalysis, N> { + #[inline] + fn local_analysis(&self, cube : &Cube) -> Bounds { + let Bounds(lower, upper) = self.0.local_analysis(cube); + let w = self.0.norm(L1); + debug_assert!(w >= F::ZERO); + Bounds(w * lower, w * upper) + } +} + +impl<'a, T, F : Float> Norm for Normalised +where T : Norm { + #[inline] + fn norm(&self, _ : L1) -> F { + let w = self.0.norm(L1); + if w == F::ZERO { F::ZERO } else { F::ONE } + } +} + +macro_rules! impl_normalised_norm { + ($($norm:ident)*) => { $( + impl<'a, T, F : Float> Norm for Normalised + where T : Norm + Norm { + #[inline] + fn norm(&self, n : $norm) -> F { + let w = self.0.norm(L1); + if w == F::ZERO { F::ZERO } else { self.0.norm(n) / w } + } + } + )* } +} + +impl_normalised_norm!(L2 Linfinity); + +/* +impl, const N : usize> LocalAnalysis for S { + fn local_analysis(&self, _cube : &Cube) -> NullAggregator { NullAggregator } +} + +impl, const N : usize> LocalAnalysis, N> for S { + #[inline] + fn local_analysis(&self, cube : &Cube) -> Bounds { + self.bounds(cube) + } +}*/ + +pub trait SupportGenerator +: MulAssign + DivAssign + Neg { + type Id : 'static + Copy; + type SupportType : 'static + Support; + type AllDataIter<'a> : Iterator where Self : 'a; + + /// Gives the [`Support`] for component function identified by `d`. + fn support_for(&self, d : Self::Id) -> Self::SupportType; + + /// Returns the number of different component functions in this generator. + fn support_count(&self) -> usize; + + /// Iterator over all pairs of (id, support). 
+ fn all_data(&self) -> Self::AllDataIter<'_>; +} + diff -r 000000000000 -r 9f27689eb130 src/bisection_tree/supportid.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/bisection_tree/supportid.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,39 @@ +use std::marker::PhantomData; + +#[derive(Debug)] +pub struct SupportId { + pub id : usize, + _phantom : PhantomData +} + +// derive fails so need to do Copy and Clone manually + +impl Clone for SupportId { + fn clone(&self) -> Self { + SupportId { id : self.id, _phantom : PhantomData } + } +} + +impl Copy for SupportId {} + +impl From for SupportId { + #[inline] + fn from(id : u32) -> SupportId { + SupportId { id : id as usize, _phantom : PhantomData } + } +} + +impl From for SupportId { + #[inline] + fn from(id : usize) -> SupportId { + SupportId { id : id, _phantom : PhantomData } + } +} + +impl Into for SupportId { + #[inline] + fn into(self) -> usize { + self.id + } +} + diff -r 000000000000 -r 9f27689eb130 src/coefficients.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/coefficients.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,26 @@ +///! This module implements some arithmetic functions that are valid in a ‘const context”, i.e., +///! can be computed at compile-time. + +/// Calculate $n!$. +/// (The version in the crate `num_integer` deesn't currently work in a `const` context. +pub const fn factorial(n : usize) -> usize { + const fn f(i : usize, a : usize) -> usize { + if i==0 { a } else { f(i-1, i*a) } + } + f(n, 1) +} + +/// Calculate $\nchoosek{n}{k}$. +/// (The version in the crate `num_integer` deesn't currently work in a `const` context. +pub const fn binomial(n : usize, k : usize) -> usize { + factorial(n)/(factorial(n-k)*factorial(k)) +} + +/// Calculate $n^k$. +/// (The version in the crate `num_integer` deesn't currently work in a `const` context. +pub const fn pow(n : usize, k : usize) -> usize { + const fn pow_accum(n : usize, k : usize, accum : usize) -> usize { + if k==0 { accum } else { pow_accum(n, k-1, accum*n) } + } + pow_accum(n, k, 1) +} diff -r 000000000000 -r 9f27689eb130 src/error.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/error.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,8 @@ + +use std::error::Error; + +pub type DynResult = Result>; +pub type DynError = DynResult<()>; + + + diff -r 000000000000 -r 9f27689eb130 src/fe_model.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/fe_model.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,4 @@ +/// Simple planar (1D and 2D) finite element discretisation on a box. + +pub mod base; +pub mod p2_local_model; diff -r 000000000000 -r 9f27689eb130 src/fe_model/base.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/fe_model/base.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,16 @@ +///! Base types for simple (finite) element models. + +/// A local model can be evaluated for value and differential +pub trait LocalModel { + /// Get the value of the model at `x` + fn value(&self, x : &Domain) -> Codomain; + /// Get the differential of the model at `x` + fn differential(&self, x : &Domain) -> Domain; +} + +/// A real local model is a minimisable [`LocalModel`]. +pub trait RealLocalModel : LocalModel { + /// Find a (minimum, minimiser) pair for he model within `el`, which is + /// typically a simplex subset of `Domain`. 
+ fn minimise(&self, el : &S) -> (Domain, Codomain); +} diff -r 000000000000 -r 9f27689eb130 src/fe_model/p2_local_model.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/fe_model/p2_local_model.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,446 @@ +use crate::types::*; +use crate::loc::Loc; +use crate::sets::{Set,NPolygon,SpannedHalfspace}; +use crate::linsolve::*; +use crate::norms::Dot; +use super::base::{LocalModel,RealLocalModel}; +use crate::sets::Cube; +use numeric_literals::replace_float_literals; + +pub struct Simplex(pub [Loc; D]); +pub type PlanarSimplex = Simplex; +pub type RealInterval = Simplex; + +#[inline] +#[replace_float_literals(F::cast_from(literal))] +pub(crate) fn midpoint(a : &Loc, b : &Loc) -> Loc { + (a+b)/2.0 +} + +impl<'a, F : Float> Set> for RealInterval { + #[inline] + fn contains(&self, &Loc([x]) : &Loc) -> bool { + let &[Loc([x0]), Loc([x1])] = &self.0; + (x0 < x && x < x1) || (x1 < x && x < x0) + } +} + +impl<'a, F : Float> Set> for PlanarSimplex { + #[inline] + fn contains(&self, x : &Loc) -> bool { + let &[x0, x1, x2] = &self.0; + NPolygon([[x0, x1].spanned_halfspace(), + [x1, x2].spanned_halfspace(), + [x2, x0].spanned_halfspace()]).contains(x) + } +} + +trait P2Powers { + type Output; + type Diff; + type Full; + fn p2powers(&self) -> Self::Output; + fn p2powers_full(&self) -> Self::Full; + fn p2powers_diff(&self) -> Self::Diff; +} + +#[replace_float_literals(F::cast_from(literal))] +impl P2Powers for Loc { + type Output = Loc; + type Full = Loc; + type Diff = Loc, 1>; + + #[inline] + fn p2powers(&self) -> Self::Output { + let &Loc([x0]) = self; + [x0*x0].into() + } + + #[inline] + fn p2powers_full(&self) -> Self::Full { + let &Loc([x0]) = self; + [1.0, x0, x0*x0].into() + } + + #[inline] + fn p2powers_diff(&self) -> Self::Diff { + let &Loc([x0]) = self; + [[x0+x0].into()].into() + } +} + +#[replace_float_literals(F::cast_from(literal))] +impl P2Powers for Loc { + type Output = Loc; + type Full = Loc; + type Diff = Loc, 2>; + + #[inline] + fn p2powers(&self) -> Self::Output { + let &Loc([x0, x1]) = self; + [x0*x0, x0*x1, x1*x1].into() + } + + #[inline] + fn p2powers_full(&self) -> Self::Full { + let &Loc([x0, x1]) = self; + [1.0, x0, x1, x0*x0, x0*x1, x1*x1].into() + } + + #[inline] + fn p2powers_diff(&self) -> Self::Diff { + let &Loc([x0, x1]) = self; + [[x0+x0, x1, 0.0].into(), [0.0, x0, x1+x1].into()].into() + } +} + +pub trait P2Model { + type Model : LocalModel,F>; + fn p2_model) -> F>(&self, g : G) -> Self::Model; +} + +pub struct P2LocalModel { + a0 : F, + a1 : Loc, + a2 : Loc, + //node_values : Loc, + //edge_values : Loc, +} + +// +// 1D planar model construction +// + +impl RealInterval { + #[inline] + fn midpoints(&self) -> [Loc; 1] { + let [ref n0, ref n1] = &self.0; + let n01 = midpoint(n0, n1); + [n01] + } +} + +impl P2LocalModel { + #[inline] + pub fn new( + &[n0, n1, n01] : &[Loc; 3], + &[v0, v1, v01] : &[F; 3], + ) -> Self { + let p = move |x : &Loc, v : F| { + let Loc([c, d, e]) = x.p2powers_full(); + [c, d, e, v] + }; + let [a0, a1, a11] = linsolve([ + p(&n0, v0), + p(&n1, v1), + p(&n01, v01) + ]); + P2LocalModel { + a0 : a0, + a1 : [a1].into(), + a2 : [a11].into(), + //node_values : [v0, v1].into(), + //edge_values: [].into(), + } + } +} + +impl P2Model for RealInterval { + type Model = P2LocalModel; + + #[inline] + fn p2_model) -> F>(&self, g : G) -> Self::Model { + let [n01] = self.midpoints(); + let [n0, n1] = self.0; + let vals = [g(&n0), g(&n1), g(&n01)]; + let nodes = [n0, n1, n01]; + Self::Model::new(&nodes, &vals) + } +} + 
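// For intuition, the construction above is just a 3×3 linear solve: every row
// passed to `linsolve` has the form [1, x, x², v], so the unknowns are the
// coefficients of a0 + a1·x + a11·x². The following standalone sketch (plain
// f64 on the reference interval [0, 1] with midpoint ½, rather than the
// crate's generic `Loc`/`Float` types) performs the same elimination by hand.
fn fit_quadratic_unit_interval(v0 : f64, v1 : f64, v01 : f64) -> (f64, f64, f64) {
    // Solve: a0 = v0
    //        a0 + a1 + a11 = v1
    //        a0 + a1/2 + a11/4 = v01
    let a0 = v0;
    let a11 = 2.0 * (v0 + v1) - 4.0 * v01;
    let a1 = (v1 - v0) - a11;
    (a0, a1, a11)
}
// With f(x) = x² + x + 1 (the function used in `p2_model_1d_test` further
// below) the samples are (1.0, 3.0, 1.75) and this recovers (1.0, 1.0, 1.0).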
+// +// 2D planar model construction +// + +impl PlanarSimplex { + #[inline] + fn midpoints(&self) -> [Loc; 3] { + let [ref n0, ref n1, ref n2] = &self.0; + let n01 = midpoint(n0, n1); + let n12 = midpoint(n1, n2); + let n20 = midpoint(n2, n0); + [n01, n12, n20] + } +} + +impl P2LocalModel { + #[inline] + pub fn new( + &[n0, n1, n2, n01, n12, n20] : &[Loc; 6], + &[v0, v1, v2, v01, v12, v20] : &[F; 6], + ) -> Self { + let p = move |x : &Loc, v :F| { + let Loc([c, d, e, f, g, h]) = x.p2powers_full(); + [c, d, e, f, g, h, v] + }; + let [a0, a1, a2, a11, a12, a22] = linsolve([ + p(&n0, v0), + p(&n1, v1), + p(&n2, v2), + p(&n01, v01), + p(&n12, v12), + p(&n20, v20), + ]); + P2LocalModel { + a0 : a0, + a1 : [a1, a2].into(), + a2 : [a11, a12, a22].into(), + //node_values : [v0, v1, v2].into(), + //edge_values: [v01, v12, v20].into(), + } + } +} + +impl P2Model for PlanarSimplex { + type Model = P2LocalModel; + + #[inline] + fn p2_model) -> F>(&self, g : G) -> Self::Model { + let midpoints = self.midpoints(); + let [ref n0, ref n1, ref n2] = self.0; + let [ref n01, ref n12, ref n20] = midpoints; + let vals = [g(n0), g(n1), g(n2), g(n01), g(n12), g(n20)]; + let nodes = [*n0, *n1, *n2, *n01, *n12, *n20]; + Self::Model::new(&nodes, &vals) + } +} + +macro_rules! impl_local_model { + ($n:literal, $e:literal, $v:literal, $q:literal) => { + impl LocalModel, F> for P2LocalModel { + #[inline] + fn value(&self, x : &Loc) -> F { + self.a0 + x.dot(&self.a1) + x.p2powers().dot(&self.a2) + } + + #[inline] + fn differential(&self, x : &Loc) -> Loc { + self.a1 + x.p2powers_diff().map(|di| di.dot(&self.a2)) + } + } + } +} + +impl_local_model!(1, 1, 2, 0); +impl_local_model!(2, 3, 3, 3); + + +// +// Minimisation +// + +#[replace_float_literals(F::cast_from(literal))] +impl P2LocalModel { + #[inline] + fn minimise_edge(&self, x0 : Loc, x1 : Loc) -> (Loc, F) { + let &P2LocalModel{ + a1 : Loc([a1]), + a2 : Loc([a11]), + //node_values : Loc([v0, v1]), + .. + } = self; + // We do this in cases, first trying for an interior solution, then edges. + // For interior solution, first check determinant; no point trying if non-positive + if a11 > 0.0 { + // An interior solution x[1] has to satisfy + // 2a₁₁*x[1] + a₁ =0 + // This gives + let t = -a1/(2.0*a11); + let (Loc([t0]), Loc([t1])) = (x0, x1); + if (t0 <= t && t <= t1) || (t1 <= t && t <= t0) { + let x = [t].into(); + let v = self.value(&x); + return (x, v) + } + } + + let v0 = self.value(&x0); + let v1 = self.value(&x1); + if v0 < v1 { (x0, v0) } else { (x1, v1) } + } +} + +impl<'a, F : Float> RealLocalModel,Loc,F> +for P2LocalModel { + #[inline] + fn minimise(&self, &Simplex([x0, x1]) : &RealInterval) -> (Loc, F) { + self.minimise_edge(x0, x1) + } +} + +#[replace_float_literals(F::cast_from(literal))] +impl P2LocalModel { + /// Minimise the 2D model on the edge {x0 + t(x1 - x0) | t ∈ [0, 1] } + #[inline] + fn minimise_edge(&self, x0 : &Loc, x1 : &Loc/*, v0 : F, v1 : F*/) -> (Loc, F) { + let &P2LocalModel { + a0, + a1 : Loc([a1, a2]), + a2 : Loc([a11, a12, a22]), + .. 
+ } = self; + let &Loc([x00, x01]) = x0; + let d@Loc([d0, d1]) = x1 - x0; + let b0 = a0 + a1*x00 + a2*x01 + a11*x00*x00 + a12*x00*x01 + a22*x01*x01; + let b1 = a1*d0 + a2*d1 + 2.0*a11*d0*x00 + a12*(d0*x01 + d1*x00) + 2.0*a22*d1*x01; + let b11 = a11*d0*d0 + a12*d0*d1 + a22*d1*d1; + let edge_1d_model = P2LocalModel { + a0 : b0, + a1 : Loc([b1]), + a2 : Loc([b11]), + //node_values : Loc([v0, v1]), + }; + let (Loc([t]), v) = edge_1d_model.minimise_edge(0.0.into(), 1.0.into()); + (x0 + d*t, v) + } +} + +#[replace_float_literals(F::cast_from(literal))] +impl<'a, F : Float> RealLocalModel,Loc,F> +for P2LocalModel { + #[inline] + fn minimise(&self, el : &PlanarSimplex) -> (Loc, F) { + let &P2LocalModel { + a1 : Loc([a1, a2]), + a2 : Loc([a11, a12, a22]), + //node_values : Loc([v0, v1, v2]), + .. + } = self; + + // We do this in cases, first trying for an interior solution, then edges. + // For interior solution, first check determinant; no point trying if non-positive + let r = 2.0*(a11*a22-a12*a12); + if r > 0.0 { + // An interior solution (x[1], x[2]) has to satisfy + // 2a₁₁*x[1] + 2a₁₂*x[2]+a₁ =0 and 2a₂₂*x[1] + 2a₁₂*x[1]+a₂=0 + // This gives + let x = [(a22*a1-a12*a2)/r, (a12*a1-a11*a2)/r].into(); + if el.contains(&x) { + return (x, self.value(&x)) + } + } + + let &[ref x0, ref x1, ref x2] = &el.0; + let mut min_edge = self.minimise_edge(x0, x1); + let more_edge = [self.minimise_edge(x1, x2), + self.minimise_edge(x2, x0)]; + + for edge in more_edge { + if edge.1 < min_edge.1 { + min_edge = edge; + } + } + + min_edge + } +} + +#[replace_float_literals(F::cast_from(literal))] +impl<'a, F : Float> RealLocalModel,Loc,F> +for P2LocalModel { + #[inline] + fn minimise(&self, el : &Cube) -> (Loc, F) { + let &P2LocalModel { + a1 : Loc([a1, a2]), + a2 : Loc([a11, a12, a22]), + //node_values : Loc([v0, v1, v2]), + .. + } = self; + + // We do this in cases, first trying for an interior solution, then edges. + // For interior solution, first check determinant; no point trying if non-positive + let r = 2.0*(a11*a22-a12*a12); + if r > 0.0 { + // An interior solution (x[1], x[2]) has to satisfy + // 2a₁₁*x[1] + 2a₁₂*x[2]+a₁ =0 and 2a₂₂*x[1] + 2a₁₂*x[1]+a₂=0 + // This gives + let x = [(a22*a1-a12*a2)/r, (a12*a1-a11*a2)/r].into(); + if el.contains(&x) { + return (x, self.value(&x)) + } + } + + let [x0, x1, x2, x3] = el.corners(); + let mut min_edge = self.minimise_edge(&x0, &x1); + let more_edge = [self.minimise_edge(&x1, &x2), + self.minimise_edge(&x2, &x3), + self.minimise_edge(&x3, &x0)]; + + + for edge in more_edge { + if edge.1 < min_edge.1 { + min_edge = edge; + } + } + + min_edge + } +} + +// impl P2Model for Cube { +// type Model = CubeP2LocalModel; + +// fn p2_model) -> F>(&self, fun : &G) -> Self::Model { +// CubeP2LocalModel([Simplex(self.corners()).p2_model(fun)]) +// } +// } + +// impl P2Model for Cube { +// type Model = CubeP2LocalModel; + +// fn p2_model) -> F>(&self, fun : &G) -> Self::Model { +// let [a, b, c, d] = self.corners(); +// CubeP2LocalModel([Simplex([a, b, c]).p2_model(fun), +// Simplex([b, c, d]).p2_model(fun)]) +// } +// } + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn p2_model_1d_test() { + let vertices = [Loc([0.0]), Loc([1.0])]; + let domain = Simplex(vertices); + // A simple quadratic function for which the approximation is exact on reals, + // and appears exact on f64 as well. 
+ let f = |&Loc([x]) : &Loc| x*x + x + 1.0; + let model = domain.p2_model(f); + let xs = [Loc([0.5]), Loc([0.25])]; + + for x in vertices.iter().chain(xs.iter()) { + assert_eq!(model.value(&x), f(&x)); + } + + assert_eq!(model.minimise(&domain), (Loc([0.0]), 1.0)); + } + + #[test] + fn p2_model_2d_test() { + let vertices = [Loc([0.0, 0.0]), Loc([1.0, 0.0]), Loc([1.0, 1.0])]; + let domain = Simplex(vertices); + // A simple quadratic function for which the approximation is exact on reals, + // and appears exact on f64 as well. + let f = |&Loc([x, y]) : &Loc| x*x + x*y + x - y + 1.0; + let model = domain.p2_model(f); + let xs = [Loc([0.5, 0.5]), Loc([0.25, 0.25])]; + + for x in vertices.iter().chain(xs.iter()) { + assert_eq!(model.value(&x), f(&x)); + } + + assert_eq!(model.minimise(&domain), (Loc([1.0, 1.0]), 3.0)); + } +} diff -r 000000000000 -r 9f27689eb130 src/iter.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/iter.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,473 @@ +/// Iteration utilities; [stateful][StatefulIterator] and +/// [restartable][RestartableIterator] iterators. + +use crate::types::Integer; +use std::marker::PhantomData; + +/// Trait for iterators that can be queried the current state without advancing. +pub trait StatefulIterator : Iterator { + /// Return current value of iterator. If the iterator has not yet started, + /// `None` should be returned. + fn current(&self) -> Option; +} + +/// Iterators that can be restarted and queried the current state without +/// advancing to next state. +pub trait RestartableIterator : StatefulIterator { + /// Restart the iterator. Return the first item. + fn restart(&mut self) -> Option; +} + +/// A variant of `std::iter::Map` that works for methods instead of closures, +/// so can be returned from functions without `dyn`. +/// Use [`Mappable::mapF`] to create this iterator. +pub struct MapF where I : Iterator { + iter : I, + func : fn(I::Item) -> J, +} + +impl Iterator for MapF where I : Iterator { + type Item = J; + fn next(&mut self) -> Option { + self.iter.next().map(|item| (self.func)(item)) + } +} + +/// A variant of [`MapF`] with extra data for the function stored as a reference. +/// See also [`MapZ`] and use [`Mappable::mapX`] to create this iteartor. +pub struct MapX<'a,I,T,J> where I : Iterator { + iter : I, + data : &'a T, + func : fn(&'a T, I::Item) -> J, +} + +impl<'a,I,T,J> Iterator for MapX<'a,I,T,J> where I : Iterator { + type Item = J; + fn next(&mut self) -> Option { + self.iter.next().map(|item| (self.func)(self.data, item)) + } +} + +/// A variant of [`MapF`] and [`MapX`] with extra data for the function stored in the struct. +/// Use [`Mappable::mapZ`] to create this iterator. +pub struct MapZ where I : Iterator { + iter : I, + data : T, + func : fn(&T, I::Item) -> J, +} + +impl<'a,I,T,J> Iterator for MapZ where I : Iterator { + type Item = J; + fn next(&mut self) -> Option { + self.iter.next().map(|item| (self.func)(&self.data, item)) + } +} + +/// A variant of `std::iter::FilterMap` that works for methods instead of closures, +/// so can be returned from functions without `dyn`. +pub struct FilterMapX<'a,I,T,J> where I : Iterator { + iter : I, + data : &'a T, + func : fn(&'a T, I::Item) -> Option, +} + +impl<'a,I,T,J> Iterator for FilterMapX<'a,I,T,J> where I : Iterator { + type Item = J; + fn next(&mut self) -> Option { + while let Some(item) = self.iter.next() { + let v = (self.func)(self.data, item); + if v.is_some() { + return v + } + } + None + } +} + +/// Helper for [`Mappable::filter_zip`]. 
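// A minimal usage sketch of the map adapters defined above (hypothetical
// helper, assuming the `Mappable` trait from this module is in scope). Because
// `mapF` stores a plain `fn` pointer rather than a closure, the concrete
// iterator type it returns can be named in signatures and associated types;
// this is how the bisection tree spells out its `AllDataIter` iterators
// without `dyn` or boxing.
fn mapf_example() -> Vec<usize> {
    fn add_one(x : usize) -> usize { x + 1 }
    (0usize..3).mapF(add_one).collect() // yields [1, 2, 3]
}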
+pub struct FilterZip +where I : Iterator, + J : Iterator { + iter : I, + filter : J, +} + +impl Iterator for FilterZip +where I : Iterator, + J : Iterator { + type Item = I::Item; + #[inline] + fn next(&mut self) -> Option { + while let (v@Some(..), Some(filt)) = (self.iter.next(), self.filter.next()) { + if filt { + return v + } + } + None + } +} + +/// This trait implements several mapping methods on iterators. +pub trait Mappable : Iterator + Sized { + /// Apply `func` to the output of the iterator. + #[allow(non_snake_case)] + fn mapF(self, func : fn(Self::Item) -> J) -> MapF; + /// Apply `func` to the output of the iterator, passing also `d` to it. + #[allow(non_snake_case)] + fn mapX<'a,T,J>(self, d : &'a T, func : fn(&'a T, Self::Item) -> J) -> MapX<'a,Self,T,J>; + /// Apply `func` to the output of the iterator, passing also `d` to it. + #[allow(non_snake_case)] + fn mapZ(self, d : T, func : fn(&T, Self::Item) -> J) -> MapZ; + /// Apply `func` to the output of the iterator, filtering based on the result. + /// The data `d` is passed to `func`. + #[allow(non_snake_case)] + fn filter_mapX<'a,T,J>(self, d : &'a T, func : fn(&'a T, Self::Item) -> Option) -> FilterMapX<'a,Self,T,J>; + + /// Filter `self` based on another iterator `iter` with `bool` items. + fn filter_zip(self, filter : J) -> FilterZip where J : Iterator; +} + +impl Mappable for I where I : Iterator + Sized { + #[inline] + fn mapF(self, func : fn(Self::Item) -> J) -> MapF { + MapF{ iter : self, func : func } + } + #[inline] + fn mapX<'a,T,J>(self, d : &'a T, func : fn(&'a T, Self::Item) -> J) -> MapX<'a,Self,T,J> { + MapX{ iter : self, data : d, func : func } + } + #[inline] + fn mapZ(self, d : T, func : fn(&T, Self::Item) -> J) -> MapZ { + MapZ{ iter : self, data : d, func : func } + } + #[inline] + fn filter_mapX<'a,T,J>(self, d : &'a T, func : fn(&'a T, Self::Item) -> Option) + -> FilterMapX<'a,Self,T,J> { + FilterMapX{ iter : self, data : d, func : func } + } + + #[inline] + fn filter_zip(self, filter : J) -> FilterZip + where J : Iterator { + FilterZip{ iter : self, filter : filter } + } +} + +/* +/// This is a helper struct returning iterators with a lifetime from trait functions. +/// `T` will typically be `&'a Self` or some other reference that can only be +/// specified in the trait function declaration without having to parametrise the entire +/// trait with `'a`. The parameter `ImplData` is supposed to be a unique type that idenfiers +/// the iterator for the implementation of the trait. Then `Iterator` is to be implemented +/// for `ProxyIterator`. For example: +/// TODO: not updated. +/// ``` +/// trait Test { +/// type ImplData; +/// fn returns_iterator<'a>(&'a self) -> ProxyIterator<&'a Self, Self::ImplData>; +/// } +/// struct MyIndex{ +/// index : usize, +/// } +/// impl Test for [f64; N] { +/// type ImplData = MyIndex; +/// fn returns_iterator<'a>(&'a self) -> ProxyIterator<&'a Self, MyIndex> { +/// TraitIterator::new(self, MyIndex{ index : 0 }) +/// } +/// } +/// impl<'a, const N : usize> Iterator for `TraitIterator<&'a [f64; N], MyIndex> { +/// type Item = f64; +/// fn next(&mut self) -> Option { +/// if self.state.index < N { +/// let v = self.data[self.state.index]; +/// self.state.index += 1; +/// Some(v) +/// } else { +/// None +/// } +/// } +/// } +/// ``` +*/ + +macro_rules! impl_trait_iterator { + ($name:ident, $proxyname:ident, &'a $($mut:ident)?, $($itemref:tt)*) => { + pub trait $name { + type Item : 'static; + type Data : ?Sized; + fn next_with<'a>(&mut self, data : &'a $($mut)? 
Self::Data) -> Option<$($itemref)* Self::Item>; + } + + pub struct $proxyname<'a, Impl> + where Impl : $name { + pub data : &'a $($mut)? Impl::Data, + pub iter : Impl, + } + + impl<'a, Impl> $proxyname<'a, Impl> + where Impl : $name { + #[inline] + pub fn new(data : &'a $($mut)? Impl::Data, iter : Impl) -> $proxyname<'a, Impl> { + $proxyname{ data : data, iter : iter} + } + } + + impl<'a, Impl> Iterator for $proxyname<'a, Impl> + where Impl : $name { + type Item = $($itemref)* Impl::Item; + #[inline] + fn next(&mut self) -> Option { + self.iter.next_with(self.data) + } + } + } +} + +impl_trait_iterator!(TraitIterator, ProxyIterator, &'a, ); +impl_trait_iterator!(TraitIteratorRef, ProxyIteratorRef, &'a, &'a); +// TODO: Lifetime errors. Probably due to potential overlapping mutable borrows. +// So should use some unsafe pointer hacks or so. +//impl_trait_iterator!(TraitIteratorMut, ProxyIteratorMut, &'a mut, &'a mut); + +/* +pub struct FlattenTI +where SI : TraitIterator, + SI::Item : Iterator { + iter_iter : SI, + element_iter : Option, +} + +impl FlattenTI +where SI : TraitIterator, + SI::Item : Iterator { + fn new(iter : SI) -> Self { + FlattenTI{ iter_iter : iter, element_iter : None } + } +} + +impl TraitIterator for FlattenTI +where SI::Item : Iterator { + type Item = ::Item; + type Data = SI::Data; + + fn next_with<'a>(&mut self, data : &'a $($mut)? SI::Data) -> Option { + loop { + if let Some(ei) = self.element_iter { + if let n @ Some(_) = ei.next() { + return n + } + } else { + self.element_iter = self.iter_iter.next_with(data); + if let None = self.element_iter { + return None + } + } + } + } +} +*/ + +/* +pub enum EitherIterator, R : Iterator> { + Left(L), + Right(R) +} + +impl, R : Iterator> Iterator for EitherIterator { + type Item = T; + #[inline] + fn next(&mut self) -> Option { + match self { + EitherIterator::Left(ref l) => { l.next() } + EitherIterator::Right(ref r) => { r.next() } + } + } +} +*/ + +/// A [`RestartableIterator`] over the range ´[start, end)`. +#[derive(Clone,Copy,Debug)] +pub struct RangeIter { + start : T, + end : T, + value : Option, +} + +pub trait StepNext : Ord + Copy { + fn step_next(&self) -> Self; + fn dist_to(&self, other : &Self) -> usize; +} + +impl> StepNext for T { + #[inline] + fn step_next(&self) -> Self { *self + Self::ONE } + + #[inline] + fn dist_to(&self, other : &Self) -> usize { + if *other >= *self { + (*other - *self).into() + } else { + 0 + } + } +} + +impl RangeIter { + /// Create a new [`RangeIter`]. 
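// Note that `new` clamps `start` to `start.min(end)`, so a reversed range is
// treated as empty, and `value : None` encodes "not started yet", which is
// what `current()` reports before the first call to `next()`.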
+ pub fn new(start : T, end : T) -> Self { + RangeIter{ start : start.min(end), end : end, value : None } + } +} + +impl Iterator for RangeIter { + type Item = T; + + #[inline] + fn next(&mut self) -> Option { + match self.value { + None => { + if self.start < self.end { + self.value = Some(self.start); + self.value + } else { + None + } + } + Some(v) => { + let vn = v.step_next(); + if vn < self.end { + self.value = Some(vn); + self.value + } else { + None + } + } + } + } +} + +impl ExactSizeIterator for RangeIter { + #[inline] + fn len(&self) -> usize { self.start.dist_to(&self.end) } +} + +impl StatefulIterator for RangeIter { + #[inline] + fn current(&self) -> Option { + self.value + } +} + +impl RestartableIterator for RangeIter { + fn restart(&mut self) -> Option { + if self.start < self.end { + self.value = Some(self.start); + self.value + } else { + None + } + } +} + + +pub struct RestartableIter<'a, T> { + inner : RangeIter<*const T>, + _phantom : PhantomData<&'a[T]> +} + +impl StepNext for *const T { + #[inline] + fn step_next(&self) -> Self { unsafe { self.add(1) } } + + #[inline] + fn dist_to(&self, other : &Self) -> usize { + unsafe { other.offset_from(*self).max(0) as usize } + } +} + +impl<'a, T : 'a> RestartableIter<'a, T> { + fn map_result(result : Option<*const T>) -> Option<&'a T> { + match result { + None => { None } + Some(ptr) => { Some(unsafe{ &*ptr }) } + } + } + + pub fn new(slice : &'a [T]) -> Self { + let ptr_range = slice.as_ptr_range(); + let inner = RangeIter::new(ptr_range.start, ptr_range.end); + RestartableIter{ inner : inner, _phantom : PhantomData } + } +} + +impl<'a, T : 'a> Iterator for RestartableIter<'a, T> { + type Item = &'a T; + #[inline] + fn next(&mut self) -> Option<&'a T> { + Self::map_result(self.inner.next()) + } +} + +impl<'a, T : 'a> ExactSizeIterator for RestartableIter<'a, T> { + #[inline] + fn len(&self) -> usize { self.inner.len() } +} + +impl<'a, T : 'a> StatefulIterator for RestartableIter<'a, T> { + #[inline] + fn current(&self) -> Option { + Self::map_result(self.inner.current()) + } +} + +impl<'a, T : 'a> RestartableIterator for RestartableIter<'a, T> { + fn restart(&mut self) -> Option { + Self::map_result(self.inner.current()) + } +} + +pub struct RestartableCloned { + inner : I, +} + +impl<'a, T : Clone + 'a , I : Iterator> RestartableCloned { + fn map_result(result : Option) -> Option { + match result { + None => { None } + Some(v) => { Some(v.clone()) } + } + } + + pub fn new(inner : I) -> Self { + RestartableCloned{ inner : inner } + } +} + +impl<'a, T : Clone + 'a , I : Iterator> Iterator +for RestartableCloned { + type Item = T; + fn next(&mut self) -> Option { + Self::map_result(self.inner.next()) + } +} + +impl<'a, T : Clone + 'a , I : ExactSizeIterator> ExactSizeIterator +for RestartableCloned { + fn len(&self) -> usize { + self.inner.len() + } +} + +impl<'a, T : Clone + 'a , I : StatefulIterator> StatefulIterator +for RestartableCloned { + fn current(&self) -> Option { + Self::map_result(self.inner.current()) + } +} + +impl<'a, T : Clone + 'a , I : RestartableIterator> RestartableIterator +for RestartableCloned { + fn restart(&mut self) -> Option { + Self::map_result(self.inner.restart()) + } +} \ No newline at end of file diff -r 000000000000 -r 9f27689eb130 src/iterate.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/iterate.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,916 @@ +/*! 
+ +This module provides tools for separating the computational steps of iterative +algorithms from visualisation and other reporting routines. The approach is heavily +indebted to functional programming. The computational step is to be implemented as a +closure. That closure gets passed a `state` parameter that implements an +[`if_verbose`][AlgIteratorState::if_verbose] method that is to be called to determine +whether function values or other potentially costly things need to be calculated on that +iteration. The parameter of [`if_verbose`][AlgIteratorState::if_verbose] is another +closure that does the necessary computation. + +## Simple example + +```rust +use alg_tools::types::*; +use alg_tools::iterate::*; +let mut iter = AlgIteratorOptions{ max_iter : 100, + verbose_iter : Verbose::Every(10), + .. Default::default() }; +let mut x = 1 as float; +iter.iterate(|state|{ + // This is our computational step + x = x + x.sqrt(); + state.if_verbose(||{ + // return current value when requested + return x + }) +}) +``` +There is no colon after `state.if_verbose`, because we need to return its value. If you do something after the step, you need to store the result in a variable and return it from the main computational step afterwards. + +This will print out +```output +10/100 J=31.051164 +20/100 J=108.493699 +30/100 J=234.690039 +40/100 J=410.056327 +50/100 J=634.799262 +60/100 J=909.042928 +70/100 J=1232.870172 +80/100 J=1606.340254 +90/100 J=2029.497673 +100/100 J=2502.377071 +``` +*/ + +use colored::{Colorize,ColoredString}; +//use num_traits::Num; +use core::fmt::Debug; +//use core::fmt::Display; +//use std::sync::mpsc::Receiver; +use serde::{Serialize, Deserialize}; +use cpu_time::ProcessTime; +use std::marker::PhantomData; +use std::time::Duration; +use crate::types::*; +use crate::logger::*; + +/// Create the displayed presentation for log items. Override for nice (condensed) presentation of rich log items, or define `show` if it is not defined for your type. +pub trait LogRepr : Debug { + fn logrepr(&self) -> ColoredString { format!("« {:?} »", self).as_str().into() } +} + +impl LogRepr for str { + fn logrepr(&self) -> ColoredString { self.into() } +} + +impl LogRepr for String { + fn logrepr(&self) -> ColoredString { self.as_str().into() } +} + +impl LogRepr for T where T : Num { + fn logrepr(&self) -> ColoredString { format!("J={}", self).as_str().into() } +} + +impl LogRepr for Option where V : LogRepr { + fn logrepr(&self) -> ColoredString { + match self { + None => { "===missing value===".red() } + Some(v) => { v.logrepr() } + } + } +} + +#[derive(Debug,Clone)] +pub struct Annotated(pub F, pub String); + +impl LogRepr for Annotated where V : LogRepr { + fn logrepr(&self) -> ColoredString { + format!("{}\t| {}", self.0.logrepr(), self.1).as_str().into() + } +} + + +/// Basic log item. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] +pub struct LogItem { + pub iter : usize, + // This causes [`csv`] to crash. 
+ //#[serde(flatten)] + pub data : V +} + +fn make_logitem(iter : usize, data : V) -> LogItem { + LogItem{ iter, data } +} + +/// State of an [`AlgIterator`] +pub trait AlgIteratorState { + fn if_verbose(&self, calc_objective : impl FnMut() -> A) -> Option; + + fn iteration(&self) -> usize; +} + +/// Result of a step of an [`AlgIterator`] +#[derive(Debug, Serialize)] +pub enum Step { + /// Iteration should be terminated + Terminated, + /// No result this iteration (due to verbosity settings) + Quiet, + /// Result of this iteration (due to verbosity settings) + Result(V), +} + +/// An iterator for algorithms, produced by [`AlgIteratorFactory.prepare`]. +/// For usage instructions see the help for [`AlgIteratorFactory`]. +pub trait AlgIterator : Sized { + type State : AlgIteratorState; + type Output; + + /// Advance the iterator. + fn step(&mut self) -> Step; + + /// Return current iteration count. + fn iteration(&self) -> usize { + self.state().iteration() + } + + /// Return current iteration stats. + fn state(&self) -> Self::State; + + /// Iterate the function `step`. It should accept one `state` ([`AlgIteratorState`] parameter, + /// and return the output of `state.if_verbose`. + /// Typically called through [`AlgIteratorFactory::iterate`]. + #[inline] + fn iterate(&mut self) { + loop { + if let Step::Terminated = self.step() { + break + } + } + } + + /// Converts the `AlgIterator` into a plain [`Iterator`], discarding [`Step::Quiet`] results. + /// (This is to avoid having to manually implement [`IntoIterator`] or [`Iterator`] + /// for every single [`AlgIterator`].) + fn downcast(self) -> AlgIteratorI { + AlgIteratorI(self, PhantomData) + } +} + +/// Conversion of an `AlgIterator` into a plain [`Iterator`]. +/// It discards [`Step::Quiet`] results. +pub struct AlgIteratorI(A, PhantomData); + +impl Iterator for AlgIteratorI +where A : AlgIterator { + type Item = A::Output; + + fn next(&mut self) -> Option { + loop { + match self.0.step() { + Step::Result(v) => return Some(v), + Step::Terminated => return None, + Step::Quiet => continue, + } + } + } +} + +/// A factory for producing an [`AlgIterator`]. To use one: +/// +/// ```rust +/// use alg_tools::iterate::*; +/// let mut iter = AlgIteratorOptions::default(); +/// iter.iterate(|state|{ +/// // perform iterations +/// state.if_verbose(||{ +/// // calculate and return function value or other displayed data v +/// return 0 +/// }) +/// }) +/// ``` +pub trait AlgIteratorFactory : Sized { + type Iter : AlgIterator + where F : FnMut(&Self::State) -> Step; + type State : AlgIteratorState; + type Output; + + /// Prepare an [`AlgIterator`], consuming the factory. + /// The function `step_fn` should accept a `state` ([`AlgIteratorState`] parameter, and return + /// a [`Step`]. + fn prepare(self, step_fn : F) -> Self::Iter + where F : FnMut(&Self::State) -> Step; + + /// Iterate the function `step`. It should accept one `state` ([`AlgIteratorState`] parameter, + /// and return the output of `state.if_verbose`. For usage instructions see the + /// [module documentation][self]. + /// + /// This method is equivalent to [`Self::prepare`] followed by [`AlgIterator.iterate`]. + #[inline] + fn iterate(self, mut step : F) -> () where F : FnMut(&Self::State) -> Option { + self.prepare(move |state| { + match step(state) { + None => Step::Quiet, + Some(v) => Step::Result(v), + } + }).iterate() + } + + /// Iterate the function `step`. 
It should accept a `state` ([`AlgIteratorState`] parameter, + /// and a data parameter taking frmo `datasource`, and return the output of `state.if_verbose`. + /// For usage instructions see the [module documentation][self]. + #[inline] + fn iterate_data(self, mut datasource : I, mut step : F) + where F : FnMut(&Self::State, D) -> Option, + I : Iterator { + self.prepare(move |state| { + match datasource.next() { + None => Step::Terminated, + Some(d) => match step(state, d) { + None => Step::Quiet, + Some(v) => Step::Result(v), + } + } + }).iterate() + } + + // fn make_iterate<'own>(self) + // -> Box Option) -> ()) + 'own> { + // Box::new(move |step| self.iterate(step)) + // } + + // fn make_iterate_data<'own, D, I>(self, datasource : I) + // -> Box Option) -> ()) + 'own> + // where I : Iterator + 'own { + // Box::new(move |step| self.iterate_data(step, datasource)) + // } + + /// Add logging to the iterator produced by the factory. + fn into_log<'log>(self, logger : &'log mut Logger) + -> LoggingIteratorFactory<'log, Self::Output, Self> + where Self : Sized { + LoggingIteratorFactory { + base_options : self, + logger, + } + } + + /// Map the output of the iterator produced by the factory. + fn mapped(self, map : G) + -> MappingIteratorFactory + where Self : Sized, + G : Fn(usize, Self::Output) -> U { + MappingIteratorFactory { + base_options : self, + map + } + } + + /// Adds iteration number to the output. Typically followed by [`Self::into_log`]. + fn with_iteration_number(self) + -> MappingIteratorFactory LogItem, Self> + where Self : Sized { + self.mapped(make_logitem) + } + + /// Add timing to the iterator produced by the factory. + fn timed(self) -> TimingIteratorFactory + where Self : Sized { + TimingIteratorFactory(self) + } + + /// Add value stopping threshold to the iterator produce by the factory + fn stop_target(self, target : Self::Output) -> ValueIteratorFactory + where Self : Sized, + Self::Output : Num { + ValueIteratorFactory { base_options : self, target : target } + } + + /// Add stall stopping to the iterator produce by the factory + fn stop_stall(self, stall : Self::Output) -> StallIteratorFactory + where Self : Sized, + Self::Output : Num { + StallIteratorFactory { base_options : self, stall : stall } + } + + /// Is the iterator quiet, i.e., on-verbose? + fn is_quiet(&self) -> bool { false } +} + +/// Options for [`BasicAlgIteratorFactory`]. Use as: +/// ``` +/// # use alg_tools::iterate::*; +/// let iter = AlgIteratorOptions{ max_iter : 10000, .. Default::default() }; +/// ``` +#[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, PartialEq)] +pub struct AlgIteratorOptions { + /// Maximum number of iterations + pub max_iter : usize, + /// Number of iterations between verbose iterations that display state. + pub verbose_iter : Verbose, + /// Whether verbose iterations are displayed, or just passed onwards to a containing + /// `AlgIterator`. + pub quiet : bool, +} + +#[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, PartialEq)] +pub enum Verbose { + /// Be verbose every $n$ iterations. + Every(usize), + /// Be verbose every $n$ iterations and initial $m$ iterations. + EveryAndInitial{ every : usize, initial : usize }, + /// Be verbose if iteration number $n$ divides by $b^{\text{floor}(\log_b(n))}$, where + /// $b$ is indicated logarithmic base. So, with $b=10$, + /// * every iteration for first 10 iterations, + /// * every 10 iterations from there on until 100 iterations, + /// * every 100 iteartinos frmo there on until 1000 iterations, etc. 
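+    ///
+    /// For example, with base 10 (an illustrative sketch, not an original doctest):
+    /// ```rust
+    /// use alg_tools::iterate::Verbose;
+    /// let v = Verbose::Logarithmic(10);
+    /// assert!(v.is_verbose(7));   // below 10: every iteration is verbose
+    /// assert!(v.is_verbose(30));  // between 10 and 100: every 10th iteration
+    /// assert!(!v.is_verbose(35));
+    /// ```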
+ Logarithmic(usize), +} + +impl Verbose { + /// Indicates whether given iteration number is verbose + pub fn is_verbose(&self, iter : usize) -> bool { + match self { + &Verbose::Every(every) => { + every != 0 && iter % every == 0 + }, + &Verbose::EveryAndInitial{ every, initial } => { + iter <= initial || (every != 0 && iter % every == 0) + }, + &Verbose::Logarithmic(base) => { + let every = 10usize.pow((iter as float).log(base as float).floor() as u32); + iter % every == 0 + } + } + } +} + +impl Default for AlgIteratorOptions { + fn default() -> AlgIteratorOptions { + AlgIteratorOptions{ + max_iter : 1000, + verbose_iter : Verbose::EveryAndInitial { every : 100, initial : 10 }, + quiet : false + } + } +} + +/// State of a `BasicAlgIterator` +#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)] +pub struct BasicState { + iter : usize, + verbose : bool, + calc : bool, +} + +/// [`AlgIteratorFactory`] for [`BasicAlgIterator`] +#[derive(Clone,Debug)] +pub struct BasicAlgIteratorFactory { + options : AlgIteratorOptions, + phantoms : PhantomData, +} + +/// The simplest [`AlgIterator`], reated by [`BasicAlgIteratorFactory`] +#[derive(Clone,Debug)] +pub struct BasicAlgIterator { + options : AlgIteratorOptions, + iter : usize, + step_fn : F, +} + +impl AlgIteratorOptions { + /// [`AlgIteratorOptions`] is directly a factory for [`BasicAlgIterator`], + /// however, due to type inference issues, it may become convenient to instantiate + /// it to a specific return type for the inner step function. This method does that. + pub fn instantiate(&self) -> BasicAlgIteratorFactory { + BasicAlgIteratorFactory { + options : self.clone(), + phantoms : PhantomData + } + } +} + +impl AlgIteratorFactory for AlgIteratorOptions +where V : LogRepr { + type State = BasicState; + type Iter = BasicAlgIterator where F : FnMut(&Self::State) -> Step; + type Output = V; + + fn prepare(self, step_fn : F) -> Self::Iter + where F : FnMut(&Self::State) -> Step { + BasicAlgIterator{ options : self, iter : 0, step_fn } + } + + #[inline] + fn is_quiet(&self) -> bool { + self.quiet + } +} + +impl AlgIteratorFactory for BasicAlgIteratorFactory +where V : LogRepr { + type State = BasicState; + type Iter = BasicAlgIterator where F : FnMut(&Self::State) -> Step; + type Output = V; + + fn prepare(self, step_fn : F) -> Self::Iter + where F : FnMut(&Self::State) -> Step { + BasicAlgIterator{ options : self.options, iter : 0, step_fn } + } + + #[inline] + fn is_quiet(&self) -> bool { + self.options.quiet + } +} + +impl AlgIterator for BasicAlgIterator +where V : LogRepr, + F : FnMut(&BasicState) -> Step { + type State = BasicState; + type Output = V; + + #[inline] + fn step(&mut self) -> Step { + if self.iter >= self.options.max_iter { + Step::Terminated + } else { + self.iter += 1; + let state = self.state(); + let res = (self.step_fn)(&state); + if let Step::Result(ref val) = res { + if state.verbose && !self.options.quiet { + println!("{}{}/{} {}{}", "".dimmed(), + state.iter, + self.options.max_iter, + val.logrepr(), + "".clear()); + } + } + res + } + } + + #[inline] + fn iteration(&self) -> usize { + self.iter + } + + #[inline] + fn state(&self) -> BasicState { + let iter = self.iter; + let verbose = self.options.verbose_iter.is_verbose(iter); + BasicState{ + iter : iter, + verbose : verbose, + calc : verbose, + } + } +} + +impl AlgIteratorState for BasicState { + #[inline] + fn if_verbose(&self, mut calc_objective : impl FnMut() -> A) -> Option { + if self.calc { + Some(calc_objective()) + } else { + None + } + } + + #[inline] + fn 
iteration(&self) -> usize { + return self.iter; + } +} + +// +// Stall detecting iteration function. +// + +/// An [`AlgIteratorFactory`] for an [`AlgIterator`] that detects “stall” +/// $(v_{k+n}-v_k)/v_k ≤ θ$, where $n$ the distance between [`Step::Result`] iterations. +#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)] +pub struct StallIteratorFactory { + /// Basic options + pub base_options : BaseFactory, + /// Stalling threshold $θ$. + pub stall : U, +} + +/// Iterator produced by [`StallIteratorFactory`]. +pub struct StallIterator { + base_iterator : BaseIterator, + stall : U, + previous_value : Option, +} + +impl AlgIteratorFactory +for StallIteratorFactory +where BaseFactory : AlgIteratorFactory, + U : SignedNum + PartialOrd { + type Iter = StallIterator> + where F : FnMut(&Self::State) -> Step; + type State = BaseFactory::State; + type Output = BaseFactory::Output; + + fn prepare(self, step_fn : F) -> Self::Iter + where F : FnMut(&Self::State) -> Step { + StallIterator { + base_iterator : self.base_options.prepare(step_fn), + stall : self.stall, + previous_value : None, + } + } + + fn is_quiet(&self) -> bool { + self.base_options.is_quiet() + } +} + +impl AlgIterator +for StallIterator +where BaseIterator : AlgIterator, + U : SignedNum + PartialOrd { + type State = BaseIterator::State; + type Output = BaseIterator::Output; + + #[inline] + fn step(&mut self) -> Step { + match self.base_iterator.step() { + Step::Result(nv) => { + let previous_v = self.previous_value; + self.previous_value = Some(nv); + match previous_v { + Some(pv) if (nv - pv).abs() <= self.stall * pv.abs() => Step::Terminated, + _ => Step::Result(nv), + } + }, + val => val, + } + } + + #[inline] + fn iteration(&self) -> usize { + self.base_iterator.iteration() + } + + #[inline] + fn state(&self) -> Self::State { + self.base_iterator.state() + } +} + +/// An [`AlgIteratorFactory`] for an [`AlgIterator`] that detect whether step function +/// return value is less than `target`, and terminates if it is. +#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)] +pub struct ValueIteratorFactory { + /// Basic options + pub base_options : BaseFactory, + /// Stalling threshold $θ$. + pub target : U, +} + +/// Iterator produced by [`ValueIteratorFactory`]. +pub struct ValueIterator { + base_iterator : BaseIterator, + target : U, +} + +impl AlgIteratorFactory +for ValueIteratorFactory +where BaseFactory : AlgIteratorFactory, + U : SignedNum + PartialOrd { + type Iter = ValueIterator> + where F : FnMut(&Self::State) -> Step; + type State = BaseFactory::State; + type Output = BaseFactory::Output; + + fn prepare(self, step_fn : F) -> Self::Iter + where F : FnMut(&Self::State) -> Step { + ValueIterator { + base_iterator : self.base_options.prepare(step_fn), + target : self.target + } + } + + fn is_quiet(&self) -> bool { + self.base_options.is_quiet() + } +} + +impl AlgIterator +for ValueIterator +where BaseIterator : AlgIterator, + U : SignedNum + PartialOrd { + type State = BaseIterator::State; + type Output = BaseIterator::Output; + + #[inline] + fn step(&mut self) -> Step { + match self.base_iterator.step() { + Step::Result(v) => { + if v <= self.target { + Step::Terminated + } else { + Step::Result(v) + } + }, + val => val, + } + } + + #[inline] + fn iteration(&self) -> usize { + self.base_iterator.iteration() + } + + #[inline] + fn state(&self) -> Self::State { + self.base_iterator.state() + } +} + +// +// Logging iterator +// + +/// [`AlgIteratorFactory`] for a logging [`AlgIterator`]. 
+/// The `Output` of the corresponding [`LoggingIterator`] is `()`: +/// The data is only inserted into the log, and not passed onwards. +/// Use as: +/// ```rust +/// use alg_tools::iterate::*; +/// use alg_tools::logger::*; +/// let iter = AlgIteratorOptions::default(); +/// let mut log = Logger::new(); +/// iter.into_log(&mut log).iterate(|state|{ +/// // perform iterations +/// state.if_verbose(||{ +/// // calculate and return function value or other displayed data v +/// return 0 +/// }) +/// }) +/// ``` +#[derive(Debug)] +pub struct LoggingIteratorFactory<'log, U, BaseFactory> { + pub base_options : BaseFactory, + pub logger : &'log mut Logger, +} + +/// Iterator produced by `LoggingIteratorFactory`. +pub struct LoggingIterator<'log, U, BaseIterator> { + base_iterator : BaseIterator, + logger : &'log mut Logger, +} + + +impl<'log, V, BaseFactory> AlgIteratorFactory +for LoggingIteratorFactory<'log, BaseFactory::Output, BaseFactory> +where BaseFactory : AlgIteratorFactory, + BaseFactory::Output : 'log { + type State = BaseFactory::State; + type Iter = LoggingIterator<'log, BaseFactory::Output, BaseFactory::Iter> + where F : FnMut(&Self::State) -> Step; + type Output = (); + + fn prepare(self, step_fn : F) -> Self::Iter + where F : FnMut(&Self::State) -> Step { + LoggingIterator { + base_iterator : self.base_options.prepare(step_fn), + logger : self.logger, + } + } + + #[inline] + fn is_quiet(&self) -> bool { + self.base_options.is_quiet() + } +} + +impl<'log, V, BaseIterator> AlgIterator +for LoggingIterator<'log, BaseIterator::Output, BaseIterator> +where BaseIterator : AlgIterator, + BaseIterator::Output : 'log { + type State = BaseIterator::State; + type Output = (); + + #[inline] + fn step(&mut self) -> Step { + match self.base_iterator.step() { + Step::Result(v) => { + self.logger.log(v); + Step::Quiet + }, + Step::Quiet => Step::Quiet, + Step::Terminated => Step::Terminated, + } + } + + #[inline] + fn iteration(&self) -> usize { + self.base_iterator.iteration() + } + + #[inline] + fn state(&self) -> Self::State { + self.base_iterator.state() + } +} + +/// This [`AlgIteratorFactory`] allows output mapping. +#[derive(Debug)] +pub struct MappingIteratorFactory { + pub base_options : BaseFactory, + pub map : G, +} + +/// Iterator produced by `MappingIteratorFactory`. 
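+///
+/// Such a factory is normally obtained through [`AlgIteratorFactory::mapped`]. An
+/// illustrative sketch (not an original doctest) that logs squared values together
+/// with their iteration number:
+/// ```rust
+/// use alg_tools::iterate::*;
+/// use alg_tools::logger::Logger;
+/// use alg_tools::types::*;
+/// let options = AlgIteratorOptions{ max_iter : 5,
+///                                   verbose_iter : Verbose::Every(1),
+///                                   quiet : true,
+///                                   .. Default::default() };
+/// let mut log = Logger::new();
+/// let mut x = 1 as float;
+/// options.instantiate()
+///        .mapped(|iter, v : float| (iter, v * v))
+///        .into_log(&mut log)
+///        .iterate(|state| {
+///            x = x + 1.0;
+///            state.if_verbose(|| x)
+///        });
+/// assert_eq!(log.data().len(), 5);
+/// ```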
+pub struct MappingIterator { + base_iterator : BaseIterator, + map : G, +} + + +impl AlgIteratorFactory +for MappingIteratorFactory +where BaseFactory : AlgIteratorFactory, + G : Fn(usize, BaseFactory::Output) -> U { + type State = BaseFactory::State; + type Iter = MappingIterator> + where F : FnMut(&Self::State) -> Step; + type Output = U; + + fn prepare(self, step_fn : F) -> Self::Iter + where F : FnMut(&Self::State) -> Step { + MappingIterator { + base_iterator : self.base_options.prepare(step_fn), + map : self.map + } + } + + #[inline] + fn is_quiet(&self) -> bool { + self.base_options.is_quiet() + } +} + +impl AlgIterator +for MappingIterator +where BaseIterator : AlgIterator, + G : Fn(usize, BaseIterator::Output) -> U { + type State = BaseIterator::State; + type Output = U; + + #[inline] + fn step(&mut self) -> Step { + match self.base_iterator.step() { + Step::Result(v) => Step::Result((self.map)(self.iteration(), v)), + Step::Quiet => Step::Quiet, + Step::Terminated => Step::Terminated, + } + } + + #[inline] + fn iteration(&self) -> usize { + self.base_iterator.iteration() + } + + #[inline] + fn state(&self) -> Self::State { + self.base_iterator.state() + } +} + +// +// Timing iterator +// + +/// An [`AlgIteratorFactory`] for an [`AlgIterator`] that adds CPU time spent to verbose events. +#[derive(Debug)] +pub struct TimingIteratorFactory(pub BaseFactory); + +/// Iterator produced by [`TimingIteratorFactory`] +#[derive(Debug)] +pub struct TimingIterator { + base_iterator : BaseIterator, + start_time : ProcessTime, +} + +/// Data `U` with production time attached +#[derive(Copy, Clone, Debug, Serialize)] +pub struct Timed { + pub cpu_time : Duration, + //#[serde(flatten)] + pub data : U +} + +impl LogRepr for Timed where T : LogRepr { + fn logrepr(&self) -> ColoredString { + format!("[{:.3}s] {}", self.cpu_time.as_secs_f64(), self.data.logrepr()).as_str().into() + } +} + + +impl AlgIteratorFactory +for TimingIteratorFactory +where BaseFactory : AlgIteratorFactory { + type State = BaseFactory::State; + type Iter = TimingIterator> + where F : FnMut(&Self::State) -> Step; + type Output = Timed; + + fn prepare(self, step_fn : F) -> Self::Iter + where F : FnMut(&Self::State) -> Step { + TimingIterator { + base_iterator : self.0.prepare(step_fn), + start_time : ProcessTime::now() + } + } + + #[inline] + fn is_quiet(&self) -> bool { + self.0.is_quiet() + } +} + +impl AlgIterator +for TimingIterator +where BaseIterator : AlgIterator { + type State = BaseIterator::State; + type Output = Timed; + + #[inline] + fn step(&mut self) -> Step { + match self.base_iterator.step() { + Step::Result(data) => { + Step::Result(Timed{ + cpu_time : self.start_time.elapsed(), + data + }) + }, + Step::Quiet => Step::Quiet, + Step::Terminated => Step::Terminated, + } + } + + #[inline] + fn iteration(&self) -> usize { + self.base_iterator.iteration() + } + + #[inline] + fn state(&self) -> Self::State { + self.base_iterator.state() + } +} + +// +// Tests +// + +#[cfg(test)] +mod tests { + use super::*; + use crate::logger::Logger; + #[test] + fn iteration() { + let options = AlgIteratorOptions{ + max_iter : 10, + verbose_iter : Verbose::Every(3), + .. 
Default::default() + }; + + { + let mut start = 1 as int; + options.iterate(|state| { + start = start * 2; + state.if_verbose(|| start) + }); + assert_eq!(start, (2 as int).pow(10)); + } + + { + let mut start = 1 as int; + let mut log = Logger::new(); + let factory = options.instantiate() + .with_iteration_number() + .into_log(&mut log); + factory.iterate(|state| { + start = start * 2; + state.if_verbose(|| start) + }); + assert_eq!(start, (2 as int).pow(10)); + assert_eq!(log.data() + .iter() + .map(|LogItem{ data : v, iter : _ }| v.clone()) + .collect::>(), + (0..10).step_by(3) + .map(|i| (2 as int).pow(i+1)) + .collect::>()) + } + } +} diff -r 000000000000 -r 9f27689eb130 src/lib.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/lib.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,49 @@ +// The main documentation is in the README. +#![doc = include_str!("../README.md")] + +// We use unicode. We would like to use much more of it than Rust allows. +// Live with it. Embrace it. +#![allow(uncommon_codepoints)] +#![allow(mixed_script_confusables)] +#![allow(confusable_idents)] + +// We need BinaryHeap::retain from nightly +#![feature(binary_heap_retain)] + +#![feature(maybe_uninit_uninit_array,maybe_uninit_array_assume_init,maybe_uninit_slice)] +#![feature(try_trait_v2_residual,try_trait_v2)] + +#![feature(array_methods)] + +#![feature(arc_unwrap_or_clone)] + +#![feature(float_minimum_maximum)] + +// They don't work: +//#![feature(negative_impls)] +//#![feature(specialization)] + +pub mod types; +pub mod nanleast; +pub mod error; +pub mod maputil; +pub mod tuple; +pub mod norms; +#[macro_use] +pub mod loc; +pub mod iter; +pub mod linops; +pub mod iterate; +pub mod tabledump; +pub mod logger; +pub mod linsolve; +pub mod lingrid; +pub mod sets; +pub mod mapping; +pub mod coefficients; +pub mod fe_model; +pub mod bisection_tree; +pub mod nalgebra_support; + + +pub use types::*; diff -r 000000000000 -r 9f27689eb130 src/lingrid.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/lingrid.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,203 @@ +/// Production of equally spaced nodes within intervals. + +use crate::types::*; +use crate::loc::Loc; +use crate::sets::Cube; +use crate::iter::{RestartableIterator, StatefulIterator}; +use crate::maputil::{map2, map4}; +use serde::Serialize; + +// TODO: rewrite this using crate::sets::Cube. + +/// The interval `[start, end]` divided into `count` nodes. +/// Implementes `IntoIterator` to iterate over the nodes of the `LinSpace`. +/// It is similar to [`num::range_step_inclusive`], but [restartable][RestartableIterator] and +/// parametrised by the number of nodes instead of a step. This way it can be ensured that the +/// last item produced is equal to `end`. To use other types than `float` and `f32`, create +/// the `LinSpace` struct directly. +#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)] +pub struct LinSpace { + pub start : F, + pub end : F, + pub count : I, +} + +#[allow(type_alias_bounds)] // Need it to access F::CompatibleSize. +pub type LinGrid = LinSpace, [usize; N]>; + +/// Creates a [`LinSpace`] on the real line. +pub fn linspace(start : F, end : F, count : usize) -> LinSpace { + LinSpace{ start : start, end : end, count : count } +} + +/// Create a multi-dimensional linear grid. +/// +/// The first and last point in each dimension are the boundaries of the corresponding +/// dimensions of `cube`, and there are `count` nodes along each dimension. 
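+///
+/// For the one-dimensional case [`linspace`] can be used directly. An illustrative
+/// sketch (not an original doctest):
+/// ```rust
+/// use alg_tools::lingrid::linspace;
+/// let nodes : Vec<f64> = linspace(0.0, 1.0, 5).into_iter().collect();
+/// assert_eq!(nodes, vec![0.0, 0.25, 0.5, 0.75, 1.0]);
+/// ```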
+pub fn lingrid( + cube : &Cube, + count : &[usize; N] +) -> LinSpace, [usize; N]> { + LinSpace{ start : cube.span_start(), end : cube.span_end(), count : *count } +} + +/// Create a multi-dimensional linear grid with centered nodes. +/// +/// There are `count` along each dimension and each node has equally-sized subcube surrounding it +/// inside `cube`. Thus, if $w_i$ is the width of the cube along dimension $i$, and $n_i$ the number +/// of nodes, the width of the subcube along this dimension is $h_i = w_i/(n_i+1)$, and the first +/// and last nodes are at a distance $h_i/2$ from the closest boundary. +pub fn lingrid_centered( + cube : &Cube, + count : &[usize; N] +) -> LinSpace, [usize; N]> { + let h_div_2 = map2(cube.width(), count, |w, &n| w / F::cast_from(2 * (n + 1))); + let span_start = map2(cube.span_start(), &h_div_2, |a, &t| a + t).into(); + let span_end = map2(cube.span_end(), &h_div_2, |b, &t| b - t).into(); + LinSpace{ start : span_start, end : span_end, count : *count } +} + + +/// Iterator over a `LinSpace`. +#[derive(Clone, Debug)] +pub struct LinSpaceIterator { + lingrid : LinSpace, + current : Option, +} + +pub trait Grid { + fn entry_linear_unchecked(&self, i : usize) -> F; + //fn entry(&self, i : I) -> Option; + fn entry_unchecked(&self, i : &I) -> F; +} + +pub trait GridIteration { + fn next_index(&mut self) -> Option; +} + +impl, I : Unsigned> Grid for LinSpace { + /*fn entry(&self, i : I) -> Option { + if i < self.count { + Some(self.entry_unchecked(i)) + } else { + None + } + }*/ + + #[inline] + fn entry_linear_unchecked(&self, i : usize) -> F { + self.entry_unchecked(&I::cast_from(i)) + } + + #[inline] + fn entry_unchecked(&self, i : &I) -> F { + let idx = F::cast_from(*i); + let scale = F::cast_from(self.count-I::ONE); + self.start + (self.end-self.start)*idx/scale + } +} + +impl, I : Unsigned> GridIteration +for LinSpaceIterator { + #[inline] + fn next_index(&mut self) -> Option { + match self.current { + None if I::ZERO < self.lingrid.count => { self.current = Some(I::ZERO); self.current } + Some(v) if v+I::ONE < self.lingrid.count => { self.current = Some(v+I::ONE); self.current } + _ => { None } + } + } +} + +impl, I : Unsigned, const N : usize> Grid, [I; N]> +for LinSpace, [I; N]> { + #[inline] + fn entry_linear_unchecked(&self, i_ : usize) -> Loc { + let mut i = I::cast_from(i_); + let mut tmp = [I::ZERO; N]; + for k in 0..N { + tmp[k] = i % self.count[k]; + i /= self.count[k]; + } + self.entry_unchecked(&tmp) + } + + #[inline] + fn entry_unchecked(&self, i : &[I; N]) -> Loc { + let LinSpace{ start, end, count } = self; + map4(i, start, end, count, |&ik, &sk, &ek, &ck| { + let idx = F::cast_from(ik); + let scale = F::cast_from(ck-I::ONE); + sk + (ek - sk) * idx / scale + }).into() + } +} + +impl, I : Unsigned, const N : usize> GridIteration, [I; N]> +for LinSpaceIterator, [I; N]> { + + #[inline] + fn next_index(&mut self) -> Option<[I; N]> { + match self.current { + None if self.lingrid.count.iter().all(|v| I::ZERO < *v) => { + self.current = Some([I::ZERO; N]); + self.current + }, + Some(ref mut v) => { + for k in 0..N { + let a = v[k] + I::ONE; + if a < self.lingrid.count[k] { + v[k] = a; + return self.current + } else { + v[k] = I::ZERO; + } + } + None + }, + _ => None + } + } +} + +impl IntoIterator for LinSpace +where LinSpace : Grid, + LinSpaceIterator : GridIteration { + type Item = F; + type IntoIter = LinSpaceIterator; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + LinSpaceIterator { lingrid : self, current : None } + } +} + +impl 
Iterator for LinSpaceIterator +where LinSpace : Grid, + LinSpaceIterator : GridIteration { + type Item = F; + #[inline] + fn next(&mut self) -> Option { + self.next_index().map(|v| self.lingrid.entry_unchecked(&v)) + } +} + +impl StatefulIterator for LinSpaceIterator +where LinSpace : Grid, + LinSpaceIterator : GridIteration { + #[inline] + fn current(&self) -> Option { + self.current.as_ref().map(|c| self.lingrid.entry_unchecked(c)) + } +} + + +impl RestartableIterator for LinSpaceIterator +where LinSpace : Grid, + LinSpaceIterator : GridIteration { + #[inline] + fn restart(&mut self) -> Option { + self.current = None; + self.next() + } +} diff -r 000000000000 -r 9f27689eb130 src/linops.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/linops.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,139 @@ +/*! +Abstract linear operators. +*/ + +use numeric_literals::replace_float_literals; +use std::marker::PhantomData; +use crate::types::*; +use serde::Serialize; + +/// Trait for linear operators on `X`. +pub trait Linear { + /// The range space of the operator. + type Codomain; + /// Apply the linear operator to `x`. + fn apply(&self, x : &X) -> Self::Codomain; +} + +/// Efficient in-place summation. +#[replace_float_literals(F::cast_from(literal))] +pub trait AXPY { + /// Computes `y = βy + αx`, where `y` is `Self`. + fn axpy(&mut self, α : F, x : &X, β : F); + + /// Copies `x` to `self`. + fn copy_from(&mut self, x : &X) { + self.axpy(1.0, x, 0.0) + } + + /// Computes `y = αx`, where `y` is `Self`. + fn scale_from(&mut self, α : F, x : &X) { + self.axpy(α, x, 0.0) + } +} + +/// Efficient in-place application for [`Linear`] operators. +#[replace_float_literals(F::cast_from(literal))] +pub trait GEMV>::Codomain> : Linear { + // Computes `y = αAx + βy`, where `A` is `Self`. + fn gemv(&self, y : &mut Y, α : F, x : &X, β : F); + + // Computes `y = Ax`, where `A` is `Self` + fn apply_mut(&self, y : &mut Y, x : &X){ + self.gemv(y, 1.0, x, 0.0) + } + + // Computes `y += Ax`, where `A` is `Self` + fn apply_add(&self, y : &mut Y, x : &X){ + self.gemv(y, 1.0, x, 1.0) + } +} + + +/// Bounded linear operators +pub trait BoundedLinear : Linear { + type FloatType : Float; + /// A bound on the operator norm $\|A\|$ for the linear operator $A$=`self`. + /// This is not expected to be the norm, just any bound on it that can be + /// reasonably implemented. + fn opnorm_bound(&self) -> Self::FloatType; +} + +/// Linear operator application into mutable target. The [`AsRef`] bound +/// is used to guarantee compatibility with `Yʹ` and `Self::Codomain`; +/// the former is assumed to be e.g. a view into the latter. + +/*impl Fn(&X) -> Y for T where T : Linear { + fn call(&self, x : &X) -> Y { + self.apply(x) + } +}*/ + +/// Trait for forming the adjoint operator of an operator $A$=`Self`. +pub trait Adjointable : Linear { + type AdjointCodomain; + type Adjoint<'a> : Linear where Self : 'a; + + /// Form the adjoint operator of `self`. + fn adjoint(&self) -> Self::Adjoint<'_>; + + /*fn adjoint_apply(&self, y : &Yʹ) -> Self::AdjointCodomain { + self.adjoint().apply(y) + }*/ +} + +/// Trait for forming a preadjoint of an operator $A$, i.e., an operator $A_*$ +/// such that its adjoint $(A_*)^*=A$. The space `X` is the domain of the `Self` +/// operator. The space `Ypre` is the predual of its codomain, and should be the +/// domain of the adjointed operator. `Self::Preadjoint` should be +/// [`Adjointable`]`<'a,Ypre,X>`. 
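+//
+// The traits above are meant to be consumed by generic code. An illustrative sketch
+// (the helper name and its bounds are hypothetical, not part of this crate):
+//
+//     use alg_tools::linops::*;
+//     use alg_tools::types::Float;
+//
+//     /// Residual b − Ax for an arbitrary linear operator `A`.
+//     fn residual<F, X, A>(a : &A, x : &X, b : &A::Codomain) -> A::Codomain
+//     where F : Float,
+//           A : Linear<X>,
+//           A::Codomain : AXPY<F> + Clone {
+//         let mut r = b.clone();
+//         r.axpy(-F::ONE, &a.apply(x), F::ONE);  // r ← 1·b + (−1)·Ax
+//         r
+//     }
+//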
+pub trait Preadjointable : Linear { + type PreadjointCodomain; + type Preadjoint<'a> : Adjointable where Self : 'a; + + /// Form the preadjoint operator of `self`. + fn preadjoint(&self) -> Self::Preadjoint<'_>; +} + +/// Adjointable operators $A: X → Y$ on between reflexibe spaces $X$ and $Y$. +pub trait SimplyAdjointable : Adjointable>::Codomain> {} +impl<'a,X,T> SimplyAdjointable for T where T : Adjointable>::Codomain> {} + +/// The identity operator +#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)] +pub struct IdOp (PhantomData); + +impl IdOp where X : Clone { + fn new() -> IdOp { IdOp(PhantomData) } +} + +impl Linear for IdOp where X : Clone { + type Codomain = X; + fn apply(&self, x : &X) -> X { + x.clone() + } +} + +#[replace_float_literals(F::cast_from(literal))] +impl GEMV for IdOp where Y : AXPY, X : Clone { + // Computes `y = αAx + βy`, where `A` is `Self`. + fn gemv(&self, y : &mut Y, α : F, x : &X, β : F) { + y.axpy(α, x, β) + } + + fn apply_mut(&self, y : &mut Y, x : &X){ + y.copy_from(x); + } +} + +impl BoundedLinear for IdOp where X : Clone { + type FloatType = float; + fn opnorm_bound(&self) -> float { 1.0 } +} + +impl Adjointable for IdOp where X : Clone { + type AdjointCodomain=X; + type Adjoint<'a> = IdOp where X : 'a; + fn adjoint(&self) -> Self::Adjoint<'_> { IdOp::new() } +} + diff -r 000000000000 -r 9f27689eb130 src/linsolve.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/linsolve.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,90 @@ +/// Linear solvers for small problems. + +use crate::types::Float; +use std::mem::MaybeUninit; + +/// Gaussian elimination for $AX=B$, where $A$ and $B$ are both stored in `ab`, +/// $A \in \mathbb{R}^{M \times M}$ and $X, B \in \mathbb{R}^{M \times K}$. +pub fn linsolve0( + mut ab : [[F; N]; M] +) -> [[F; K]; M] { + assert_eq!(M + K , N); + + let mut k = 0; + + // Convert to row-echelon form + for h in 0..(M-1) { + // Find pivotable column (has some non-zero entries in rows ≥ h) + 'find_pivot: while k < N { + let (mut î, mut v) = (h, ab[h][k].abs()); + // Find row ≥ h of maximum absolute value in this column + for i in (h+1)..M { + let ṽ = ab[i][k].abs(); + if ṽ > v { + î = i; + v = ṽ; + } + } + if v > F::ZERO { + ab.swap(h, î); + for i in (h+1)..M { + let f = ab[i][k] / ab[h][k]; + ab[i][k] = F::ZERO; + for j in (k+1)..N { + ab[i][j] -= ab[h][j]*f; + } + } + k += 1; + break 'find_pivot; + } + k += 1 + } + } + + // Solve UAX=UB for X where UA with U presenting the transformations above an + // upper triangular matrix. + // This use of MaybeUninit assumes F : Copy. Otherwise undefined behaviour may occur. + let mut x : [[MaybeUninit; K]; M] = core::array::from_fn(|_| MaybeUninit::uninit_array::() ); + //unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + for i in (0..M).rev() { + for 𝓁 in 0..K { + let mut tmp = ab[i][M+𝓁]; + for j in (i+1)..M { + tmp -= ab[i][j] * unsafe { *(x[j][𝓁].assume_init_ref()) }; + } + tmp /= ab[i][i]; + x[i][𝓁].write(tmp); + } + } + //unsafe { MaybeUninit::array_assume_init(x) }; + let xinit = unsafe { + //core::intrinsics::assert_inhabited::<[[F; K]; M]>(); + (&x as *const _ as *const [[F; K]; M]).read() + }; + + std::mem::forget(x); + xinit +} + +/// Gaussian elimination for $Ax=b$, where $A$ and $b$ are both stored in `ab`, +/// $A \in \mathbb{R}^{M \times M}$ and $x, b \in \mathbb{R}^M$. 
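+///
+/// An illustrative sketch (mirroring the unit test below):
+/// ```rust
+/// use alg_tools::linsolve::linsolve;
+/// // Solve x + 2y = 3, 2x + y = 6, i.e., (x, y) = (3, 0).
+/// let ab = [[1.0, 2.0, 3.0],
+///           [2.0, 1.0, 6.0]];
+/// assert_eq!(linsolve(ab), [3.0, 0.0]);
+/// ```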
+#[inline] +pub fn linsolve(ab : [[F; N]; M]) -> [F; M] { + let x : [[F; 1]; M] = linsolve0(ab); + unsafe { *((&x as *const [F; 1]) as *const [F; M] ) } +} + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn linsolve_test() { + let ab1 = [[1.0, 2.0, 3.0], [2.0, 1.0, 6.0]]; + assert_eq!(linsolve(ab1), [3.0, 0.0]); + let ab2 = [[1.0, 2.0, 0.0, 1.0], [4.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]; + assert_eq!(linsolve(ab2), [0.0, 0.5, 0.0]); + } +} + diff -r 000000000000 -r 9f27689eb130 src/loc.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/loc.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,530 @@ +/*! +Array containers with vectorspace operations on floats. +For working with vectors in $ℝ^2$ or $ℝ^3$. +*/ + +use std::ops::{Add,Sub,AddAssign,SubAssign,Mul,Div,MulAssign,DivAssign,Neg,Index,IndexMut}; +use std::slice::{Iter,IterMut}; +use crate::types::{Float,Num,SignedNum}; +use crate::maputil::{FixedLength,FixedLengthMut,map1,map2,map1_mut,map2_mut}; +use crate::norms::*; +use crate::linops::AXPY; +use serde::ser::{Serialize, Serializer, SerializeSeq}; + +#[derive(Copy,Clone,Debug,PartialEq,Eq)] +pub struct Loc(pub [F; N]); + +// Need to manually implement as [F; N] serialisation is provided only for some N. +impl Serialize for Loc +where + F: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut seq = serializer.serialize_seq(Some(N))?; + for e in self.iter() { + seq.serialize_element(e)?; + } + seq.end() + } +} + +impl Loc { + #[inline] + pub fn new(arr : [F; N]) -> Self { + Loc(arr) + } + + #[inline] + pub fn iter(&self) -> Iter<'_, F> { + self.0.iter() + } + + #[inline] + pub fn iter_mut(&mut self) -> IterMut<'_, F> { + self.0.iter_mut() + } +} + +impl Loc { + #[inline] + pub fn map H>(&self, g : G) -> Loc { + Loc::new(map1(self, |u| g(*u))) + } + + #[inline] + pub fn map2 H>(&self, other : &Self, g : G) -> Loc { + Loc::new(map2(self, other, |u, v| g(*u, *v))) + } + + #[inline] + pub fn map_mut(&mut self, g : G) { + map1_mut(self, g) + } + + #[inline] + pub fn map2_mut(&mut self, other : &Self, g : G) { + map2_mut(self, other, |u, v| g(u, *v)) + } + + #[inline] + pub fn product_map A, A : Num>(&self, f : G) -> A { + match N { + 1 => f(unsafe { *self.0.get_unchecked(0) }), + 2 => f(unsafe { *self.0.get_unchecked(0) }) * + f(unsafe { *self.0.get_unchecked(1) }), + 3 => f(unsafe { *self.0.get_unchecked(0) }) * + f(unsafe { *self.0.get_unchecked(1) }) * + f(unsafe { *self.0.get_unchecked(2) }), + _ => self.iter().fold(A::ONE, |m, &x| m*f(x)) + } + } +} + +#[macro_export] +macro_rules! loc { + ($($x:expr),+ $(,)?) 
=> { Loc::new([$($x),+]) } +} + +// Conversions + +impl From<[F; N]> for Loc { + #[inline] + fn from(other: [F; N]) -> Loc { + Loc(other) + } +} + +/*impl From<&[F; N]> for Loc { + #[inline] + fn from(other: &[F; N]) -> Loc { + Loc(*other) + } +}*/ + +impl From for Loc { + #[inline] + fn from(other: F) -> Loc { + Loc([other]) + } +} + +impl From> for [F; N] { + #[inline] + fn from(other : Loc) -> [F; N] { + other.0 + } +} + +/*impl From<&Loc> for [F; N] { + #[inline] + fn from(other : &Loc) -> [F; N] { + other.0 + } +}*/ + + +impl IntoIterator for Loc { + type Item = <[F; N] as IntoIterator>::Item; + type IntoIter = <[F; N] as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +// Indexing + +impl Index for Loc +where [F; N] : Index { + type Output = <[F; N] as Index>::Output; + + #[inline] + fn index(&self, ix : Ix) -> &Self::Output { + self.0.index(ix) + } +} + +impl IndexMut for Loc +where [F; N] : IndexMut { + #[inline] + fn index_mut(&mut self, ix : Ix) -> &mut Self::Output { + self.0.index_mut(ix) + } +} + +// Arithmetic + +macro_rules! make_binop { + ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => { + impl $trait> for Loc { + type Output = Loc; + #[inline] + fn $fn(mut self, other : Loc) -> Self::Output { + self.$fn_assign(other); + self + } + } + + impl<'a, F : Num, const N : usize> $trait<&'a Loc> for Loc { + type Output = Loc; + #[inline] + fn $fn(mut self, other : &'a Loc) -> Self::Output { + self.$fn_assign(other); + self + } + } + + impl<'b, F : Num, const N : usize> $trait> for &'b Loc { + type Output = Loc; + #[inline] + fn $fn(self, other : Loc) -> Self::Output { + self.map2(&other, |a, b| a.$fn(b)) + } + } + + impl<'a, 'b, F : Num, const N : usize> $trait<&'a Loc> for &'b Loc { + type Output = Loc; + #[inline] + fn $fn(self, other : &'a Loc) -> Self::Output { + self.map2(other, |a, b| a.$fn(b)) + } + } + + impl $trait_assign> for Loc { + #[inline] + fn $fn_assign(&mut self, other : Loc) { + self.map2_mut(&other, |a, b| a.$fn_assign(b)) + } + } + + impl<'a, F : Num, const N : usize> $trait_assign<&'a Loc> for Loc { + #[inline] + fn $fn_assign(&mut self, other : &'a Loc) { + self.map2_mut(other, |a, b| a.$fn_assign(b)) + } + } + } +} + +make_binop!(Add, add, AddAssign, add_assign); +make_binop!(Sub, sub, SubAssign, sub_assign); + +macro_rules! make_scalarop_rhs { + ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => { + impl $trait for Loc { + type Output = Loc; + #[inline] + fn $fn(self, b : F) -> Self::Output { + self.map(|a| a.$fn(b)) + } + } + + impl<'a, F : Num, const N : usize> $trait<&'a F> for Loc { + type Output = Loc; + #[inline] + fn $fn(self, b : &'a F) -> Self::Output { + self.map(|a| a.$fn(*b)) + } + } + + impl<'b, F : Num, const N : usize> $trait for &'b Loc { + type Output = Loc; + #[inline] + fn $fn(self, b : F) -> Self::Output { + self.map(|a| a.$fn(b)) + } + } + + impl<'a, 'b, F : Float, const N : usize> $trait<&'a F> for &'b Loc { + type Output = Loc; + #[inline] + fn $fn(self, b : &'a F) -> Self::Output { + self.map(|a| a.$fn(*b)) + } + } + + impl $trait_assign for Loc { + #[inline] + fn $fn_assign(&mut self, b : F) { + self.map_mut(|a| a.$fn_assign(b)); + } + } + + impl<'a, F : Num, const N : usize> $trait_assign<&'a F> for Loc { + #[inline] + fn $fn_assign(&mut self, b : &'a F) { + self.map_mut(|a| a.$fn_assign(*b)); + } + } + } +} + + +make_scalarop_rhs!(Mul, mul, MulAssign, mul_assign); +make_scalarop_rhs!(Div, div, DivAssign, div_assign); + +macro_rules! 
make_unaryop { + ($trait:ident, $fn:ident) => { + impl $trait for Loc { + type Output = Loc; + #[inline] + fn $fn(mut self) -> Self::Output { + self.map_mut(|a| *a = (*a).$fn()); + self + } + } + + impl<'a, F : SignedNum, const N : usize> $trait for &'a Loc { + type Output = Loc; + #[inline] + fn $fn(self) -> Self::Output { + self.map(|a| a.$fn()) + } + } + } +} + +make_unaryop!(Neg, neg); + +macro_rules! make_scalarop_lhs { + ($trait:ident, $fn:ident; $($f:ident)+) => { $( + impl $trait> for $f { + type Output = Loc<$f, N>; + #[inline] + fn $fn(self, v : Loc<$f,N>) -> Self::Output { + v.map(|b| self.$fn(b)) + } + } + + impl<'a, const N : usize> $trait<&'a Loc<$f,N>> for $f { + type Output = Loc<$f, N>; + #[inline] + fn $fn(self, v : &'a Loc<$f,N>) -> Self::Output { + v.map(|b| self.$fn(b)) + } + } + + impl<'b, const N : usize> $trait> for &'b $f { + type Output = Loc<$f, N>; + #[inline] + fn $fn(self, v : Loc<$f,N>) -> Self::Output { + v.map(|b| self.$fn(b)) + } + } + + impl<'a, 'b, const N : usize> $trait<&'a Loc<$f,N>> for &'b $f { + type Output = Loc<$f, N>; + #[inline] + fn $fn(self, v : &'a Loc<$f, N>) -> Self::Output { + v.map(|b| self.$fn(b)) + } + } + )+ } +} + +make_scalarop_lhs!(Mul, mul; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize); +make_scalarop_lhs!(Div, div; f32 f64 i8 i16 i32 i64 isize u8 u16 u32 u64 usize); + +// Norms + +macro_rules! domination { + ($norm:ident, $dominates:ident) => { + impl Dominated> for $norm { + #[inline] + fn norm_factor(&self, _p : $dominates) -> F { + F::ONE + } + #[inline] + fn from_norm(&self, p_norm : F, _p : $dominates) -> F { + p_norm + } + } + }; + ($norm:ident, $dominates:ident, $fn:path) => { + impl Dominated> for $norm { + #[inline] + fn norm_factor(&self, _p : $dominates) -> F { + $fn(F::cast_from(N)) + } + } + }; +} + +domination!(L1, L1); +domination!(L2, L2); +domination!(Linfinity, Linfinity); + +domination!(L1, L2, F::sqrt); +domination!(L2, Linfinity, F::sqrt); +domination!(L1, Linfinity, std::convert::identity); + +domination!(Linfinity, L1); +domination!(Linfinity, L2); +domination!(L2, L1); + +impl Dot,F> for Loc { + /// This implementation is not stabilised as it's meant to be used for very small vectors. + /// Use [`nalgebra`] for larger vectors. + #[inline] + fn dot(&self, other : &Loc) -> F { + self.0.iter() + .zip(other.0.iter()) + .fold(F::ZERO, |m, (&v, &w)| m + v * w) + } +} + +impl Euclidean for Loc { + type Output = Self; + + #[inline] + fn similar_origin(&self) -> Self { + Self::ORIGIN + } + + /// This implementation is not stabilised as it's meant to be used for very small vectors. + /// Use [`nalgebra`] for larger vectors. + #[inline] + fn norm2_squared(&self) -> F { + self.iter().fold(F::ZERO, |m, &v| m + v * v) + } + + fn dist2_squared(&self, other : &Self) -> F { + self.iter() + .zip(other.iter()) + .fold(F::ZERO, |m, (&v, &w)| { let d = v - w; m + d * d }) + } + + #[inline] + fn norm2(&self) -> F { + // Optimisation for N==1 that avoids squaring and square rooting. + if N==1 { + unsafe { self.0.get_unchecked(0) }.abs() + } else { + self.norm2_squared().sqrt() + } + } + + #[inline] + fn dist2(&self, other : &Self) -> F { + // Optimisation for N==1 that avoids squaring and square rooting. 
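+        // (As in `norm2` above: in one dimension the distance reduces to |x₁ − y₁|,
+        // so the absolute value of the difference is returned directly.)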
+ if N==1 { + unsafe { *self.0.get_unchecked(0) - *other.0.get_unchecked(0) }.abs() + } else { + self.dist2_squared(other).sqrt() + } + } +} + +impl Loc { + pub const ORIGIN : Self = Loc([F::ZERO; N]); +} + +impl StaticEuclidean for Loc { + #[inline] + fn origin() -> Self { + Self::ORIGIN + } +} + +impl Norm for Loc { + #[inline] + fn norm(&self, _ : L2) -> F { self.norm2() } +} + +impl Dist for Loc { + #[inline] + fn dist(&self, other : &Self, _ : L2) -> F { self.dist2(other) } +} + +impl Norm for Loc { + /// This implementation is not stabilised as it's meant to be used for very small vectors. + /// Use [`nalgebra`] for larger vectors. + #[inline] + fn norm(&self, _ : L1) -> F { + self.iter().fold(F::ZERO, |m, v| m + v.abs()) + } +} + +impl Dist for Loc { + #[inline] + fn dist(&self, other : &Self, _ : L1) -> F { + self.iter() + .zip(other.iter()) + .fold(F::ZERO, |m, (&v, &w)| m + (v-w).abs() ) + } +} + +impl Projection for Loc { + #[inline] + fn proj_ball_mut(&mut self, ρ : F, _ : Linfinity) { + self.iter_mut().for_each(|v| *v = num_traits::clamp(*v, -ρ, ρ)) + } +} + +impl Norm for Loc { + /// This implementation is not stabilised as it's meant to be used for very small vectors. + /// Use [`nalgebra`] for larger vectors. + #[inline] + fn norm(&self, _ : Linfinity) -> F { + self.iter().fold(F::ZERO, |m, v| m.max(v.abs())) + } +} + +impl Dist for Loc { + #[inline] + fn dist(&self, other : &Self, _ : Linfinity) -> F { + self.iter() + .zip(other.iter()) + .fold(F::ZERO, |m, (&v, &w)| m.max((v-w).abs())) + } +} + + +// Misc. + +impl FixedLength for Loc { + type Iter = std::array::IntoIter; + type Elem = A; + #[inline] + fn fl_iter(self) -> Self::Iter { + self.into_iter() + } +} + +impl FixedLengthMut for Loc { + type IterMut<'a> = std::slice::IterMut<'a, A> where A : 'a; + #[inline] + fn fl_iter_mut(&mut self) -> Self::IterMut<'_> { + self.iter_mut() + } +} + +impl<'a, A, const N : usize> FixedLength for &'a Loc { + type Iter = std::slice::Iter<'a, A>; + type Elem = &'a A; + #[inline] + fn fl_iter(self) -> Self::Iter { + self.iter() + } +} + +impl AXPY> for Loc { + + #[inline] + fn axpy(&mut self, α : F, x : &Loc, β : F) { + if β == F::ZERO { + map2_mut(self, x, |yi, xi| { *yi = α * (*xi) }) + } else { + map2_mut(self, x, |yi, xi| { *yi = β * (*yi) + α * (*xi) }) + } + } + + #[inline] + fn copy_from(&mut self, x : &Loc) { + map2_mut(self, x, |yi, xi| *yi = *xi ) + } +} diff -r 000000000000 -r 9f27689eb130 src/logger.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/logger.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,39 @@ +/*! +Logging routines for intermediate computational results. +*/ + +use serde::Serialize; +use crate::tabledump::TableDump; + +/// A log of items of type `T` along with log configuration. +/// The constructor takes no arguments; create the log with `Log{T}()` for `T` your +/// data type, e.g., `Float64`. +#[derive(Debug, Clone)] +pub struct Logger { + data : Vec, +} + +impl Logger { + /// Create new log + pub fn new() -> Logger { + Logger{ data : Vec::new() } + } + + /// Store the value `v` in the log at index `i`. + pub fn log(&mut self, v : V) -> () { + self.data.push(v); + } + + /// Get logged data. 
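+    ///
+    /// An illustrative sketch (not an original doctest):
+    /// ```rust
+    /// use alg_tools::logger::Logger;
+    /// let mut log = Logger::new();
+    /// log.log(1.0);
+    /// log.log(2.0);
+    /// assert_eq!(log.data(), &vec![1.0, 2.0]);
+    /// ```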
+ pub fn data(&self) -> &Vec { + &self.data + } +} + +impl<'a, V : Serialize + 'a> TableDump<'a> for Logger { + type Iter = std::slice::Iter<'a, V>; + + fn tabledump_entries(&'a self) -> Self::Iter { + self.data.iter() + } +} diff -r 000000000000 -r 9f27689eb130 src/mapping.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/mapping.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,64 @@ +/// Traits for (mathematical) functions. + +use std::marker::PhantomData; +use crate::types::{Float}; +use serde::Serialize; +use crate::loc::Loc; + +pub trait Mapping { + type Codomain; + + fn value(&self, x : Domain) -> Self::Codomain; +} + +pub trait RealRefMapping +: for<'a> Mapping<&'a Loc, Codomain=F> {} + +impl RealRefMapping for T +where T : for<'a> Mapping<&'a Loc, Codomain=F> {} + +pub trait DifferentiableMapping : Mapping { + type Differential; + + fn differential(&self, x : Domain) -> Self::Differential; +} + +pub trait RealMapping : Mapping where Self::Codomain : Float { + fn minimise(&self, tolerance : Self::Codomain) -> (Domain, Self::Codomain); + fn maximise(&self, tolerance : Self::Codomain) -> (Domain, Self::Codomain); +} + +// +// Sum +// + +#[derive(Serialize, Debug, Clone)] +pub struct Sum> { + components : Vec, + _domain : PhantomData, +} + +impl Mapping for Sum +where M : Mapping, + M :: Codomain : std::iter::Sum, + Domain : Copy { + + type Codomain = M::Codomain; + + fn value(&self, x : Domain) -> Self::Codomain { + self.components.iter().map(|c| c.value(x)).sum() + } +} + +impl DifferentiableMapping for Sum +where M : DifferentiableMapping, + M :: Codomain : std::iter::Sum, + M :: Differential : std::iter::Sum, + Domain : Copy { + + type Differential = M::Differential; + + fn differential(&self, x : Domain) -> Self::Differential { + self.components.iter().map(|c| c.differential(x)).sum() + } +} diff -r 000000000000 -r 9f27689eb130 src/maputil.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/maputil.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,369 @@ +/// +/// Mapping utilities +/// + +use std::mem::MaybeUninit; +use itertools::izip; + +pub trait FixedLength { + type Elem; + type Iter : Iterator; + fn fl_iter(self) -> Self::Iter; +} + +pub trait FixedLengthMut : FixedLength { + type IterMut<'a> : Iterator where Self : 'a; + fn fl_iter_mut(&mut self) -> Self::IterMut<'_>; +} + +impl FixedLength for [A; N] { + type Elem = A; + type Iter = std::array::IntoIter; + #[inline] + fn fl_iter(self) -> Self::Iter { + self.into_iter() + } +} + +impl FixedLengthMut for [A; N] { + type IterMut<'a> = std::slice::IterMut<'a, A> where A : 'a; + #[inline] + fn fl_iter_mut(&mut self) -> Self::IterMut<'_> { + self.iter_mut() + } +} + +impl<'a, A, const N : usize> FixedLength for &'a [A; N] { + type Elem = &'a A; + type Iter = std::slice::Iter<'a, A>; + #[inline] + fn fl_iter(self) -> Self::Iter { + self.iter() + } +} + +macro_rules! tuple_or_singleton { + ($a:ident,) => { $a }; + ($($a:ident),+) => { ($($a),+) } +} + +macro_rules! make_mapmany { + ($name:ident, $name_indexed:ident, $var0:ident $($var:ident)* ; + $etype0:ident $($etype:ident)*, $ctype0:ident $($ctype:ident)*) => { + /// Map over multiple [`FixedLength`] containers, returning an array. 
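+        //
+        // Usage sketch for the generated functions (cf. `mapx_test` at the end of
+        // this file), e.g.:
+        //     let sums = map2([1, 2, 3], [10, 20, 30], |x, y| x + y);  // == [11, 22, 33]
+        //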
+ #[inline] + pub fn $name< + $etype0, + $($etype,)* + $ctype0 : FixedLength, + $($ctype : FixedLength,)* + Res, + const N : usize + >( + $var0 : $ctype0, + $($var : $ctype,)* + f : impl Fn($etype0, $($etype),*) -> Res + ) -> [Res; N] { + let zipit = izip!($var0.fl_iter(), $($var.fl_iter()),*); + let map = zipit.map(|tuple_or_singleton!($var0, $($var),*)| f($var0, $($var),*)); + collect_into_array_unchecked(map) + } + + /// Map over multiple [`FixedLength`] containers, returning an array. + /// This version also passes the index to the mapping function. + #[inline] + pub fn $name_indexed< + $etype0, + $($etype,)* + $ctype0 : FixedLength, + $($ctype : FixedLength,)* + Res, + const N : usize + >( + $var0 : $ctype0, + $($var : $ctype,)* + f : impl Fn(usize, $etype0, $($etype),*) -> Res + ) -> [Res; N] { + let zipit = (0..N).zip(izip!($var0.fl_iter(), $($var.fl_iter()),*)); + let map = zipit.map(|(i, tuple_or_singleton!($var0, $($var),*))| f(i, $var0, $($var),*)); + collect_into_array_unchecked(map) + } + } +} + +make_mapmany!(map1, map1_indexed, a; A, CA); +make_mapmany!(map2, map2_indexed, a b; A B, CA CB); +make_mapmany!(map3, map3_indexed, a b c; A B C, CA CB CC); +make_mapmany!(map4, map4_indexed, a b c d; A B C D, CA CB CC CD); +make_mapmany!(map5, map5_indexed, a b c d e; A B C D E, CA CB CC CD CE); +make_mapmany!(map6, map6_indexed, a b c d e f; A B C D E F, CA CB CC CD CE CF); + +macro_rules! make_mapmany_mut{ + ($name:ident, $name_indexed:ident, $var0:ident $($var:ident)* ; + $etype0:ident $($etype:ident)*, $ctype0:ident $($ctype:ident)*) => { + /// Map over multiple [`FixedLength`] container inserting the result in the first. + #[inline] + pub fn $name< + $etype0, + $($etype,)* + $ctype0 : FixedLengthMut, + $($ctype : FixedLength,)* + const N : usize + > ( + $var0 : &mut $ctype0, + $($var : $ctype,)* + f : impl Fn(&mut $etype0, $($etype),*) + ) { + let zipit = izip!($var0.fl_iter_mut(), $($var.fl_iter()),*); + zipit.for_each(|tuple_or_singleton!($var0, $($var),*)| f($var0, $($var),*)); + } + + /// Map over multiple [`FixedLength`] container inserting the result in the first. + /// This version also passes the index to the mapping function. + #[inline] + pub fn $name_indexed< + $etype0, + $($etype,)* + $ctype0 : FixedLengthMut, + $($ctype : FixedLength,)* + const N : usize + > ( + $var0 : &mut $ctype0, + $($var : $ctype,)* + f : impl Fn(usize, &mut $etype0, $($etype),*) + ) { + let zipit = (0..N).zip(izip!($var0.fl_iter_mut(), $($var.fl_iter()),*)); + zipit.for_each(|(i, tuple_or_singleton!($var0, $($var),*))| f(i, $var0, $($var),*)); + } + } +} + +make_mapmany_mut!(map1_mut, map1_indexed_mut, a; A, CA); +make_mapmany_mut!(map2_mut, map2_indexed_mut, a b; A B, CA CB); +make_mapmany_mut!(map3_mut, map3_indexed_mut, a b c; A B C, CA CB CC); +make_mapmany_mut!(map4_mut, map4_indexed_mut, a b c d; A B C D, CA CB CC CD); +make_mapmany_mut!(map5_mut, map5_indexed_mut, a b c d e; A B C D E, CA CB CC CD CE); +make_mapmany_mut!(map6_mut, map6_indexed_mut, a b c d e f; A B C D E F, CA CB CC CD CE CF); + + +/// Initialise an array of length `N` by calling `f` multiple times. +#[inline] +pub fn array_init A, const N : usize>(f : F) -> [A; N] { + //[(); N].map(|_| f()) + core::array::from_fn(|_| f()) +} + +// /// Initialise an array of length `N` by calling `f` with the index of each element. 
+// #[inline] +// pub fn array_gen A, const N : usize>(f : F) -> [A; N] { +// //[(); N].indexmap(|i, _| f(i)) +// core::array::from_fn(f) +// } + + +pub struct FoldMap, A, B, J : Copy, F : Fn(J, A) -> (J, B)> { + iter : I, + f : F, + j : J, +} + +impl, J : Copy, F : Fn(J, A) -> (J, B)> Iterator for FoldMap { + type Item = B; + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(|a| { + let (jnew, b) = (self.f)(self.j, a); + self.j = jnew; + b + }) + } +} + +pub struct IndexMap, A, B, F : Fn(usize, A) -> B> { + iter : I, + f : F, + j : usize, +} + +impl, F : Fn(usize, A) -> B> Iterator for IndexMap { + type Item = B; + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(|a| { + let b = (self.f)(self.j, a); + self.j = self.j+1; + b + }) + } +} + +pub trait FoldMappable (J, B)> : Sized { + type Output; + /// Fold and map over an array; typically used to count the element while traversing a map + fn foldmap(self, j : J, f : F) -> Self::Output; +} + +pub trait IndexMappable B> : Sized { + type Output; + fn indexmap(self, f : F) -> Self::Output; +} + +impl<'a, A, B, J : Copy, F : Fn(J, &'a A) -> (J, B)> FoldMappable<&'a A, B, J, F> +for std::slice::Iter<'a, A> { + type Output = FoldMap; + #[inline] + fn foldmap(self, j : J, f : F) -> Self::Output { + FoldMap { iter : self, j, f } + } +} + +impl<'a, A, B, F : Fn(usize, &'a A) -> B> IndexMappable<&'a A, B, F> +for std::slice::Iter<'a, A> { + type Output = IndexMap; + #[inline] + fn indexmap(self, f : F) -> Self::Output { + IndexMap { iter : self, j : 0, f } + } +} + + +impl (J, B), const N : usize> FoldMappable +for std::array::IntoIter { + type Output = FoldMap; + #[inline] + fn foldmap(self, j : J, f : F) -> Self::Output { + FoldMap { iter : self, j, f } + } +} + +impl<'a, A, B, F : Fn(usize, A) -> B, const N : usize> IndexMappable +for std::array::IntoIter { + type Output = IndexMap; + #[inline] + fn indexmap(self, f : F) -> Self::Output { + IndexMap { iter : self, j : 0, f } + } +} + + + + +impl (J, B), const N : usize> FoldMappable for [A; N] { + type Output = [B; N]; + #[inline] + fn foldmap(self, j : J, f : F) -> [B; N] { + // //let mut res : [MaybeUninit; N] = unsafe { MaybeUninit::uninit().assume_init() }; + // let mut res = MaybeUninit::uninit_array::(); + // for (a, i) in self.into_iter().zip(0..N) { + // let (jnew, b) = f(j, a); + // unsafe { *(res.get_unchecked_mut(i)) = MaybeUninit::new(b) }; + // j = jnew; + // } + // //unsafe { res.as_mut_ptr().cast::<[B; N]>().read() } + // unsafe { MaybeUninit::array_assume_init(res) } + let it = self.into_iter().foldmap(j, f); + collect_into_array_unchecked(it) + } +} + +impl B, const N : usize> IndexMappable for [A; N] { + type Output= [B; N]; + #[inline] + fn indexmap(self, f : F) -> [B; N] { + // //let mut res : [MaybeUninit; N] = unsafe { MaybeUninit::uninit().assume_init() }; + // let mut res = MaybeUninit::uninit_array::(); + // for (a, i) in self.into_iter().zip(0..N) { + // let b = f(i, a); + // unsafe { *(res.get_unchecked_mut(i)) = MaybeUninit::new(b) }; + // } + // //unsafe { res.as_mut_ptr().cast::<[B; N]>().read() } + // unsafe { MaybeUninit::array_assume_init(res) } + let it = self.into_iter().indexmap(f); + collect_into_array_unchecked(it) + } +} + +/// +/// This is taken and simplified from core::array to not involve `ControlFlow`, +/// `Try` etc. (Pulling everything including `NeverShortCircuit` turned out +/// too much to maintain here.) +/// +/// Pulls `N` items from `iter` and returns them as an array. 
If the iterator +/// yields fewer than `N` items, `None` is returned and all already yielded +/// items are dropped. +/// +/// Since the iterator is passed as a mutable reference and this function calls +/// `next` at most `N` times, the iterator can still be used afterwards to +/// retrieve the remaining items. +/// +/// If `iter.next()` panicks, all items already yielded by the iterator are +/// dropped. +#[inline] +pub(crate) fn collect_into_array_unchecked, const N: usize>(mut iter: I) -> [T; N] +{ + if N == 0 { + // SAFETY: An empty array is always inhabited and has no validity invariants. + return unsafe { core::mem::zeroed() }; + } + + struct Guard<'a, T, const N: usize> { + array_mut: &'a mut [MaybeUninit; N], + initialized: usize, + } + + impl Drop for Guard<'_, T, N> { + #[inline] + fn drop(&mut self) { + debug_assert!(self.initialized <= N); + + // SAFETY: this slice will contain only initialized objects. + unsafe { + core::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut( + &mut self.array_mut.get_unchecked_mut(..self.initialized), + )); + } + } + } + + let mut array = MaybeUninit::uninit_array::(); + let mut guard = Guard { array_mut: &mut array, initialized: 0 }; + + while let Some(item) = iter.next() { + // SAFETY: `guard.initialized` starts at 0, is increased by one in the + // loop and the loop is aborted once it reaches N (which is + // `array.len()`). + unsafe { + guard.array_mut.get_unchecked_mut(guard.initialized).write(item); + } + guard.initialized += 1; + + // Check if the whole array was initialized. + if guard.initialized == N { + core::mem::forget(guard); + + // SAFETY: the condition above asserts that all elements are + // initialized. + let out = unsafe { MaybeUninit::array_assume_init(array) }; + return out; + } + } + + unreachable!("Something went wrong with iterator length") +} + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn mapx_test() { + let a = [0,1,2]; + let mut b = [2,1,0]; + assert_eq!(map1(a, |x| x+1), [1,2,3]); + assert_eq!(map2(a, b, |x, y| x+y), [2,2,2]); + assert_eq!(map1_indexed(a, |i, y| y-i), [0,0,0]); + map1_indexed_mut(&mut b, |i, y| *y=i); + assert_eq!(b, a); + } +} diff -r 000000000000 -r 9f27689eb130 src/nalgebra_support.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/nalgebra_support.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,297 @@ +///! 
Integration with nalgebra + +use nalgebra::{ + Matrix, Storage, StorageMut, OMatrix, Dim, DefaultAllocator, Scalar, + ClosedMul, ClosedAdd, SimdComplexField, Vector, OVector, RealField, + LpNorm, UniformNorm +}; +use nalgebra::Norm as NalgebraNorm; +use nalgebra::base::constraint::{ + ShapeConstraint, SameNumberOfRows, SameNumberOfColumns +}; +use nalgebra::base::dimension::*; +use nalgebra::base::allocator::Allocator; +use std::ops::Mul; +use num_traits::identities::{Zero, One}; +use crate::linops::*; +use crate::norms::Dot; +use crate::types::Float; +use crate::norms::*; + +impl Linear> for Matrix +where SM: Storage, SV: Storage, + N : Dim, M : Dim, K : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator { + type Codomain = OMatrix; + + #[inline] + fn apply(&self, x : &Matrix) -> Self::Codomain { + self.mul(x) + } +} + +impl GEMV, Matrix> for Matrix +where SM: Storage, SV1: Storage, SV2: StorageMut, + N : Dim, M : Dim, K : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One + Float, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator { + + #[inline] + fn gemv(&self, y : &mut Matrix, α : E, x : &Matrix, β : E) { + Matrix::gemm(y, α, self, x, β) + } + + #[inline] + fn apply_mut<'a>(&self, y : &mut Matrix, x : &Matrix) { + self.mul_to(x, y) + } +} + +impl AXPY> for Vector +where SM: StorageMut, SV1: Storage, + M : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One + Float, + DefaultAllocator : Allocator { + + #[inline] + fn axpy(&mut self, α : E, x : &Vector, β : E) { + Matrix::axpy(self, α, x, β) + } + + #[inline] + fn copy_from(&mut self, y : &Vector) { + Matrix::copy_from(self, y) + } +} + +impl Projection for Vector +where SM: StorageMut, + M : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One + Float + RealField, + DefaultAllocator : Allocator { + #[inline] + fn proj_ball_mut(&mut self, ρ : E, _ : Linfinity) { + self.iter_mut().for_each(|v| *v = num_traits::clamp(*v, -ρ, ρ)) + } +} + +impl<'own,SV1,SV2,SM,N,M,K,E> Adjointable,Matrix> +for Matrix +where SM: Storage, SV1: Storage, SV2: Storage, + N : Dim, M : Dim, K : Dim, E : Scalar + ClosedMul + ClosedAdd + Zero + One + SimdComplexField, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator, + DefaultAllocator : Allocator { + type AdjointCodomain = OMatrix; + type Adjoint<'a> = OMatrix where SM : 'a; + + #[inline] + fn adjoint(&self) -> Self::Adjoint<'_> { + Matrix::adjoint(self) + } +} + +impl Dot,E> +for Vector +where M : Dim, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One, + S : Storage, + Si : Storage, + DefaultAllocator : Allocator { + + #[inline] + fn dot(&self, other : &Vector) -> E { + Vector::::dot(self, other) + } +} + +/// This function is [`nalgebra::EuclideanNorm::metric_distance`] without the `sqrt`. +#[inline] +fn metric_distance_squared( + /*ed: &EuclideanNorm,*/ + m1: &Matrix, + m2: &Matrix, +) -> T::SimdRealField +where + T: SimdComplexField, + R1: Dim, + C1: Dim, + S1: Storage, + R2: Dim, + C2: Dim, + S2: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, +{ + m1.zip_fold(m2, T::SimdRealField::zero(), |acc, a, b| { + let diff = a - b; + acc + diff.simd_modulus_squared() + }) +} + +// TODO: should allow different input storages in `Euclidean`. 
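+
+// With the impls above, nalgebra matrices and vectors can be passed to code written
+// against `alg_tools::linops`. An illustrative sketch (the helper is hypothetical):
+//
+//     use alg_tools::linops::*;
+//     use nalgebra::{DMatrix, DVector};
+//
+//     fn apply_twice<A : Linear<X, Codomain = X>, X>(a : &A, x : &X) -> X {
+//         a.apply(&a.apply(x))
+//     }
+//
+//     let a = DMatrix::from_row_slice(2, 2, &[0.0, 1.0,
+//                                             1.0, 0.0]);
+//     let x = DVector::from_column_slice(&[1.0, 2.0]);
+//     assert_eq!(apply_twice(&a, &x), x);  // the swap matrix squared is the identity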
+ +impl Euclidean +for Vector +where M : Dim, + S : StorageMut, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField, + DefaultAllocator : Allocator { + + type Output = OVector; + + #[inline] + fn similar_origin(&self) -> OVector { + OVector::zeros_generic(M::from_usize(self.len()), Const) + } + + #[inline] + fn norm2_squared(&self) -> E { + Vector::::norm_squared(self) + } + + #[inline] + fn dist2_squared(&self, other : &Self) -> E { + metric_distance_squared(self, other) + } +} + +impl StaticEuclidean +for Vector +where M : DimName, + S : StorageMut, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField, + DefaultAllocator : Allocator { + + #[inline] + fn origin() -> OVector { + OVector::zeros() + } +} + +impl Norm +for Vector +where M : Dim, + S : StorageMut, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField, + DefaultAllocator : Allocator { + + #[inline] + fn norm(&self, _ : L1) -> E { + LpNorm(1).norm(self) + } +} + +impl Dist +for Vector +where M : Dim, + S : StorageMut, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField, + DefaultAllocator : Allocator { + #[inline] + fn dist(&self, other : &Self, _ : L1) -> E { + LpNorm(1).metric_distance(self, other) + } +} + +impl Norm +for Vector +where M : Dim, + S : StorageMut, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField, + DefaultAllocator : Allocator { + + #[inline] + fn norm(&self, _ : L2) -> E { + LpNorm(2).norm(self) + } +} + +impl Dist +for Vector +where M : Dim, + S : StorageMut, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField, + DefaultAllocator : Allocator { + #[inline] + fn dist(&self, other : &Self, _ : L2) -> E { + LpNorm(2).metric_distance(self, other) + } +} + +impl Norm +for Vector +where M : Dim, + S : StorageMut, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField, + DefaultAllocator : Allocator { + + #[inline] + fn norm(&self, _ : Linfinity) -> E { + UniformNorm.norm(self) + } +} + +impl Dist +for Vector +where M : Dim, + S : StorageMut, + E : Float + Scalar + ClosedMul + ClosedAdd + Zero + One + RealField, + DefaultAllocator : Allocator { + #[inline] + fn dist(&self, other : &Self, _ : Linfinity) -> E { + UniformNorm.metric_distance(self, other) + } +} + +/// Helper trait to hide the symbols of `nalgebra::RealField` +/// while allowing nalgebra to be used in subroutines. 
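// Editorial note, not part of the original patch: the intent is that code
// generic over this crate's `Float` converts scalars before handing them to
// nalgebra routines and converts the results back, roughly
//
//     fn roundtrip<F : ToNalgebraRealField>(x : F) -> F {
//         F::from_nalgebra_mixed(x.to_nalgebra_mixed())
//     }
//
// with `MixedType` used where both nalgebra (`RealField`) and this crate's
// `Float` operations are needed on the same value.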
+pub trait ToNalgebraRealField : Float { + type NalgebraType : RealField; + type MixedType : RealField + Float; + + fn to_nalgebra(self) -> Self::NalgebraType; + fn to_nalgebra_mixed(self) -> Self::MixedType; + + fn from_nalgebra(t : Self::NalgebraType) -> Self; + fn from_nalgebra_mixed(t : Self::MixedType) -> Self; +} + +impl ToNalgebraRealField for f32 { + type NalgebraType = f32; + type MixedType = f32; + + #[inline] + fn to_nalgebra(self) -> Self::NalgebraType { self } + + #[inline] + fn to_nalgebra_mixed(self) -> Self::MixedType { self } + + #[inline] + fn from_nalgebra(t : Self::NalgebraType) -> Self { t } + + #[inline] + fn from_nalgebra_mixed(t : Self::MixedType) -> Self { t } + +} + +impl ToNalgebraRealField for f64 { + type NalgebraType = f64; + type MixedType = f64; + + #[inline] + fn to_nalgebra(self) -> Self::NalgebraType { self } + + #[inline] + fn to_nalgebra_mixed(self) -> Self::MixedType { self } + + #[inline] + fn from_nalgebra(t : Self::NalgebraType) -> Self { t } + + #[inline] + fn from_nalgebra_mixed(t : Self::MixedType) -> Self { t } +} + diff -r 000000000000 -r 9f27689eb130 src/nanleast.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/nanleast.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,36 @@ +use crate::types::Float; +use std::cmp::{PartialOrd,Ord,Ordering,Ordering::*}; + +/// A container for floating point numbers for sorting NaN as a least element +/// (to essentially ignore errorneous computations producing NaNs). +#[derive(Debug, Clone, Copy)] +pub struct NaNLeast(pub F); + +/// Compare floating point numbers ordering nan as the least element. + +impl Ord for NaNLeast { + #[inline] + fn cmp(&self, NaNLeast(b) : &Self) -> Ordering { + let NaNLeast(a) = self; + match a.partial_cmp(b) { + None => match (a.is_nan(), b.is_nan()) { + (true, false) => Less, + (false, true) => Greater, + _ => Equal // The case (true, true) should not occur! + } + Some(order) => order + } + } +} + +impl PartialEq for NaNLeast { + #[inline] + fn eq(&self, other : &Self) -> bool { self.cmp(other)==Equal } +} + +impl Eq for NaNLeast { } + +impl PartialOrd for NaNLeast { + #[inline] + fn partial_cmp(&self, other : &Self) -> Option { Some(self.cmp(other)) } +} diff -r 000000000000 -r 9f27689eb130 src/norms.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/norms.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,233 @@ +/*! +Norms, projections, etc. +*/ + +use crate::types::*; +use std::ops::{Mul,MulAssign,Div,DivAssign,Add,Sub,AddAssign,SubAssign,Neg}; +use serde::Serialize; +//use std::iter::Sum; + +// +// Dot products +// + +/// Space with a defined dot product. +pub trait Dot { + fn dot(&self, other : &U) -> F; +} + +//self.iter().zip(other.iter()).map(|(&x,&y)| x*y).sum() + +// +// Euclidean spaces +// + +pub trait Euclidean : Sized + Dot + + Mul>::Output> + MulAssign + + Div>::Output> + DivAssign + + Add>::Output> + + Sub>::Output> + + for<'b> Add<&'b Self, Output=>::Output> + + for<'b> Sub<&'b Self, Output=>::Output> + + AddAssign + for<'b> AddAssign<&'b Self> + + SubAssign + for<'b> SubAssign<&'b Self> + + Neg>::Output> { + type Output : Euclidean; + + /// Returns the origin of same dimensions as `self`. + fn similar_origin(&self) -> >::Output; + + /// Calculate the square of the 2-norm. + #[inline] + fn norm2_squared(&self) -> F { + self.dot(self) + } + + /// Calculate the square of the 2-norm divided by 2. + #[inline] + fn norm2_squared_div2(&self) -> F { + self.norm2_squared()/F::TWO + } + + /// Calculate the 2-norm. 
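    // Editorial note, not part of the original patch: for example, for
    // x = (3, 4) one gets x.norm2_squared() == 25, x.norm2() == 5 and
    // x.norm2_squared_div2() == 12.5.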
+ #[inline] + fn norm2(&self) -> F { + self.norm2_squared().sqrt() + } + + /// Calculate the 2-distance squared. + fn dist2_squared(&self, other : &Self) -> F; + + /// Calculate the 2-distance. + #[inline] + fn dist2(&self, other : &Self) -> F { + self.dist2_squared(other).sqrt() + } + + /// Project to the 2-ball. + #[inline] + fn proj_ball2(mut self, ρ : F) -> Self { + self.proj_ball2_mut(ρ); + self + } + + /// Project to the 2-ball in-place. + #[inline] + fn proj_ball2_mut(&mut self, ρ : F) { + let r = self.norm2(); + if r>ρ { + *self *= ρ/r + } + } +} + +/// Trait for [`Euclidean`] spaces with dimensions known at compile time. +pub trait StaticEuclidean : Euclidean { + /// Returns the origin + fn origin() -> >::Output; +} + +// +// Abstract norms +// + +#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)] +pub struct L1; + +#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)] +pub struct L2; + +#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)] +pub struct Linfinity; + +#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)] +pub struct L21; + +#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)] +pub struct HuberL1(pub F); + +#[derive(Copy,Debug,Clone,Serialize,Eq,PartialEq)] +pub struct HuberL21(pub F); + +pub trait Norm { + /// Calculate the norm + fn norm(&self, _p : Exponent) -> F; +} + +/// Indicates that a [`Norm`] is dominated by another norm (`Exponent`) on `Elem` with the +/// corresponding field `F`. +pub trait Dominated { + /// Indicates the factor $c$ for the inequality $‖x‖ ≤ C ‖x‖_p$. + fn norm_factor(&self, p : Exponent) -> F; + /// Given a norm-value $‖x‖_p$, calculates $C‖x‖_p$ such that $‖x‖ ≤ C‖x‖_p$ + #[inline] + fn from_norm(&self, p_norm : F, p : Exponent) -> F { + p_norm * self.norm_factor(p) + } +} + +pub trait Dist : Norm { + /// Calculate the distance + fn dist(&self, other : &Self, _p : Exponent) -> F; +} + +pub trait Projection : Norm + Euclidean where F : Float { + /// Project to the norm-ball. + fn proj_ball(mut self, ρ : F, q : Exponent) -> Self { + self.proj_ball_mut(ρ, q); + self + } + + /// Project to the norm-ball in-place. 
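    // Editorial note, not part of the original patch: the exponent structs
    // above (L1, L2, Linfinity, …) are zero-sized tags that select a norm at
    // the call site, e.g. for a suitable vector x:
    //
    //     let n = x.norm(L1);                   // ℓ¹ norm
    //     let d = x.dist(&y, L2);               // Euclidean distance
    //     let p = x.proj_ball(1.0, Linfinity);  // clamp each coordinate to [-1, 1]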
+ fn proj_ball_mut(&mut self, ρ : F, _q : Exponent); +} + +/*impl> Norm for E { + #[inline] + fn norm(&self, _p : L2) -> F { self.norm2() } + + fn dist(&self, other : &Self, _p : L2) -> F { self.dist2(other) } +}*/ + +impl + Norm> Projection for E { + #[inline] + fn proj_ball(self, ρ : F, _p : L2) -> Self { self.proj_ball2(ρ) } + + #[inline] + fn proj_ball_mut(&mut self, ρ : F, _p : L2) { self.proj_ball2_mut(ρ) } +} + +impl HuberL1 { + fn apply(self, xnsq : F) -> F { + let HuberL1(γ) = self; + let xn = xnsq.sqrt(); + if γ == F::ZERO { + xn + } else { + if xn > γ { + xn-γ/F::TWO + } else if xn<(-γ) { + -xn-γ/F::TWO + } else { + xnsq/(F::TWO*γ) + } + } + } +} + +impl> Norm> for E { + fn norm(&self, huber : HuberL1) -> F { + huber.apply(self.norm2_squared()) + } +} + +impl> Dist> for E { + fn dist(&self, other : &Self, huber : HuberL1) -> F { + huber.apply(self.dist2_squared(other)) + } +} + +/* +#[inline] +pub fn mean(x : V) -> V::Field where V : ValidArray { + x.iter().sum()/x.len() +} + +#[inline] +pub fn proj_nonneg_mut(x : &mut V) -> &mut V where V : ValidArray { + x.iter_mut().for_each(|&mut p| if p < 0 { *p = 0 } ); + x +} +*/ + + +// +// 2,1-norm generic implementation +// + +/* +pub trait InnerVectors { + type Item; + type Iter : Iterator; + fn inner_vectors(&self) -> &Self::Iter; +} + +pub trait InnerVectorsMut : InnerVectors { + type IterMut : Iterator; + fn inner_vectors_mut(&self) -> &mut Self::Item; +} + +impl Norm for T where T::Item : Norm { + fn norm(&self, _ : L21) -> F { + self.inner_vectors().map(|t| t.norm(L2)).sum() + } +} + +impl> Projection +for T where T::ItemMut : Projection { + fn proj_ball_mut(&mut self, _ : L21) { + self.inner_vectors_mut().for_each(|t| t.proj_ball_mut(L2)); + } +} +*/ + diff -r 000000000000 -r 9f27689eb130 src/sets.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/sets.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,108 @@ +/// Various types of sets. +use std::ops::{RangeFull,RangeFrom,Range,RangeInclusive,RangeTo,RangeToInclusive}; +use std::marker::PhantomData; +use crate::types::*; +use crate::loc::{Loc}; +use crate::norms::Dot; +use serde::Serialize; + +pub mod cube; +pub use cube::*; + +/// Trait for arbitrary sets. The parameter `U` is the element type. +pub trait Set where U : ?Sized { + /// Check for containment. + fn contains(&self, item : &U) -> bool; +} + +pub trait SetOrd : Sized { + /// Returns the smallest set of same class contain both parameters. + fn common(&self, other : &Self) -> Self; + + /// Returns the greatest set of same class contaied by n both parameter sets. + fn intersect(&self, other : &Self) -> Option; +} + +impl Set> +for Cube +where U : Num + PartialOrd + Sized { + fn contains(&self, item : &Loc) -> bool { + self.0.iter().zip(item.iter()).all(|(s, x)| s.contains(x)) + } +} + +impl Set for RangeFull { + fn contains(&self, _item : &U) -> bool { true } +} + +macro_rules! impl_ranges { + ($($range:ident),*) => { $( + impl Set + for $range + where Idx : PartialOrd, U : PartialOrd + ?Sized, Idx : PartialOrd { + #[inline] + fn contains(&self, item : &U) -> bool { $range::contains(&self, item) } + } + )* } +} + +impl_ranges!(RangeFrom,Range,RangeInclusive,RangeTo,RangeToInclusive); + +/// Halfspace. The orthogonal (dual) vectors `A` are supposed to implement [`Dot`]`` +/// for `U` the element type. 
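// Editorial note, not part of the original patch: for example, in the plane
// the half-plane { (x, y) : y ≥ 1 } is `Halfspace::new(loc![0.0, 1.0], 1.0)`,
// and `contains(&loc![3.0, 2.0])` holds since ⟨(0, 1), (3, 2)⟩ = 2 ≥ 1.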
+#[derive(Clone,Copy,Debug,Serialize,Eq,PartialEq)] +pub struct Halfspace where A : Dot, F : Float { + pub orthogonal : A, + pub offset : F, + _phantom : PhantomData, +} + +impl Halfspace where A : Dot, F : Float { + #[inline] + pub fn new(orthogonal : A, offset : F) -> Self { + Halfspace{ orthogonal : orthogonal, offset : offset, _phantom : PhantomData } + } +} + +pub trait SpannedHalfspace where F : Float { + type A : Dot; + fn spanned_halfspace(&self) -> Halfspace; +} + +// TODO: Gram-Schmidt for higher N. +impl SpannedHalfspace> for [Loc; 2] { + type A = Loc; + fn spanned_halfspace(&self) -> Halfspace> { + let (x0, x1) = (self[0], self[1]); + Halfspace::new(x1-x0, x0[0]) + } +} + +// TODO: Gram-Schmidt for higher N. +impl SpannedHalfspace> for [Loc; 2] { + type A = Loc; + fn spanned_halfspace(&self) -> Halfspace> { + let (x0, x1) = (&self[0], &self[1]); + let d = x1 - x0; + let orthog = loc![d[1], -d[0]]; // We don't normalise for efficiency + let offset = x0.dot(&orthog); + Halfspace::new(orthog, offset) + } +} + +impl Set for Halfspace where A : Dot, F : Float { + #[inline] + fn contains(&self, item : &U) -> bool { + self.orthogonal.dot(item) >= self.offset + } +} + +/// Polygons defined by `N` `Halfspace`s. +#[derive(Clone,Copy,Debug,Eq,PartialEq)] +pub struct NPolygon(pub [Halfspace; N]) where A : Dot, F : Float; + +impl Set for NPolygon where A : Dot, F : Float { + fn contains(&self, item : &U) -> bool { + self.0.iter().all(|halfspace| halfspace.contains(item)) + } +} diff -r 000000000000 -r 9f27689eb130 src/sets/cube.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/sets/cube.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,314 @@ +///! Presetnation of cubes $[a_1, b_1] × ⋯ × [a_n, b_n]$ + +use serde::ser::{Serialize, Serializer, SerializeTupleStruct}; +use crate::types::*; +use crate::loc::Loc; +use crate::sets::SetOrd; +use crate::maputil::{ + FixedLength, + FixedLengthMut, + map1, + map1_indexed, + map2, +}; + +/// A half-open `N`-cube of elements of type `U`. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct Cube(pub(super) [[U; 2]; N]); + +// Need to manually implement as [F; N] serialisation is provided only for some N. +impl Serialize for Cube +where + F: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut ts = serializer.serialize_tuple_struct("Cube", N)?; + for e in self.0.iter() { + ts.serialize_field(e)?; + } + ts.end() + } +} + +impl FixedLength for Cube { + type Iter = std::array::IntoIter<[A; 2], N>; + type Elem = [A; 2]; + #[inline] + fn fl_iter(self) -> Self::Iter { + self.0.into_iter() + } +} + +impl FixedLengthMut for Cube { + type IterMut<'a> = std::slice::IterMut<'a, [A; 2]>; + #[inline] + fn fl_iter_mut(&mut self) -> Self::IterMut<'_> { + self.0.iter_mut() + } +} + +impl<'a, A : Num, const N : usize> FixedLength for &'a Cube { + type Iter = std::slice::Iter<'a, [A; 2]>; + type Elem = &'a [A; 2]; + #[inline] + fn fl_iter(self) -> Self::Iter { + self.0.iter() + } +} + + +/// Iterator for [`Cube`] corners. 
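// Editorial note, not part of the original patch: basic `Cube` usage, assuming
// the `Set` trait from the parent module is in scope:
//
//     let cube : Cube<f64, 2> = [[0.0, 1.0], [0.0, 2.0]].into();
//     assert!(cube.contains(&loc![0.5, 1.5]));
//     assert_eq!(cube.width(), loc![1.0, 2.0]);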
+pub struct CubeCornersIter<'a, U : Num, const N : usize> { + index : usize, + cube : &'a Cube, +} + +impl<'a, U : Num, const N : usize> Iterator for CubeCornersIter<'a, U, N> { + type Item = Loc; + #[inline] + fn next(&mut self) -> Option { + if self.index >= N { + None + } else { + let i = self.index; + self.index += 1; + let arr = self.cube.map_indexed(|k, a, b| if (i>>k)&1 == 0 { a } else { b }); + Some(arr.into()) + } + } +} + +impl Cube { + #[inline] + pub fn map_indexed(&self, f : impl Fn(usize, U, U) -> T) -> [T; N] { + map1_indexed(self, |i, &[a, b]| f(i, a, b)) + } + + #[inline] + pub fn map(&self, f : impl Fn(U, U) -> T) -> [T; N] { + map1(self, |&[a, b]| f(a, b)) + } + + #[inline] + pub fn iter_coords(&self) -> std::slice::Iter<'_, [U; 2]> { + self.0.iter() + } + + #[inline] + pub fn start(&self, i : usize) -> U { + self.0[i][0] + } + + #[inline] + pub fn end(&self, i : usize) -> U { + self.0[i][1] + } + + #[inline] + pub fn span_start(&self) -> Loc { + Loc::new(self.map(|a, _b| a)) + } + + #[inline] + pub fn span_end(&self) -> Loc { + Loc::new(self.map(|_a, b| b)) + } + + #[inline] + pub fn iter_corners(&self) -> CubeCornersIter<'_, U, N> { + CubeCornersIter{ index : 0, cube : self } + } + + #[inline] + pub fn width(&self) -> Loc { + Loc::new(self.map(|a, b| b-a)) + } + + #[inline] + pub fn shift(&self, shift : &Loc) -> Self { + let mut cube = self.clone(); + for i in 0..N { + cube.0[i][0] += shift[i]; + cube.0[i][1] += shift[i]; + } + cube + } + + #[inline] + pub fn new(data : [[U; 2]; N]) -> Self { + Cube(data) + } +} + +impl Cube { + /// Returns the centre of the cube + pub fn center(&self) -> Loc { + map1(self, |&[a, b]| (a + b) / F::TWO).into() + } +} + +impl Cube { + /// Get the corners of the cube. + /// TODO: generic implementation once const-generics can be involved in + /// calculations. + #[inline] + pub fn corners(&self) -> [Loc; 2] { + let [[a, b]] = self.0; + [a.into(), b.into()] + } +} + +impl Cube { + /// Get the corners of the cube in counter-clockwise order. + /// TODO: generic implementation once const-generics can be involved in + /// calculations. + #[inline] + pub fn corners(&self) -> [Loc; 4] { + let [[a1, b1], [a2, b2]]=self.0; + [[a1, a2].into(), + [b1, a2].into(), + [b1, b2].into(), + [a1, b2].into()] + } +} + +impl Cube { + /// Get the corners of the cube. + /// TODO: generic implementation once const-generics can be involved in + /// calculations. + #[inline] + pub fn corners(&self) -> [Loc; 8] { + let [[a1, b1], [a2, b2], [a3, b3]]=self.0; + [[a1, a2, a3].into(), + [b1, a2, a3].into(), + [b1, b2, a3].into(), + [a1, b2, a3].into(), + [a1, b2, b3].into(), + [b1, b2, b3].into(), + [b1, a2, b3].into(), + [a1, a2, b3].into()] + } +} + +// TODO: Implement Add and Sub of Loc to Cube, and Mul and Div by U : Num. + +impl From<[[U; 2]; N]> for Cube { + #[inline] + fn from(data : [[U; 2]; N]) -> Self { + Cube(data) + } +} + +impl From> for [[U; 2]; N] { + #[inline] + fn from(Cube(data) : Cube) -> Self { + data + } +} + + +impl Cube where U : Num + PartialOrd { + /// Checks whether the cube is non-degenerate, i.e., the start coordinate + /// of each axis is strictly less than the end coordinate. + #[inline] + pub fn nondegenerate(&self) -> bool { + self.0.iter().all(|range| range[0] < range[1]) + } + + /// Checks whether the cube intersects some `other` cube. + /// Matching boundary points are not counted, so `U` is ideally a [`Float`]. 
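    // Editorial note, not part of the original patch: with the half-open
    // convention, adjacent cubes that share only a boundary point do not
    // intersect, e.g.
    //
    //     let a = Cube::new([[0.0, 1.0]]);
    //     let b = Cube::new([[1.0, 2.0]]);
    //     assert!(!a.intersects(&b));
    //     assert_eq!(a.common(&b), Cube::new([[0.0, 2.0]]));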
+ #[inline] + pub fn intersects(&self, other : &Cube) -> bool { + self.iter_coords().zip(other.iter_coords()).all(|([a1, b1], [a2, b2])| { + a1 < b2 && a2 < b1 + }) + } + + /// Checks whether the cube contains some `other` cube. + pub fn contains_set(&self, other : &Cube) -> bool { + self.iter_coords().zip(other.iter_coords()).all(|([a1, b1], [a2, b2])| { + a1 <= a2 && b1 >= b2 + }) + } + + /// Produces the point of minimum $ℓ^p$-norm within the cube `self` for any $p$-norm. + /// This is the point where each coordinate is closest to zero. + #[inline] + pub fn minnorm_point(&self) -> Loc { + let z = U::ZERO; + // As always, we assume that a ≤ b. + self.map(|a, b| { + debug_assert!(a <= b); + match (a < z, z < b) { + (false, _) => a, + (_, false) => b, + (true, true) => z + } + }).into() + } + + /// Produces the point of maximum $ℓ^p$-norm within the cube `self` for any $p$-norm. + /// This is the point where each coordinate is furthest from zero. + #[inline] + pub fn maxnorm_point(&self) -> Loc { + let z = U::ZERO; + // As always, we assume that a ≤ b. + self.map(|a, b| { + debug_assert!(a <= b); + match (a < z, z < b) { + (false, _) => b, + (_, false) => a, + // A this stage we must have a < 0 (so U must be signed), and want to check + // whether |a| > |b|. We can do this without assuming U to actually implement + // `Neg` by comparing whether 0 > a + b. + (true, true) => if z > a + b { a } else { b } + } + }).into() + } +} + +macro_rules! impl_common { + ($($t:ty)*, $min:ident, $max:ident) => { $( + impl SetOrd for Cube<$t, N> { + #[inline] + fn common(&self, other : &Self) -> Self { + map2(self, other, |&[a1, b1], &[a2, b2]| { + debug_assert!(a1 <= b1 && a2 <= b2); + [a1.$min(a2), b1.$max(b2)] + }).into() + } + + #[inline] + fn intersect(&self, other : &Self) -> Option { + let arr = map2(self, other, |&[a1, b1], &[a2, b2]| { + debug_assert!(a1 <= b1 && a2 <= b2); + [a1.$max(a2), b1.$min(b2)] + }); + arr.iter().all(|&[a, b]| a >= b).then(|| arr.into()) + } + } + )* } +} + +impl_common!(u8 u16 u32 u64 u128 usize + i8 i16 i32 i64 i128 isize, min, max); +// Any NaN yields NaN +impl_common!(f32 f64, minimum, maximum); + +impl std::ops::Index for Cube { + type Output = [U; 2]; + #[inline] + fn index(&self, index: usize) -> &Self::Output { + &self.0[index] + } +} + +impl std::ops::IndexMut for Cube { + #[inline] + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + &mut self.0[index] + } +} diff -r 000000000000 -r 9f27689eb130 src/tabledump.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/tabledump.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,73 @@ + +use std::slice::Iter; +use csv; +use serde_json; +use serde::Serialize; +use crate::error::DynError; + +/// Write a CSV from an iterator +pub fn write_csv(mut iter : I, f : String) -> DynError +where I : Iterator, + J : Serialize { + let wtr = csv::WriterBuilder::new() + .has_headers(true) + .delimiter(b'\t') + .from_path(f); + wtr.and_then(|mut w|{ + //w.write_record(self.tabledump_headers())?; + iter.try_for_each(|item| w.serialize(item)) + })?; + Ok(()) +} + +/// Write a JSON from an iterator +pub fn write_json(iter : I, f : String) -> DynError +where I : Iterator, + J : Serialize { + let v : Vec = iter.collect(); + serde_json::to_writer_pretty(std::fs::File::create(f)?, &v)?; + Ok(()) +} + +/// Helper trait for dumping data in a CSV or JSON file. +pub trait TableDump<'a> +where ::Item : Serialize { + /// Iterator over the rows + type Iter : Iterator; + + // Return the headers of the CSV file. 
+ //fn tabledump_headers(&'a self) -> Vec; + + /// Return an iterator over the rows of the CSV file. + fn tabledump_entries(&'a self) -> Self::Iter; + + /// Write a CSV file. + fn write_csv(&'a self, f : String) -> DynError { + write_csv(self.tabledump_entries(), f) + } + + /// Write mapped CSV. This is a workaround to rust-csv not supporting struct flattening + fn write_csv_mapped(&'a self, f : String, g : G) -> DynError + where D : Serialize, + G : FnMut(::Item) -> D { + write_csv(self.tabledump_entries().map(g), f) + } + + /// Write a JSON file. + fn write_json(&'a self, f : String) -> DynError { + write_json(self.tabledump_entries(), f) + } +} + +impl<'a, T : Serialize + 'a> TableDump<'a> for [T] { + type Iter = Iter<'a, T>; + + // fn tabledump_headers(&'a self) -> Vec { + // vec!["value".into()] + // } + + fn tabledump_entries(&'a self) -> Self::Iter { + self.iter() + } +} + diff -r 000000000000 -r 9f27689eb130 src/tuple.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/tuple.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,69 @@ +// +// Tuple tools +// + +/// A trait for removing the first item from a tuple and returning the rest. +/// Only implemented for a few low dimensions. +pub trait TupleOps : Sized { + type Head; + type Tail; + type Nested; + + /// Remove the first item from a tuple and returning the rest. + fn tail(self) -> Self::Tail { self.split_tail().1 } + fn split_tail(self) -> (Self::Head, Self::Tail); + fn nest(self) -> Self::Nested; + fn from_nested(nested : Self::Nested) -> Self; + fn from_head_tail(head : Self::Head, tail : Self::Tail) -> Self; +} + +macro_rules! nest { + ($t1:ident) => { ($t1,) }; + ($t1:ident $($ts:ident)+) => { ($t1, nest!($($ts)+)) }; +} + +macro_rules! impl_tuple_ops { + (@do $type1:ident $($types:ident)*, $var1:ident $($vars:ident)*) => { + impl<$type1, $($types),*> TupleOps for ($type1, $($types),*) { + type Head = $type1; + type Tail = ($($types,)*); + type Nested = nest!($type1 $($types)*); + + #[inline] + fn split_tail(self) -> (Self::Head, Self::Tail) { + let ($var1, $($vars),*) = self; + ($var1, ($($vars,)*)) + } + + #[inline] + fn nest(self) -> Self::Nested { + let ($var1, $($vars),*) = self; + nest!($var1 $($vars)*) + } + + #[inline] + fn from_nested(nested : Self::Nested) -> Self { + let nest!($var1 $($vars)*) = nested; + ($var1, $($vars),*) + } + + #[inline] + fn from_head_tail($var1 : Self::Head, tail : Self::Tail) -> Self { + let ($($vars,)*) = tail; + ($var1, $($vars),*) + } + } + }; + ($type1:ident $($types:ident)+, $var1:ident $($vars:ident)+) => { + impl_tuple_ops!(@do $type1 $($types)+, $var1 $($vars)+); + impl_tuple_ops!($($types)+, $($vars)+); + }; + ($type1:ident, $var1:ident) => { + impl_tuple_ops!(@do $type1, $var1); + }; +} + +impl_tuple_ops!(A B C D E F G H I J K L M N O P Q R S T U V W X Y Z, + a b c d e f g h i j k l m n o p q r s t u v w x y z); + + diff -r 000000000000 -r 9f27689eb130 src/types.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/types.rs Sat Oct 22 13:47:15 2022 +0300 @@ -0,0 +1,148 @@ +///! Some useful (numerical) types and type traits + +use trait_set::trait_set; +pub use num_traits::Float as NumTraitsFloat; // needed to re-export functions. 
+pub use num_traits::cast::AsPrimitive; + +/// Typical integer type +#[allow(non_camel_case_types)] +pub type int = i64; + +/// Typical unsigned integer type +#[allow(non_camel_case_types)] +pub type uint = u64; + +/// Typical floating point number type +#[allow(non_camel_case_types)] +pub type float = f64; + +/// Casts of abstract numerical types to others via the standard `as` keyword. +pub trait CastFrom : num_traits::cast::AsPrimitive { + fn cast_from(other : T) -> Self; +} + +macro_rules! impl_casts { + ($($type:ty)*) => { $( + impl_casts!(@phase2, $type, + u8 u16 u32 u64 u128 usize + i8 i16 i32 i64 i128 isize + f32 f64); + )* }; + (@phase2, $type:ty, $($type2:ty)*) => { $( + impl CastFrom<$type2> for $type { + #[inline] + fn cast_from(other : $type2) -> Self { other as $type } + } + )* }; +} + +impl_casts!(u8 u16 u32 u64 u128 usize + i8 i16 i32 i64 i128 isize + f32 f64); + +/// Trait for numeric types +pub trait Num : 'static + Copy + num::Num + num_traits::NumAssign + + std::iter::Sum + std::iter::Product + + std::fmt::Debug + std::fmt::Display + serde::Serialize + + CastFrom + CastFrom + CastFrom + CastFrom + + CastFrom + CastFrom + + CastFrom + CastFrom + CastFrom + CastFrom + + CastFrom + CastFrom + + CastFrom + CastFrom { + + const ZERO : Self; + const ONE : Self; + const TWO : Self; + /// Generic version of ::MAX; + const RANGE_MAX : Self; + /// Generic version of ::MIN; + const RANGE_MIN : Self; +} + +/// Trait for signed numeric types +pub trait SignedNum : Num + num::Signed + std::ops::Neg {} +impl> SignedNum for U { } + +/// Trait for floating point numbers +pub trait Float : SignedNum + num::Float + From { + /// An unsigned integer that can be used for indexing operations and + /// converted to F without loss. + type CompatibleSize : CompatibleUnsigned; + + const PI : Self; + const E : Self; + const EPSILON : Self; + const SQRT_2 : Self; + const INFINITY : Self; + const NEG_INFINITY : Self; + const FRAC_2_SQRT_PI : Self; +} + +/// Trait for integers +pub trait Integer : Num + num::Integer {} + +/// Trait for unsigned integers +pub trait Unsigned : Num + Integer + num::Unsigned {} + +/// Trait for signed integers +pub trait Signed : SignedNum + Integer {} + +macro_rules! impl_num_consts { + ($($type:ty)*) => { $( + impl Num for $type { + const ZERO : Self = 0 as $type; + const ONE : Self = 1 as $type; + const TWO : Self = 2 as $type; + const RANGE_MAX : Self = <$type>::MAX; + const RANGE_MIN : Self = <$type>::MIN; + } + )* } +} + +macro_rules! 
impl_integers { + ($signed:ty : $($type:ty)*) => { $( + impl_num_consts!($type); + impl Integer for $type {} + impl $signed for $type {} + )* } +} + +impl_integers!(Signed: i8 i16 i32 i64 i128 isize); +impl_integers!(Unsigned: u8 u16 u32 u64 u128 usize); + +impl_num_consts!(f32 f64); + +impl Float for f64 { + #[cfg(any(target_pointer_width = "128", target_pointer_width = "64"))] + type CompatibleSize = u32; + #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))] + type CompatibleSize = usize; + + const PI : Self = std::f64::consts::PI; + const E : Self = std::f64::consts::E; + const EPSILON : Self = std::f64::EPSILON; + const SQRT_2 : Self = std::f64::consts::SQRT_2; + const INFINITY : Self = std::f64::INFINITY; + const NEG_INFINITY : Self = std::f64::NEG_INFINITY; + const FRAC_2_SQRT_PI : Self = std::f64::consts::FRAC_2_SQRT_PI; +} + +impl Float for f32 { + #[cfg(any(target_pointer_width = "128", target_pointer_width = "64", target_pointer_width = "32"))] + type CompatibleSize = u16; + #[cfg(any(target_pointer_width = "16"))] + type CompatibleSize = usize; + + const PI : Self = std::f32::consts::PI; + const E : Self = std::f32::consts::E; + const EPSILON : Self = std::f32::EPSILON; + const SQRT_2 : Self = std::f32::consts::SQRT_2; + const INFINITY : Self = std::f32::INFINITY; + const NEG_INFINITY : Self = std::f32::NEG_INFINITY; + const FRAC_2_SQRT_PI : Self = std::f32::consts::FRAC_2_SQRT_PI; +} + +trait_set! { + pub trait CompatibleUnsigned = Unsigned + Into; + pub trait CompatibleSigned = Signed + Into; +}
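// Editorial sketch, not part of the original patch: how the generic numeric
// traits above are meant to be used in code that is generic over the scalar
// type. `Float` below is this crate's trait (with the ZERO/ONE/TWO and PI/E
// constants), not `num_traits::Float`.
#[allow(dead_code)]
fn logistic<F : Float>(x : F) -> F {
    // 1 / (1 + e^{-x}), written using the associated constants only.
    F::ONE / (F::ONE + F::E.powf(-x))
}

#[allow(dead_code)]
fn cast_sketch() {
    // CastFrom mirrors the `as` keyword for code generic over Num types.
    let n : usize = 300;
    assert_eq!(f64::cast_from(n), 300.0);
    assert_eq!(u8::cast_from(n), 44);      // wraps exactly like `n as u8`
}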