diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 96c5fd41..ada02e08 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -86,7 +86,7 @@ jobs: publish: name: Publish to crates.io runs-on: ubuntu-latest - if: ${{ startsWith(github.ref, 'refs/tags/v') || github.event_name == 'workflow_dispatch' }} + if: ${{ startsWith(github.ref, 'refs/tags/v') || (github.event_name == 'workflow_dispatch' && github.ref == 'refs/heads/main') }} needs: [check_format, build_workspace, build_lib_all_features, build_lib_no_default_features] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/pysplashsurf_CI.yml b/.github/workflows/pysplashsurf_CI.yml index 06596c0b..e93370a9 100644 --- a/.github/workflows/pysplashsurf_CI.yml +++ b/.github/workflows/pysplashsurf_CI.yml @@ -29,7 +29,7 @@ jobs: - uses: actions/checkout@v3 - uses: moonrepo/setup-rust@v1 - run: | - cargo run --bin stub_gen + cargo run --bin stub_gen --no-default-features working-directory: pysplashsurf - name: Upload stub as artifact uses: actions/upload-artifact@v4 @@ -316,7 +316,7 @@ jobs: publish: name: Publish to PyPI runs-on: ubuntu-latest - if: ${{ startsWith(github.ref, 'refs/tags/v') || github.event_name == 'workflow_dispatch' }} + if: ${{ startsWith(github.ref, 'refs/tags/v') || (github.event_name == 'workflow_dispatch' && github.ref == 'refs/heads/main') }} needs: [linux_wheels, macos_wheels, windows_wheels, sdist, tests, docs] steps: - uses: actions/download-artifact@v4 @@ -426,7 +426,7 @@ jobs: path: dist/ - run: pip install dist/${{ needs.build_wheel_dev.outputs.filename }} - name: Install splashsurf CLI - run: cargo install splashsurf + run: cargo install splashsurf --path ./splashsurf - name: Run pytest uses: pavelzw/pytest-action@v2 with: diff --git a/CHANGELOG.md b/CHANGELOG.md index d6991656..398534d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,18 @@ The following changes are present in the `main` branch of the repository and are 
not yet part of a release: - - N/A + - Py: Major refactor of the Python bindings, interface is simplified and more "pythonic" + - Merged distinct F64/F32 classes and functions and infer data type automatically + - Nearly all inputs and outputs are now zero-copy (e.g. mesh vertices and faces can be accessed as attributes without copies) + - Py: Add a function for a plain marching cubes reconstruction without any SPH interpolation + - Lib: Add support for "dense" density maps (borrowed & owned) as input for the marching cubes triangulation, useful for the Python bindings + - Lib: Enforce that `Index` types are signed integers implementing the `num_traits::Signed` trait. Currently, the reconstruction does not work (correctly) with unsigned integers. + - Lib: Make most fields of `SurfaceReconstruction` public + - CLI: Add some tests for the `reconstruction_pipeline` function + - CLI: Fix post-processing when particle AABB filtering is enabled + - Lib: Support subdomain "ghost particle" margins to be up to the size of the subdomain itself (previously limited to half the size) + - CLI/Lib: Option to automatically disable subdomain decomposition for very small grids + - Lib: Support for non-owned data in `MeshAttribute`, avoids copies in CLI and Python package ## Version 0.12.0 diff --git a/Cargo.lock b/Cargo.lock index 121b6a14..93d1efd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,9 +135,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "bitflags" -version = "2.9.2" +version = "2.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29" +checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d" [[package]] name = "bumpalo" @@ -176,9 +176,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "camino" -version = "1.1.11" +version = "1.1.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d07aa9a93b00c76f71bc35d598bed923f6d4f3a9ca5c24b7737ae1a292841c0" +checksum = "dd0b03af37dad7a14518b7691d81acb0f8222604ad3d1b02f6b4bed5188c0cd5" dependencies = [ "serde", ] @@ -213,10 +213,11 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.33" +version = "1.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee0f8803222ba5a7e2777dd72ca451868909b1ac410621b676adf07280e9b5f" +checksum = "590f9024a68a8c40351881787f1934dc11afd69090f5edb6831464694d836ea3" dependencies = [ + "find-msvc-tools", "shlex", ] @@ -269,9 +270,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.45" +version = "4.5.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc0e74a703892159f5ae7d3aac52c8e6c392f5ae5f359c70b5881d60aaac318" +checksum = "2c5e4fcf9c21d2e544ca1ee9d8552de13019a42aa7dbf32747fa7aaf1df76e57" dependencies = [ "clap_builder", "clap_derive", @@ -279,9 +280,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.44" +version = "4.5.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e7f4214277f3c7aa526a59dd3fbe306a370daee1f8b7b8c987069cd8e888a8" +checksum = "fecb53a0e6fcfb055f686001bc2e2592fa527efaf38dbe81a6a9563562e57d41" dependencies = [ "anstream", "anstyle", @@ -471,6 +472,12 @@ dependencies = [ "log", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e178e4fba8a2726903f6ba98a6d221e76f9c12c650d5dc0e6afdc50677b49650" + [[package]] name = "flate2" version = "1.1.2" @@ -677,9 +684,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = 
"f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" dependencies = [ "equivalent", "hashbrown 0.15.5", @@ -1019,9 +1026,9 @@ dependencies = [ [[package]] name = "numpy" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f1dee9aa8d3f6f8e8b9af3803006101bb3653866ef056d530d53ae68587191" +checksum = "9b2dba356160b54f5371b550575b78130a54718b4c6e46b3f33a6da74a27e78b" dependencies = [ "libc", "ndarray", @@ -1199,9 +1206,9 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8970a78afe0628a3e3430376fc5fd76b6b45c4d43360ffd6cdd40bdde72b682a" +checksum = "7ba0117f4212101ee6544044dae45abe1083d30ce7b29c4b5cbdfa2354e07383" dependencies = [ "anyhow", "indoc", @@ -1217,19 +1224,18 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458eb0c55e7ece017adeba38f2248ff3ac615e53660d7c71a238d7d2a01c7598" +checksum = "4fc6ddaf24947d12a9aa31ac65431fb1b851b8f4365426e182901eabfb87df5f" dependencies = [ - "once_cell", "target-lexicon", ] [[package]] name = "pyo3-ffi" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7114fe5457c61b276ab77c5055f206295b812608083644a5c5b2640c3102565c" +checksum = "025474d3928738efb38ac36d4744a74a400c901c7596199e20e45d98eb194105" dependencies = [ "libc", "pyo3-build-config", @@ -1237,9 +1243,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8725c0a622b374d6cb051d11a0983786448f7785336139c3c94f5aa6bef7e50" +checksum = "2e64eb489f22fe1c95911b77c44cc41e7c19f3082fc81cce90f657cdc42ffded" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -1249,9 +1255,9 @@ 
dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4109984c22491085343c05b0dbc54ddc405c3cf7b4374fc533f5c3313a572ccc" +checksum = "100246c0ecf400b475341b8455a9213344569af29a3c841d29270e53102e0fcf" dependencies = [ "heck", "proc-macro2", @@ -1262,9 +1268,9 @@ dependencies = [ [[package]] name = "pyo3-stub-gen" -version = "0.12.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "650d9624b551894664cc95867ccfe4fd814a5a225c8fe3a75194a3ae51caae1d" +checksum = "b93cd67bcfbf726f81cd5d5f2cc85a69e089b4eaa11bb41a6514ad1783fb9355" dependencies = [ "anyhow", "chrono", @@ -1284,9 +1290,9 @@ dependencies = [ [[package]] name = "pyo3-stub-gen-derive" -version = "0.12.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73947c71903f0e3e31a302a350567594063c75ac155031e40519721429898649" +checksum = "3f2933be64abedb32a666273e843a1c949e957c18071cf52d543daf4adb2b4e9" dependencies = [ "heck", "proc-macro2", @@ -1303,6 +1309,7 @@ dependencies = [ "ndarray", "numpy", "pyo3", + "pyo3-build-config", "pyo3-stub-gen", "splashsurf", "splashsurf_lib", @@ -1409,9 +1416,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick", "memchr 2.7.5", @@ -1421,9 +1428,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" dependencies = [ 
"aho-corasick", "memchr 2.7.5", @@ -1432,9 +1439,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" [[package]] name = "rstar" @@ -1878,11 +1885,11 @@ dependencies = [ [[package]] name = "wasi" -version = "0.14.2+wasi-0.2.4" +version = "0.14.3+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] @@ -2181,21 +2188,18 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr 2.7.5", ] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-bindgen" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags", -] +checksum = "052283831dbae3d879dc7f51f3d92703a316ca49f91540417d38591826127814" [[package]] name = "xz2" diff --git a/pysplashsurf/Cargo.lock b/pysplashsurf/Cargo.lock deleted file mode 100644 index 773be723..00000000 --- a/pysplashsurf/Cargo.lock +++ /dev/null @@ -1,1920 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 4 - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr 2.7.5", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "anstream" -version = "0.6.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" - -[[package]] -name = "anstyle-parse" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" -dependencies = [ - "anstyle", - "once_cell_polyfill", - "windows-sys", -] - -[[package]] -name = "any_ascii" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70033777eb8b5124a81a1889416543dddef2de240019b674c81285a2635a7e1e" - -[[package]] -name = "anyhow" -version = "1.0.98" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" - -[[package]] -name = "approx" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" -dependencies = [ - "num-traits", -] - -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - -[[package]] -name = "autocfg" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "bitflags" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" - -[[package]] -name = "bumpalo" -version = "3.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" - -[[package]] -name = "bytecount" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" - -[[package]] -name = "bytemuck" -version = "1.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" - -[[package]] -name = "bytemuck_derive" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "camino" -version = "1.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo-platform" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" -dependencies = [ - "camino", - "cargo-platform", - "semver", - "serde", - "serde_json", -] - -[[package]] -name = "cargo_metadata" -version = "0.19.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" -dependencies = [ - "camino", - "cargo-platform", - "semver", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "cc" -version = "1.2.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" -dependencies = [ - "shlex", -] - -[[package]] -name = "cfg-if" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" - -[[package]] -name = "chrono" -version = "0.4.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "wasm-bindgen", - "windows-link", -] - -[[package]] -name = "clap" -version = "4.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_derive" -version = "4.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "clap_lex" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" - -[[package]] -name = "colorchoice" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" - -[[package]] -name = "console" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" -dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "unicode-width", - "windows-sys", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "crc32fast" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "dashmap" -version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core", -] - -[[package]] -name = "either" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" - -[[package]] -name = "encode_unicode" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "errno" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" -dependencies = [ - "libc", - "windows-sys", -] - -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", -] - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "fern" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4316185f709b23713e41e3195f90edef7fb00c3ed4adc79769cf09cc762a3b29" -dependencies = [ - "log", -] - -[[package]] -name = "flate2" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.11.1+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.3.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" -dependencies = [ - "cfg-if", - "libc", - "r-efi", - "wasi 0.14.2+wasi-0.2.4", -] - -[[package]] -name = "glob" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" - -[[package]] -name = "hash32" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" -dependencies = [ - "byteorder", -] - -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" - -[[package]] -name = "hashbrown" -version = "0.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" - -[[package]] -name = "heapless" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" -dependencies = [ - "hash32", - "stable_deref_trait", -] - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "iana-time-zone" -version = "0.1.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "log", - "wasm-bindgen", - "windows-core", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "indexmap" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" -dependencies = [ - "equivalent", - "hashbrown 0.15.4", -] - -[[package]] -name = "indicatif" -version = "0.17.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" -dependencies = [ - "console", - "number_prefix", - "portable-atomic", - "unicode-width", - "web-time", -] - -[[package]] -name = "indoc" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" - -[[package]] -name = "inventory" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab08d7cd2c5897f2c949e5383ea7c7db03fb19130ffcfbf7eda795137ae3cb83" -dependencies = [ - "rustversion", -] - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" - -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name 
= "js-sys" -version = "0.3.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" -dependencies = [ - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "lexical-sort" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c09e4591611e231daf4d4c685a66cb0410cc1e502027a20ae55f2bb9e997207a" -dependencies = [ - "any_ascii", -] - -[[package]] -name = "libc" -version = "0.2.172" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" - -[[package]] -name = "libm" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" - -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - -[[package]] -name = "linux-raw-sys" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" - -[[package]] -name = "lock_api" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" - -[[package]] -name = "lz4_flex" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05304f8e67dfc93d1b4b990137fd1a7a4c6ad44b60a9c486c8c4486f9d2027ae" - -[[package]] -name = "lzma-sys" -version = "0.1.20" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - -[[package]] -name = "matrixmultiply" -version = "0.3.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06de3016e9fae57a36fd14dba131fccf49f74b40b7fbdb472f96e361ec71a08" -dependencies = [ - "autocfg", - "rawpointer", -] - -[[package]] -name = "memchr" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a" -dependencies = [ - "libc", -] - -[[package]] -name = "memchr" -version = "2.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" - -[[package]] -name = "memoffset" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" -dependencies = [ - "autocfg", -] - -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", -] - -[[package]] -name = "nalgebra" -version = "0.33.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b" -dependencies = [ - "approx", - "bytemuck", - "matrixmultiply", - "nalgebra-macros", - "num-complex", - "num-rational", - "num-traits", - "rand", - "rand_distr", - "simba", - "typenum", -] - -[[package]] -name = "nalgebra-macros" -version = "0.2.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "ndarray" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841" -dependencies = [ - "matrixmultiply", - "num-complex", - "num-integer", - "num-traits", - "portable-atomic", - "portable-atomic-util", - "rawpointer", -] - -[[package]] -name = "nom" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05aec50c70fd288702bcd93284a8444607f3292dbdf2a30de5ea5dcdbe72287b" -dependencies = [ - "memchr 1.0.2", -] - -[[package]] -name = "nom" -version = "8.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" -dependencies = [ - "memchr 2.7.5", -] - -[[package]] -name = "num-bigint" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" -dependencies = [ - "num-integer", - "num-traits", -] - -[[package]] -name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-derive" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "num-integer" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" -dependencies = [ - 
"num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", - "libm", -] - -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - -[[package]] -name = "numeric_literals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "095aa67b0b9f2081746998f4f17106bdb51d56dc8c211afca5531b92b83bf98a" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "numpy" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f1dee9aa8d3f6f8e8b9af3803006101bb3653866ef056d530d53ae68587191" -dependencies = [ - "libc", - "ndarray", - "num-complex", - "num-integer", - "num-traits", - "pyo3", - "pyo3-build-config", - "rustc-hash", -] - -[[package]] -name = "once_cell" -version = "1.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "once_cell_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" - -[[package]] -name = "parking_lot" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" -dependencies = [ - "lock_api", - "parking_lot_core", -] 
- -[[package]] -name = "parking_lot_core" -version = "0.9.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "peg" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f76678828272f177ac33b7e2ac2e3e73cc6c1cd1e3e387928aa69562fa51367" -dependencies = [ - "peg-macros", - "peg-runtime", -] - -[[package]] -name = "peg-macros" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636d60acf97633e48d266d7415a9355d4389cea327a193f87df395d88cd2b14d" -dependencies = [ - "peg-runtime", - "proc-macro2", - "quote", -] - -[[package]] -name = "peg-runtime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555b1514d2d99d78150d3c799d4c357a3e2c2a8062cd108e93a06d9057629c5" - -[[package]] -name = "pkg-config" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" - -[[package]] -name = "ply-rs" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbadf9cb4a79d516de4c64806fe64ffbd8161d1ac685d000be789fb628b88963" -dependencies = [ - "byteorder", - "linked-hash-map", - "peg", - "skeptic", -] - -[[package]] -name = "portable-atomic" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" - -[[package]] -name = "portable-atomic-util" -version = "0.2.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" -dependencies = [ - "portable-atomic", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "proc-macro2" -version = "1.0.95" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "pulldown-cmark" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" -dependencies = [ - "bitflags", - "memchr 2.7.5", - "unicase", -] - -[[package]] -name = "pyo3" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f239d656363bcee73afef85277f1b281e8ac6212a1d42aa90e55b90ed43c47a4" -dependencies = [ - "anyhow", - "indoc", - "libc", - "memoffset", - "once_cell", - "portable-atomic", - "pyo3-build-config", - "pyo3-ffi", - "pyo3-macros", - "unindent", -] - -[[package]] -name = "pyo3-build-config" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755ea671a1c34044fa165247aaf6f419ca39caa6003aee791a0df2713d8f1b6d" -dependencies = [ - "once_cell", - "target-lexicon", -] - -[[package]] -name = "pyo3-ffi" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc95a2e67091e44791d4ea300ff744be5293f394f1bafd9f78c080814d35956e" -dependencies = [ - "libc", - "pyo3-build-config", -] - -[[package]] -name = "pyo3-macros" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a179641d1b93920829a62f15e87c0ed791b6c8db2271ba0fd7c2686090510214" -dependencies = [ - "proc-macro2", - "pyo3-macros-backend", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "pyo3-macros-backend" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dff85ebcaab8c441b0e3f7ae40a6963ecea8a9f5e74f647e33fcf5ec9a1e89e" -dependencies = [ - "heck", - "proc-macro2", - "pyo3-build-config", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "pyo3-stub-gen" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da99110990aded329ea6e5e6567bcab1577a2109253cebbd54d23cd61951752" -dependencies = [ - "anyhow", - "cargo_metadata 0.19.2", - "chrono", - "either", - "indexmap", - "inventory", - "itertools 0.13.0", - "log", - "maplit", - "num-complex", - "numpy", - "pyo3", - "pyo3-build-config", - "pyo3-stub-gen-derive", - "semver", - "serde", - "toml", -] - -[[package]] -name = "pyo3-stub-gen-derive" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a036cb01c21f3014989614036a69f1467bfbfde608a37f98eaefb016b1abfe" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "pysplashsurf" -version = "0.11.0" -dependencies = [ - "anyhow", - "bytemuck", - "fxhash", - "log", - "ndarray", - "numpy", - "pyo3", - "pyo3-stub-gen", - "rayon", - "splashsurf", - "splashsurf_lib", -] - -[[package]] -name = "quick-xml" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8533f14c8382aaad0d592c812ac3b826162128b65662331e1127b45c3d18536b" -dependencies = [ - "memchr 2.7.5", - "serde", -] - -[[package]] -name = "quote" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "r-efi" -version = "5.2.0" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.16", -] - -[[package]] -name = "rand_distr" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" -dependencies = [ - "num-traits", - "rand", -] - -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - -[[package]] -name = "rayon" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "redox_syscall" -version = "0.5.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" -dependencies = [ - "aho-corasick", - "memchr 2.7.5", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" -dependencies = [ - "aho-corasick", - "memchr 2.7.5", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" - -[[package]] -name = "rstar" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "421400d13ccfd26dfa5858199c30a5d76f9c54e0dba7575273025b43c5175dbb" -dependencies = [ - "heapless", - "num-traits", - "smallvec", -] - -[[package]] -name = "rustc-hash" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" - -[[package]] -name = "rustix" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" -dependencies = [ - "bitflags", - "errno", - "libc", - "linux-raw-sys", - "windows-sys", -] - -[[package]] -name = "rustversion" -version = "1.0.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "safe_arch" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" -dependencies = [ - "bytemuck", -] - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "semver" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" -dependencies = [ - "serde", -] - -[[package]] -name = "serde" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "serde_json" -version = "1.0.140" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" -dependencies = [ - "itoa", - "memchr 2.7.5", - "ryu", - "serde", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - -[[package]] -name 
= "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "simba" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa" -dependencies = [ - "approx", - "num-complex", - "num-traits", - "paste", - "wide", -] - -[[package]] -name = "skeptic" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" -dependencies = [ - "bytecount", - "cargo_metadata 0.14.2", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", -] - -[[package]] -name = "smallvec" -version = "1.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" - -[[package]] -name = "splashsurf" -version = "0.11.0" -dependencies = [ - "anyhow", - "bytemuck", - "chrono", - "clap", - "fern", - "indicatif", - "lexical-sort", - "log", - "once_cell", - "parking_lot", - "rayon", - "regex", - "splashsurf_lib", - "walkdir", -] - -[[package]] -name = "splashsurf_lib" -version = "0.11.0" -dependencies = [ - "anyhow", - "arrayvec", - "bitflags", - "bytemuck", - "bytemuck_derive", - "chrono", - "clap", - "dashmap", - "fern", - "flate2", - "fxhash", - "itertools 0.14.0", - "log", - "nalgebra", - "nom 8.0.0", - "num-integer", - "num-traits", - "numeric_literals", - "parking_lot", - "ply-rs", - "rayon", - "rstar", - "serde_json", - "simba", - "thiserror", - "thread_local", - "vtkio", -] - -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "strsim" -version = "0.11.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.102" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6397daf94fa90f058bd0fd88429dd9e5738999cca8d701813c80723add80462" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "target-lexicon" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" - -[[package]] -name = "tempfile" -version = "3.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" -dependencies = [ - "fastrand", - "getrandom 0.3.3", - "once_cell", - "rustix", - "windows-sys", -] - -[[package]] -name = "thiserror" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "thread_local" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" -dependencies = [ - "cfg-if", - "once_cell", -] - -[[package]] -name = "toml" -version = "0.8.23" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - -[[package]] -name = "toml_datetime" -version = "0.6.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.22.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "toml_write", - "winnow", -] - -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - -[[package]] -name = "typenum" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" - -[[package]] -name = "unicase" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" - -[[package]] -name = "unicode-ident" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" - -[[package]] -name = "unicode-width" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" - -[[package]] -name = "unindent" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3" - -[[package]] -name = 
"utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "vtkio" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abbe89e5b97b472d57abeb02755a06d75b28d2df7d1fe3df5baf032281a65c16" -dependencies = [ - "base64", - "bytemuck", - "byteorder", - "flate2", - "lz4_flex", - "nom 3.2.1", - "num-derive", - "num-traits", - "quick-xml", - "serde", - "xz2", -] - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" -dependencies = [ - "wit-bindgen-rt", -] - -[[package]] -name = "wasm-bindgen" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" -dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - 
"log", - "proc-macro2", - "quote", - "syn 2.0.102", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "web-time" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "wide" -version = "0.7.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" -dependencies = [ - "bytemuck", - "safe_arch", -] - -[[package]] -name = "winapi-util" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "windows-core" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" -dependencies = [ - "windows-implement", - "windows-interface", - "windows-link", - "windows-result", - "windows-strings", 
-] - -[[package]] -name = "windows-implement" -version = "0.60.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "windows-interface" -version = "0.59.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "windows-link" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "winnow" -version = "0.7.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" -dependencies = [ - "memchr 2.7.5", -] - -[[package]] -name = "wit-bindgen-rt" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags", -] - -[[package]] -name = "xz2" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" -dependencies = [ - "lzma-sys", -] - -[[package]] -name = "zerocopy" -version = "0.8.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] diff --git a/pysplashsurf/Cargo.toml b/pysplashsurf/Cargo.toml index f63b184f..59c847b3 100644 --- a/pysplashsurf/Cargo.toml +++ b/pysplashsurf/Cargo.toml @@ -8,15 +8,18 @@ license.workspace = true [dependencies] splashsurf = { path = "../splashsurf" } splashsurf_lib = { path = "../splashsurf_lib" } -pyo3 = { version = "0.25.0", features = ["anyhow"] } -numpy = "0.25.0" +pyo3 = { version = "0.26", features = ["anyhow"] } +numpy = "0.26" ndarray = "0.16.1" -bytemuck = { version = "1.23.0", features = ["extern_crate_alloc"] } +bytemuck = { version = "1.23", features = ["extern_crate_alloc"] } anyhow = "1.0.98" -pyo3-stub-gen = "0.12.0" +pyo3-stub-gen = "0.13" + +[build-dependencies] +pyo3-build-config = { version = "0.26" , features = ["resolve-config"] } [features] -extension-module = ["pyo3/extension-module", "pyo3/abi3-py37"] +extension-module = ["pyo3/extension-module", "pyo3/abi3-py310"] default = ["extension-module"] [lib] diff --git a/pysplashsurf/README.md b/pysplashsurf/README.md index a0504d2b..b5fb1533 100644 --- a/pysplashsurf/README.md +++ b/pysplashsurf/README.md @@ -2,11 +2,15 @@ ![splashsurf 
logo](https://raw.githubusercontent.com/InteractiveComputerGraphics/splashsurf/main/logos/logo_small.svg "splashsurf") +![PyPI - Version](https://img.shields.io/pypi/v/pysplashsurf) +![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pysplashsurf) + + pySplashsurf provides Python bindings for `splashsurf`, an open source surface reconstruction library for particle data from SPH simulations. Detailed information on the surface reconstruction and library itself and its API can be found on the [project website (splashsurf.physics-simulation.org)](https://splashsurf.physics-simulation.org/) or the [main repository](https://github.com/InteractiveComputerGraphics/splashsurf). ## Installation -Requires Python version 3.7+ +Requires Python version 3.10+ ``` pip install pysplashsurf ``` @@ -41,9 +45,11 @@ import meshio import numpy as np import pysplashsurf +# Load particles from mesh file mesh = meshio.read("input.vtk") particles = np.array(mesh.points, dtype=np.float64) +# Reconstruct the points/particles with some post-processing mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( particles, particle_radius=0.025, @@ -61,8 +67,9 @@ mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( subdomain_num_cubes_per_dim=64, output_mesh_smoothing_weights=True ) - -pysplashsurf.write_to_file(mesh_with_data, "output.vtk") + +# Write the mesh with attributes to file using meshio +mesh_with_data.write_to_file("surface.vtk") ``` The `reconstruction_pipeline` method provides (mostly) the same arguments as the splashsurf binary CLI. It may be necessary to specify the `dtype` of a function input (as done for `particles` in the example) so that the bindings know what data type to use internally. @@ -85,4 +92,4 @@ To generate the Sphinx documentation, make sure that the package is installed th The resulting HTML files will be in `pysplashsurf/pysplashsurf/docs/build/html`. 
### Stub File Generation -To automatically generate a stub file for the package, run `cargo run --bin stub_gen` from the root project folder (from `pysplashsurf/`). +To automatically generate a stub file for the package, run `cargo run --bin stub_gen --no-default-features` from the root project folder (from `pysplashsurf/`). diff --git a/pysplashsurf/build.rs b/pysplashsurf/build.rs new file mode 100644 index 00000000..8ab6ad34 --- /dev/null +++ b/pysplashsurf/build.rs @@ -0,0 +1,9 @@ +fn main() { + // Required to run cargo check or stub gen outside of maturing build + if std::env::var_os("CARGO_CFG_TARGET_OS=macos").is_some() { + pyo3_build_config::get() + .lib_dir + .clone() + .map(|lib_dir| println!("cargo:rustc-link-arg=-Wl,-rpath,{}", lib_dir)); + } +} diff --git a/pysplashsurf/pyproject.toml b/pysplashsurf/pyproject.toml index 0a206ba6..63ee1441 100644 --- a/pysplashsurf/pyproject.toml +++ b/pysplashsurf/pyproject.toml @@ -13,15 +13,12 @@ authors = [ {name = "Interactive Computer Graphics"}, {name = "Fabian Löschner"}, ] -requires-python = ">=3.7" +requires-python = ">=3.10" classifiers = [ "Programming Language :: Rust", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index 9b42767f..e4fdf457 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -1,705 +1,7 @@ from .pysplashsurf import * from . 
import bgeo -import numpy as np import sys + def run_pysplashsurf(): run_splashsurf(sys.argv) - -def push_point_attribute(self, name: str, data: np.ndarray, real_type): - """Add a point attribute to the mesh""" - if data.ndim == 2: - return self.push_point_attribute_vector_real(name, data) - - elif data.ndim == 1: - if data.dtype == np.uint64: - return self.push_point_attribute_scalar_u64(name, data) - - elif data.dtype == real_type: - return self.push_point_attribute_scalar_real(name, data) - - else: - raise ValueError("Not a valid data type, try explicitly specifying uint64 or float64") - - else: - raise ValueError("Not a valid data array") - -def push_cell_attribute(self, name: str, data: np.ndarray, real_type): - """Add a cell attribute to the mesh""" - if data.ndim == 2: - return self.push_cell_attribute_vector_real(name, data) - - elif data.ndim == 1: - if data.dtype == np.uint64: - return self.push_cell_attribute_scalar_u64(name, data) - - elif data.dtype == real_type: - return self.push_cell_attribute_scalar_real(name, data) - - else: - raise ValueError("Not a valid data type, try explicitly specifying uint64 or float64") - - else: - raise ValueError("Not a valid data array") - -TriMeshWithDataF64.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float64) -TriMeshWithDataF64.push_point_attribute.__doc__ = push_point_attribute.__doc__ -TriMeshWithDataF32.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float32) -TriMeshWithDataF32.push_point_attribute.__doc__ = push_point_attribute.__doc__ - -TriMeshWithDataF64.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float64) -TriMeshWithDataF64.push_cell_attribute.__doc__ = push_cell_attribute.__doc__ -TriMeshWithDataF32.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float32) -TriMeshWithDataF32.push_cell_attribute.__doc__ = push_cell_attribute.__doc__ - 
-MixedTriQuadMeshWithDataF64.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float64) -MixedTriQuadMeshWithDataF64.push_point_attribute.__doc__ = push_point_attribute.__doc__ -MixedTriQuadMeshWithDataF32.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float32) -MixedTriQuadMeshWithDataF32.push_point_attribute.__doc__ = push_point_attribute.__doc__ - -MixedTriQuadMeshWithDataF64.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float64) -MixedTriQuadMeshWithDataF64.push_cell_attribute.__doc__ = push_cell_attribute.__doc__ -MixedTriQuadMeshWithDataF32.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float32) -MixedTriQuadMeshWithDataF32.push_cell_attribute.__doc__ = push_cell_attribute.__doc__ - -def write_to_file(mesh_with_data, filename, file_format=None, consume_object=False): - """Write the mesh and its attributes to a file using meshio - - Parameters - ---------- - mesh: TriMeshWithDataF64 | TriMeshWithDataF32 | MixedTriQuadMeshWithDataF64 | MixedTriQuadMeshWithDataF32 - Mesh with data object to write - - filename: Any - File path for the output file - - file_format: str | None - File format for the output file, generally also derived from filename - - consume_object: bool - Flag for specifying whether the MeshWithData object should be consumed for a faster execution. - Only consumes the mesh field. 
- """ - try: - import meshio - except ImportError: - raise ImportError("meshio is not installed, please install it with with `pip install meshio` to use this function") - - mesh = mesh_with_data.take_mesh() if consume_object else mesh_with_data.mesh - - point_data = mesh_with_data.get_point_attributes() - cell_data = mesh_with_data.get_cell_attributes() - - if type(mesh) is pysplashsurf.TriMesh3dF64 or type(mesh) is pysplashsurf.TriMesh3dF32: - verts, tris = mesh.take_vertices_and_triangles() if consume_object else (mesh.vertices, mesh.triangles) - meshio.write_points_cells(filename, verts, [("triangle", tris)], point_data=point_data, cell_data=cell_data, file_format=file_format) - - else: - verts, cells = mesh.take_vertices_and_cells() if consume_object else (mesh.vertices, mesh.cells) - cells = [("triangle", list(filter(lambda x: len(x) == 3, cells))), ("quad", list(filter(lambda x: len(x) == 4, cells)))] - meshio.write_points_cells(filename, verts, cells, point_data=point_data, cell_data=cell_data, file_format=file_format) - - -def create_mesh_with_data_object(mesh): - """Create the corresponding mesh with data object to a mesh object - - Parameters - ---------- - mesh: TriMesh3dF64 | TriMesh3dF32 | MixedTriQuadMesh3dF64 | MixedTriQuadMesh3dF32 - Mesh object to convert - - Returns - ------- - TriMeshWithDataF64 | TriMeshWithDataF32 | MixedTriQuadMeshWithDataF64 | MixedTriQuadMeshWithDataF32 - Mesh with data object - """ - - if type(mesh) is TriMesh3dF64: - return TriMeshWithDataF64(mesh) - elif type(mesh) is TriMesh3dF32: - return TriMeshWithDataF32(mesh) - elif type(mesh) is MixedTriQuadMesh3dF64: - return MixedTriQuadMeshWithDataF64(mesh) - elif type(mesh) is MixedTriQuadMesh3dF32: - return MixedTriQuadMeshWithDataF32(mesh) - else: - raise ValueError("Invalid mesh type") - -def create_sph_interpolator_object(particle_positions, particle_densities, particle_rest_mass, compact_support_radius): - """Create the corresponding SPH interpolator object to a set of 
particle data - - Parameters - ---------- - particle_positions: np.ndarray - 2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...] - - particle_densities: np.ndarray - 1-dimensional array containing all particle densities - - particle_rest_mass: float - Rest mass of the particles - - compact_support_radius: float - Compact support radius of the SPH kernel - - Returns - ------- - SphInterpolatorF32 | SphInterpolatorF64 - SphInterpolator object - """ - - if particle_positions.dtype == 'float32': - return SphInterpolatorF32(particle_positions, particle_densities, particle_rest_mass, compact_support_radius) - elif particle_positions.dtype == 'float64': - return SphInterpolatorF64(particle_positions, particle_densities, particle_rest_mass, compact_support_radius) - else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particle_positions)") - -def create_aabb_object(aabb_min, aabb_max): - """Create the corresponding AABB object to a set of min and max values - - Parameters - ---------- - aabb_min: np.ndarray - Smallest corner of the axis-aligned bounding box - - aabb_max: np.ndarray - Largest corner of the axis-aligned bounding box - - Returns - ------- - Aabb3dF32 | Aabb3dF64 - Aabb object - """ - - if aabb_min.dtype == 'float32': - return Aabb3dF32(aabb_min, aabb_max) - elif aabb_min.dtype == 'float64': - return Aabb3dF64(aabb_min, aabb_max) - else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for aabb_min and aabb_max)") - -def create_aabb_object_from_points(points): - """Create the corresponding AABB object to a set of points - - Parameters - ---------- - points: np.ndarray - 2-dimensional array containing all point positions [[ax, ay, az], [bx, by, bz], ...] 
- - Returns - ------- - Aabb3dF32 | Aabb3dF64 - Aabb object - """ - - if points.dtype == 'float32': - return Aabb3dF32.from_points(points) - elif points.dtype == 'float64': - return Aabb3dF64.from_points(points) - else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)") - -def reconstruct_surface( - particles, *, - particle_radius: float = 0.025, - rest_density: float = 1000.0, - smoothing_length: float = 2.0, - cube_size: float = 0.5, - iso_surface_threshold: float = 0.6, - multi_threading: bool = True, - global_neighborhood_list: bool = False, - subdomain_grid: bool = True, - subdomain_grid_auto_disable: bool = True, - subdomain_num_cubes_per_dim: int = 64, - aabb_min = None, - aabb_max = None, -): - """Reconstruct the surface from only particle positions - - Performs a marching cubes surface construction of the fluid represented by the given particle positions - - Parameters - ---------- - particles: np.ndarray - 2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...] 
- - particle_radius: float, optional (default=0.025) - Particle radius - - rest_density: float - Rest density of the fluid - - smoothing_length: float - Smoothing length of the fluid - - cube_size: float - Size of the cubes used in the uniform grid - - iso_surface_threshold: float - Threshold for the iso surface - - multi_threading: bool - Multi-threading flag - - global_neighborhood_list: bool - Global neighborhood list flag - - subdomain_grid: bool - Enable spatial decomposition using by dividing the domain into subdomains with dense marching cube grids for efficient multi-threading - - subdomain_grid_auto_disable: bool - Whether to automatically disable the subdomain grid if the global domain is too small - - subdomain_num_cubes_per_dim: int - Each subdomain will be a cube consisting of this number of MC cube cells along each coordinate axis - - aabb_min: np.ndarray - Smallest corner of the axis-aligned bounding box - - aabb_max: np.ndarray - Largest corner of the axis-aligned bounding box - - Returns - ------- - SurfaceReconstructionF32 | SurfaceReconstructionF64 - SurfaceReconstruction object containing the reconstructed mesh and used grid - - """ - - if particles.dtype == 'float32': - return reconstruct_surface_f32(particles, particle_radius=particle_radius, rest_density=rest_density, - smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - multi_threading=multi_threading, global_neighborhood_list=global_neighborhood_list, - subdomain_grid=subdomain_grid, subdomain_grid_auto_disable=subdomain_grid_auto_disable, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, - aabb_min=aabb_min, aabb_max=aabb_max) - elif particles.dtype == 'float64': - return reconstruct_surface_f64(particles, particle_radius=particle_radius, rest_density=rest_density, - smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - multi_threading=multi_threading, 
global_neighborhood_list=global_neighborhood_list, - subdomain_grid=subdomain_grid, subdomain_grid_auto_disable=subdomain_grid_auto_disable, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, - aabb_min=aabb_min, aabb_max=aabb_max) - else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particles)") - -def marching_cubes_cleanup( - mesh, - grid, - max_iter: int = 5, - keep_vertices: bool = False -): - """Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren - - See Moore and Warren: `"Mesh Displacement: An Improved Contouring Method for Trivariate Data" `_ (1991) - or Moore and Warren: "Compact Isocontours from Sampled Data" in "Graphics Gems III" (1992). - - Parameters - ---------- - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Mesh object to simplify - - grid: UniformGridF32 | UniformGridF64 - Uniform grid object that was used to construct the mesh - - max_iter: int - Maximum number of iterations - - keep_vertices: bool - Flag to keep vertices - - Returns - ------- - list - vertex connectivity list of the simplified mesh - """ - if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: - return marching_cubes_cleanup_f32(mesh, grid, max_iter=max_iter, keep_vertices=keep_vertices) - - elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64: - return marching_cubes_cleanup_f64(mesh, grid, max_iter=max_iter, keep_vertices=keep_vertices) - - else: - raise ValueError("Invalid mesh type") - -def decimation( - mesh, - keep_vertices: bool = False -): - """Barnacle decimation - - For details see “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023). 
- - Parameters - ---------- - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Mesh object to simplify - - keep_vertices: bool - Flag to keep vertices - - Returns - ------- - list - vertex connectivity list of the simplified mesh - """ - if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: - return decimation_f32(mesh, keep_vertices=keep_vertices) - - elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64: - return decimation_f64(mesh, keep_vertices=keep_vertices) - - else: - raise ValueError("Invalid mesh type") - -def par_laplacian_smoothing_inplace( - mesh, - vertex_connectivity: list[list[int]], - iterations: int, - beta: float, - weights: list[float] -): - """Laplacian Smoothing with feature weights - - Move each vertex towards the mean position of its neighbors.\n - Factor beta in [0;1] proportional to amount of smoothing (for beta=1 each vertex is placed at the mean position).\n - Additionally, feature weights can be specified to apply a varying amount of smoothing over the mesh. 
- - Parameters - ---------- - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Mesh object to smooth - - vertex_connectivity: list[list[int]] - Vertex connectivity list - - iterations: int - Number of iterations - - beta: float - Smoothing factor - - weights: list[float] - Feature weights for the vertices - """ - - if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: - par_laplacian_smoothing_inplace_f32(mesh, vertex_connectivity, iterations, beta, weights) - - elif type(mesh) is TriMesh3dF64 or type(mesh) is TiMeshWithDataF64: - par_laplacian_smoothing_inplace_f64(mesh, vertex_connectivity, iterations, beta, weights) - - else: - raise ValueError("Invalid mesh type") - -def par_laplacian_smoothing_normals_inplace( - normals: np.ndarray, - vertex_connectivity: list[list[int]], - iterations: int -): - """Laplacian smoothing of a normal field - - Parameters - ---------- - normals: np.ndarray - 2D-Array of vertex normals to smooth - - vertex_connectivity: list[list[int]] - Vertex connectivity list - - iterations: int - Number of iterations - """ - - if normals.dtype == 'float32': - par_laplacian_smoothing_normals_inplace_f32(normals, vertex_connectivity, iterations) - - elif normals.dtype == 'float64': - par_laplacian_smoothing_normals_inplace_f64(normals, vertex_connectivity, iterations) - - else: - raise ValueError("Invalid mesh type") - -def neighborhood_search_spatial_hashing_parallel( - domain, - particle_positions: np.ndarray, - search_radius: float -): - """Performs a neighborhood search (multi-threaded implementation) - - Returns the indices of all neighboring particles in the given search radius per particle as a `list[list[int]]`. 
- - Parameters - ---------- - domain: Aabb3dF32 | Aabb3dF64 - Axis-aligned bounding box of the domain - - particle_positions: np.ndarray - 2D-Array of particle positions - - search_radius: float - Search radius - """ - - if type(domain) is Aabb3dF32: - return neighborhood_search_spatial_hashing_parallel_f32(domain, particle_positions, search_radius) - - elif type(domain) is Aabb3dF64: - return neighborhood_search_spatial_hashing_parallel_f64(domain, particle_positions, search_radius) - - else: - raise ValueError("Invalid domain type") - -def check_mesh_consistency( - grid, - mesh, *, - check_closed: bool, - check_manifold: bool, - debug: bool, -): - """Checks the consistency of the mesh (currently checks for holes, non-manifold edges and vertices) and returns a string with debug information in case of problems - - Parameters - ---------- - grid: UniformGridF32 | UniformGridF64 - Uniform grid object - - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Triangular mesh object - - check_closed: bool - Flag to check for closed mesh - - check_manifold: bool - Flag to check for manifold mesh - - debug: bool - Flag to enable debug output - """ - - if type(grid) is UniformGridF32 and (type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32): - return check_mesh_consistency_f32(grid, mesh, check_closed=check_closed, check_manifold=check_manifold, debug=debug) - - elif type(grid) is UniformGridF64 and (type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64): - return check_mesh_consistency_f64(grid, mesh, check_closed=check_closed, check_manifold=check_manifold, debug=debug) - - else: - raise ValueError("Invalid grid or mesh type") - -def convert_tris_to_quads( - mesh, *, - non_squareness_limit: float, - normal_angle_limit_rad: float, - max_interior_angle: float, -): - """Merges triangles sharing an edge to quads if they fulfill the given criteria - - Parameters - ---------- - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | 
TriMeshWithDataF64 - Triangular mesh object\n - When called with a MeshWithData Object, the resulting MixedTriQuadMeshWithData won't inherit the cell attributes from the input. - - non_squareness_limit: float - Non-squareness limit - - normal_angle_limit_rad: float - Normal angle limit in radians - - max_interior_angle: float - Maximum interior angle in radians - - Returns - ------- - MixedTriQuadMesh3dF32 | MixedTriQuadMesh3dF64 | MixedTriQuadMeshWithDataF32 | MixedTriQuadMeshWithDataF64 - Mixed triangular and quadrilateral mesh object - """ - - if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: - return convert_tris_to_quads_f32(mesh, non_squareness_limit=non_squareness_limit, normal_angle_limit_rad=normal_angle_limit_rad, max_interior_angle=max_interior_angle) - - elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64: - return convert_tris_to_quads_f64(mesh, non_squareness_limit=non_squareness_limit, normal_angle_limit_rad=normal_angle_limit_rad, max_interior_angle=max_interior_angle) - - else: - raise ValueError("Invalid mesh type") - - -def reconstruction_pipeline( - particles, *, attributes_to_interpolate=None, particle_radius, - rest_density=1000.0, smoothing_length=2.0, cube_size, - iso_surface_threshold=0.6, multi_threading=True, - check_mesh_closed=False, check_mesh_manifold=False, - check_mesh_orientation=False, check_mesh_debug=False, - mesh_smoothing_weights=False, sph_normals=False, - mesh_smoothing_weights_normalization=13.0, mesh_smoothing_iters=None, normals_smoothing_iters=None, - mesh_cleanup=False, mesh_cleanup_snap_dist=None, decimate_barnacles=False, keep_vertices=False, - compute_normals=False, output_raw_normals=False, output_raw_mesh=False, output_mesh_smoothing_weights=False, mesh_aabb_clamp_vertices=False, - subdomain_grid=True, subdomain_grid_auto_disable=True, subdomain_num_cubes_per_dim=64, aabb_min=None, aabb_max=None, mesh_aabb_min=None, mesh_aabb_max=None, - generate_quads=False, 
quad_max_edge_diag_ratio=1.75, quad_max_normal_angle=10.0, quad_max_interior_angle=135.0 -): - """Surface reconstruction based on particle positions with subsequent post-processing - - Parameters - ---------- - particles: np.ndarray - 2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...] - - attributes_to_interpolate: dict - Dictionary containing all attributes to interpolate. The keys are the attribute names and the values are the corresponding 1D/2D arrays.\n - The arrays must have the same length as the number of particles. \n - Supported array types are 2D float32/float64 arrays for vector attributes and 1D uint64/float32/float64 arrays for scalar attributes. - - particle_radius: float - Particle radius - - rest_density: float - Rest density of the fluid - - smoothing_length: float - Smoothing length of the fluid in multiples of the particle radius (compact support radius of SPH kernel will be twice the smoothing length) - - cube_size: float - Size of the cubes used for the marching cubes grid in multiples of the particle radius - - iso_surface_threshold: float - Threshold for the iso surface - - multi_threading: bool - Multi-threading flag - - check_mesh_closed: bool - Enable checking the final mesh for holes - - check_mesh_manifold: bool - Enable checking the final mesh for non-manifold edges and vertices - - check_mesh_orientation: bool - Enable checking the final mesh for inverted triangles (compares angle between vertex normals and adjacent face normals) - - check_mesh_debug: bool - Enable additional debug output for the check-mesh operations (has no effect if no other check-mesh option is enabled) - - sph_normals: bool - Flag to compute normals using SPH interpolation instead of geometry-based normals. 
- - mesh_smoothing_weights: bool - Flag to compute mesh smoothing weights\n - This implements the method from “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023). - - mesh_smoothing_weights_normalization: float - Normalization factor for the mesh smoothing weights - - mesh_smoothing_iters: int - Number of iterations for the mesh smoothing - - normals_smoothing_iters: int - Number of iterations for the normal smoothing - - mesh_cleanup: bool - Flag to perform mesh cleanup\n - This implements the method from “Compact isocontours from sampled data” (Moore, Warren; 1992) - - mesh_cleanup_snap_dist: float - If MC mesh cleanup is enabled, vertex snapping can be limited to this distance relative to the MC edge length (should be in range of [0.0,0.5]) - - decimate_barnacles: bool - Flag to perform barnacle decimation\n - For details see “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023). - - keep_vertices: bool - Flag to keep any vertices without connectivity resulting from mesh cleanup or decimation step - - compute_normals: bool - Flag to compute normals\n - If set to True, the normals will be computed and stored in the mesh. - - output_mesh_smoothing_weights: bool - Flag to store the mesh smoothing weights if smoothing weights are computed. 
- - output_raw_normals: bool - Flag to output the raw normals in addition to smoothed normals if smoothing of normals is enabled - - output_raw_mesh: bool - When true, also return the SurfaceReconstruction object with no post-processing applied - - mesh_aabb_clamp_vertices: bool - Flag to clamp the vertices of the mesh to the AABB - - subdomain_grid: bool - Enable spatial decomposition using by dividing the domain into subdomains with dense marching cube grids for efficient multi-threading - - subdomain_grid_auto_disable: bool - Whether to automatically disable the subdomain grid if the global domain is too small - - subdomain_num_cubes_per_dim: int - Each subdomain will be a cube consisting of this number of MC cube cells along each coordinate axis - - aabb_min: np.ndarray - Smallest corner of the axis-aligned bounding box - - aabb_max: np.ndarray - Largest corner of the axis-aligned bounding box - - mesh_aabb_min: np.ndarray - Smallest corner of the axis-aligned bounding box for the mesh - - mesh_aabb_max: np.ndarray - Largest corner of the axis-aligned bounding box for the mesh - - generate_quads: bool - Enable trying to convert triangles to quads if they meet quality criteria - - quad_max_edge_diag_ratio: float - Maximum allowed ratio of quad edge lengths to its diagonals to merge two triangles to a quad (inverse is used for minimum) - - quad_max_normal_angle: float - Maximum allowed angle (in degrees) between triangle normals to merge them to a quad - - quad_max_interior_angle: float - Maximum allowed vertex interior angle (in degrees) inside a quad to merge two triangles to a quad - - Returns - ------- - tuple[TriMeshWithDataF32 | TriMeshWithDataF64 | MixedTriQuadMeshWithDataF32 | MixedTriQuadMeshWithDataF64, Optional[SurfaceReconstructionF32] | Optional[SurfaceReconstructionF64]] - Mesh with data object and SurfaceReconstruction object containing the reconstructed mesh and used grid - """ - if attributes_to_interpolate is None: - attributes_to_interpolate = 
{} - - if particles.dtype == 'float32': - tri_mesh, tri_quad_mesh, reconstruction = reconstruction_pipeline_f32(particles, attributes_to_interpolate=attributes_to_interpolate, particle_radius=particle_radius, rest_density=rest_density, - smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - aabb_min=aabb_min, aabb_max=aabb_max, multi_threading=multi_threading, - subdomain_grid=subdomain_grid, subdomain_grid_auto_disable=subdomain_grid_auto_disable, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, - check_mesh_closed=check_mesh_closed, check_mesh_manifold=check_mesh_manifold, check_mesh_orientation=check_mesh_orientation, check_mesh_debug=check_mesh_debug, - mesh_cleanup=mesh_cleanup, mesh_cleanup_snap_dist=mesh_cleanup_snap_dist, decimate_barnacles=decimate_barnacles, - keep_vertices=keep_vertices, compute_normals=compute_normals, sph_normals=sph_normals, normals_smoothing_iters=normals_smoothing_iters, - mesh_smoothing_iters=mesh_smoothing_iters, mesh_smoothing_weights=mesh_smoothing_weights, mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, - output_mesh_smoothing_weights=output_mesh_smoothing_weights, output_raw_normals=output_raw_normals, output_raw_mesh=output_raw_mesh, - mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, - generate_quads=generate_quads, quad_max_edge_diag_ratio=quad_max_edge_diag_ratio, quad_max_normal_angle=quad_max_normal_angle, quad_max_interior_angle=quad_max_interior_angle) - - if tri_mesh == None: - return (tri_quad_mesh, reconstruction) - else: - return (tri_mesh, reconstruction) - - elif particles.dtype == 'float64': - tri_mesh, tri_quad_mesh, reconstruction = reconstruction_pipeline_f64(particles, attributes_to_interpolate=attributes_to_interpolate, particle_radius=particle_radius, rest_density=rest_density, - smoothing_length=smoothing_length, cube_size=cube_size, 
iso_surface_threshold=iso_surface_threshold, - aabb_min=aabb_min, aabb_max=aabb_max, multi_threading=multi_threading, - subdomain_grid=subdomain_grid, subdomain_grid_auto_disable=subdomain_grid_auto_disable, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, - check_mesh_closed=check_mesh_closed, check_mesh_manifold=check_mesh_manifold, check_mesh_orientation=check_mesh_orientation, check_mesh_debug=check_mesh_debug, - mesh_cleanup=mesh_cleanup, mesh_cleanup_snap_dist=mesh_cleanup_snap_dist, decimate_barnacles=decimate_barnacles, - keep_vertices=keep_vertices, compute_normals=compute_normals, sph_normals=sph_normals, normals_smoothing_iters=normals_smoothing_iters, - mesh_smoothing_iters=mesh_smoothing_iters, mesh_smoothing_weights=mesh_smoothing_weights, mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, - output_mesh_smoothing_weights=output_mesh_smoothing_weights, output_raw_normals=output_raw_normals, output_raw_mesh=output_raw_mesh, - mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, - generate_quads=generate_quads, quad_max_edge_diag_ratio=quad_max_edge_diag_ratio, quad_max_normal_angle=quad_max_normal_angle, quad_max_interior_angle=quad_max_interior_angle) - - if tri_mesh == None: - return (tri_quad_mesh, reconstruction) - else: - return (tri_mesh, reconstruction) - else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particles)") diff --git a/pysplashsurf/pysplashsurf/docs/requirements.txt b/pysplashsurf/pysplashsurf/docs/requirements.txt index 379f4833..4ed21552 100644 --- a/pysplashsurf/pysplashsurf/docs/requirements.txt +++ b/pysplashsurf/pysplashsurf/docs/requirements.txt @@ -5,4 +5,5 @@ meshio==5.3.5 sphinx_rtd_theme==3.0.1 numpydoc==1.8.0 myst-parser==4.0.1 +sphinx-autodoc-typehints==3.2.0 pysplashsurf diff --git a/pysplashsurf/pysplashsurf/docs/source/api.rst 
b/pysplashsurf/pysplashsurf/docs/source/api.rst index e8b95fa5..f3c4655a 100644 --- a/pysplashsurf/pysplashsurf/docs/source/api.rst +++ b/pysplashsurf/pysplashsurf/docs/source/api.rst @@ -1,36 +1,47 @@ -API -=== +API Overview +============ .. currentmodule:: pysplashsurf -Methods -------- +The main functionality of ``pysplashsurf`` is provided by the :py:func:`reconstruction_pipeline` function which implements all features of the ``splashsurf`` CLI including the surface reconstruction from particles and optional post-processing, and the :py:func:`reconstruct_surface` function which only performs the surface reconstruction itself. + +**Data types:** The functions of the package accept Python ``float`` for scalar parameters and Numpy arrays of data-type ``np.float32`` or ``np.float64`` for array inputs (e.g. particle positions). +Outputs will be of the same float precision as the input arrays. +Array-like inputs have to be contiguous (C-order) in memory. +All array-like and object type (e.g. :py:class:`Aabb3d`) inputs to a function call have to use the same float data-type. + +Functions +--------- .. autosummary:: + barnacle_decimation check_mesh_consistency convert_tris_to_quads - create_aabb_object - create_aabb_object_from_points - create_mesh_with_data_object - create_sph_interpolator_object - decimation + laplacian_smoothing_normals_parallel + laplacian_smoothing_parallel + marching_cubes marching_cubes_cleanup neighborhood_search_spatial_hashing_parallel - par_laplacian_smoothing_inplace - par_laplacian_smoothing_normals_inplace reconstruct_surface reconstruction_pipeline - write_to_file Classes ------- .. 
autosummary:: - Aabb3dF32 - MixedTriQuadMesh3dF32 - MixedTriQuadMeshWithDataF32 - SphInterpolatorF32 - SurfaceReconstructionF32 - TriMesh3dF32 - TriMeshWithDataF32 - UniformGridF32 \ No newline at end of file + Aabb3d + MeshAttribute + MeshWithData + MixedTriQuadMesh3d + NeighborhoodLists + SphInterpolator + SurfaceReconstruction + TriMesh3d + UniformGrid + VertexVertexConnectivity + +Enums +----- + +.. autosummary:: + MeshType diff --git a/pysplashsurf/pysplashsurf/docs/source/classes.rst b/pysplashsurf/pysplashsurf/docs/source/classes.rst index 8060dcfb..1405e5c9 100644 --- a/pysplashsurf/pysplashsurf/docs/source/classes.rst +++ b/pysplashsurf/pysplashsurf/docs/source/classes.rst @@ -1,42 +1,56 @@ Classes ======= -Additionally to the classes on this page, there exists a F64 version for every class which is otherwise identical to the F32 version. - -For more information on the classes, refer to the `Rust documentation `_ of splashsurf_lib. +For more information on the classes, refer to the `Rust documentation `_ of ``splashsurf_lib``. .. currentmodule:: pysplashsurf -.. autoclass:: Aabb3dF32 +.. autoclass:: SphInterpolator + :members: - See `Aabb3d `_ for more information. + See `SphInterpolator `_ for more information. + +Mesh types +---------- + +.. autoclass:: TriMesh3d + :members: + + See `TriMesh3d `_ for more information. -.. autoclass:: MixedTriQuadMesh3dF32 +.. autoclass:: MixedTriQuadMesh3d + :members: See `MixedTriQuadMesh3d `_ for more information. -.. autoclass:: MixedTriQuadMeshWithDataF32 - :exclude-members: push_point_attribute_scalar_u64, push_point_attribute_scalar_real, push_point_attribute_vector_real, push_cell_attribute_scalar_real, push_cell_attribute_scalar_u64, push_cell_attribute_vector_real +.. autoclass:: MeshWithData + :members: See `MeshWithData `_ for more information. -.. autoclass:: SphInterpolatorF32 +Helper and return types +----------------------- - See `SphInterpolator `_ for more information. +.. 
autoclass:: Aabb3d + :members: + + See `Aabb3d `_ for more information. -.. autoclass:: SurfaceReconstructionF32 +.. autoclass:: MeshType + :members: - See `SurfaceReconstruction `_ for more information. +.. autoclass:: NeighborhoodLists + :members: -.. autoclass:: TriMesh3dF32 +.. autoclass:: SurfaceReconstruction + :members: - See `TriMesh3d `_ for more information. - -.. autoclass:: TriMeshWithDataF32 - :exclude-members: push_point_attribute_scalar_u64, push_point_attribute_scalar_real, push_point_attribute_vector_real, push_cell_attribute_scalar_real, push_cell_attribute_scalar_u64, push_cell_attribute_vector_real + See `SurfaceReconstruction `_ for more information. - See `MeshWithData `_ for more information. +.. autoclass:: UniformGrid + :members: -.. autoclass:: UniformGridF32 + See `UniformGrid `_ for more information. - See `UniformGrid `_ for more information. \ No newline at end of file +.. autoclass:: VertexVertexConnectivity + :members: \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/conf.py b/pysplashsurf/pysplashsurf/docs/source/conf.py index fe315e1b..7d3e0b7c 100644 --- a/pysplashsurf/pysplashsurf/docs/source/conf.py +++ b/pysplashsurf/pysplashsurf/docs/source/conf.py @@ -10,10 +10,19 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) -import pysplashsurf +#import pysplashsurf + +import os +import sys +# #sys.path.append(os.path.abspath('..')) + +from sphinx.ext.autodoc.importer import import_module +rootpath = '/Users/floeschner/programming/splashsurf/pysplashsurf/pysplashsurf' +sys_path = list(sys.path) +sys.path.insert(0, str(rootpath)) +pysplashsurf = import_module('pysplashsurf') + +#import pysplashsurf # -- Project information ----------------------------------------------------- @@ -33,6 +42,7 @@ 'numpydoc', 'myst_parser', 'sphinx_rtd_theme', + 'sphinx_autodoc_typehints' ] source_suffix = ['.rst', '.md'] @@ -59,3 +69,12 @@ # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] + +autodoc_typehints = "both" + +always_document_param_types = True +always_use_bars_union = True +typehints_document_rtype = False +typehints_use_rtype = False +typehints_use_signature = True +typehints_use_signature_return = True diff --git a/pysplashsurf/pysplashsurf/docs/source/functions.rst b/pysplashsurf/pysplashsurf/docs/source/functions.rst new file mode 100644 index 00000000..411811a9 --- /dev/null +++ b/pysplashsurf/pysplashsurf/docs/source/functions.rst @@ -0,0 +1,26 @@ +Functions +========= + +All functions infer float precision based on the input (``np.float32`` or ``np.float64``). + +.. currentmodule:: pysplashsurf + +.. autofunction:: barnacle_decimation + +.. autofunction:: check_mesh_consistency + +.. autofunction:: convert_tris_to_quads + +.. autofunction:: laplacian_smoothing_normals_parallel + +.. autofunction:: laplacian_smoothing_parallel + +.. autofunction:: marching_cubes + +.. autofunction:: marching_cubes_cleanup + +.. autofunction:: neighborhood_search_spatial_hashing_parallel + +.. autofunction:: reconstruct_surface + +.. 
autofunction:: reconstruction_pipeline diff --git a/pysplashsurf/pysplashsurf/docs/source/index.md b/pysplashsurf/pysplashsurf/docs/source/index.md index f24350a6..a0f6ece9 100644 --- a/pysplashsurf/pysplashsurf/docs/source/index.md +++ b/pysplashsurf/pysplashsurf/docs/source/index.md @@ -3,7 +3,7 @@ ```{toctree} :caption: Table of Contents -methods -classes api +functions +classes ``` \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/methods.rst b/pysplashsurf/pysplashsurf/docs/source/methods.rst deleted file mode 100644 index 420f6a0b..00000000 --- a/pysplashsurf/pysplashsurf/docs/source/methods.rst +++ /dev/null @@ -1,34 +0,0 @@ -Methods -======= - -All methods infer float precision based on the input (32bit or 64bit). - -.. currentmodule:: pysplashsurf - -.. autofunction:: check_mesh_consistency - -.. autofunction:: convert_tris_to_quads - -.. autofunction:: create_aabb_object - -.. autofunction:: create_aabb_object_from_points - -.. autofunction:: create_mesh_with_data_object - -.. autofunction:: create_sph_interpolator_object - -.. autofunction:: decimation - -.. autofunction:: marching_cubes_cleanup - -.. autofunction:: neighborhood_search_spatial_hashing_parallel - -.. autofunction:: par_laplacian_smoothing_inplace - -.. autofunction:: par_laplacian_smoothing_normals_inplace - -.. autofunction:: reconstruct_surface - -.. autofunction:: reconstruction_pipeline - -.. 
autofunction:: write_to_file \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index d9a8f7d4..55a6e507 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -1,598 +1,407 @@ # This file is automatically generated by pyo3_stub_gen # ruff: noqa: E501, F401 +from __future__ import annotations + import builtins import numpy import numpy.typing +import os +import pathlib import typing +from enum import Enum -class Aabb3dF32: +class Aabb3d: r""" - Aabb3d wrapper + Three-dimensional axis-aligned bounding box defined by its minimum and maximum corners """ - def __new__(cls, min:typing.Sequence[builtins.float], max:typing.Sequence[builtins.float]) -> Aabb3dF32: ... - @staticmethod - def from_points(points:numpy.typing.NDArray[numpy.float32]) -> Aabb3dF32: + @property + def min(self) -> numpy.typing.NDArray[numpy.float64]: r""" - Constructs the smallest AABB fitting around all the given points + The min coordinate of the AABB """ - @staticmethod - def par_from_points(points:numpy.typing.NDArray[numpy.float32]) -> Aabb3dF32: + @property + def max(self) -> numpy.typing.NDArray[numpy.float64]: r""" - Constructs the smallest AABB fitting around all the given points, parallel version + The max coordinate of the AABB """ @staticmethod - def zeros() -> Aabb3dF32: + def from_min_max(min:typing.Sequence[builtins.float], max:typing.Sequence[builtins.float]) -> Aabb3d: r""" - Constructs a degenerate AABB with min and max set to zero + Constructs an AABB with the given min and max coordinates """ @staticmethod - def from_point(point:typing.Sequence[builtins.float]) -> Aabb3dF32: - r""" - Constructs a degenerate AABB with zero extents centered at the given point - """ - def min(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the min coordinate of the bounding box - """ - def max(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns 
the max coordinate of the bounding box - """ - def is_consistent(self) -> builtins.bool: - r""" - Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` - """ - def is_degenerate(self) -> builtins.bool: - r""" - Returns whether the AABB is degenerate in any dimension, i.e. `aabb.min()[i] == aabb.max()[i]` for any `i` - """ - def extents(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the extents of the bounding box (vector connecting min and max point of the box) - """ - def min_extent(self) -> builtins.float: - r""" - Returns the smallest scalar extent of the AABB over all of its dimensions - """ - def max_extent(self) -> builtins.float: - r""" - Returns the largest scalar extent of the AABB over all of its dimensions - """ - def centroid(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the geometric centroid of the AABB (mean of the corner points) - """ - def contains_aabb(self, other:Aabb3dF32) -> builtins.bool: + def from_points(points:numpy.typing.NDArray[typing.Any]) -> Aabb3d: r""" - Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate + Constructs the smallest AABB fitting around all the given points """ def contains_point(self, point:typing.Sequence[builtins.float]) -> builtins.bool: r""" - Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate - """ - def translate(self, vector:typing.Sequence[builtins.float]) -> None: - r""" - Translates the AABB by the given vector - """ - def center_at_origin(self) -> None: - r""" - Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) - """ - def scale_uniformly(self, scaling:builtins.float) -> None: - r""" - Multiplies a uniform, local scaling to the AABB (i.e. 
multiplying its extents as if it was centered at the origin) + Checks if the given point is inside the AABB, the AABB is considered to be half-open to its max coordinate """ - def join(self, other:Aabb3dF32) -> None: - r""" - Enlarges this AABB to the smallest AABB enclosing both itself and another AABB - """ - def join_with_point(self, point:typing.Sequence[builtins.float]) -> None: + +class MeshAttribute: + @property + def dtype(self) -> numpy.dtype: r""" - Enlarges this AABB to the smallest AABB enclosing both itself and another point + Numpy dtype of the data stored in the attribute """ - def grow_uniformly(self, margin:builtins.float) -> None: + @property + def name(self) -> builtins.str: r""" - Grows this AABB uniformly in all directions by the given scalar margin (i.e. adding the margin to min/max extents) + Name of the attribute """ - def enclosing_cube(self) -> Aabb3dF32: + @property + def data(self) -> numpy.typing.NDArray[typing.Any]: r""" - Returns the smallest cubical AABB with the same center that encloses this AABB + View of the attribute data as a numpy array """ -class Aabb3dF64: +class MeshWithData: r""" - Aabb3d wrapper + Mesh with attached point and cell attributes """ - def __new__(cls, min:typing.Sequence[builtins.float], max:typing.Sequence[builtins.float]) -> Aabb3dF64: ... 
- @staticmethod - def from_points(points:numpy.typing.NDArray[numpy.float64]) -> Aabb3dF64: - r""" - Constructs the smallest AABB fitting around all the given points - """ - @staticmethod - def par_from_points(points:numpy.typing.NDArray[numpy.float64]) -> Aabb3dF64: - r""" - Constructs the smallest AABB fitting around all the given points, parallel version - """ - @staticmethod - def zeros() -> Aabb3dF64: + @property + def dtype(self) -> numpy.dtype: r""" - Constructs a degenerate AABB with min and max set to zero + Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) """ - @staticmethod - def from_point(point:typing.Sequence[builtins.float]) -> Aabb3dF64: + @property + def nvertices(self) -> builtins.int: r""" - Constructs a degenerate AABB with zero extents centered at the given point + Number of vertices in the mesh """ - def min(self) -> numpy.typing.NDArray[numpy.float64]: + @property + def ncells(self) -> builtins.int: r""" - Returns the min coordinate of the bounding box + Number of cells (triangles or quads) in the mesh """ - def max(self) -> numpy.typing.NDArray[numpy.float64]: + @property + def mesh_type(self) -> MeshType: r""" - Returns the max coordinate of the bounding box + Type of the underlying mesh """ - def is_consistent(self) -> builtins.bool: + @property + def mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" - Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` + The wrapped mesh without associated data and attributes """ - def is_degenerate(self) -> builtins.bool: + @property + def point_attributes(self) -> dict[str, numpy.typing.NDArray]: r""" - Returns whether the AABB is degenerate in any dimension, i.e. 
`aabb.min()[i] == aabb.max()[i]` for any `i` + The attributes attached points (vertices) of the mesh """ - def extents(self) -> numpy.typing.NDArray[numpy.float64]: + @property + def cell_attributes(self) -> dict[str, numpy.typing.NDArray]: r""" - Returns the extents of the bounding box (vector connecting min and max point of the box) + The attributes attached to the cells (triangles or quads) of the mesh """ - def min_extent(self) -> builtins.float: + def __new__(cls, mesh:typing.Union[TriMesh3d, MeshWithData]) -> MeshWithData: r""" - Returns the smallest scalar extent of the AABB over all of its dimensions + Wraps an existing mesh object (either `TriMesh3d` or `MixedTriQuadMesh3d`) such that data (point and cell attributes) can be attached to it """ - def max_extent(self) -> builtins.float: + def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" - Returns the largest scalar extent of the AABB over all of its dimensions + Returns a copy of the wrapped mesh without associated data and attributes """ - def centroid(self) -> numpy.typing.NDArray[numpy.float64]: + def copy(self) -> MeshWithData: r""" - Returns the geometric centroid of the AABB (mean of the corner points) + Returns a copy (deep copy) of this mesh with its data and attributes """ - def contains_aabb(self, other:Aabb3dF64) -> builtins.bool: + def add_point_attribute(self, name:builtins.str, attribute:numpy.typing.NDArray[typing.Any]) -> None: r""" - Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate + Attaches a point attribute to the mesh + + There has to be exactly one attribute value per vertex in the mesh. 
+ As attribute data, the following numpy array types are supported: + - 1D array with shape (N,) of ``np.uint64`` + - 1D array with shape (N,) of the mesh scalar type (``np.float32`` or ``np.float64``) + - 2D array with shape (N,3) of the mesh scalar type (``np.float32`` or ``np.float64``) + The data is copied into the mesh object. """ - def contains_point(self, point:typing.Sequence[builtins.float]) -> builtins.bool: + def add_cell_attribute(self, name:builtins.str, attribute:numpy.typing.NDArray[typing.Any]) -> None: r""" - Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate + Attaches a cell attribute to the mesh + + There has to be exactly one attribute value per cell in the mesh. + As attribute data, the following numpy array types are supported: + - 1D array with shape (N,) of ``np.uint64`` + - 1D array with shape (N,) of the mesh scalar type (``np.float32`` or ``np.float64``) + - 2D array with shape (N,3) of the mesh scalar type (``np.float32`` or ``np.float64``) + The data is copied into the mesh object. """ - def translate(self, vector:typing.Sequence[builtins.float]) -> None: + def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" - Translates the AABB by the given vector + Writes the mesh and its attributes to a file using ``meshio.write_points_cells`` """ - def center_at_origin(self) -> None: + +class MixedTriQuadMesh3d: + r""" + Mixed triangle and quad surface mesh in 3D + """ + @property + def dtype(self) -> numpy.dtype: r""" - Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) + Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) """ - def scale_uniformly(self, scaling:builtins.float) -> None: + @property + def vertices(self) -> numpy.typing.NDArray[typing.Any]: r""" - Multiplies a uniform, local scaling to the AABB (i.e. 
multiplying its extents as if it was centered at the origin) + The `Nx3` array of vertex positions of the mesh """ - def join(self, other:Aabb3dF64) -> None: + def copy(self) -> MixedTriQuadMesh3d: r""" - Enlarges this AABB to the smallest AABB enclosing both itself and another AABB + Returns a copy (deep copy) of this mesh """ - def join_with_point(self, point:typing.Sequence[builtins.float]) -> None: + def get_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Enlarges this AABB to the smallest AABB enclosing both itself and another point + Returns a copy of all triangle cells of the mesh as an `Nx3` array of vertex indices """ - def grow_uniformly(self, margin:builtins.float) -> None: + def get_quads(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Grows this AABB uniformly in all directions by the given scalar margin (i.e. adding the margin to min/max extents) + Returns a copy of all quad cells of the mesh as an `Nx4` array of vertex indices """ - def enclosing_cube(self) -> Aabb3dF64: + def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" - Returns the smallest cubical AABB with the same center that encloses this AABB + Writes the mesh to a file using ``meshio.write_points_cells`` """ -class MixedTriQuadMesh3dF32: +class NeighborhoodLists: r""" - MixedTriQuadMesh3d wrapper + Per particle neighborhood lists """ - def get_vertices(self) -> numpy.typing.NDArray[numpy.float32]: + def __len__(self) -> builtins.int: r""" - Returns a copy of the `Nx3` array of vertex positions + Returns the number of particles for which neighborhood lists are stored """ - def get_cells(self) -> builtins.list[builtins.list[builtins.int]]: + def __getitem__(self, idx:builtins.int) -> builtins.list[builtins.int]: r""" - Returns a 2D list specifying the vertex indices either for a triangle or a quad + Returns the neighborhood list for the particle at the given index """ - def 
take_vertices(self) -> numpy.typing.NDArray[numpy.float32]: + def get_neighborhood_lists(self) -> builtins.list[builtins.list[builtins.int]]: r""" - Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) + Returns all stored neighborhood lists as a list of lists """ -class MixedTriQuadMesh3dF64: +class SphInterpolator: r""" - MixedTriQuadMesh3d wrapper + Interpolator of per-particle quantities to arbitrary points using SPH interpolation (with cubic kernel) """ - def get_vertices(self) -> numpy.typing.NDArray[numpy.float64]: + def __new__(cls, particle_positions:numpy.typing.NDArray[typing.Any], particle_densities:numpy.typing.NDArray[typing.Any], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolator: r""" - Returns a copy of the `Nx3` array of vertex positions + Constructs an SPH interpolator (with cubic kernels) for the given particles """ - def get_cells(self) -> builtins.list[builtins.list[builtins.int]]: + def interpolate_quantity(self, particle_quantity:numpy.typing.NDArray[typing.Any], interpolation_points:numpy.typing.NDArray[typing.Any], *, first_order_correction:builtins.bool=False) -> numpy.typing.NDArray[typing.Any]: r""" - Returns a 2D list specifying the vertex indices either for a triangle or a quad + Interpolates a scalar or vectorial per particle quantity to the given points """ - def take_vertices(self) -> numpy.typing.NDArray[numpy.float64]: + def interpolate_normals(self, interpolation_points:numpy.typing.NDArray[typing.Any]) -> numpy.typing.NDArray[typing.Any]: r""" - Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) + Interpolates surface normals (i.e. 
normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation """ -class MixedTriQuadMeshWithDataF32: +class SurfaceReconstruction: r""" - MeshWithData wrapper + Result returned by surface reconstruction functions with surface mesh and other data """ - def __new__(cls, mesh:MixedTriQuadMesh3dF32) -> MixedTriQuadMeshWithDataF32: ... - def get_mesh(self) -> MixedTriQuadMesh3dF32: - r""" - Returns a copy of the contained mesh - """ - def take_mesh(self) -> MixedTriQuadMesh3dF32: - r""" - Returns the contained mesh by moving it out of this object (zero copy) - """ - def par_clamp_with_aabb(self, aabb:Aabb3dF32, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> MixedTriQuadMeshWithDataF32: - r""" - Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - """ - def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: ... - def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: ... 
- def get_point_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh vertex attribute by name - """ - def get_cell_attribute(self, name:builtins.str) -> typing.Any: + @property + def grid(self) -> UniformGrid: r""" - Get mesh cell attribute by name + The marching cubes grid parameters used for the surface reconstruction """ - def get_point_attributes(self) -> dict: + @property + def particle_densities(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: r""" - Get all point attributes in a python dictionary + The global array of particle densities (`None` if they were only computed locally) """ - def get_cell_attributes(self) -> dict: + @property + def particle_inside_aabb(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: r""" - Get all cell attributes in a python dictionary + A boolean array indicating whether each particle was inside the AABB used for the reconstruction (`None` if no AABB was set) """ - def get_point_attribute_keys(self) -> list: + @property + def particle_neighbors(self) -> typing.Optional[NeighborhoodLists]: r""" - Get all registered point attribute names + The global neighborhood lists per particle (`None` if they were only computed locally) """ - def get_cell_attribute_keys(self) -> list: + @property + def mesh(self) -> TriMesh3d: r""" - Get all registered cell attribute names + The reconstructed triangle mesh """ -class MixedTriQuadMeshWithDataF64: +class TriMesh3d: r""" - MeshWithData wrapper + Triangle surface mesh in 3D """ - def __new__(cls, mesh:MixedTriQuadMesh3dF64) -> MixedTriQuadMeshWithDataF64: ... 
- def get_mesh(self) -> MixedTriQuadMesh3dF64: - r""" - Returns a copy of the contained mesh - """ - def take_mesh(self) -> MixedTriQuadMesh3dF64: - r""" - Returns the contained mesh by moving it out of this object (zero copy) - """ - def par_clamp_with_aabb(self, aabb:Aabb3dF64, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> MixedTriQuadMeshWithDataF64: + @property + def dtype(self) -> numpy.dtype: r""" - Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary + Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) """ - def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: ... - def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: ... 
- def get_point_attribute(self, name:builtins.str) -> typing.Any: + @property + def vertices(self) -> numpy.typing.NDArray[typing.Any]: r""" - Get mesh vertex attribute by name + The `Nx3` array of vertex positions of the mesh """ - def get_cell_attribute(self, name:builtins.str) -> typing.Any: + @property + def triangles(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Get mesh cell attribute by name + The `Mx3` array of vertex indices per triangle """ - def get_point_attributes(self) -> dict: + def copy(self) -> TriMesh3d: r""" - Get all point attributes in a python dictionary + Returns a copy (deep copy) of this mesh """ - def get_cell_attributes(self) -> dict: + def vertex_normals_parallel(self) -> numpy.typing.NDArray[typing.Any]: r""" - Get all cell attributes in a python dictionary + Computes the vertex normals of the mesh using an area weighted average of the adjacent triangle faces """ - def get_point_attribute_keys(self) -> list: + def vertex_vertex_connectivity(self) -> VertexVertexConnectivity: r""" - Get all registered point attribute names + Computes the vertex-vertex connectivity of the mesh """ - def get_cell_attribute_keys(self) -> list: + def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" - Get all registered cell attribute names + Writes the mesh to a file using ``meshio.write_points_cells`` """ -class SphInterpolatorF32: +class UniformGrid: r""" - SphInterpolator wrapper + Struct containing the parameters of the uniform grid used for the surface reconstruction """ - def __new__(cls, particle_positions:numpy.typing.NDArray[numpy.float32], particle_densities:typing.Sequence[builtins.float], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolatorF32: ... 
- def interpolate_scalar_quantity(self, particle_quantity:typing.Sequence[builtins.float], interpolation_points:numpy.typing.NDArray[numpy.float32], first_order_correction:builtins.bool) -> builtins.list[builtins.float]: + @property + def aabb(self) -> Aabb3d: r""" - Interpolates a scalar per particle quantity to the given points, panics if the there are less per-particles values than particles + The AABB of the grid containing all marching cubes vertices influenced by the particle kernels """ - def interpolate_normals(self, interpolation_points:numpy.typing.NDArray[numpy.float32]) -> numpy.typing.NDArray[numpy.float32]: + @property + def cell_size(self) -> builtins.float: r""" - Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation + Returns the cell size of the uniform grid (the marching cubes voxel size) """ - def interpolate_vector_quantity(self, particle_quantity:numpy.typing.NDArray[numpy.float32], interpolation_points:numpy.typing.NDArray[numpy.float32], first_order_correction:builtins.bool) -> numpy.typing.NDArray[numpy.float32]: + @property + def npoints_per_dim(self) -> builtins.list[builtins.int]: r""" - Interpolates a vectorial per particle quantity to the given points, panics if the there are less per-particles values than particles + Returns the number of points (marching cubes vertices) per dimension in the uniform grid + """ + @property + def ncells_per_dim(self) -> builtins.list[builtins.int]: + r""" + Returns the number of cells (marching cubes voxels) per dimension in the uniform grid """ -class SphInterpolatorF64: +class VertexVertexConnectivity: r""" - SphInterpolator wrapper + Vertex-vertex connectivity of a mesh """ - def __new__(cls, particle_positions:numpy.typing.NDArray[numpy.float64], particle_densities:typing.Sequence[builtins.float], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolatorF64: ... 
- def interpolate_scalar_quantity(self, particle_quantity:typing.Sequence[builtins.float], interpolation_points:numpy.typing.NDArray[numpy.float64], first_order_correction:builtins.bool) -> builtins.list[builtins.float]: + def copy_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: r""" - Interpolates a scalar per particle quantity to the given points, panics if the there are less per-particles values than particles - """ - def interpolate_normals(self, interpolation_points:numpy.typing.NDArray[numpy.float64]) -> numpy.typing.NDArray[numpy.float64]: - r""" - Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation + Returns a copy of the wrapped connectivity data """ - def interpolate_vector_quantity(self, particle_quantity:numpy.typing.NDArray[numpy.float64], interpolation_points:numpy.typing.NDArray[numpy.float64], first_order_correction:builtins.bool) -> numpy.typing.NDArray[numpy.float64]: + def take_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: r""" - Interpolates a vectorial per particle quantity to the given points, panics if the there are less per-particles values than particles + Returns the wrapped connectivity data by moving it out of this object (zero copy) """ -class SurfaceReconstructionF32: +class MeshType(Enum): r""" - SurfaceReconstruction wrapper + Enum specifying the type of mesh wrapped by a ``MeshWithData`` + """ + Tri3d = ... + r""" + 3D triangle mesh + """ + MixedTriQuad3d = ... 
+ r""" + 3D mixed triangle and quad mesh """ - @property - def mesh(self) -> TriMesh3dF32: - r""" - PyTrimesh3d clone of the contained mesh - """ - @property - def grid(self) -> UniformGridF32: - r""" - PyUniformGrid clone of the contained grid - """ - def particle_densities(self) -> builtins.list[builtins.float]: - r""" - Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this) - """ - def particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: - r""" - Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters) - """ -class SurfaceReconstructionF64: +def barnacle_decimation(mesh:typing.Union[TriMesh3d, MeshWithData], *, keep_vertices:builtins.bool) -> typing.Union[TriMesh3d, MeshWithData]: r""" - SurfaceReconstruction wrapper + Performs specialized decimation on the given mesh to prevent "barnacles" when applying weighted Laplacian smoothing + + The decimation is performed inplace and modifies the given mesh. + Returns the vertex-vertex connectivity of the decimated mesh which can be used for other + post-processing steps. 
""" - @property - def mesh(self) -> TriMesh3dF64: - r""" - PyTrimesh3d clone of the contained mesh - """ - @property - def grid(self) -> UniformGridF64: - r""" - PyUniformGrid clone of the contained grid - """ - def particle_densities(self) -> builtins.list[builtins.float]: - r""" - Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this) - """ - def particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: - r""" - Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters) - """ -class TriMesh3dF32: +def check_mesh_consistency(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, check_closed:builtins.bool=True, check_manifold:builtins.bool=True, debug:builtins.bool=False) -> typing.Optional[builtins.str]: r""" - TriMesh3d wrapper + Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found """ - def get_vertices(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - def get_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - """ - def get_cells(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Alias for `get_triangles` - """ - def take_vertices(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - """ - def take_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns the `Mx3` array of the vertex indices that make up the triangles by moving it out of the mesh (zero copy) - """ - def take_cells(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Alias for `take_triangles` - """ - def 
take_vertices_and_triangles(self) -> tuple: - r""" - Returns a tuple containing the vertices and triangles of the mesh by moving them out of the mesh (zero copy) - """ - def par_vertex_normals(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) - """ - def vertex_vertex_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: - r""" - Returns a mapping of all mesh vertices to the set of their connected neighbor vertices - """ -class TriMesh3dF64: +def convert_tris_to_quads(mesh:typing.Union[TriMesh3d, MeshWithData], *, non_squareness_limit:builtins.float=1.75, normal_angle_limit:builtins.float=10.0, max_interior_angle:builtins.float=135.0) -> typing.Union[MixedTriQuadMesh3d, MeshWithData]: r""" - TriMesh3d wrapper + Converts triangles to quads by merging triangles sharing an edge if they fulfill the given criteria + + This operation creates a new mesh and does not modify the input mesh. + Angles are specified in degrees. 
""" - def get_vertices(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - def get_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - """ - def get_cells(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Alias for `get_triangles` - """ - def take_vertices(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - """ - def take_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns the `Mx3` array of the vertex indices that make up the triangles by moving it out of the mesh (zero copy) - """ - def take_cells(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Alias for `take_triangles` - """ - def take_vertices_and_triangles(self) -> tuple: - r""" - Returns a tuple containing the vertices and triangles of the mesh by moving them out of the mesh (zero copy) - """ - def par_vertex_normals(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) - """ - def vertex_vertex_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: - r""" - Returns a mapping of all mesh vertices to the set of their connected neighbor vertices - """ -class TriMeshWithDataF32: +def laplacian_smoothing_normals_parallel(normals:numpy.typing.NDArray[typing.Any], vertex_connectivity:VertexVertexConnectivity, *, iterations:builtins.int) -> None: r""" - MeshWithData wrapper + Laplacian smoothing of a normal field + + The smoothing is performed inplace and modifies the given normal array. """ - def __new__(cls, mesh:TriMesh3dF32) -> TriMeshWithDataF32: ... 
- def get_mesh(self) -> TriMesh3dF32: - r""" - Returns a copy of the contained mesh - """ - def take_mesh(self) -> TriMesh3dF32: - r""" - Returns the contained mesh by moving it out of this object (zero copy) - """ - def par_clamp_with_aabb(self, aabb:Aabb3dF32, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> TriMeshWithDataF32: - r""" - Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - """ - def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: ... - def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: ... 
- def get_point_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh vertex attribute by name - """ - def get_cell_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh cell attribute by name - """ - def get_point_attributes(self) -> dict: - r""" - Get all point attributes in a python dictionary - """ - def get_cell_attributes(self) -> dict: - r""" - Get all cell attributes in a python dictionary - """ - def get_point_attribute_keys(self) -> list: - r""" - Get all registered point attribute names - """ - def get_cell_attribute_keys(self) -> list: - r""" - Get all registered cell attribute names - """ -class TriMeshWithDataF64: +def laplacian_smoothing_parallel(mesh:typing.Union[TriMesh3d, MeshWithData], vertex_connectivity:VertexVertexConnectivity, *, iterations:builtins.int, beta:builtins.float=1.0, weights:numpy.typing.NDArray[typing.Any]) -> None: r""" - MeshWithData wrapper + Laplacian smoothing of mesh vertices with feature weights + + The smoothing is performed inplace and modifies the vertices of the given mesh. + """ + +def marching_cubes(values:numpy.typing.NDArray[typing.Any], *, iso_surface_threshold:builtins.float, cube_size:builtins.float, translation:typing.Optional[typing.Sequence[builtins.float]]=None, return_grid:builtins.bool=False) -> typing.Union[TriMesh3d, tuple[TriMesh3d, UniformGrid]]: + r""" + Performs a standard marching cubes triangulation of a 3D array of values + + The array of values has to be a contiguous array with shape ``(nx, ny, nz)``. + The iso-surface threshold defines which value is considered to be "on" the surface. + The cube size and translation parameters define the scaling and translation of the resulting + mesh. Without translation, the value ``values[0, 0, 0]`` is located at coordinates ``(0, 0, 0)``. 
+ + The values are interpreted as a "density field", meaning that values higher than the iso-surface + threshold are considered to be "inside" the surface and values lower than the threshold are + considered to be "outside" the surface. This is the opposite convention to an SDF (signed distance field). + However, even if values of an SDF are provided as an input, the marching cubes algorithm + will still work and produce a watertight surface mesh (if the surface is fully contained in the + array). + + If ``return_grid`` is set to ``True``, the function will return a tuple of the mesh and the + uniform grid that was used for the triangulation. This can be used for other functions such as + :py:func:`check_mesh_consistency`. Otherwise, only the mesh is returned. + + The function is currently single-threaded. The SPH surface reconstruction functions :py:func:`reconstruction_pipeline` + and :py:func:`reconstruct_surface` improve performance by processing multiple patches in parallel. + """ + +def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> typing.Union[TriMesh3d, MeshWithData]: + r""" + Performs simplification on the given mesh inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren + + The simplification is performed inplace and modifies the given mesh. + The method is designed specifically for meshes generated by Marching Cubes. + See Moore and Warren: "Mesh Displacement: An Improved Contouring Method for Trivariate Data" (1991) + or Moore and Warren: "Compact Isocontours from Sampled Data" in "Graphics Gems III" (1992). """ - def __new__(cls, mesh:TriMesh3dF64) -> TriMeshWithDataF64: ...
- def get_mesh(self) -> TriMesh3dF64: - r""" - Returns a copy of the contained mesh - """ - def take_mesh(self) -> TriMesh3dF64: - r""" - Returns the contained mesh by moving it out of this object (zero copy) - """ - def par_clamp_with_aabb(self, aabb:Aabb3dF64, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> TriMeshWithDataF64: - r""" - Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - """ - def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: ... - def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: ... 
- def get_point_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh vertex attribute by name - """ - def get_cell_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh cell attribute by name - """ - def get_point_attributes(self) -> dict: - r""" - Get all point attributes in a python dictionary - """ - def get_cell_attributes(self) -> dict: - r""" - Get all cell attributes in a python dictionary - """ - def get_point_attribute_keys(self) -> list: - r""" - Get all registered point attribute names - """ - def get_cell_attribute_keys(self) -> list: - r""" - Get all registered cell attribute names - """ -class UniformGridF32: +def neighborhood_search_spatial_hashing_parallel(particle_positions:numpy.typing.NDArray[typing.Any], domain:Aabb3d, search_radius:builtins.float) -> NeighborhoodLists: r""" - UniformGrid wrapper + Performs a neighborhood search using spatial hashing (multithreaded implementation) """ - ... -class UniformGridF64: +def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> SurfaceReconstruction: r""" - UniformGrid wrapper + Performs a surface reconstruction from the given particles without additional post-processing + + Note that all parameters use absolute distance units and are not relative to the particle radius. """ - ... 
+def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> tuple[MeshWithData, SurfaceReconstruction]: + r""" + Runs the surface reconstruction pipeline for the given particle positions with optional post-processing + + Note that smoothing length and cube size are given in multiples of the particle radius. 
+ """ diff --git a/pysplashsurf/src/aabb.rs b/pysplashsurf/src/aabb.rs index fcba634d..b650e819 100644 --- a/pysplashsurf/src/aabb.rs +++ b/pysplashsurf/src/aabb.rs @@ -1,159 +1,89 @@ -use numpy::{PyArray, PyArray1, PyArray2, PyReadonlyArray2}; +use numpy as np; +use numpy::prelude::*; +use numpy::{Element, PyArray, PyArray1, PyArray2, PyUntypedArray}; use pyo3::{PyResult, prelude::*}; use pyo3_stub_gen::derive::*; -use splashsurf_lib::{Aabb3d, nalgebra::Vector3}; +use splashsurf_lib::{Aabb3d, Real, nalgebra::Vector3}; -macro_rules! create_aabb3d_interface { - ($name: ident, $type: ident) => { - /// Aabb3d wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: Aabb3d<$type>, - } - - impl $name { - pub fn new(data: Aabb3d<$type>) -> Self { - Self { inner: data } - } - } - - #[gen_stub_pymethods] - #[pymethods] - impl $name { - #[new] - fn py_new<'py>(min: [$type; 3], max: [$type; 3]) -> PyResult { - Ok($name::new(Aabb3d::<$type>::new( - Vector3::from_column_slice(&min), - Vector3::from_column_slice(&max), - ))) - } - - /// Constructs the smallest AABB fitting around all the given points - #[staticmethod] - fn from_points<'py>(points: &Bound<'py, PyArray2<$type>>) -> PyResult<$name> { - let points: PyReadonlyArray2<$type> = points.extract()?; - let points = points.as_slice()?; - let points: &[Vector3<$type>] = bytemuck::cast_slice(points); - - Ok($name::new(Aabb3d::from_points(points))) - } - - /// Constructs the smallest AABB fitting around all the given points, parallel version - #[staticmethod] - fn par_from_points<'py>(points: &Bound<'py, PyArray2<$type>>) -> PyResult<$name> { - let points: PyReadonlyArray2<$type> = points.extract()?; - let points = points.as_slice()?; - let points: &[Vector3<$type>] = bytemuck::cast_slice(points); - - Ok($name::new(Aabb3d::par_from_points(points))) - } - - /// Constructs a degenerate AABB with min and max set to zero - #[staticmethod] - fn zeros() -> $name { - $name::new(Aabb3d::zeros()) - } - - /// 
Constructs a degenerate AABB with zero extents centered at the given point - #[staticmethod] - fn from_point(point: [$type; 3]) -> Self { - $name::new(Aabb3d::from_point(Vector3::from_column_slice(&point))) - } - - /// Returns the min coordinate of the bounding box - fn min<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { - let min: &[$type] = self.inner.min().as_slice(); - PyArray::from_slice(py, min) - } - - /// Returns the max coordinate of the bounding box - fn max<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { - let max: &[$type] = self.inner.max().as_slice(); - PyArray::from_slice(py, max) - } - - /// Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` - fn is_consistent(&self) -> bool { - self.inner.is_consistent() - } - - /// Returns whether the AABB is degenerate in any dimension, i.e. `aabb.min()[i] == aabb.max()[i]` for any `i` - fn is_degenerate(&self) -> bool { - self.inner.is_degenerate() - } - - /// Returns the extents of the bounding box (vector connecting min and max point of the box) - fn extents<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { - let extents = self.inner.extents(); - PyArray::from_slice(py, extents.as_slice()) - } +use crate::utils::*; - /// Returns the smallest scalar extent of the AABB over all of its dimensions - fn min_extent(&self) -> $type { - self.inner.min_extent() - } - - /// Returns the largest scalar extent of the AABB over all of its dimensions - fn max_extent(&self) -> $type { - self.inner.max_extent() - } - - /// Returns the geometric centroid of the AABB (mean of the corner points) - fn centroid<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { - let centroid = self.inner.centroid(); - PyArray::from_slice(py, centroid.as_slice()) - } - - /// Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate - fn contains_aabb(&self, other: &$name) -> bool { - 
self.inner.contains_aabb(&other.inner) - } - - /// Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate - fn contains_point(&self, point: [$type; 3]) -> bool { - self.inner - .contains_point(&Vector3::from_column_slice(&point)) - } - - /// Translates the AABB by the given vector - fn translate(&mut self, vector: [$type; 3]) { - self.inner.translate(&Vector3::from_column_slice(&vector)); - } - - /// Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) - fn center_at_origin(&mut self) { - self.inner.center_at_origin(); - } - - /// Multiplies a uniform, local scaling to the AABB (i.e. multiplying its extents as if it was centered at the origin) - fn scale_uniformly(&mut self, scaling: $type) { - self.inner.scale_uniformly(scaling); - } - - /// Enlarges this AABB to the smallest AABB enclosing both itself and another AABB - fn join(&mut self, other: &$name) { - self.inner.join(&other.inner); - } +/// Three-dimensional axis-aligned bounding box defined by its minimum and maximum corners +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "Aabb3d")] +pub struct PyAabb3d { + min: Vector3, + max: Vector3, +} - /// Enlarges this AABB to the smallest AABB enclosing both itself and another point - fn join_with_point(&mut self, point: [$type; 3]) { - self.inner - .join_with_point(&Vector3::from_column_slice(&point)); - } +impl From> for PyAabb3d { + fn from(aabb: Aabb3d) -> Self { + Self { + min: aabb.min().map(|x| x.to_f64().unwrap()), + max: aabb.max().map(|x| x.to_f64().unwrap()), + } + } +} - /// Grows this AABB uniformly in all directions by the given scalar margin (i.e. 
adding the margin to min/max extents) - fn grow_uniformly(&mut self, margin: $type) { - self.inner.grow_uniformly(margin); - } +impl PyAabb3d { + /// Convert to an [`splashsurf_lib::Aabb3d`] with the given scalar type + pub(crate) fn inner(&self) -> Aabb3d { + Aabb3d::new( + self.min.map(|x| R::from_f64(x).unwrap()), + self.max.map(|x| R::from_f64(x).unwrap()), + ) + } + + fn from_points_generic<'py, R: Real + Element>( + points: &Bound<'py, PyArray2>, + ) -> PyResult { + let points = points.try_readonly()?; + let points_vec: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); + Ok(Self::from(Aabb3d::par_from_points(points_vec))) + } +} - /// Returns the smallest cubical AABB with the same center that encloses this AABB - fn enclosing_cube(&self) -> $name { - $name::new(self.inner.enclosing_cube()) - } +#[gen_stub_pymethods] +#[pymethods] +impl PyAabb3d { + /// Constructs an AABB with the given min and max coordinates + #[staticmethod] + pub fn from_min_max<'py>(min: [f64; 3], max: [f64; 3]) -> Self { + Self { + min: Vector3::from(min), + max: Vector3::from(max), + } + } + + /// Constructs the smallest AABB fitting around all the given points + #[staticmethod] + pub fn from_points<'py>(points: &Bound<'py, PyUntypedArray>) -> PyResult { + let py = points.py(); + let element_type = points.dtype(); + if element_type.is_equiv_to(&np::dtype::(py)) { + Self::from_points_generic(points.downcast::>()?) + } else if element_type.is_equiv_to(&np::dtype::(py)) { + Self::from_points_generic(points.downcast::>()?) 
+ } else { + Err(pyerr_unsupported_scalar()) } - }; + } + + /// The min coordinate of the AABB + #[getter] + pub fn min<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { + PyArray::from_slice(py, self.min.as_slice()) + } + + /// The max coordinate of the AABB + #[getter] + pub fn max<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { + PyArray::from_slice(py, self.max.as_slice()) + } + + /// Checks if the given point is inside the AABB, the AABB is considered to be half-open to its max coordinate + pub fn contains_point(&self, point: [f64; 3]) -> bool { + let point = &Vector3::from(point); + point >= &self.min && point < &self.max + } } - -create_aabb3d_interface!(Aabb3dF64, f64); -create_aabb3d_interface!(Aabb3dF32, f32); diff --git a/pysplashsurf/src/bin/stub_gen.rs b/pysplashsurf/src/bin/stub_gen.rs index befaaa89..0d1f9452 100644 --- a/pysplashsurf/src/bin/stub_gen.rs +++ b/pysplashsurf/src/bin/stub_gen.rs @@ -1,10 +1,44 @@ // Run `cargo run --bin stub_gen` to generate a stub file for the extension + use pyo3_stub_gen::Result; +use std::fs; + +fn add_future_imports() -> Result<()> { + // Read the original file + let content = fs::read_to_string("pysplashsurf.pyi")?; + let lines: Vec<&str> = content.lines().collect(); + + // Find where to insert the import (after comments) + let mut insert_index = 0; + for (i, line) in lines.iter().enumerate() { + if !line.trim().starts_with('#') && !line.trim().is_empty() { + insert_index = i; + break; + } + } + + // Create new content with the import added + let mut new_lines = Vec::new(); + new_lines.extend_from_slice(&lines[..insert_index]); + new_lines.push("from __future__ import annotations"); + if insert_index < lines.len() && !lines[insert_index].is_empty() { + new_lines.push(""); + } + new_lines.extend_from_slice(&lines[insert_index..]); + + // Write the modified content back to the file + fs::write("pysplashsurf.pyi", new_lines.join("\n"))?; + + Ok(()) +} fn main() -> Result<()> { // `stub_info` is a function 
defined by `define_stub_info_gatherer!` macro. let stub = pysplashsurf::stub_info()?; stub.generate()?; - std::fs::rename("pysplashsurf.pyi", "pysplashsurf/pysplashsurf.pyi")?; + + add_future_imports()?; + + fs::rename("pysplashsurf.pyi", "pysplashsurf/pysplashsurf.pyi")?; Ok(()) } diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 6ab5589e..02183d1c 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -1,9 +1,15 @@ use pyo3::prelude::*; use pyo3::types::{PyList, PyString}; use pyo3_stub_gen::define_stub_info_gatherer; - use splashsurf::cli; +#[cfg(target_pointer_width = "32")] +pub(crate) use u32 as NumpyUsize; +#[cfg(target_pointer_width = "64")] +pub(crate) use u64 as NumpyUsize; +#[cfg(not(any(target_pointer_width = "64", target_pointer_width = "32")))] +compile_error!("Unsupported target pointer width, only 32 and 64 bit are supported."); + mod aabb; mod mesh; mod sph_interpolation; @@ -12,111 +18,50 @@ mod uniform_grid; mod marching_cubes; mod neighborhood_search; mod pipeline; -mod post_processing; +mod postprocessing; mod reconstruction; +pub(crate) mod utils; + /// High-Level Bindings of the splashsurf surface reconstruction implementation. /// Support reconstructing Level-Set surfaces from particle clouds or from regular grids. 
#[pymodule] fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - - let _ = m.add_class::()?; - let _ = m.add_class::()?; - - let _ = m.add_class::()?; - let _ = m.add_class::()?; - - let _ = m.add_class::()?; - let _ = m.add_class::()?; - - let _ = m.add_class::()?; - let _ = m.add_class::()?; - - let _ = m.add_function(wrap_pyfunction!( - reconstruction::reconstruct_surface_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - reconstruction::reconstruct_surface_py_f64, - m - )?); - - let _ = m.add_function(wrap_pyfunction!( - post_processing::convert_tris_to_quads_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - post_processing::convert_tris_to_quads_py_f64, - m - )?); + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; - let _ = m.add_function(wrap_pyfunction!( - post_processing::marching_cubes_cleanup_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - post_processing::marching_cubes_cleanup_py_f64, - m - )?); + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; - let _ = m.add_function(wrap_pyfunction!( - marching_cubes::check_mesh_consistency_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - marching_cubes::check_mesh_consistency_py_f64, - m - )?); - - let _ = m.add_function(wrap_pyfunction!(post_processing::decimation_py_f32, m)?); - let _ = m.add_function(wrap_pyfunction!(post_processing::decimation_py_f64, m)?); + use wrap_pyfunction as wrap; - let _ = m.add_function(wrap_pyfunction!( - post_processing::par_laplacian_smoothing_inplace_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - post_processing::par_laplacian_smoothing_inplace_py_f64, + 
m.add_function(wrap!(reconstruction::reconstruct_surface, m)?)?; + m.add_function(wrap!(marching_cubes::check_mesh_consistency, m)?)?; + m.add_function(wrap!(marching_cubes::marching_cubes, m)?)?; + m.add_function(wrap!(postprocessing::marching_cubes_cleanup, m)?)?; + m.add_function(wrap!(postprocessing::convert_tris_to_quads, m)?)?; + m.add_function(wrap!(postprocessing::barnacle_decimation, m)?)?; + m.add_function(wrap!(postprocessing::laplacian_smoothing_parallel, m)?)?; + m.add_function(wrap!( + postprocessing::laplacian_smoothing_normals_parallel, m - )?); + )?)?; - let _ = m.add_function(wrap_pyfunction!( - post_processing::par_laplacian_smoothing_normals_inplace_py_f32, + m.add_function(wrap!( + neighborhood_search::neighborhood_search_spatial_hashing_parallel, m - )?); - let _ = m.add_function(wrap_pyfunction!( - post_processing::par_laplacian_smoothing_normals_inplace_py_f64, - m - )?); - - let _ = m.add_function(wrap_pyfunction!( - neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f64, - m - )?); + )?)?; - let _ = m.add_function(wrap_pyfunction!( - pipeline::reconstruction_pipeline_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - pipeline::reconstruction_pipeline_py_f64, - m - )?); + m.add_function(wrap!(pipeline::reconstruction_pipeline, m)?)?; - let _ = m.add_function(wrap_pyfunction!(run_splashsurf_py, m)?); + m.add_function(wrap!(run_splashsurf_py, m)?)?; Ok(()) } @@ -126,7 +71,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { fn run_splashsurf_py<'py>(args: Bound<'py, PyList>) -> PyResult<()> { cli::run_splashsurf(args.iter().map(|arg| { arg.downcast::() - .expect("Argument wasn't a string") + .expect("argument wasn't a string") .extract::() .unwrap() }))?; diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index 8dedd671..9af30f28 100644 --- 
a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -1,77 +1,148 @@ -use pyo3::{ - exceptions::{PyRuntimeError, PyValueError}, - prelude::*, -}; +use numpy::prelude::*; +use numpy::{Element, PyArray3, PyUntypedArray}; +use pyo3::IntoPyObjectExt; +use pyo3::prelude::*; +use pyo3_stub_gen::derive::*; +use splashsurf_lib::nalgebra::Vector3; +use splashsurf_lib::{DensityMap, Real, UniformGrid}; -use crate::{ - mesh::{TriMesh3dF32, TriMesh3dF64, TriMeshWithDataF32, TriMeshWithDataF64}, - uniform_grid::{UniformGridF32, UniformGridF64}, -}; +use crate::mesh::{PyTriMesh3d, get_triangle_mesh_generic}; +use crate::uniform_grid::PyUniformGrid; +use crate::utils; +use crate::utils::IndexT; +/// Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found +#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "check_mesh_consistency_f32")] -#[pyo3(signature = (grid, mesh, *, check_closed, check_manifold, debug))] -pub fn check_mesh_consistency_py_f32<'py>( - py: Python, - grid: &UniformGridF32, - mesh: PyObject, +#[pyo3(name = "check_mesh_consistency")] +#[pyo3(signature = (mesh, grid, *, check_closed = true, check_manifold = true, debug = false))] +pub fn check_mesh_consistency<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + mesh: &Bound<'py, PyAny>, + grid: &PyUniformGrid, check_closed: bool, check_manifold: bool, debug: bool, -) -> PyResult<()> { - if let Ok(mesh) = mesh.downcast_bound::(py) { - splashsurf_lib::marching_cubes::check_mesh_consistency( - &grid.inner, - &mesh.borrow().inner, +) -> PyResult> { + let py = mesh.py(); + + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(utils::pyerr_only_triangle_mesh)?; + let mesh = mesh.borrow(py); + + if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32()) { + 
Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( + grid, + mesh, check_closed, check_manifold, debug, ) - .map_err(|x| PyErr::new::(x)) - } else if let Ok(mesh) = mesh.downcast_bound::(py) { - splashsurf_lib::marching_cubes::check_mesh_consistency( - &grid.inner, - &mesh.borrow().inner.mesh, + .err()) + } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64()) { + Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( + grid, + mesh, check_closed, check_manifold, debug, ) - .map_err(|x| PyErr::new::(x)) + .err()) } else { - Err(PyErr::new::("Invalid mesh type")) + Err(utils::pyerr_scalar_type_mismatch()) } } +/// Performs a standard marching cubes triangulation of a 3D array of values +/// +/// The array of values has to be a contiguous array with shape ``(nx, ny, nz)``. +/// The iso-surface threshold defines which value is considered to be "on" the surface. +/// The cube size and translation parameters define the scaling and translation of the resulting +/// mesh. Without translation, the value ``values[0, 0, 0]`` is located at coordinates ``(0, 0, 0)``. +/// +/// The values are interpreted as a "density field", meaning that values higher than the iso-surface +/// threshold are considered to be "inside" the surface and values lower than the threshold are +/// considered to be "outside" the surface. This is the opposite convention to an SDF (signed distance field). +/// However, even if values of an SDF are provided as an input, the marching cubes algorithm +/// will still work and produce a watertight surface mesh (if the surface is fully contained in the +/// array). +/// +/// If ``return_grid`` is set to ``True``, the function will return a tuple of the mesh and the +/// uniform grid that was used for the triangulation. This can be used for other functions such as +/// :py:func:`check_mesh_consistency`. Otherwise, only the mesh is returned. +/// +/// The function is currently single-threaded. 
The SPH surface reconstruction functions :py:func:`reconstruction_pipeline` +/// and :py:func:`reconstruct_surface` improve performance by processing multiple patches in parallel. +#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "check_mesh_consistency_f64")] -#[pyo3(signature = (grid, mesh, *, check_closed, check_manifold, debug))] -pub fn check_mesh_consistency_py_f64<'py>( - py: Python, - grid: &UniformGridF64, - mesh: PyObject, - check_closed: bool, - check_manifold: bool, - debug: bool, -) -> PyResult<()> { - if let Ok(mesh) = mesh.downcast_bound::(py) { - splashsurf_lib::marching_cubes::check_mesh_consistency( - &grid.inner, - &mesh.borrow().inner, - check_closed, - check_manifold, - debug, +#[pyo3(name = "marching_cubes")] +#[pyo3(signature = (values, *, iso_surface_threshold, cube_size, translation = None, return_grid = false))] +#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, tuple[TriMesh3d, UniformGrid]]", imports=()))] +pub fn marching_cubes<'py>( + values: &Bound<'py, PyUntypedArray>, + iso_surface_threshold: f64, + cube_size: f64, + translation: Option<[f64; 3]>, + return_grid: bool, +) -> PyResult> { + assert_eq!(values.shape().len(), 3, "values must be a 3D array"); + + fn triangulate_density_map_generic<'py, R: Real + Element>( + values: &Bound<'py, PyArray3>, + iso_surface_threshold: R, + cube_size: R, + translation: Option<[R; 3]>, + return_grid: bool, + ) -> PyResult> { + let py = values.py(); + let shape = values.shape(); + let translation = Vector3::from(translation.unwrap_or([R::zero(); 3])); + let n_cells_per_dim = [ + shape[0] as IndexT - 1, + shape[1] as IndexT - 1, + shape[2] as IndexT - 1, + ]; + + let grid = UniformGrid::new(&translation, &n_cells_per_dim, cube_size) + .map_err(anyhow::Error::from)?; + + let values = values.try_readonly()?; + let density_map = DensityMap::from(values.as_slice()?); + + let mesh = splashsurf_lib::marching_cubes::triangulate_density_map( + &grid, + &density_map, + 
iso_surface_threshold, ) - .map_err(|x| PyErr::new::(x)) - } else if let Ok(mesh) = mesh.downcast_bound::(py) { - splashsurf_lib::marching_cubes::check_mesh_consistency( - &grid.inner, - &mesh.borrow().inner.mesh, - check_closed, - check_manifold, - debug, + .map_err(anyhow::Error::from)?; + + let mesh = PyTriMesh3d::try_from_generic(mesh)?; + let grid = PyUniformGrid::try_from_generic(grid)?; + + if return_grid { + (mesh, grid).into_py_any(py) + } else { + mesh.into_py_any(py) + } + } + + if let Ok(values) = values.downcast::>() { + triangulate_density_map_generic( + &values, + iso_surface_threshold as f32, + cube_size as f32, + translation.map(|t| t.map(|t| t as f32)), + return_grid, + ) + } else if let Ok(values) = values.downcast::>() { + triangulate_density_map_generic( + &values, + iso_surface_threshold, + cube_size, + translation, + return_grid, ) - .map_err(|x| PyErr::new::(x)) } else { - Err(PyErr::new::("Invalid mesh type")) + Err(utils::pyerr_unsupported_scalar()) } } diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 07e9ae87..6c33baa3 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,12 +1,17 @@ -use ndarray::{Array2, ArrayView, ArrayView2}; -use numpy::{Element, IntoPyArray, PyArray, PyArray2, PyArrayMethods, PyReadonlyArray2, ToPyArray}; -use pyo3::{ - IntoPyObjectExt, - exceptions::PyValueError, - prelude::*, - types::{PyDict, PyList, PyTuple}, -}; +use crate::NumpyUsize; +use crate::utils; +use crate::utils::{enum_impl_from, enum_wrapper_impl_from}; +use bytemuck::{NoUninit, Pod}; +use ndarray::Array2; +use numpy as np; +use numpy::prelude::*; +use numpy::{Element, PyArray, PyArray1, PyArray2, PyArrayDescr, PyUntypedArray}; +use pyo3::IntoPyObjectExt; +use pyo3::exceptions::{PyTypeError, PyValueError}; +use pyo3::prelude::*; +use pyo3::types::{IntoPyDict, PyDict}; use pyo3_stub_gen::derive::*; +use splashsurf_lib::mesh::TriangleCell; use splashsurf_lib::{ Real, mesh::{ @@ -15,447 +20,836 @@ use 
splashsurf_lib::{ }, nalgebra::{Unit, Vector3}, }; +use std::ops::Deref; +use std::path::PathBuf; + +fn view_triangles_generic<'py>( + triangles: &[TriangleCell], + container: Bound<'py, PyAny>, +) -> PyResult>> { + let vertex_indices: &[NumpyUsize] = bytemuck::cast_slice(triangles); + let view = utils::view_generic(vertex_indices, &[triangles.len(), 3], container)?.into_any(); + Ok(view.downcast_into::>()?) +} -use crate::aabb::{Aabb3dF32, Aabb3dF64}; - -fn get_attribute_with_name<'py, R: Real + Element>( +fn compute_normals_generic<'py, R: Real + Element>( py: Python<'py>, - attrs: &[OwnedMeshAttribute], - name: &str, -) -> PyResult -where - R: pyo3::IntoPyObject<'py>, -{ - let elem = attrs.iter().filter(|x| x.name == name).next(); - match elem { - Some(attr) => match attr.data.clone() { - OwnedAttributeData::ScalarU64(res) => Ok(res.into_owned().into_pyobject(py)?.into()), - OwnedAttributeData::ScalarReal(res) => Ok(res.into_owned().into_pyobject(py)?.into()), - OwnedAttributeData::Vector3Real(res) => { - let flattened: Vec = bytemuck::cast_vec(res.into_owned()); - let res: Array2 = Array2::from_shape_vec((flattened.len() / 3, 3), flattened) - .map_err(anyhow::Error::new)?; - Ok(res.into_pyarray(py).into_bound_py_any(py)?.into()) - } - }, - None => Err(PyErr::new::(format!( - "Attribute with name {} doesn't exist", - name - ))), + mesh: &TriMesh3d, +) -> PyResult> { + let normals_vec = mesh.par_vertex_normals(); + let normals_vec = bytemuck::allocation::cast_vec::>, R>(normals_vec); + + Ok(PyArray::from_vec(py, normals_vec) + .reshape([mesh.vertices().len(), 3])? 
+ .into_any() + .downcast_into::() + .expect("downcast should not fail")) +} + +pub fn get_triangle_mesh_generic<'py>(mesh: &Bound<'py, PyAny>) -> Option> { + let py = mesh.py(); + if let Ok(mesh) = mesh.downcast::() { + Some(mesh.as_unbound().clone_ref(py)) + } else if let Ok(data_mesh) = mesh.downcast::() + && data_mesh.borrow().mesh_type() == MeshType::Tri3d + { + data_mesh.borrow().as_tri3d(py) + } else { + None } } -fn add_attribute_with_name<'py, R: Real + Element>( - attrs: &mut Vec>, - attribute: OwnedMeshAttribute, -) -> PyResult<()> { - let elem = attrs.iter().filter(|x| x.name == attribute.name).next(); - match elem { - None => { - attrs.push(attribute); - Ok(()) +/// Vertex-vertex connectivity of a mesh +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "VertexVertexConnectivity")] +pub struct PyVertexVertexConnectivity { + pub(crate) connectivity: Vec>, +} + +impl PyVertexVertexConnectivity { + pub fn new(connectivity: Vec>) -> Self { + Self { connectivity } + } +} + +#[gen_stub_pymethods] +#[pymethods] +impl PyVertexVertexConnectivity { + /// Returns a copy of the wrapped connectivity data + pub fn copy_connectivity(&self) -> Vec> { + self.connectivity.clone() + } + + /// Returns the wrapped connectivity data by moving it out of this object (zero copy) + pub fn take_connectivity(&mut self) -> Vec> { + // TODO: Check if this is actually zero-copy with the conversion to Python lists + std::mem::take(&mut self.connectivity) + } +} + +#[derive(Clone)] +enum PyTriMesh3dData { + F32(TriMesh3d), + F64(TriMesh3d), +} + +/// Triangle surface mesh in 3D +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "TriMesh3d")] +#[derive(Clone)] +pub struct PyTriMesh3d { + inner: PyTriMesh3dData, +} + +enum_wrapper_impl_from!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F32); +enum_wrapper_impl_from!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F64); + +impl Default for PyTriMesh3d { + fn default() -> Self { + Self { + inner: PyTriMesh3dData::F32(TriMesh3d::default()), } - _ => 
Err(PyErr::new::(format!( - "Attribute with name {} already exists", - attribute.name - ))), } } -macro_rules! create_mesh_data_interface { - ($name: ident, $type: ident, $mesh_class: ident, $pymesh_class: ident, $aabb_class: ident) => { - /// MeshWithData wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: MeshWithData<$type, $mesh_class<$type>>, +impl PyTriMesh3d { + pub fn try_from_generic(mut mesh: TriMesh3d) -> PyResult { + utils::transmute_take_into::<_, TriMesh3d, _>(&mut mesh) + .or_else(|| utils::transmute_take_into::<_, TriMesh3d, _>(&mut mesh)) + .ok_or_else(utils::pyerr_unsupported_scalar) + } + + pub fn as_f32(&self) -> Option<&TriMesh3d> { + match &self.inner { + PyTriMesh3dData::F32(mesh) => Some(mesh), + _ => None, } + } - impl $name { - pub fn new(data: MeshWithData<$type, $mesh_class<$type>>) -> Self { - Self { inner: data } - } + pub fn as_f64(&self) -> Option<&TriMesh3d> { + match &self.inner { + PyTriMesh3dData::F64(mesh) => Some(mesh), + _ => None, } + } - #[gen_stub_pymethods] - #[pymethods] - impl $name { - #[new] - fn py_new(mesh: &$pymesh_class) -> PyResult { - let meshdata = MeshWithData::new(mesh.inner.clone()); - Ok($name::new(meshdata)) - } + pub fn as_f32_mut(&mut self) -> Option<&mut TriMesh3d> { + match &mut self.inner { + PyTriMesh3dData::F32(mesh) => Some(mesh), + _ => None, + } + } - /// Returns a copy of the contained mesh - fn get_mesh(&self) -> $pymesh_class { - $pymesh_class::new(self.inner.mesh.clone()) - } + pub fn as_f64_mut(&mut self) -> Option<&mut TriMesh3d> { + match &mut self.inner { + PyTriMesh3dData::F64(mesh) => Some(mesh), + _ => None, + } + } +} - /// Returns the contained mesh by moving it out of this object (zero copy) - fn take_mesh(&mut self) -> $pymesh_class { - let mesh = std::mem::take(&mut self.inner.mesh); - $pymesh_class::new(mesh) - } +#[gen_stub_pymethods] +#[pymethods] +impl PyTriMesh3d { + /// Numpy dtype of the underlying scalar type (either ``np.float32`` or 
``np.float64``) + #[getter] + pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { + match &self.inner { + PyTriMesh3dData::F32(_) => np::dtype::(py), + PyTriMesh3dData::F64(_) => np::dtype::(py), + } + } - /// Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - fn par_clamp_with_aabb( - &self, - aabb: &$aabb_class, - clamp_vertices: bool, - keep_vertices: bool, - ) -> $name { - $name::new(self.inner.par_clamp_with_aabb( - &aabb.inner, - clamp_vertices, - keep_vertices, - )) - } + /// The `Nx3` array of vertex positions of the mesh + #[getter] + pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { + match &this.borrow().inner { + PyTriMesh3dData::F32(mesh) => utils::view_vec_generic(mesh.vertices(), this.into_any()), + PyTriMesh3dData::F64(mesh) => utils::view_vec_generic(mesh.vertices(), this.into_any()), + } + } - fn push_point_attribute_scalar_u64<'py>( - &mut self, - name: &str, - data: Vec, - ) -> PyResult<()> { - add_attribute_with_name::<$type>( - &mut self.inner.point_attributes, - OwnedMeshAttribute::new(name, OwnedAttributeData::ScalarU64(data.into())), - ) - } + /// The `Mx3` array of vertex indices per triangle + #[getter] + pub fn triangles<'py>(this: Bound<'py, Self>) -> PyResult>> { + match &this.borrow().inner { + PyTriMesh3dData::F32(mesh) => view_triangles_generic(mesh.cells(), this.into_any()), + PyTriMesh3dData::F64(mesh) => view_triangles_generic(mesh.cells(), this.into_any()), + } + } - fn push_point_attribute_scalar_real<'py>( - &mut self, - name: &str, - data: Vec<$type>, - ) -> PyResult<()> { - add_attribute_with_name::<$type>( - &mut self.inner.point_attributes, - OwnedMeshAttribute::new(name, OwnedAttributeData::ScalarReal(data.into())), - ) - } + /// Returns a copy (deep copy) of this mesh + pub fn copy(&self) -> Self { + self.clone() + } - fn push_point_attribute_vector_real<'py>( - &mut self, - name: &str, - data: &Bound<'py, 
PyArray2<$type>>, - ) -> PyResult<()> { - let data: PyReadonlyArray2<$type> = data.extract()?; - let data = data.as_slice()?; - let data: &[Vector3<$type>] = bytemuck::cast_slice(data); - - add_attribute_with_name::<$type>( - &mut self.inner.point_attributes, - OwnedMeshAttribute::new( - name, - OwnedAttributeData::Vector3Real(data.to_vec().into()), - ), - ) - } + /// Computes the vertex normals of the mesh using an area weighted average of the adjacent triangle faces + pub fn vertex_normals_parallel<'py>( + &self, + py: Python<'py>, + ) -> PyResult> { + match &self.inner { + PyTriMesh3dData::F32(mesh) => compute_normals_generic(py, mesh), + PyTriMesh3dData::F64(mesh) => compute_normals_generic(py, mesh), + } + } - fn push_cell_attribute_scalar_u64<'py>( - &mut self, - name: &str, - data: Vec, - ) -> PyResult<()> { - add_attribute_with_name::<$type>( - &mut self.inner.cell_attributes, - OwnedMeshAttribute::new(name, OwnedAttributeData::ScalarU64(data.into())), - ) - } + /// Computes the vertex-vertex connectivity of the mesh + pub fn vertex_vertex_connectivity(&self) -> PyVertexVertexConnectivity { + let connectivity = match &self.inner { + PyTriMesh3dData::F32(mesh) => mesh.vertex_vertex_connectivity(), + PyTriMesh3dData::F64(mesh) => mesh.vertex_vertex_connectivity(), + }; + PyVertexVertexConnectivity::new(connectivity) + } - fn push_cell_attribute_scalar_real<'py>( - &mut self, - name: &str, - data: Vec<$type>, - ) -> PyResult<()> { - add_attribute_with_name::<$type>( - &mut self.inner.cell_attributes, - OwnedMeshAttribute::new(name, OwnedAttributeData::ScalarReal(data.into())), - ) - } + /// Writes the mesh to a file using ``meshio.write_points_cells`` + #[pyo3(signature = (path, *, file_format = Some("vtk42")))] + pub fn write_to_file<'py>( + this: Bound<'py, Self>, + path: PathBuf, + file_format: Option<&str>, + ) -> PyResult<()> { + let py = this.py(); + let mesh = + PyMeshWithData::try_from_pymesh(py, this.unbind().clone_ref(py))?.into_pyobject(py)?; + 
PyMeshWithData::write_to_file(mesh, path, file_format) + } +} - fn push_cell_attribute_vector_real<'py>( - &mut self, - name: &str, - data: &Bound<'py, PyArray2<$type>>, - ) -> PyResult<()> { - let data: PyReadonlyArray2<$type> = data.extract()?; - let data = data.as_slice()?; - let data: &[Vector3<$type>] = bytemuck::cast_slice(data); - - add_attribute_with_name::<$type>( - &mut self.inner.cell_attributes, - OwnedMeshAttribute::new( - name, - OwnedAttributeData::Vector3Real(data.to_vec().into()), - ), - ) - } +#[derive(Clone)] +enum PyMixedTriQuadMesh3dData { + F32(MixedTriQuadMesh3d), + F64(MixedTriQuadMesh3d), +} - /// Get mesh vertex attribute by name - fn get_point_attribute<'py>(&self, py: Python<'py>, name: &str) -> PyResult { - get_attribute_with_name::<$type>(py, self.inner.point_attributes.as_slice(), name) - } +/// Mixed triangle and quad surface mesh in 3D +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "MixedTriQuadMesh3d")] +#[derive(Clone)] +pub struct PyMixedTriQuadMesh3d { + inner: PyMixedTriQuadMesh3dData, +} - /// Get mesh cell attribute by name - fn get_cell_attribute<'py>(&self, py: Python<'py>, name: &str) -> PyResult { - get_attribute_with_name::<$type>(py, self.inner.cell_attributes.as_slice(), name) - } +enum_wrapper_impl_from!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F32); +enum_wrapper_impl_from!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F64); - /// Get all point attributes in a python dictionary - fn get_point_attributes<'py>(&self, py: Python<'py>) -> PyResult> { - let res = PyDict::new(py); - - for attr in self.inner.point_attributes.iter() { - let data = get_attribute_with_name::<$type>( - py, - self.inner.point_attributes.as_slice(), - &attr.name, - ); - match data { - Ok(data) => res.set_item(&attr.name, data)?, - Err(_) => println!("Couldn't embed attribute {} in PyDict", &attr.name), - } - } +impl PyMixedTriQuadMesh3d { + pub fn try_from_generic(mut mesh: MixedTriQuadMesh3d) 
-> PyResult { + utils::transmute_take_into::<_, MixedTriQuadMesh3d, _>(&mut mesh) + .or_else(|| utils::transmute_take_into::<_, MixedTriQuadMesh3d, _>(&mut mesh)) + .ok_or_else(utils::pyerr_unsupported_scalar) + } +} - Ok(res) +#[gen_stub_pymethods] +#[pymethods] +impl PyMixedTriQuadMesh3d { + /// Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) + #[getter] + pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { + match &self.inner { + PyMixedTriQuadMesh3dData::F32(_) => np::dtype::(py), + PyMixedTriQuadMesh3dData::F64(_) => np::dtype::(py), + } + } + + /// The `Nx3` array of vertex positions of the mesh + #[getter] + pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { + match &this.borrow().inner { + PyMixedTriQuadMesh3dData::F32(mesh) => { + utils::view_vec_generic(mesh.vertices(), this.into_any()) + } + PyMixedTriQuadMesh3dData::F64(mesh) => { + utils::view_vec_generic(mesh.vertices(), this.into_any()) } + } + } - /// Get all cell attributes in a python dictionary - fn get_cell_attributes<'py>(&self, py: Python<'py>) -> PyResult> { - let res = PyDict::new(py); - - for attr in self.inner.cell_attributes.iter() { - let data = get_attribute_with_name::<$type>( - py, - self.inner.cell_attributes.as_slice(), - &attr.name, - ); - match data { - Ok(data) => res.set_item(&attr.name, data)?, - Err(_) => println!("Couldn't embed attribute {} in PyDict", &attr.name), - } - } + /// Returns a copy (deep copy) of this mesh + pub fn copy(&self) -> Self { + self.clone() + } - Ok(res) - } + /// Returns a copy of all triangle cells of the mesh as an `Nx3` array of vertex indices + pub fn get_triangles<'py>( + &self, + py: Python<'py>, + ) -> PyResult>> { + let cells = match &self.inner { + PyMixedTriQuadMesh3dData::F32(mesh) => mesh.cells.as_slice(), + PyMixedTriQuadMesh3dData::F64(mesh) => mesh.cells.as_slice(), + }; + + filter_cells(py, cells, |cell| match cell { + TriangleOrQuadCell::Tri(tri) => Some(tri.map(|v| v as 
NumpyUsize)), + _ => None, + }) + } - /// Get all registered point attribute names - fn get_point_attribute_keys<'py>( - &self, - py: Python<'py>, - ) -> PyResult> { - let mut res: Vec<&str> = vec![]; + /// Returns a copy of all quad cells of the mesh as an `Nx4` array of vertex indices + pub fn get_quads<'py>(&self, py: Python<'py>) -> PyResult>> { + let cells = match &self.inner { + PyMixedTriQuadMesh3dData::F32(mesh) => mesh.cells.as_slice(), + PyMixedTriQuadMesh3dData::F64(mesh) => mesh.cells.as_slice(), + }; + + filter_cells(py, cells, |cell| match cell { + TriangleOrQuadCell::Quad(quad) => Some(quad.map(|v| v as NumpyUsize)), + _ => None, + }) + } - for attr in self.inner.point_attributes.iter() { - res.push(&attr.name); - } + /// Writes the mesh to a file using ``meshio.write_points_cells`` + #[pyo3(signature = (path, *, file_format = Some("vtk42")))] + pub fn write_to_file<'py>( + this: Bound<'py, Self>, + path: PathBuf, + file_format: Option<&str>, + ) -> PyResult<()> { + let py = this.py(); + let mesh = + PyMeshWithData::try_from_pymesh(py, this.unbind().clone_ref(py))?.into_pyobject(py)?; + PyMeshWithData::write_to_file(mesh, path, file_format) + } +} - PyList::new(py, res) - } +pub fn filter_cells<'py, C, const N: usize, F>( + py: Python<'py>, + cells: &[C], + filter: F, +) -> PyResult>> +where + [NumpyUsize; N]: Pod + NoUninit, + F: Fn(&C) -> Option<[NumpyUsize; N]>, +{ + let filtered_cells: Vec<[NumpyUsize; N]> = cells.iter().filter_map(filter).collect(); + let n_triangles = filtered_cells.len(); + let vertex_indices: Vec = bytemuck::cast_vec(filtered_cells); + let array: Array2 = + Array2::from_shape_vec((n_triangles, N), vertex_indices).map_err(anyhow::Error::new)?; + let pyarray = array.into_pyarray(py); + Ok(pyarray) +} - /// Get all registered cell attribute names - fn get_cell_attribute_keys<'py>( - &self, - py: Python<'py>, - ) -> PyResult> { - let mut res: Vec<&str> = vec![]; +/// Enum specifying the type of mesh wrapped by a ``MeshWithData`` 
+#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[gen_stub_pyclass_enum] +#[pyclass(eq)] +pub enum MeshType { + /// 3D triangle mesh + Tri3d, + /// 3D mixed triangle and quad mesh + MixedTriQuad3d, +} - for attr in self.inner.cell_attributes.iter() { - res.push(&attr.name); - } +pub enum PyMesh3dData { + Tri3d(Py), + MixedTriQuad3d(Py), +} - PyList::new(py, res) - } - } - }; +enum_impl_from!(PyMesh3dData, Py => PyMesh3dData::Tri3d); +enum_impl_from!(PyMesh3dData, Py => PyMesh3dData::MixedTriQuad3d); + +#[derive(Clone)] +enum PyMeshAttributeData { + F32(OwnedMeshAttribute), + F64(OwnedMeshAttribute), } -macro_rules! create_tri_mesh_interface { - ($name: ident, $type: ident) => { - /// TriMesh3d wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: TriMesh3d<$type>, - } +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "MeshAttribute")] +#[derive(Clone)] +pub struct PyMeshAttribute { + inner: PyMeshAttributeData, +} - impl $name { - pub fn new(data: TriMesh3d<$type>) -> Self { - Self { inner: data } - } +enum_wrapper_impl_from!(PyMeshAttribute, OwnedMeshAttribute => PyMeshAttributeData::F32); +enum_wrapper_impl_from!(PyMeshAttribute, OwnedMeshAttribute => PyMeshAttributeData::F64); + +impl PyMeshAttribute { + pub fn try_from_generic<'py, R: Real + Element>( + name: String, + data: Bound<'py, PyUntypedArray>, + ) -> PyResult + where + PyMeshAttribute: From>, + { + let data = if let Ok(data) = data.downcast::>() { + OwnedAttributeData::ScalarU64(data.try_readonly()?.as_array().to_vec().into()) + } else if let Ok(data) = data.downcast::>() { + OwnedAttributeData::ScalarReal(data.try_readonly()?.as_array().to_vec().into()) + } else if let Ok(data) = data.downcast::>() { + let data_vec = data.try_readonly()?.as_slice()?.to_vec(); + if data.shape()[1] == 1 { + OwnedAttributeData::ScalarReal(bytemuck::cast_vec(data_vec).into()) + } else if data.shape()[1] == 3 { + OwnedAttributeData::Vector3Real(bytemuck::cast_vec(data_vec).into()) + } else { + return 
Err(PyValueError::new_err( + "expected Nx1 or Nx3 array for Vector3Real attribute data", + )); + } + } else { + return Err(PyTypeError::new_err("unsupported attribute data type")); + }; + + Ok(Self::from(OwnedMeshAttribute { name, data })) + } +} + +#[gen_stub_pymethods] +#[pymethods] +impl PyMeshAttribute { + /// Numpy dtype of the data stored in the attribute + #[getter] + pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { + match &self.inner { + PyMeshAttributeData::F32(attr) => match attr.data { + OwnedAttributeData::ScalarU64(_) => np::dtype::(py), + OwnedAttributeData::ScalarReal(_) => np::dtype::(py), + OwnedAttributeData::Vector3Real(_) => np::dtype::(py), + }, + PyMeshAttributeData::F64(attr) => match attr.data { + OwnedAttributeData::ScalarU64(_) => np::dtype::(py), + OwnedAttributeData::ScalarReal(_) => np::dtype::(py), + OwnedAttributeData::Vector3Real(_) => np::dtype::(py), + }, } + } - #[gen_stub_pymethods] - #[pymethods] - impl $name { - /// Returns a copy of the `Nx3` array of vertex positions - fn get_vertices<'py>(&self, py: Python<'py>) -> PyResult>> { - let points: &[$type] = bytemuck::cast_slice(&self.inner.vertices); - let vertices: ArrayView2<$type> = - ArrayView::from_shape((self.inner.vertices.len(), 3), points) - .map_err(anyhow::Error::new)?; - Ok(vertices.to_pyarray(py)) // seems like at least one copy is necessary here (to_pyarray copies the data) - } + /// Name of the attribute + #[getter] + pub fn name(&self) -> String { + match &self.inner { + PyMeshAttributeData::F32(attr) => attr.name.clone(), + PyMeshAttributeData::F64(attr) => attr.name.clone(), + } + } - /// Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - fn get_triangles<'py>(&self, py: Python<'py>) -> PyResult>> { - let tris: &[u64] = bytemuck::cast_slice(&self.inner.triangles); - let triangles: ArrayView2 = - ArrayView::from_shape((self.inner.triangles.len(), 3), tris) - .map_err(anyhow::Error::new)?; - 
Ok(triangles.to_pyarray(py)) - } + /// View of the attribute data as a numpy array + #[getter] + pub fn data<'py>(this: Bound<'py, Self>) -> PyResult> { + use utils::{view_scalar_generic, view_vec_generic}; + match &this.borrow().inner { + PyMeshAttributeData::F32(attr) => match &attr.data { + OwnedAttributeData::ScalarU64(data) => view_scalar_generic(data, this.into_any()), + OwnedAttributeData::ScalarReal(data) => view_scalar_generic(data, this.into_any()), + OwnedAttributeData::Vector3Real(data) => view_vec_generic(data, this.into_any()), + }, + PyMeshAttributeData::F64(attr) => match &attr.data { + OwnedAttributeData::ScalarU64(data) => view_scalar_generic(data, this.into_any()), + OwnedAttributeData::ScalarReal(data) => view_scalar_generic(data, this.into_any()), + OwnedAttributeData::Vector3Real(data) => view_vec_generic(data, this.into_any()), + }, + } + } +} - /// Alias for `get_triangles` - fn get_cells<'py>(&self, py: Python<'py>) -> PyResult>> { - self.get_triangles(py) - } +/// Mesh with attached point and cell attributes +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "MeshWithData")] +pub struct PyMeshWithData { + mesh: PyMesh3dData, + pub(crate) point_attributes: Vec>, + pub(crate) cell_attributes: Vec>, +} - /// Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - fn take_vertices<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult>> { - let vertices = std::mem::take(&mut self.inner.vertices); - let n = vertices.len(); - let vertices_scalar: Vec<$type> = bytemuck::cast_vec(vertices); - let vertices_array = PyArray::from_vec(py, vertices_scalar) - .reshape([n, 3]) - .map_err(anyhow::Error::new)?; - Ok(vertices_array) - } +impl PyMeshWithData { + /// Constructs a new mesh with data from an existing mesh object (either `TriMesh3d` or `MixedTriQuadMesh3d`) + pub fn try_from_pymesh<'py, T>(py: Python<'py>, mesh: T) -> PyResult + where + T: IntoPyObject<'py>, + T::Output: Into>, + Py: Into, + PyErr: From, + { + let 
mesh_bound = mesh.into_pyobject(py)?; + let mesh_py: Py = mesh_bound.into(); + let mesh: PyMesh3dData = mesh_py.into(); + + Ok(Self { + mesh, + point_attributes: vec![], + cell_attributes: vec![], + }) + } - /// Returns the `Mx3` array of the vertex indices that make up the triangles by moving it out of the mesh (zero copy) - fn take_triangles<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult>> { - let triangles = std::mem::take(&mut self.inner.triangles); - let m = triangles.len(); - let triangles_scalar: Vec = bytemuck::cast_vec(triangles); - let triangles_array = PyArray::from_vec(py, triangles_scalar) - .reshape([m, 3]) - .map_err(anyhow::Error::new)?; - Ok(triangles_array) - } + pub fn try_from_mesh_with_data<'py, R: Real + Element, M: Mesh3d + 'static>( + py: Python<'_>, + mesh_with_data: MeshWithData, + ) -> PyResult { + // Deconstruct the input mesh + let MeshWithData { + mut mesh, + mut point_attributes, + mut cell_attributes, + } = mesh_with_data; + + // Convert the inner mesh + let mut mesh_with_data = if let Some(mesh) = + utils::transmute_same_take::>(&mut mesh) + { + PyTriMesh3d::try_from_generic(mesh) + .and_then(|tri_mesh| Self::try_from_pymesh(py, tri_mesh)) + } else if let Some(mesh) = utils::transmute_same_take::>(&mut mesh) + { + PyMixedTriQuadMesh3d::try_from_generic(mesh) + .and_then(|quad_mesh| Self::try_from_pymesh(py, quad_mesh)) + } else { + Err(utils::pyerr_only_tri_and_tri_quad_mesh()) + }?; + + fn try_convert_attribute_vec<'a, In: Real + Element, Out: Real + Element>( + py: Python<'_>, + attributes: &mut Vec>, + dest: &mut Vec>, + ) -> Option<()> + where + PyMeshAttribute: From>, + { + utils::transmute_same_take::>, Vec>>( + attributes, + ) + .map(|a| { + a.into_iter() + .map(|a| { + PyMeshAttribute::from(a) + .into_pyobject(py) + .expect("allocation should not fail") + .into() + }) + .collect::>>() + }) + .and_then(|a| Some(*dest = a)) + } - /// Alias for `take_triangles` - fn take_cells<'py>(&mut self, py: Python<'py>) -> 
PyResult>> { - self.take_triangles(py) - } + if std::any::TypeId::of::() == std::any::TypeId::of::() { + try_convert_attribute_vec::( + py, + &mut point_attributes, + &mut mesh_with_data.point_attributes, + ); + try_convert_attribute_vec::( + py, + &mut cell_attributes, + &mut mesh_with_data.cell_attributes, + ); + } else if std::any::TypeId::of::() == std::any::TypeId::of::() { + try_convert_attribute_vec::( + py, + &mut point_attributes, + &mut mesh_with_data.point_attributes, + ); + try_convert_attribute_vec::( + py, + &mut cell_attributes, + &mut mesh_with_data.cell_attributes, + ); + } else { + return Err(utils::pyerr_unsupported_scalar()); + } - /// Returns a tuple containing the vertices and triangles of the mesh by moving them out of the mesh (zero copy) - fn take_vertices_and_triangles<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult> { - let tup = (self.take_vertices(py)?, self.take_triangles(py)?); - tup.into_pyobject(py) - } + Ok(mesh_with_data) + } - /// Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) - fn par_vertex_normals<'py>( - &self, - py: Python<'py>, - ) -> PyResult>> { - let normals_vec = self.inner.par_vertex_normals(); - let normals_vec = - bytemuck::allocation::cast_vec::>, $type>(normals_vec); - - let normals: &[$type] = normals_vec.as_slice(); - let normals: ArrayView2<$type> = - ArrayView::from_shape((normals.len() / 3, 3), normals) - .map_err(anyhow::Error::new)?; - - Ok(normals.to_pyarray(py)) - } + pub fn as_tri3d<'py, 'a>(&'a self, py: Python<'py>) -> Option> { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => Some(mesh.clone_ref(py)), + _ => None, + } + } - /// Returns a mapping of all mesh vertices to the set of their connected neighbor vertices - fn vertex_vertex_connectivity(&self) -> Vec> { - self.inner.vertex_vertex_connectivity() - } + pub fn as_mixed_tri_quad3d<'py>(&self, py: Python<'py>) -> Option> { + match &self.mesh { + 
PyMesh3dData::MixedTriQuad3d(mesh) => Some(mesh.clone_ref(py)), + _ => None, } - }; + } } -macro_rules! create_tri_quad_mesh_interface { - ($name: ident, $type: ident) => { - /// MixedTriQuadMesh3d wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: MixedTriQuadMesh3d<$type>, +#[gen_stub_pymethods] +#[pymethods] +impl PyMeshWithData { + /// Wraps an existing mesh object (either `TriMesh3d` or `MixedTriQuadMesh3d`) such that data (point and cell attributes) can be attached to it + #[new] + fn py_new<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + mesh: Bound<'py, PyAny>, + ) -> PyResult { + if mesh.is_instance_of::() { + let mesh = mesh.downcast_into::()?; + PyMeshWithData::try_from_pymesh(mesh.py(), mesh.unbind()) + } else if mesh.is_instance_of::() { + let mesh = mesh.downcast_into::()?; + PyMeshWithData::try_from_pymesh(mesh.py(), mesh.unbind()) + } else { + Err(PyTypeError::new_err( + "unsupported mesh type, expected TriMesh3d or MixedTriQuadMesh3d", + )) } + } - impl $name { - pub fn new(data: MixedTriQuadMesh3d<$type>) -> Self { - Self { inner: data } - } + /// Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) + #[getter] + pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => mesh.borrow(py).dtype(py), + PyMesh3dData::MixedTriQuad3d(mesh) => mesh.borrow(py).dtype(py), } + } - #[gen_stub_pymethods] - #[pymethods] - impl $name { - /// Returns a copy of the `Nx3` array of vertex positions - fn get_vertices<'py>(&self, py: Python<'py>) -> PyResult>> { - let points: &[$type] = bytemuck::cast_slice(&self.inner.vertices); - let vertices: ArrayView2<$type> = - ArrayView::from_shape((self.inner.vertices.len(), 3), points) - .map_err(anyhow::Error::new)?; - Ok(vertices.to_pyarray(py)) - } + /// Number of vertices in the mesh + #[getter] + pub fn nvertices<'py>(&self, py: Python<'py>) -> 
usize { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => match &mesh.borrow(py).deref().inner { + PyTriMesh3dData::F32(mesh) => mesh.vertices.len(), + PyTriMesh3dData::F64(mesh) => mesh.vertices.len(), + }, + PyMesh3dData::MixedTriQuad3d(mesh) => match &mesh.borrow(py).deref().inner { + PyMixedTriQuadMesh3dData::F32(mesh) => mesh.vertices.len(), + PyMixedTriQuadMesh3dData::F64(mesh) => mesh.vertices.len(), + }, + } + } - /// Returns a 2D list specifying the vertex indices either for a triangle or a quad - fn get_cells(&self) -> PyResult>> { - let cells: Vec> = self - .inner - .cells - .iter() - .map(|c| match c { - TriangleOrQuadCell::Tri(v) => v.to_vec(), - TriangleOrQuadCell::Quad(v) => v.to_vec(), - }) - .collect(); - Ok(cells) - } + /// Number of cells (triangles or quads) in the mesh + #[getter] + pub fn ncells<'py>(&self, py: Python<'py>) -> usize { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => match &mesh.borrow(py).deref().inner { + PyTriMesh3dData::F32(mesh) => mesh.triangles.len(), + PyTriMesh3dData::F64(mesh) => mesh.triangles.len(), + }, + PyMesh3dData::MixedTriQuad3d(mesh) => match &mesh.borrow(py).deref().inner { + PyMixedTriQuadMesh3dData::F32(mesh) => mesh.cells.len(), + PyMixedTriQuadMesh3dData::F64(mesh) => mesh.cells.len(), + }, + } + } - /// Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - fn take_vertices<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult>> { - let vertices = std::mem::take(&mut self.inner.vertices); - let n = vertices.len(); - let vertices_scalar: Vec<$type> = bytemuck::cast_vec(vertices); - let vertices_array = PyArray::from_vec(py, vertices_scalar) - .reshape([n, 3]) - .map_err(anyhow::Error::new)?; - Ok(vertices_array) + /// Type of the underlying mesh + #[getter] + pub fn mesh_type(&self) -> MeshType { + match &self.mesh { + PyMesh3dData::Tri3d(_) => MeshType::Tri3d, + PyMesh3dData::MixedTriQuad3d(_) => MeshType::MixedTriQuad3d, + } + } + + /// The wrapped mesh without 
associated data and attributes + #[getter] + #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] + pub fn mesh<'py>(&self, py: Python<'py>) -> Py { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => mesh.clone_ref(py).into_any(), + PyMesh3dData::MixedTriQuad3d(mesh) => mesh.clone_ref(py).into_any(), + } + } + + /// The attributes attached points (vertices) of the mesh + #[getter] + #[gen_stub(override_return_type(type_repr="dict[str, numpy.typing.NDArray]", imports=()))] + pub fn point_attributes<'py>(&self, py: Python<'py>) -> PyResult> { + self.point_attributes + .iter() + .map(|attr| -> PyResult<_> { + let attr = attr.clone_ref(py).into_bound(py); + let name = attr.try_borrow()?.name(); + let data = PyMeshAttribute::data(attr)?; + Ok((name, data)) + }) + .collect::, _>>()? + .into_py_dict(py) + } + + /// The attributes attached to the cells (triangles or quads) of the mesh + #[getter] + #[gen_stub(override_return_type(type_repr="dict[str, numpy.typing.NDArray]", imports=()))] + pub fn cell_attributes<'py>(&self, py: Python<'py>) -> PyResult> { + self.cell_attributes + .iter() + .map(|attr| -> PyResult<_> { + let attr = attr.clone_ref(py).into_bound(py); + let name = attr.try_borrow()?.name(); + let data = PyMeshAttribute::data(attr)?; + Ok((name, data)) + }) + .collect::, _>>()? 
+ .into_py_dict(py) + } + + /// Returns a copy of the wrapped mesh without associated data and attributes + #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] + pub fn copy_mesh<'py>(&self, py: Python<'py>) -> PyResult> { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => mesh.borrow(py).clone().into_bound_py_any(py), + PyMesh3dData::MixedTriQuad3d(mesh) => mesh.borrow(py).clone().into_bound_py_any(py), + } + } + + /// Returns a copy (deep copy) of this mesh with its data and attributes + pub fn copy<'py>(&self, py: Python<'py>) -> PyResult { + Ok(Self { + mesh: match &self.mesh { + PyMesh3dData::Tri3d(mesh) => { + PyMesh3dData::from(mesh.borrow(py).clone().into_pyobject(py)?.unbind()) + } + PyMesh3dData::MixedTriQuad3d(mesh) => { + PyMesh3dData::from(mesh.borrow(py).clone().into_pyobject(py)?.unbind()) + } + }, + point_attributes: self + .point_attributes + .iter() + .map(|attr| -> PyResult> { + Ok(attr.borrow(py).clone().into_pyobject(py)?.unbind()) + }) + .collect::, _>>()?, + cell_attributes: self + .cell_attributes + .iter() + .map(|attr| -> PyResult> { + Ok(attr.borrow(py).clone().into_pyobject(py)?.unbind()) + }) + .collect::, _>>()?, + }) + } + + /// Attaches a point attribute to the mesh + /// + /// There has to be exactly one attribute value per vertex in the mesh. + /// As attribute data, the following numpy array types are supported: + /// - 1D array with shape (N,) of ``np.uint64`` + /// - 1D array with shape (N,) of the mesh scalar type (``np.float32`` or ``np.float64``) + /// - 2D array with shape (N,3) of the mesh scalar type (``np.float32`` or ``np.float64``) + /// The data is copied into the mesh object. 
+ pub fn add_point_attribute<'py>( + &mut self, + py: Python<'py>, + name: String, + attribute: Bound<'py, PyUntypedArray>, + ) -> PyResult<()> { + assert_eq!( + attribute.shape()[0], + self.nvertices(py), + "number of attribute values must match number of vertices in the mesh" + ); + + let dtype = self.dtype(py); + let attribute = if dtype.is_equiv_to(&np::dtype::(py)) { + PyMeshAttribute::try_from_generic::(name, attribute)? + } else if dtype.is_equiv_to(&np::dtype::(py)) { + PyMeshAttribute::try_from_generic::(name, attribute)? + } else { + return Err(PyTypeError::new_err( + "unsupported dtype for mesh vertices (expected float32 or float64)", + )); + }; + + self.point_attributes + .push(attribute.into_pyobject(py)?.unbind()); + Ok(()) + } + + /// Attaches a cell attribute to the mesh + /// + /// There has to be exactly one attribute value per cell in the mesh. + /// As attribute data, the following numpy array types are supported: + /// - 1D array with shape (N,) of ``np.uint64`` + /// - 1D array with shape (N,) of the mesh scalar type (``np.float32`` or ``np.float64``) + /// - 2D array with shape (N,3) of the mesh scalar type (``np.float32`` or ``np.float64``) + /// The data is copied into the mesh object. + pub fn add_cell_attribute<'py>( + &mut self, + py: Python<'py>, + name: String, + attribute: Bound<'py, PyUntypedArray>, + ) -> PyResult<()> { + assert_eq!( + attribute.shape()[0], + self.ncells(py), + "number of attribute values must match number of cells in the mesh" + ); + + let dtype = self.dtype(py); + let attribute = if dtype.is_equiv_to(&np::dtype::(py)) { + PyMeshAttribute::try_from_generic::(name, attribute)? + } else if dtype.is_equiv_to(&np::dtype::(py)) { + PyMeshAttribute::try_from_generic::(name, attribute)? 
+ } else { + return Err(PyTypeError::new_err( + "unsupported dtype for mesh vertices (expected float32 or float64)", + )); + }; + + self.cell_attributes + .push(attribute.into_pyobject(py)?.unbind()); + Ok(()) + } + + /// Writes the mesh and its attributes to a file using ``meshio.write_points_cells`` + #[pyo3(signature = (path, *, file_format = Some("vtk42")))] + pub fn write_to_file<'py>( + this: Bound<'py, Self>, + path: PathBuf, + file_format: Option<&str>, + ) -> PyResult<()> { + let py = this.py(); + let meshio = PyModule::import(py, "meshio")?; + let write_points_cells = meshio.getattr("write_points_cells")?; + + let this = this.borrow(); + + let filename = path.into_py_any(py)?; + let points = match &this.mesh { + PyMesh3dData::Tri3d(mesh) => PyTriMesh3d::vertices(mesh.clone_ref(py).into_bound(py))?, + PyMesh3dData::MixedTriQuad3d(mesh) => { + PyMixedTriQuadMesh3d::vertices(mesh.clone_ref(py).into_bound(py))? } } - }; + .into_py_any(py)?; + let cells = match &this.mesh { + PyMesh3dData::Tri3d(mesh) => { + let triangles = PyTriMesh3d::triangles(mesh.clone_ref(py).into_bound(py))?; + let dict = [("triangle", triangles)].into_py_dict(py)?; + dict.into_py_any(py)? + } + PyMesh3dData::MixedTriQuad3d(mesh) => { + let triangles = mesh.borrow(py).get_triangles(py)?; + let quads = mesh.borrow(py).get_quads(py)?; + let dict = [("triangle", triangles), ("quad", quads)].into_py_dict(py)?; + dict.into_py_any(py)? 
+ } + }; + let point_data = this.point_attributes(py)?.into_py_any(py)?; + let cell_data = this.cell_attributes(py)?.into_py_any(py)?; + let field_data = py.None(); + let point_sets = py.None(); + let cell_sets = py.None(); + let file_format = file_format.into_py_any(py)?; + + let args_vec: Vec<(&str, Py)> = vec![ + ("filename", filename), + ("points", points), + ("cells", cells), + ("point_data", point_data), + ("cell_data", cell_data), + ("field_data", field_data), + ("point_sets", point_sets), + ("cell_sets", cell_sets), + ("file_format", file_format), + ]; + let args = args_vec.into_py_dict(py)?; + + let _ = write_points_cells.call((), Some(&args))?; + Ok(()) + } } - -create_tri_mesh_interface!(TriMesh3dF64, f64); -create_tri_mesh_interface!(TriMesh3dF32, f32); - -create_tri_quad_mesh_interface!(MixedTriQuadMesh3dF64, f64); -create_tri_quad_mesh_interface!(MixedTriQuadMesh3dF32, f32); - -create_mesh_data_interface!(TriMeshWithDataF64, f64, TriMesh3d, TriMesh3dF64, Aabb3dF64); -create_mesh_data_interface!(TriMeshWithDataF32, f32, TriMesh3d, TriMesh3dF32, Aabb3dF32); - -create_mesh_data_interface!( - MixedTriQuadMeshWithDataF64, - f64, - MixedTriQuadMesh3d, - MixedTriQuadMesh3dF64, - Aabb3dF64 -); -create_mesh_data_interface!( - MixedTriQuadMeshWithDataF32, - f32, - MixedTriQuadMesh3d, - MixedTriQuadMesh3dF32, - Aabb3dF32 -); diff --git a/pysplashsurf/src/neighborhood_search.rs b/pysplashsurf/src/neighborhood_search.rs index d0e20281..1f08cdf4 100644 --- a/pysplashsurf/src/neighborhood_search.rs +++ b/pysplashsurf/src/neighborhood_search.rs @@ -1,53 +1,97 @@ -use numpy::{PyArray2, PyReadonlyArray2}; +use numpy as np; +use numpy::prelude::*; +use numpy::{PyArray2, PyUntypedArray}; +use pyo3::exceptions::PyIndexError; use pyo3::prelude::*; -use splashsurf_lib::{nalgebra::Vector3, neighborhood_search::*}; +use pyo3_stub_gen::derive::*; +use splashsurf_lib::nalgebra::Vector3; -use crate::aabb::{Aabb3dF32, Aabb3dF64}; +use crate::aabb::PyAabb3d; +use crate::utils::*; 
-#[pyfunction] -#[pyo3(name = "neighborhood_search_spatial_hashing_parallel_f64")] -#[pyo3(signature = (domain, particle_positions, search_radius))] -pub fn neighborhood_search_spatial_hashing_parallel_py_f64<'py>( - domain: &Aabb3dF64, - particle_positions: &Bound<'py, PyArray2>, - search_radius: f64, -) -> PyResult>> { - let mut nl: Vec> = Vec::new(); +// TODO: Bindings for flat neighborhood search +// TODO: Bindings for computing particle densities - let particle_positions: PyReadonlyArray2 = particle_positions.extract()?; - let particle_positions = particle_positions.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); +/// Per particle neighborhood lists +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "NeighborhoodLists")] +pub struct PyNeighborhoodLists { + inner: Vec>, +} - neighborhood_search_spatial_hashing_parallel::( - &domain.inner, - particle_positions, - search_radius, - &mut nl, - ); +impl From>> for PyNeighborhoodLists { + fn from(nl: Vec>) -> Self { + Self { inner: nl } + } +} - Ok(nl) +#[gen_stub_pymethods] +#[pymethods] +impl PyNeighborhoodLists { + /// Returns the number of particles for which neighborhood lists are stored + pub fn __len__(&self) -> usize { + self.inner.len() + } + + /// Returns the neighborhood list for the particle at the given index + pub fn __getitem__(&self, idx: isize) -> PyResult> { + let len = self.inner.len() as isize; + let idx = if idx < 0 { len + idx } else { idx }; + if idx < 0 || idx >= len { + Err(PyIndexError::new_err("index out of bounds")) + } else { + Ok(self.inner[idx as usize].clone()) + } + } + + /// Returns all stored neighborhood lists as a list of lists + pub fn get_neighborhood_lists(&self) -> Vec> { + self.inner.clone() + } } +/// Performs a neighborhood search using spatial hashing (multithreaded implementation) +#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "neighborhood_search_spatial_hashing_parallel_f32")] -#[pyo3(signature = (domain, 
particle_positions, search_radius))] -pub fn neighborhood_search_spatial_hashing_parallel_py_f32<'py>( - domain: &Aabb3dF32, - particle_positions: &Bound<'py, PyArray2>, - search_radius: f32, -) -> PyResult>> { +#[pyo3(name = "neighborhood_search_spatial_hashing_parallel")] +#[pyo3(signature = (particle_positions, domain, search_radius))] +pub fn neighborhood_search_spatial_hashing_parallel<'py>( + particle_positions: &Bound<'py, PyUntypedArray>, + domain: &Bound<'py, PyAabb3d>, + search_radius: f64, +) -> PyResult { let mut nl: Vec> = Vec::new(); - let particle_positions: PyReadonlyArray2 = particle_positions.extract()?; - let particle_positions = particle_positions.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + let py = particle_positions.py(); + let element_type = particle_positions.dtype(); + if element_type.is_equiv_to(&np::dtype::(py)) { + let particle_positions = particle_positions + .downcast::>()? + .try_readonly()?; + let particles: &[Vector3] = bytemuck::cast_slice(particle_positions.as_slice()?); + + splashsurf_lib::neighborhood_search::neighborhood_search_spatial_hashing_parallel::( + &domain.borrow().inner(), + particles, + search_radius as f32, + &mut nl, + ); + } else if element_type.is_equiv_to(&np::dtype::(py)) { + let particle_positions = particle_positions + .downcast::>()? 
+ .try_readonly()?; + let particles: &[Vector3] = bytemuck::cast_slice(particle_positions.as_slice()?); - neighborhood_search_spatial_hashing_parallel::( - &domain.inner, - particle_positions, - search_radius, - &mut nl, - ); + splashsurf_lib::neighborhood_search::neighborhood_search_spatial_hashing_parallel::( + &domain.borrow().inner(), + particles, + search_radius, + &mut nl, + ); + } else { + return Err(pyerr_unsupported_scalar()); + } - Ok(nl) + Ok(nl.into()) } diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index 1ec94704..ebaf4024 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -1,15 +1,14 @@ -use crate::{ - mesh::{ - MixedTriQuadMeshWithDataF32, MixedTriQuadMeshWithDataF64, TriMeshWithDataF32, - TriMeshWithDataF64, - }, - reconstruction::{SurfaceReconstructionF32, SurfaceReconstructionF64}, +use numpy as np; +use numpy::prelude::*; +use numpy::{ + Element, PyArray1, PyArray2, PyArrayDescr, PyReadonlyArray1, PyReadonlyArray2, PyUntypedArray, }; -use numpy::{Element, PyArray1, PyArray2, PyArrayMethods, PyReadonlyArray1, PyReadonlyArray2}; +use pyo3::exceptions::PyRuntimeError; use pyo3::{ prelude::*, types::{PyDict, PyString}, }; +use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::{ Aabb3d, GridDecompositionParameters, Index, Real, SpatialDecomposition, mesh::{AttributeData, MeshAttribute}, @@ -17,16 +16,37 @@ use splashsurf_lib::{ }; use std::borrow::Cow; -fn reconstruction_pipeline_generic<'py, I: Index, R: Real + Element>( - particles: &Bound<'py, PyArray2>, - attributes_to_interpolate: Bound<'py, PyDict>, - particle_radius: R, - rest_density: R, - smoothing_length: R, - cube_size: R, - iso_surface_threshold: R, - aabb_min: Option<[R; 3]>, - aabb_max: Option<[R; 3]>, +use crate::mesh::PyMeshWithData; +use crate::reconstruction::PySurfaceReconstruction; +use crate::utils::{IndexT, pyerr_unsupported_scalar}; + +/// Runs the surface reconstruction pipeline for the given particle 
positions with optional post-processing +/// +/// Note that smoothing length and cube size are given in multiples of the particle radius. +#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "reconstruction_pipeline")] +#[pyo3(signature = (particles, *, attributes_to_interpolate = None, + particle_radius, rest_density = 1000.0, smoothing_length, cube_size, iso_surface_threshold = 0.6, + aabb_min = None, aabb_max = None, multi_threading = true, + subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, + check_mesh_closed = false, check_mesh_manifold = false, check_mesh_orientation = false, check_mesh_debug = false, + mesh_cleanup = false, mesh_cleanup_snap_dist = None, decimate_barnacles = false, keep_vertices = false, compute_normals = false, sph_normals = false, + normals_smoothing_iters = None, mesh_smoothing_iters = None, mesh_smoothing_weights = true, mesh_smoothing_weights_normalization = 13.0, + generate_quads = false, quad_max_edge_diag_ratio = 1.75, quad_max_normal_angle = 10.0, quad_max_interior_angle = 135.0, + output_mesh_smoothing_weights = false, output_raw_normals = false, output_raw_mesh = false, + mesh_aabb_min = None, mesh_aabb_max = None, mesh_aabb_clamp_vertices = true, dtype = None +))] +pub fn reconstruction_pipeline<'py>( + particles: &Bound<'py, PyUntypedArray>, + attributes_to_interpolate: Option>, + particle_radius: f64, + rest_density: f64, + smoothing_length: f64, + cube_size: f64, + iso_surface_threshold: f64, + aabb_min: Option<[f64; 3]>, + aabb_max: Option<[f64; 3]>, multi_threading: bool, subdomain_grid: bool, subdomain_grid_auto_disable: bool, @@ -55,8 +75,120 @@ fn reconstruction_pipeline_generic<'py, I: Index, R: Real + Element>( mesh_aabb_min: Option<[f64; 3]>, mesh_aabb_max: Option<[f64; 3]>, mesh_aabb_clamp_vertices: bool, + dtype: Option>, +) -> PyResult<(PyMeshWithData, PySurfaceReconstruction)> { + let py = particles.py(); + let element_type = particles.dtype(); + + if let 
Some(target_dtype) = dtype + && !target_dtype.is_equiv_to(&element_type) + { + unimplemented!("Casting to different dtype is not implemented yet"); + } + + let particle_aabb = aabb_min + .zip(aabb_max) + .map(|(min, max)| Aabb3d::new(Vector3::from(min), Vector3::from(max))); + + let mesh_aabb = mesh_aabb_min + .zip(mesh_aabb_max) + .map(|(min, max)| Aabb3d::new(Vector3::from(min), Vector3::from(max))); + + let spatial_decomposition = if subdomain_grid { + SpatialDecomposition::UniformGrid(GridDecompositionParameters { + subdomain_num_cubes_per_dim, + auto_disable: subdomain_grid_auto_disable, + }) + } else { + SpatialDecomposition::None + }; + + let parameters = splashsurf_lib::Parameters { + particle_radius, + rest_density, + compact_support_radius: 2.0 * smoothing_length * particle_radius, + cube_size: cube_size * particle_radius, + iso_surface_threshold, + particle_aabb, + enable_multi_threading: multi_threading, + spatial_decomposition, + global_neighborhood_list: false, + }; + + let postprocessing_args = splashsurf::reconstruct::ReconstructionPostprocessingParameters { + check_mesh_closed, + check_mesh_manifold, + check_mesh_orientation, + check_mesh_debug, + mesh_cleanup, + mesh_cleanup_snap_dist, + decimate_barnacles, + keep_vertices, + compute_normals, + sph_normals, + normals_smoothing_iters, + interpolate_attributes: None, + mesh_smoothing_iters, + mesh_smoothing_weights, + mesh_smoothing_weights_normalization, + generate_quads, + quad_max_edge_diag_ratio, + quad_max_normal_angle, + quad_max_interior_angle, + output_mesh_smoothing_weights, + output_raw_normals, + output_raw_mesh, + mesh_aabb, + mesh_aabb_clamp_vertices, + }; + + fn reconstruction_to_pymesh<'py, R: Real + Element>( + py: Python<'py>, + reconstruction: splashsurf::reconstruct::ReconstructionResult, + ) -> PyResult<(PyMeshWithData, PySurfaceReconstruction)> { + let mesh_with_data = if let Some(tri_mesh) = reconstruction.tri_mesh { + PyMeshWithData::try_from_mesh_with_data(py, tri_mesh)? 
+ } else if let Some(tri_quad_mesh) = reconstruction.tri_quad_mesh { + PyMeshWithData::try_from_mesh_with_data(py, tri_quad_mesh)? + } else { + return Err(PyRuntimeError::new_err("reconstruction returned no mesh")); + }; + let rec = PySurfaceReconstruction::try_from_generic(py, reconstruction.raw_reconstruction)?; + Ok((mesh_with_data, rec)) + } + + if element_type.is_equiv_to(&np::dtype::(py)) { + let particles = particles.downcast::>()?; + let reconstruction = reconstruction_pipeline_generic_impl::( + particles, + attributes_to_interpolate, + ¶meters + .try_convert() + .expect("failed to convert reconstruction parameters to f32"), + &postprocessing_args, + )?; + reconstruction_to_pymesh(py, reconstruction) + } else if element_type.is_equiv_to(&np::dtype::(py)) { + let particles = particles.downcast::>()?; + let reconstruction = reconstruction_pipeline_generic_impl::( + particles, + attributes_to_interpolate, + ¶meters, + &postprocessing_args, + )?; + reconstruction_to_pymesh(py, reconstruction) + } else { + Err(pyerr_unsupported_scalar()) + } +} + +fn reconstruction_pipeline_generic_impl<'py, I: Index, R: Real + Element>( + particles: &Bound<'py, PyArray2>, + attributes_to_interpolate: Option>, + parameters: &splashsurf_lib::Parameters, + postprocessing_args: &splashsurf::reconstruct::ReconstructionPostprocessingParameters, ) -> Result, anyhow::Error> { - let particles: PyReadonlyArray2 = particles.readonly(); + let particles: PyReadonlyArray2 = particles.try_readonly()?; let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); enum AttributePyView<'a, R: Real + Element> { @@ -69,10 +201,10 @@ fn reconstruction_pipeline_generic<'py, I: Index, R: Real + Element>( let mut attr_views = Vec::new(); // Collect readonly views of all attribute arrays - for (key, value) in attributes_to_interpolate.iter() { + for (key, value) in attributes_to_interpolate.iter().flatten() { let key_str: String = key .downcast::() - .expect("Key wasn't a string") + 
.expect("attribute key has to be a string") .extract()?; if let Ok(value) = value.downcast::>() { @@ -85,7 +217,7 @@ fn reconstruction_pipeline_generic<'py, I: Index, R: Real + Element>( attr_views.push(AttributePyView::FloatVec3(value.readonly())); attr_names.push(key_str); } else { - println!("Couldn't downcast attribute {} to valid type", &key_str); + println!("Failed to downcast attribute {} to valid type", &key_str); } } @@ -110,293 +242,14 @@ fn reconstruction_pipeline_generic<'py, I: Index, R: Real + Element>( }) .collect::, _>>()?; - let aabb = if let (Some(aabb_min), Some(aabb_max)) = (aabb_min, aabb_max) { - // Convert the min and max arrays to Vector3 - Some(Aabb3d::new( - Vector3::from(aabb_min), - Vector3::from(aabb_max), - )) - } else { - None - }; - - let spatial_decomposition = if subdomain_grid { - SpatialDecomposition::UniformGrid(GridDecompositionParameters { - subdomain_num_cubes_per_dim, - auto_disable: subdomain_grid_auto_disable, - }) - } else { - SpatialDecomposition::None - }; - - let params = splashsurf_lib::Parameters { - particle_radius, - rest_density, - compact_support_radius: R::from_float(2.0) * smoothing_length * particle_radius, - cube_size: cube_size * particle_radius, - iso_surface_threshold, - particle_aabb: aabb, - enable_multi_threading: multi_threading, - spatial_decomposition, - global_neighborhood_list: mesh_smoothing_weights, - }; - - let mesh_aabb = - if let (Some(mesh_aabb_min), Some(mesh_aabb_max)) = (mesh_aabb_min, mesh_aabb_max) { - // Convert the min and max arrays to Vector3 - Some(Aabb3d::new( - Vector3::from(mesh_aabb_min), - Vector3::from(mesh_aabb_max), - )) - } else { - None - }; - - let postprocessing_args = splashsurf::reconstruct::ReconstructionPostprocessingParameters { - check_mesh_closed, - check_mesh_manifold, - check_mesh_orientation, - check_mesh_debug, - mesh_cleanup, - mesh_cleanup_snap_dist, - decimate_barnacles, - keep_vertices, - compute_normals, - sph_normals, - normals_smoothing_iters, - 
interpolate_attributes: Some(attributes.iter().map(|a| a.name.clone()).collect()), - mesh_smoothing_iters, - mesh_smoothing_weights, - mesh_smoothing_weights_normalization, - generate_quads, - quad_max_edge_diag_ratio, - quad_max_normal_angle, - quad_max_interior_angle, - output_mesh_smoothing_weights, - output_raw_normals, - output_raw_mesh, - mesh_aabb, - mesh_aabb_clamp_vertices, - }; + let mut postprocessing_args = postprocessing_args.clone(); + postprocessing_args.interpolate_attributes = + (!attributes.is_empty()).then(|| attributes.iter().map(|a| a.name.clone()).collect()); splashsurf::reconstruct::reconstruction_pipeline( particle_positions, &attributes, - ¶ms, + ¶meters, &postprocessing_args, ) } - -#[pyfunction] -#[pyo3(name = "reconstruction_pipeline_f32")] -#[pyo3(signature = (particles, *, attributes_to_interpolate, particle_radius, rest_density, - smoothing_length, cube_size, iso_surface_threshold, - aabb_min = None, aabb_max = None, multi_threading = true, - subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, - check_mesh_closed = false, check_mesh_manifold = false, check_mesh_orientation = false, check_mesh_debug = false, - mesh_cleanup, mesh_cleanup_snap_dist = None, decimate_barnacles, keep_vertices, compute_normals, sph_normals, - normals_smoothing_iters, mesh_smoothing_iters, mesh_smoothing_weights, mesh_smoothing_weights_normalization, - generate_quads = false, quad_max_edge_diag_ratio = 1.75, quad_max_normal_angle = 10.0, quad_max_interior_angle = 135.0, - output_mesh_smoothing_weights, output_raw_normals, output_raw_mesh=false, - mesh_aabb_min, mesh_aabb_max, mesh_aabb_clamp_vertices -))] -pub fn reconstruction_pipeline_py_f32<'py>( - particles: &Bound<'py, PyArray2>, - attributes_to_interpolate: Bound<'py, PyDict>, - particle_radius: f32, - rest_density: f32, - smoothing_length: f32, - cube_size: f32, - iso_surface_threshold: f32, - aabb_min: Option<[f32; 3]>, - aabb_max: Option<[f32; 3]>, - 
multi_threading: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - check_mesh_closed: bool, - check_mesh_manifold: bool, - check_mesh_orientation: bool, - check_mesh_debug: bool, - mesh_cleanup: bool, - mesh_cleanup_snap_dist: Option, - decimate_barnacles: bool, - keep_vertices: bool, - compute_normals: bool, - sph_normals: bool, - normals_smoothing_iters: Option, - mesh_smoothing_iters: Option, - mesh_smoothing_weights: bool, - mesh_smoothing_weights_normalization: f64, - generate_quads: bool, - quad_max_edge_diag_ratio: f64, - quad_max_normal_angle: f64, - quad_max_interior_angle: f64, - output_mesh_smoothing_weights: bool, - output_raw_normals: bool, - output_raw_mesh: bool, - mesh_aabb_min: Option<[f64; 3]>, - mesh_aabb_max: Option<[f64; 3]>, - mesh_aabb_clamp_vertices: bool, -) -> PyResult<( - Option, - Option, - Option, -)> { - let splashsurf::reconstruct::ReconstructionResult { - tri_mesh, - tri_quad_mesh, - raw_reconstruction: reconstruction, - } = reconstruction_pipeline_generic::( - particles, - attributes_to_interpolate, - particle_radius, - rest_density, - smoothing_length, - cube_size, - iso_surface_threshold, - aabb_min, - aabb_max, - multi_threading, - subdomain_grid, - subdomain_grid_auto_disable, - subdomain_num_cubes_per_dim, - check_mesh_closed, - check_mesh_manifold, - check_mesh_orientation, - check_mesh_debug, - mesh_cleanup, - mesh_cleanup_snap_dist, - decimate_barnacles, - keep_vertices, - compute_normals, - sph_normals, - normals_smoothing_iters, - mesh_smoothing_iters, - mesh_smoothing_weights, - mesh_smoothing_weights_normalization, - generate_quads, - quad_max_edge_diag_ratio, - quad_max_normal_angle, - quad_max_interior_angle, - output_mesh_smoothing_weights, - output_raw_normals, - output_raw_mesh, - mesh_aabb_min, - mesh_aabb_max, - mesh_aabb_clamp_vertices, - )?; - - Ok(( - tri_mesh.map(TriMeshWithDataF32::new), - tri_quad_mesh.map(MixedTriQuadMeshWithDataF32::new), - 
reconstruction.map(SurfaceReconstructionF32::new), - )) -} - -#[pyfunction] -#[pyo3(name = "reconstruction_pipeline_f64")] -#[pyo3(signature = (particles, *, attributes_to_interpolate, particle_radius, rest_density, - smoothing_length, cube_size, iso_surface_threshold, - aabb_min = None, aabb_max = None, multi_threading = true, - subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, - check_mesh_closed = false, check_mesh_manifold = false, check_mesh_orientation = false, check_mesh_debug = false, - mesh_cleanup, mesh_cleanup_snap_dist = None, decimate_barnacles, keep_vertices, compute_normals, sph_normals, - normals_smoothing_iters, mesh_smoothing_iters, mesh_smoothing_weights, mesh_smoothing_weights_normalization, - generate_quads = false, quad_max_edge_diag_ratio = 1.75, quad_max_normal_angle = 10.0, quad_max_interior_angle = 135.0, - output_mesh_smoothing_weights, output_raw_normals, output_raw_mesh=false, - mesh_aabb_min, mesh_aabb_max, mesh_aabb_clamp_vertices -))] -pub fn reconstruction_pipeline_py_f64<'py>( - particles: &Bound<'py, PyArray2>, - attributes_to_interpolate: Bound<'py, PyDict>, - particle_radius: f64, - rest_density: f64, - smoothing_length: f64, - cube_size: f64, - iso_surface_threshold: f64, - aabb_min: Option<[f64; 3]>, - aabb_max: Option<[f64; 3]>, - multi_threading: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - check_mesh_closed: bool, - check_mesh_manifold: bool, - check_mesh_orientation: bool, - check_mesh_debug: bool, - mesh_cleanup: bool, - mesh_cleanup_snap_dist: Option, - decimate_barnacles: bool, - keep_vertices: bool, - compute_normals: bool, - sph_normals: bool, - normals_smoothing_iters: Option, - mesh_smoothing_iters: Option, - mesh_smoothing_weights: bool, - mesh_smoothing_weights_normalization: f64, - generate_quads: bool, - quad_max_edge_diag_ratio: f64, - quad_max_normal_angle: f64, - quad_max_interior_angle: f64, - 
output_mesh_smoothing_weights: bool, - output_raw_normals: bool, - output_raw_mesh: bool, - mesh_aabb_min: Option<[f64; 3]>, - mesh_aabb_max: Option<[f64; 3]>, - mesh_aabb_clamp_vertices: bool, -) -> PyResult<( - Option, - Option, - Option, -)> { - let splashsurf::reconstruct::ReconstructionResult { - tri_mesh, - tri_quad_mesh, - raw_reconstruction: reconstruction, - } = reconstruction_pipeline_generic::( - particles, - attributes_to_interpolate, - particle_radius, - rest_density, - smoothing_length, - cube_size, - iso_surface_threshold, - aabb_min, - aabb_max, - multi_threading, - subdomain_grid, - subdomain_grid_auto_disable, - subdomain_num_cubes_per_dim, - check_mesh_closed, - check_mesh_manifold, - check_mesh_orientation, - check_mesh_debug, - mesh_cleanup, - mesh_cleanup_snap_dist, - decimate_barnacles, - keep_vertices, - compute_normals, - sph_normals, - normals_smoothing_iters, - mesh_smoothing_iters, - mesh_smoothing_weights, - mesh_smoothing_weights_normalization, - generate_quads, - quad_max_edge_diag_ratio, - quad_max_normal_angle, - quad_max_interior_angle, - output_mesh_smoothing_weights, - output_raw_normals, - output_raw_mesh, - mesh_aabb_min, - mesh_aabb_max, - mesh_aabb_clamp_vertices, - )?; - - Ok(( - tri_mesh.map(TriMeshWithDataF64::new), - tri_quad_mesh.map(MixedTriQuadMeshWithDataF64::new), - reconstruction.map(SurfaceReconstructionF64::new), - )) -} diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs deleted file mode 100644 index 9fb32861..00000000 --- a/pysplashsurf/src/post_processing.rs +++ /dev/null @@ -1,323 +0,0 @@ -use ndarray::ArrayViewMut2; -use numpy::{PyArray2, PyArrayMethods}; -use pyo3::{exceptions::PyValueError, prelude::*}; -use splashsurf_lib::nalgebra::Vector3; - -use crate::{ - mesh::{ - MixedTriQuadMesh3dF32, MixedTriQuadMesh3dF64, MixedTriQuadMeshWithDataF32, - MixedTriQuadMeshWithDataF64, TriMesh3dF32, TriMesh3dF64, TriMeshWithDataF32, - TriMeshWithDataF64, - }, - 
uniform_grid::{UniformGridF32, UniformGridF64}, -}; - -#[pyfunction] -#[pyo3(name = "convert_tris_to_quads_f64")] -#[pyo3(signature = (mesh, *, non_squareness_limit, normal_angle_limit_rad, max_interior_angle))] -pub fn convert_tris_to_quads_py_f64<'py>( - mesh: PyObject, - py: Python<'py>, - non_squareness_limit: f64, - normal_angle_limit_rad: f64, - max_interior_angle: f64, -) -> PyResult { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - let quad_mesh = - MixedTriQuadMesh3dF64::new(splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.borrow().inner, - non_squareness_limit, - normal_angle_limit_rad, - max_interior_angle, - )); - Ok(quad_mesh.into_pyobject(py).unwrap().into()) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - let mut quad_mesh = - MixedTriQuadMeshWithDataF64::new(splashsurf_lib::mesh::MeshWithData::new( - splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.borrow().inner.mesh, - non_squareness_limit, - normal_angle_limit_rad, - max_interior_angle, - ), - )); - - quad_mesh.inner.point_attributes = mesh.borrow().inner.point_attributes.clone(); - - Ok(quad_mesh.into_pyobject(py).unwrap().into()) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} - -#[pyfunction] -#[pyo3(name = "convert_tris_to_quads_f32")] -#[pyo3(signature = (mesh, *, non_squareness_limit, normal_angle_limit_rad, max_interior_angle))] -pub fn convert_tris_to_quads_py_f32<'py>( - py: Python<'py>, - mesh: PyObject, - non_squareness_limit: f32, - normal_angle_limit_rad: f32, - max_interior_angle: f32, -) -> PyResult { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - let quad_mesh = - MixedTriQuadMesh3dF32::new(splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.borrow().inner, - non_squareness_limit, - normal_angle_limit_rad, - max_interior_angle, - )); - Ok(quad_mesh.into_pyobject(py).unwrap().into()) - } else if 
mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - let mut quad_mesh = - MixedTriQuadMeshWithDataF32::new(splashsurf_lib::mesh::MeshWithData::new( - splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.borrow().inner.mesh, - non_squareness_limit, - normal_angle_limit_rad, - max_interior_angle, - ), - )); - - quad_mesh.inner.point_attributes = mesh.borrow().inner.point_attributes.clone(); - - Ok(quad_mesh.into_pyobject(py).unwrap().into()) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} - -#[pyfunction] -#[pyo3(name = "par_laplacian_smoothing_inplace_f64")] -#[pyo3(signature = (mesh, vertex_connectivity, iterations, beta, weights))] -pub fn par_laplacian_smoothing_inplace_py_f64<'py>( - py: Python, - mesh: PyObject, - vertex_connectivity: Vec>, // ToDo: only take reference to data here - iterations: usize, - beta: f64, - weights: Vec, // ToDo: Same here -) -> PyResult<()> { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.borrow_mut().inner, - &vertex_connectivity, - iterations, - beta, - &weights, - ); - Ok(()) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.borrow_mut().inner.mesh, - &vertex_connectivity, - iterations, - beta, - &weights, - ); - Ok(()) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} - -#[pyfunction] -#[pyo3(name = "par_laplacian_smoothing_inplace_f32")] -#[pyo3(signature = (mesh, vertex_connectivity, iterations, beta, weights))] -pub fn par_laplacian_smoothing_inplace_py_f32<'py>( - py: Python, - mesh: PyObject, - vertex_connectivity: Vec>, // ToDo: only take reference to data here - iterations: usize, - beta: f32, - weights: Vec, // ToDo: Same here -) -> PyResult<()> { - if mesh.downcast_bound::(py).is_ok() { - let mesh = 
mesh.downcast_bound::(py).unwrap(); - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.borrow_mut().inner, - &vertex_connectivity, - iterations, - beta, - &weights, - ); - Ok(()) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.borrow_mut().inner.mesh, - &vertex_connectivity, - iterations, - beta, - &weights, - ); - Ok(()) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} - -#[pyfunction] -#[pyo3(name = "par_laplacian_smoothing_normals_inplace_f32")] -#[pyo3(signature = (normals, vertex_connectivity, iterations))] -pub fn par_laplacian_smoothing_normals_inplace_py_f32<'py>( - normals: &Bound<'py, PyArray2>, - vertex_connectivity: Vec>, - iterations: usize, -) { - let mut normals: ArrayViewMut2 = unsafe { normals.as_array_mut() }; - let mut normals_vec: Vec> = - bytemuck::cast_vec(normals.as_slice().unwrap().to_vec()); // Copies data temporarily into a vec - splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( - &mut normals_vec, - &vertex_connectivity, - iterations, - ); - normals - .as_slice_mut() - .unwrap() - .copy_from_slice(&bytemuck::cast_slice(normals_vec.as_slice())); // Copy back to numpy array -} - -#[pyfunction] -#[pyo3(name = "par_laplacian_smoothing_normals_inplace_f64")] -#[pyo3(signature = (normals, vertex_connectivity, iterations))] -pub fn par_laplacian_smoothing_normals_inplace_py_f64<'py>( - normals: &Bound<'py, PyArray2>, - vertex_connectivity: Vec>, - iterations: usize, -) { - let mut normals: ArrayViewMut2 = unsafe { normals.as_array_mut() }; - let mut normals_vec: Vec> = - bytemuck::cast_vec(normals.as_slice().unwrap().to_vec()); // Copies data temporarily into a vec - splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( - &mut normals_vec, - &vertex_connectivity, - iterations, - ); - normals - .as_slice_mut() - .unwrap() - 
.copy_from_slice(&bytemuck::cast_slice(normals_vec.as_slice())); // Copy back to numpy array -} - -#[pyfunction] -#[pyo3(name = "decimation_f64")] -#[pyo3(signature = (mesh, *, keep_vertices))] -pub fn decimation_py_f64<'py>( - py: Python, - mesh: PyObject, - keep_vertices: bool, -) -> PyResult>> { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - Ok(splashsurf_lib::postprocessing::decimation( - &mut mesh.borrow_mut().inner, - keep_vertices, - )) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - Ok(splashsurf_lib::postprocessing::decimation( - &mut mesh.borrow_mut().inner.mesh, - keep_vertices, - )) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} - -#[pyfunction] -#[pyo3(name = "decimation_f32")] -#[pyo3(signature = (mesh, *, keep_vertices))] -pub fn decimation_py_f32<'py>( - py: Python, - mesh: PyObject, - keep_vertices: bool, -) -> PyResult>> { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - Ok(splashsurf_lib::postprocessing::decimation( - &mut mesh.borrow_mut().inner, - keep_vertices, - )) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - Ok(splashsurf_lib::postprocessing::decimation( - &mut mesh.borrow_mut().inner.mesh, - keep_vertices, - )) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} - -#[pyfunction] -#[pyo3(name = "marching_cubes_cleanup_f64")] -#[pyo3(signature = (mesh, grid, *, max_rel_snap_dist = None, max_iter = 5, keep_vertices = false))] -pub fn marching_cubes_cleanup_py_f64<'py>( - py: Python, - mesh: PyObject, - grid: &UniformGridF64, - max_rel_snap_dist: Option, - max_iter: usize, - keep_vertices: bool, -) -> PyResult>> { - if let Ok(mesh) = mesh.downcast_bound::(py) { - Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( - &mut mesh.borrow_mut().inner, - &grid.inner, - max_rel_snap_dist, - max_iter, - keep_vertices, - )) - } else if let 
Ok(mesh) = mesh.downcast_bound::(py) { - Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( - &mut mesh.borrow_mut().inner.mesh, - &grid.inner, - max_rel_snap_dist, - max_iter, - keep_vertices, - )) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} - -#[pyfunction] -#[pyo3(name = "marching_cubes_cleanup_f32")] -#[pyo3(signature = (mesh, grid, *, max_rel_snap_dist = None, max_iter = 5, keep_vertices = false))] -pub fn marching_cubes_cleanup_py_f32<'py>( - py: Python, - mesh: PyObject, - grid: &UniformGridF32, - max_rel_snap_dist: Option, - max_iter: usize, - keep_vertices: bool, -) -> PyResult>> { - if let Ok(mesh) = mesh.downcast_bound::(py) { - Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( - &mut mesh.borrow_mut().inner, - &grid.inner, - max_rel_snap_dist, - max_iter, - keep_vertices, - )) - } else if let Ok(mesh) = mesh.downcast_bound::(py) { - Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( - &mut mesh.borrow_mut().inner.mesh, - &grid.inner, - max_rel_snap_dist, - max_iter, - keep_vertices, - )) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} diff --git a/pysplashsurf/src/postprocessing.rs b/pysplashsurf/src/postprocessing.rs new file mode 100644 index 00000000..9e9d0d80 --- /dev/null +++ b/pysplashsurf/src/postprocessing.rs @@ -0,0 +1,236 @@ +use numpy as np; +use numpy::prelude::*; +use numpy::{PyArray1, PyArray2, PyArrayMethods, PyUntypedArray}; +use pyo3::IntoPyObjectExt; +use pyo3::prelude::*; +use pyo3_stub_gen::derive::gen_stub_pyfunction; +use splashsurf_lib::nalgebra::Vector3; + +use crate::mesh::{ + PyMeshAttribute, PyMeshWithData, PyMixedTriQuadMesh3d, PyVertexVertexConnectivity, + get_triangle_mesh_generic, +}; +use crate::uniform_grid::PyUniformGrid; +use crate::utils::*; + +/// Converts triangles to quads by merging triangles sharing an edge if they fulfill the given criteria +/// +/// This operation creates a new mesh and does not modify the input mesh. +/// Angles are specified in degrees. 
+#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "convert_tris_to_quads")] +#[pyo3(signature = (mesh, *, non_squareness_limit = 1.75, normal_angle_limit = 10.0, max_interior_angle = 135.0))] +#[gen_stub(override_return_type(type_repr="typing.Union[MixedTriQuadMesh3d, MeshWithData]", imports=()))] +pub fn convert_tris_to_quads<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + mesh: Bound<'py, PyAny>, + non_squareness_limit: f64, + normal_angle_limit: f64, + max_interior_angle: f64, +) -> PyResult> { + let py = mesh.py(); + + let normal_angle_limit = normal_angle_limit.to_radians(); + let max_interior_angle = max_interior_angle.to_radians(); + + let quad_mesh = { + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mesh = mesh.borrow(py); + + if let Some(mesh) = mesh.as_f32() { + let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( + mesh, + non_squareness_limit as f32, + normal_angle_limit as f32, + max_interior_angle as f32, + ); + Ok(PyMixedTriQuadMesh3d::from(quad_mesh)) + } else if let Some(mesh) = mesh.as_f64() { + let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( + mesh, + non_squareness_limit, + normal_angle_limit, + max_interior_angle, + ); + Ok(PyMixedTriQuadMesh3d::from(quad_mesh)) + } else { + Err(pyerr_unsupported_scalar()) + } + }?; + + if let Ok(mesh) = mesh.downcast::() { + let mut data_mesh = PyMeshWithData::try_from_pymesh(py, quad_mesh)?; + data_mesh.point_attributes = mesh + .borrow() + .point_attributes + .iter() + .map(|attr| { + let attr_clone: PyMeshAttribute = attr.borrow(py).clone(); + attr_clone.into_pyobject(py).map(Py::from) + }) + .collect::>()?; + data_mesh.into_bound_py_any(py) + } else { + quad_mesh.into_bound_py_any(py) + } +} + +/// Laplacian smoothing of mesh vertices with feature weights +/// +/// The smoothing is performed inplace and modifies the vertices of 
the given mesh. +#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "laplacian_smoothing_parallel")] +#[pyo3(signature = (mesh, vertex_connectivity, *, iterations, beta = 1.0, weights))] +pub fn laplacian_smoothing_parallel<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + mesh: &Bound<'py, PyAny>, + vertex_connectivity: &Bound<'py, PyVertexVertexConnectivity>, + iterations: usize, + beta: f64, + weights: &Bound<'py, PyUntypedArray>, +) -> PyResult<()> { + let py = mesh.py(); + + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mut mesh = mesh.borrow_mut(py); + + if let Some(mesh) = mesh.as_f32_mut() { + let weights = weights.downcast::>()?.try_readonly()?; + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + mesh, + &vertex_connectivity.borrow().connectivity, + iterations, + beta as f32, + weights.as_slice()?, + ); + } else if let Some(mesh) = mesh.as_f64_mut() { + let weights = weights.downcast::>()?.try_readonly()?; + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + mesh, + &vertex_connectivity.borrow().connectivity, + iterations, + beta, + weights.as_slice()?, + ); + } else { + return Err(pyerr_unsupported_scalar()); + } + + Ok(()) +} + +/// Laplacian smoothing of a normal field +/// +/// The smoothing is performed inplace and modifies the given normal array. 
+#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "laplacian_smoothing_normals_parallel")] +#[pyo3(signature = (normals, vertex_connectivity, *, iterations))] +pub fn laplacian_smoothing_normals_parallel<'py>( + normals: &Bound<'py, PyUntypedArray>, + vertex_connectivity: &Bound<'py, PyVertexVertexConnectivity>, + iterations: usize, +) -> PyResult<()> { + let py = normals.py(); + let element_type = normals.dtype(); + if element_type.is_equiv_to(&np::dtype::(py)) { + let mut normals = normals.downcast::>()?.try_readwrite()?; + let normals_vec3: &mut [Vector3] = bytemuck::cast_slice_mut(normals.as_slice_mut()?); + splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( + normals_vec3, + &vertex_connectivity.borrow().connectivity, + iterations, + ); + } else if element_type.is_equiv_to(&np::dtype::(py)) { + let mut normals = normals.downcast::>()?.try_readwrite()?; + let normals_vec3: &mut [Vector3] = bytemuck::cast_slice_mut(normals.as_slice_mut()?); + splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( + normals_vec3, + &vertex_connectivity.borrow().connectivity, + iterations, + ); + } else { + return Err(pyerr_unsupported_scalar()); + } + + Ok(()) +} + +/// Performs specialized decimation on the given mesh to prevent "barnacles" when applying weighted Laplacian smoothing +/// +/// The decimation is performed inplace and modifies the given mesh. +/// Returns the vertex-vertex connectivity of the decimated mesh which can be used for other +/// post-processing steps. 
+#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "barnacle_decimation")] +#[pyo3(signature = (mesh, *, keep_vertices))] +#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] +pub fn barnacle_decimation<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + mesh: Bound<'py, PyAny>, + keep_vertices: bool, +) -> PyResult { + use splashsurf_lib::postprocessing::decimation; + let py = mesh.py(); + + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mut mesh = mesh.borrow_mut(py); + + if let Some(mesh) = mesh.as_f32_mut() { + Ok(PyVertexVertexConnectivity::new(decimation( + mesh, + keep_vertices, + ))) + } else if let Some(mesh) = mesh.as_f64_mut() { + Ok(PyVertexVertexConnectivity::new(decimation( + mesh, + keep_vertices, + ))) + } else { + Err(pyerr_unsupported_scalar()) + } +} + +/// Performs simplification on the given mesh inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren +/// +/// The simplification is performed inplace and modifies the given mesh. +/// The method is designed specifically for meshes generated by Marching Cubes. +/// See Moore and Warren: `Mesh Displacement: An Improved Contouring Method for Trivariate Data `_ (1991) +/// or Moore and Warren: "Compact Isocontours from Sampled Data" in "Graphics Gems III" (1992). 
+#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "marching_cubes_cleanup")] +#[pyo3(signature = (mesh, grid, *, max_rel_snap_dist = None, max_iter = 5, keep_vertices = false))] +#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] +pub fn marching_cubes_cleanup<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + mesh: &Bound<'py, PyAny>, + grid: &PyUniformGrid, + max_rel_snap_dist: Option, + max_iter: usize, + keep_vertices: bool, +) -> PyResult<()> { + use splashsurf_lib::postprocessing::marching_cubes_cleanup as cleanup; + let py = mesh.py(); + let max_rel_snap_dist_f32 = max_rel_snap_dist.map(|d| d as f32); + + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mut mesh = mesh.borrow_mut(py); + + if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32_mut()) { + cleanup(mesh, grid, max_rel_snap_dist_f32, max_iter, keep_vertices); + } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64_mut()) { + cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); + } else { + return Err(pyerr_scalar_type_mismatch()); + } + + Ok(()) +} diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index aabf5aa9..fcb4d7c7 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -1,184 +1,117 @@ -use numpy::{PyArray2, PyReadonlyArray2}; +use crate::mesh::PyTriMesh3d; +use crate::neighborhood_search::PyNeighborhoodLists; +use crate::uniform_grid::PyUniformGrid; +use crate::utils; +use anyhow::anyhow; +use ndarray::ArrayView1; +use numpy as np; +use numpy::prelude::*; +use numpy::{Element, PyArray1, PyArray2, PyUntypedArray}; use pyo3::{Bound, prelude::*}; use pyo3_stub_gen::derive::*; use splashsurf_lib::{ - Aabb3d, GridDecompositionParameters, Index, Real, SpatialDecomposition, SurfaceReconstruction, - nalgebra::Vector3, 
reconstruct_surface, + Aabb3d, GridDecompositionParameters, Real, SpatialDecomposition, SurfaceReconstruction, + nalgebra::Vector3, }; - -use crate::{ - mesh::{TriMesh3dF32, TriMesh3dF64}, - uniform_grid::{UniformGridF32, UniformGridF64}, -}; - -macro_rules! create_reconstruction_interface { - ($name: ident, $type: ident, $mesh_class: ident, $grid_class: ident) => { - /// SurfaceReconstruction wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: SurfaceReconstruction, - } - - impl $name { - pub fn new(data: SurfaceReconstruction) -> Self { - Self { inner: data } - } - } - - #[gen_stub_pymethods] - #[pymethods] - impl $name { - /// PyTrimesh3d clone of the contained mesh - #[getter] - fn mesh(&self) -> $mesh_class { - $mesh_class::new(self.inner.mesh().clone()) - } - - /// PyUniformGrid clone of the contained grid - #[getter] - fn grid(&self) -> $grid_class { - $grid_class::new(self.inner.grid().clone()) - } - - // Doesn't work because SurfaceReconstruction.mesh() only returns an immutable reference - // /// Returns PyTrimesh3dF32/F64 without copying the mesh data, removes the mesh from the object - // fn take_mesh(&mut self) -> $mesh_class { - // let mesh = std::mem::take(&mut self.inner.mesh()); - // $mesh_class::new(mesh) - // } - - /// Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this) - fn particle_densities(&self) -> &Vec<$type> { - self.inner - .particle_densities() - .ok_or_else(|| { - anyhow::anyhow!("Surface Reconstruction did not return particle densities") - }) - .unwrap() - } - - /// Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters) - fn particle_neighbors(&self) -> Option<&Vec>> { - self.inner.particle_neighbors() - } - } - }; +use utils::{IndexT, PyFloatVecWrapper}; + +/// Result returned by surface reconstruction functions 
with surface mesh and other data +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "SurfaceReconstruction")] +pub struct PySurfaceReconstruction { + grid: Py, + particle_densities: Option, + particle_inside_aabb: Option>, + particle_neighbors: Option>, + mesh: Py, } -create_reconstruction_interface!(SurfaceReconstructionF64, f64, TriMesh3dF64, UniformGridF64); -create_reconstruction_interface!(SurfaceReconstructionF32, f32, TriMesh3dF32, UniformGridF32); - -/// Reconstruct the surface from only particle positions -pub fn reconstruct_surface_py( - particles: &[Vector3], - particle_radius: R, - rest_density: R, - smoothing_length: R, - cube_size: R, - iso_surface_threshold: R, - multi_threading: bool, - global_neighborhood_list: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - aabb_min: Option<[R; 3]>, - aabb_max: Option<[R; 3]>, -) -> SurfaceReconstruction { - let aabb; - if let (Some(aabb_min), Some(aabb_max)) = (aabb_min, aabb_max) { - // Convert the min and max arrays to Vector3 - aabb = Some(Aabb3d::new( - Vector3::from(aabb_min), - Vector3::from(aabb_max), - )); - } else { - aabb = None; +impl PySurfaceReconstruction { + pub fn try_from_generic<'py, R: Real + Element>( + py: Python<'py>, + reconstruction: SurfaceReconstruction, + ) -> PyResult { + Ok(Self { + grid: Py::new(py, PyUniformGrid::try_from_generic(reconstruction.grid)?)?, + particle_densities: reconstruction + .particle_densities + .map(PyFloatVecWrapper::try_from_generic) + .transpose()?, + particle_inside_aabb: reconstruction.particle_inside_aabb, + particle_neighbors: reconstruction + .particle_neighbors + .map(|n| Py::new(py, PyNeighborhoodLists::from(n))) + .transpose()?, + mesh: Py::new(py, PyTriMesh3d::try_from_generic(reconstruction.mesh)?)?, + }) } +} - let spatial_decomposition; - if subdomain_grid { - spatial_decomposition = SpatialDecomposition::UniformGrid(GridDecompositionParameters { - subdomain_num_cubes_per_dim, - auto_disable: 
subdomain_grid_auto_disable, - }); - } else { - spatial_decomposition = SpatialDecomposition::None; +#[gen_stub_pymethods] +#[pymethods] +impl PySurfaceReconstruction { + /// The marching cubes grid parameters used for the surface reconstruction + #[getter] + fn grid<'py>(this: Bound<'py, Self>) -> Py { + this.borrow().grid.clone_ref(this.py()) } - let params = splashsurf_lib::Parameters { - particle_radius, - rest_density, - // Compact support is twice the smoothing length - compact_support_radius: (smoothing_length * particle_radius) * R::from_float(2.0), - cube_size: cube_size * particle_radius, - iso_surface_threshold, - particle_aabb: aabb, - enable_multi_threading: multi_threading, - spatial_decomposition, - global_neighborhood_list, - }; - - let surface = reconstruct_surface(&particles, ¶ms).unwrap(); - - surface -} - -#[pyfunction] -#[pyo3(name = "reconstruct_surface_f32")] -#[pyo3(signature = (particles, *, particle_radius, rest_density, - smoothing_length, cube_size, iso_surface_threshold, multi_threading=true, - global_neighborhood_list=false, subdomain_grid=true, subdomain_grid_auto_disable=true, subdomain_num_cubes_per_dim=64, - aabb_min = None, aabb_max = None -))] -pub fn reconstruct_surface_py_f32<'py>( - particles: &Bound<'py, PyArray2>, - particle_radius: f32, - rest_density: f32, - smoothing_length: f32, - cube_size: f32, - iso_surface_threshold: f32, - multi_threading: bool, - global_neighborhood_list: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - aabb_min: Option<[f32; 3]>, - aabb_max: Option<[f32; 3]>, -) -> PyResult { - let particles: PyReadonlyArray2 = particles.extract()?; + /// The global array of particle densities (`None` if they were only computed locally) + #[getter] + fn particle_densities<'py>( + this: Bound<'py, Self>, + ) -> PyResult>> { + this.borrow() + .particle_densities + .as_ref() + .map(|p| p.view(this.into_any())) + .transpose() + } - let particle_positions = 
particles.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + /// A boolean array indicating whether each particle was inside the AABB used for the reconstruction (`None` if no AABB was set) + #[getter] + fn particle_inside_aabb<'py>(this: Bound<'py, Self>) -> Option> { + this.borrow().particle_inside_aabb.as_ref().map(|p| { + let array: ArrayView1 = ArrayView1::from(p.as_slice()); + let pyarray = unsafe { PyArray1::borrow_from_array(&array, this.into_any()) }; + pyarray + .into_any() + .downcast_into::() + .expect("downcast should not fail") + }) + } - let reconstruction = reconstruct_surface_py::( - particle_positions, - particle_radius, - rest_density, - smoothing_length, - cube_size, - iso_surface_threshold, - multi_threading, - global_neighborhood_list, - subdomain_grid, - subdomain_grid_auto_disable, - subdomain_num_cubes_per_dim, - aabb_min, - aabb_max, - ); + /// The global neighborhood lists per particle (`None` if they were only computed locally) + #[getter] + fn particle_neighbors<'py>(this: Bound<'py, Self>) -> Option> { + this.borrow() + .particle_neighbors + .as_ref() + .map(|p| p.clone_ref(this.py())) + } - Ok(SurfaceReconstructionF32::new(reconstruction.to_owned())) + /// The reconstructed triangle mesh + #[getter] + fn mesh<'py>(this: Bound<'py, Self>) -> Py { + this.borrow().mesh.clone_ref(this.py()) + } } +/// Performs a surface reconstruction from the given particles without additional post-processing +/// +/// Note that all parameters use absolute distance units and are not relative to the particle radius. 
+#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "reconstruct_surface_f64")] -#[pyo3(signature = (particles, *, particle_radius, rest_density, - smoothing_length, cube_size, iso_surface_threshold, multi_threading=true, - global_neighborhood_list=false, subdomain_grid=true, subdomain_grid_auto_disable=true, subdomain_num_cubes_per_dim=64, +#[pyo3(name = "reconstruct_surface")] +#[pyo3(signature = (particles, *, + particle_radius, rest_density = 1000.0, smoothing_length, cube_size, iso_surface_threshold = 0.6, + multi_threading = true, global_neighborhood_list = false, + subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, aabb_min = None, aabb_max = None ))] -pub fn reconstruct_surface_py_f64<'py>( - particles: &Bound<'py, PyArray2>, +pub fn reconstruct_surface<'py>( + particles: &Bound<'py, PyUntypedArray>, particle_radius: f64, rest_density: f64, smoothing_length: f64, @@ -191,27 +124,54 @@ pub fn reconstruct_surface_py_f64<'py>( subdomain_num_cubes_per_dim: u32, aabb_min: Option<[f64; 3]>, aabb_max: Option<[f64; 3]>, -) -> PyResult { - let particles: PyReadonlyArray2 = particles.extract()?; +) -> PyResult { + let py = particles.py(); - let particle_positions = particles.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + let particle_aabb = aabb_min + .zip(aabb_max) + .map(|(min, max)| Aabb3d::new(Vector3::from(min), Vector3::from(max))); - let reconstruction = reconstruct_surface_py::( - particle_positions, + let spatial_decomposition = if subdomain_grid { + SpatialDecomposition::UniformGrid(GridDecompositionParameters { + subdomain_num_cubes_per_dim, + auto_disable: subdomain_grid_auto_disable, + }) + } else { + SpatialDecomposition::None + }; + + let parameters = splashsurf_lib::Parameters { particle_radius, rest_density, - smoothing_length, - cube_size, + compact_support_radius: 2.0 * smoothing_length * particle_radius, + cube_size: cube_size * particle_radius, 
iso_surface_threshold, - multi_threading, + particle_aabb, + enable_multi_threading: multi_threading, + spatial_decomposition, global_neighborhood_list, - subdomain_grid, - subdomain_grid_auto_disable, - subdomain_num_cubes_per_dim, - aabb_min, - aabb_max, - ); + }; - Ok(SurfaceReconstructionF64::new(reconstruction.to_owned())) + let element_type = particles.dtype(); + if element_type.is_equiv_to(&np::dtype::(py)) { + let particles = particles.downcast::>()?.try_readonly()?; + let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); + let reconstruction = splashsurf_lib::reconstruct_surface::( + particle_positions, + ¶meters + .try_convert() + .expect("failed to convert reconstruction parameters to f32"), + ) + .map_err(|e| anyhow!(e))?; + PySurfaceReconstruction::try_from_generic(py, reconstruction) + } else if element_type.is_equiv_to(&np::dtype::(py)) { + let particles = particles.downcast::>()?.try_readonly()?; + let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); + let reconstruction = + splashsurf_lib::reconstruct_surface::(particle_positions, ¶meters) + .map_err(|e| anyhow!(e))?; + PySurfaceReconstruction::try_from_generic(py, reconstruction) + } else { + Err(utils::pyerr_unsupported_scalar()) + } } diff --git a/pysplashsurf/src/sph_interpolation.rs b/pysplashsurf/src/sph_interpolation.rs index c86ac8ff..841c4bc2 100644 --- a/pysplashsurf/src/sph_interpolation.rs +++ b/pysplashsurf/src/sph_interpolation.rs @@ -1,128 +1,230 @@ -use ndarray::{ArrayView, ArrayView2}; -use numpy::{PyArray2, PyReadonlyArray2, ToPyArray}; -use pyo3::{PyResult, prelude::*}; +use numpy as np; +use numpy::prelude::*; +use numpy::{Element, PyArray1, PyArray2, PyUntypedArray}; +use pyo3::PyResult; +use pyo3::exceptions::PyValueError; +use pyo3::prelude::*; use pyo3_stub_gen::derive::*; +use splashsurf_lib::nalgebra::SVector; use splashsurf_lib::{ + Real, nalgebra::{Unit, Vector3}, sph_interpolation::SphInterpolator, }; 
-macro_rules! create_sph_interpolator_interface { - ($name: ident, $type: ident) => { - /// SphInterpolator wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: SphInterpolator<$type>, - } +use crate::utils::*; - impl $name { - pub fn new(data: SphInterpolator<$type>) -> Self { - Self { inner: data } - } - } +enum PySphInterpolatorWrapper { + F32(SphInterpolator), + F64(SphInterpolator), +} - #[gen_stub_pymethods] - #[pymethods] - impl $name { - #[new] - fn py_new<'py>( - particle_positions: &Bound<'py, PyArray2<$type>>, - particle_densities: Vec<$type>, - particle_rest_mass: $type, - compact_support_radius: $type, - ) -> PyResult { - let particle_positions: PyReadonlyArray2<$type> = - particle_positions.extract().unwrap(); - let particle_positions = particle_positions.as_slice().unwrap(); - let particle_positions: &[Vector3<$type>] = - bytemuck::cast_slice(particle_positions); - - Ok($name::new(SphInterpolator::new( - particle_positions, - particle_densities.as_slice(), - particle_rest_mass, - compact_support_radius, - ))) - } +/// Interpolator of per-particle quantities to arbitrary points using SPH interpolation (with cubic kernel) +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "SphInterpolator")] +pub struct PySphInterpolator { + inner: PySphInterpolatorWrapper, +} - /// Interpolates a scalar per particle quantity to the given points, panics if the there are less per-particles values than particles - fn interpolate_scalar_quantity<'py>( - &self, - particle_quantity: Vec<$type>, - interpolation_points: &Bound<'py, PyArray2<$type>>, - first_order_correction: bool, - ) -> PyResult> { - let interpolation_points: PyReadonlyArray2<$type> = - interpolation_points.extract()?; - let interpolation_points = interpolation_points.as_slice()?; - let interpolation_points: &[Vector3<$type>] = - bytemuck::cast_slice(interpolation_points); - - Ok(self.inner.interpolate_scalar_quantity( - particle_quantity.as_slice(), - interpolation_points, - 
first_order_correction, - )) - } +enum_wrapper_impl_from!(PySphInterpolator, SphInterpolator => PySphInterpolatorWrapper::F32); +enum_wrapper_impl_from!(PySphInterpolator, SphInterpolator => PySphInterpolatorWrapper::F64); + +impl PySphInterpolator { + fn new_generic<'py, R: Real + Element>( + particle_positions: &Bound<'py, PyUntypedArray>, + particle_densities: &Bound<'py, PyUntypedArray>, + particle_rest_mass: f64, + compact_support_radius: f64, + ) -> PyResult + where + PySphInterpolator: From>, + { + if let (Ok(particles), Ok(densities)) = ( + particle_positions.downcast::>(), + particle_densities.downcast::>(), + ) { + let particles = particles.try_readonly()?; + let particles: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); + + let densities = densities.try_readonly()?; + let densities = densities.as_slice()?; + + Ok(PySphInterpolator::from(SphInterpolator::new( + particles, + densities, + R::from_float(particle_rest_mass), + R::from_float(compact_support_radius), + ))) + } else { + Err(pyerr_scalar_type_mismatch()) + } + } + + fn interpolate_normals_generic<'py, R: Real + Element>( + interpolator: &SphInterpolator, + interpolation_points: &Bound<'py, PyUntypedArray>, + ) -> PyResult> { + let py = interpolation_points.py(); + if let Ok(points) = interpolation_points.downcast::>() { + let points = points.try_readonly()?; + let points: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); + + let normals_vec = interpolator.interpolate_normals(points); + Ok(bytemuck::cast_vec::>, R>(normals_vec) + .into_pyarray(py) + .reshape((points.len(), 3))? 
+ .into_any() + .downcast_into::() + .expect("downcast should not fail")) + } else { + Err(pyerr_unsupported_scalar()) + } + } + + fn interpolate_vector_generic<'py, R: Real + Element>( + interpolator: &SphInterpolator, + particle_quantity: &Bound<'py, PyUntypedArray>, + interpolation_points: &Bound<'py, PyUntypedArray>, + first_order_correction: bool, + ) -> PyResult> { + let shape_in = particle_quantity.shape(); + if ![1, 2].contains(&shape_in.len()) || shape_in[0] != interpolator.size() { + return Err(PyValueError::new_err( + "unsupported shape of per particle quantity", + )); + } + let n_components = shape_in.get(1).copied().unwrap_or(1); + let shape_out = { + let mut s = shape_in.to_vec(); + s[0] = interpolation_points.shape()[0]; + s + }; + + // Get the per-particle quantity as a read-only contiguous slice + let quantity = if let Ok(q) = particle_quantity.downcast::>() { + q.to_dyn().try_readonly() + } else if let Ok(q) = particle_quantity.downcast::>() { + q.to_dyn().try_readonly() + } else { + return Err(pyerr_scalar_type_mismatch()); + }?; + let quantity = quantity.as_slice()?; + + let points = interpolation_points + .downcast::>() + .map_err(|_| pyerr_scalar_type_mismatch())? + .try_readonly()?; + let points: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); + + fn interpolate_ndim<'py, const D: usize, R: Real + Element>( + py: Python<'py>, + interpolator: &SphInterpolator, + points: &[Vector3], + quantity: &[R], + first_order_correction: bool, + shape: &[usize], + ) -> PyResult> { + let quantity: &[SVector] = bytemuck::cast_slice(quantity); + let interpolated = + interpolator.interpolate_vector_quantity(quantity, points, first_order_correction); + Ok(bytemuck::cast_vec::<_, R>(interpolated) + .into_pyarray(py) + .reshape(shape)? + .into_any() + .downcast_into::() + .expect("downcast should not fail")) + } - /// Interpolates surface normals (i.e. 
normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation - fn interpolate_normals<'py>( - &self, - py: Python<'py>, - interpolation_points: &Bound<'py, PyArray2<$type>>, - ) -> PyResult>> { - let interpolation_points: PyReadonlyArray2<$type> = - interpolation_points.extract()?; - let interpolation_points = interpolation_points.as_slice()?; - let interpolation_points: &[Vector3<$type>] = - bytemuck::cast_slice(interpolation_points); - - let normals_vec = self.inner.interpolate_normals(interpolation_points); - let normals_vec = - bytemuck::allocation::cast_vec::>, $type>(normals_vec); - - let normals: &[$type] = normals_vec.as_slice(); - let normals: ArrayView2<$type> = - ArrayView::from_shape((normals.len() / 3, 3), normals).unwrap(); - - Ok(normals.to_pyarray(py)) - } + let py = particle_quantity.py(); + let i = interpolator; + let shape = &shape_out; + match n_components { + 1 => interpolate_ndim::<1, R>(py, i, points, quantity, first_order_correction, shape), + 2 => interpolate_ndim::<2, R>(py, i, points, quantity, first_order_correction, shape), + 3 => interpolate_ndim::<3, R>(py, i, points, quantity, first_order_correction, shape), + 4 => interpolate_ndim::<4, R>(py, i, points, quantity, first_order_correction, shape), + 5 => interpolate_ndim::<5, R>(py, i, points, quantity, first_order_correction, shape), + 6 => interpolate_ndim::<6, R>(py, i, points, quantity, first_order_correction, shape), + 7 => interpolate_ndim::<7, R>(py, i, points, quantity, first_order_correction, shape), + 8 => interpolate_ndim::<8, R>(py, i, points, quantity, first_order_correction, shape), + 9 => interpolate_ndim::<9, R>(py, i, points, quantity, first_order_correction, shape), + _ => Err(PyValueError::new_err( + "only vector quantities with up to 9 dimensions are supported", + )), + } + } +} - /// Interpolates a vectorial per particle quantity to the given points, panics if the there are less per-particles values than particles - fn 
interpolate_vector_quantity<'py>( - &self, - py: Python<'py>, - particle_quantity: &Bound<'py, PyArray2<$type>>, - interpolation_points: &Bound<'py, PyArray2<$type>>, - first_order_correction: bool, - ) -> PyResult>> { - let interpolation_points: PyReadonlyArray2<$type> = - interpolation_points.extract()?; - let interpolation_points = interpolation_points.as_slice()?; - let interpolation_points: &[Vector3<$type>] = - bytemuck::cast_slice(interpolation_points); - - let particle_quantity: PyReadonlyArray2<$type> = particle_quantity.extract()?; - let particle_quantity = particle_quantity.as_slice()?; - let particle_quantity: &[Vector3<$type>] = bytemuck::cast_slice(particle_quantity); - - let res_vec = self.inner.interpolate_vector_quantity( - particle_quantity, - interpolation_points, - first_order_correction, - ); - let res_vec = bytemuck::allocation::cast_vec::, $type>(res_vec); - - let res: &[$type] = res_vec.as_slice(); - let res: ArrayView2<$type> = - ArrayView::from_shape((res.len() / 3, 3), res).unwrap(); - - Ok(res.to_pyarray(py)) +#[gen_stub_pymethods] +#[pymethods] +impl PySphInterpolator { + /// Constructs an SPH interpolator (with cubic kernels) for the given particles + #[new] + fn py_new<'py>( + particle_positions: &Bound<'py, PyUntypedArray>, + particle_densities: &Bound<'py, PyUntypedArray>, + particle_rest_mass: f64, + compact_support_radius: f64, + ) -> PyResult { + let py = particle_positions.py(); + let element_type = particle_positions.dtype(); + + if element_type.is_equiv_to(&np::dtype::(py)) { + Self::new_generic::( + particle_positions, + particle_densities, + particle_rest_mass, + compact_support_radius, + ) + } else if element_type.is_equiv_to(&np::dtype::(py)) { + Self::new_generic::( + particle_positions, + particle_densities, + particle_rest_mass, + compact_support_radius, + ) + } else { + Err(pyerr_unsupported_scalar()) + } + } + + /// Interpolates a scalar or vectorial per particle quantity to the given points + #[pyo3(signature = 
(particle_quantity, interpolation_points, *, first_order_correction = false))] + fn interpolate_quantity<'py>( + &self, + particle_quantity: &Bound<'py, PyUntypedArray>, + interpolation_points: &Bound<'py, PyUntypedArray>, + first_order_correction: bool, + ) -> PyResult> { + match &self.inner { + PySphInterpolatorWrapper::F32(interp) => Self::interpolate_vector_generic::( + interp, + particle_quantity, + interpolation_points, + first_order_correction, + ), + PySphInterpolatorWrapper::F64(interp) => Self::interpolate_vector_generic::( + interp, + particle_quantity, + interpolation_points, + first_order_correction, + ), + } + } + + /// Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation + fn interpolate_normals<'py>( + &self, + interpolation_points: &Bound<'py, PyUntypedArray>, + ) -> PyResult> { + match &self.inner { + PySphInterpolatorWrapper::F32(interp) => { + Self::interpolate_normals_generic::(interp, interpolation_points) + } + PySphInterpolatorWrapper::F64(interp) => { + Self::interpolate_normals_generic::(interp, interpolation_points) } } - }; + } } - -create_sph_interpolator_interface!(SphInterpolatorF64, f64); -create_sph_interpolator_interface!(SphInterpolatorF32, f32); diff --git a/pysplashsurf/src/uniform_grid.rs b/pysplashsurf/src/uniform_grid.rs index b33b116c..31403f6f 100644 --- a/pysplashsurf/src/uniform_grid.rs +++ b/pysplashsurf/src/uniform_grid.rs @@ -1,23 +1,87 @@ +use crate::aabb::PyAabb3d; +use crate::utils; +use crate::utils::{IndexT, enum_wrapper_impl_from}; +use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; -use splashsurf_lib::UniformGrid; - -macro_rules! 
create_grid_interface { - ($name: ident, $type: ident) => { - /// UniformGrid wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: UniformGrid, +use splashsurf_lib::{Real, UniformGrid}; + +enum PyUniformGridData { + F32(UniformGrid), + F64(UniformGrid), +} + +/// Struct containing the parameters of the uniform grid used for the surface reconstruction +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "UniformGrid")] +pub struct PyUniformGrid { + inner: PyUniformGridData, +} + +enum_wrapper_impl_from!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); +enum_wrapper_impl_from!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); + +impl PyUniformGrid { + pub(crate) fn try_from_generic(mut grid: UniformGrid) -> PyResult { + utils::transmute_replace_into::<_, UniformGrid, _>(&mut grid, UniformGrid::new_zero()) + .or_else(|| { + utils::transmute_replace_into::<_, UniformGrid, _>(&mut grid, UniformGrid::new_zero()) + }) + .ok_or_else(|| PyTypeError::new_err("unsupported type of grid, only i64 for Index and f32 and f64 for Real type are supported")) + } + + pub(crate) fn as_f32(&self) -> Option<&UniformGrid> { + match &self.inner { + PyUniformGridData::F32(grid) => Some(grid), + _ => None, } + } - impl $name { - pub fn new(data: UniformGrid) -> Self { - Self { inner: data } - } + pub(crate) fn as_f64(&self) -> Option<&UniformGrid> { + match &self.inner { + PyUniformGridData::F64(grid) => Some(grid), + _ => None, } - }; + } } -create_grid_interface!(UniformGridF64, f64); -create_grid_interface!(UniformGridF32, f32); +#[gen_stub_pymethods] +#[pymethods] +impl PyUniformGrid { + /// The AABB of the grid containing all marching cubes vertices influenced by the particle kernels + #[getter] + pub fn aabb(&self) -> PyAabb3d { + match &self.inner { + PyUniformGridData::F32(grid) => PyAabb3d::from(grid.aabb().clone()), + PyUniformGridData::F64(grid) => PyAabb3d::from(grid.aabb().clone()), + } + } + + /// Returns the cell size of the uniform grid (the 
marching cubes voxel size) + #[getter] + pub fn cell_size(&self) -> f64 { + match &self.inner { + PyUniformGridData::F32(grid) => grid.cell_size() as f64, + PyUniformGridData::F64(grid) => grid.cell_size(), + } + } + + /// Returns the number of points (marching cubes vertices) per dimension in the uniform grid + #[getter] + pub fn npoints_per_dim(&self) -> [IndexT; 3] { + match &self.inner { + PyUniformGridData::F32(grid) => grid.points_per_dim().clone(), + PyUniformGridData::F64(grid) => grid.points_per_dim().clone(), + } + } + + /// Returns the number of cells (marching cubes voxels) per dimension in the uniform grid + #[getter] + pub fn ncells_per_dim(&self) -> [IndexT; 3] { + match &self.inner { + PyUniformGridData::F32(grid) => grid.cells_per_dim().clone(), + PyUniformGridData::F64(grid) => grid.cells_per_dim().clone(), + } + } +} diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs new file mode 100644 index 00000000..095c3590 --- /dev/null +++ b/pysplashsurf/src/utils.rs @@ -0,0 +1,158 @@ +use ndarray::{ArrayView, IxDyn}; +use numpy::{Element, PyArray, PyUntypedArray}; +use pyo3::exceptions::PyTypeError; +use pyo3::prelude::*; +use pyo3::{Bound, PyAny, PyErr, PyResult}; +use splashsurf_lib::Real; +use splashsurf_lib::nalgebra::SVector; + +/// The index type used for all grids and reconstructions in this crate +pub(crate) type IndexT = i64; + +pub(crate) fn pyerr_unsupported_scalar() -> PyErr { + PyTypeError::new_err("unsupported mesh scalar data type, only f32 and f64 are supported") +} + +pub(crate) fn pyerr_scalar_type_mismatch() -> PyErr { + PyTypeError::new_err( + "unsupported combination of scalar data types, all parameters must have the same type (f32 or f64)", + ) +} + +pub(crate) fn pyerr_only_triangle_mesh() -> PyErr { + PyTypeError::new_err("unsupported mesh type, only triangle meshes are supported") +} + +pub(crate) fn pyerr_only_tri_and_tri_quad_mesh() -> PyErr { + PyTypeError::new_err( + "unsupported mesh type, only triangle and 
mixed triangle-quad meshes are supported", + ) +} + +macro_rules! enum_wrapper_impl_from { + ($pyclass:ident, $mesh:ty => $target_enum:path) => { + impl From<$mesh> for $pyclass { + fn from(mesh: $mesh) -> Self { + Self { + inner: $target_enum(mesh), + } + } + } + }; +} + +macro_rules! enum_impl_from { + ($enum_t:ident, $from_t:ty => $to_variant:path) => { + impl From<$from_t> for $enum_t { + fn from(value: $from_t) -> Self { + $to_variant(value) + } + } + }; +} + +pub(crate) use enum_impl_from; +pub(crate) use enum_wrapper_impl_from; + +pub enum PyFloatVecWrapper { + F32(Vec), + F64(Vec), +} + +enum_impl_from!(PyFloatVecWrapper, Vec => PyFloatVecWrapper::F32); +enum_impl_from!(PyFloatVecWrapper, Vec => PyFloatVecWrapper::F64); + +impl PyFloatVecWrapper { + pub fn try_from_generic(mut vec: Vec) -> PyResult { + transmute_same_take::, Vec>(&mut vec) + .map(PyFloatVecWrapper::F32) + .or_else(|| { + transmute_same_take::, Vec>(&mut vec).map(PyFloatVecWrapper::F64) + }) + .ok_or_else(pyerr_unsupported_scalar) + } + + pub fn view<'py>(&self, container: Bound<'py, PyAny>) -> PyResult> { + match self { + PyFloatVecWrapper::F32(v) => view_scalar_generic(v, container), + PyFloatVecWrapper::F64(v) => view_scalar_generic(v, container), + } + } +} + +/// Transmutes a mutable reference from a generic type to a concrete type if they are identical, otherwise returns None +pub(crate) fn transmute_same_mut( + value: &mut GenericSrc, +) -> Option<&mut ConcreteSrc> { + if std::any::TypeId::of::() == std::any::TypeId::of::() { + Some(unsafe { std::mem::transmute::<&mut GenericSrc, &mut ConcreteSrc>(value) }) + } else { + None + } +} + +/// Transmutes between types if they are identical and takes the value out of the source +pub(crate) fn transmute_same_take( + value: &mut GenericSrc, +) -> Option { + transmute_same_mut::(value).map(|value_ref| std::mem::take(value_ref)) +} + +/// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it 
into the target type +pub(crate) fn transmute_take_into< + GenericSrc: 'static, + ConcreteSrc: Default + Into + 'static, + Target, +>( + value: &mut GenericSrc, +) -> Option { + transmute_same_mut::(value) + .map(|value_ref| std::mem::take(value_ref).into()) +} + +/// Transmutes from a generic type to a concrete type if they are identical, replaces the value and converts it into the target type +pub(crate) fn transmute_replace_into< + GenericSrc: 'static, + ConcreteSrc: Into + 'static, + Target, +>( + value: &mut GenericSrc, + replacement: ConcreteSrc, +) -> Option { + transmute_same_mut::(value) + .map(|value_ref| std::mem::replace(value_ref, replacement).into()) +} + +pub(crate) fn view_generic<'py, R: Element>( + values: &[R], + shape: &[usize], + container: Bound<'py, PyAny>, +) -> PyResult> { + assert_eq!( + shape.iter().product::(), + values.len(), + "shape does not match values length" + ); + let array: ArrayView = + ArrayView::from_shape(shape, values).map_err(anyhow::Error::new)?; + let pyarray = unsafe { PyArray::borrow_from_array(&array, container) }; + Ok(pyarray + .into_any() + .downcast_into::() + .expect("downcast should not fail")) +} + +pub(crate) fn view_scalar_generic<'py, R: Element>( + values: &[R], + container: Bound<'py, PyAny>, +) -> PyResult> { + view_generic(values, &[values.len()], container) +} + +pub(crate) fn view_vec_generic<'py, R: Real + Element, const D: usize>( + values: &[SVector], + container: Bound<'py, PyAny>, +) -> PyResult> { + let coordinates: &[R] = bytemuck::cast_slice(values); + view_generic(coordinates, &[values.len(), D], container) +} diff --git a/pysplashsurf/tests/ParticleData_Random_1000.vtk b/pysplashsurf/tests/ParticleData_Random_1000.vtk new file mode 100644 index 00000000..94bdbed5 Binary files /dev/null and b/pysplashsurf/tests/ParticleData_Random_1000.vtk differ diff --git a/pysplashsurf/tests/main.rs b/pysplashsurf/tests/main.rs deleted file mode 100644 index 31e1bb20..00000000 --- 
a/pysplashsurf/tests/main.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[cfg(test)] -mod tests { - #[test] - fn it_works() { - assert_eq!(2 + 2, 4); - } -} diff --git a/pysplashsurf/tests/test_basic.py b/pysplashsurf/tests/test_basic.py new file mode 100644 index 00000000..9fc18096 --- /dev/null +++ b/pysplashsurf/tests/test_basic.py @@ -0,0 +1,309 @@ +import pysplashsurf +import numpy as np +import meshio +import os.path +import pathlib +import tempfile + +DIR = pathlib.Path(__file__).parent.resolve() +VTK_PATH = DIR.joinpath("ParticleData_Random_1000.vtk") + + +def test_aabb_class(): + print("\nTesting AABB class") + + aabb = pysplashsurf.Aabb3d.from_min_max(min=[0.0, 0.0, 0.0], max=[1.0, 2.0, 3.0]) + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([1.0, 2.0, 3.0])).all() + + aabb = pysplashsurf.Aabb3d.from_min_max( + min=np.array([0.0, 0.0, 0.0]), max=np.array([1.0, 2.0, 3.0]) + ) + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([1.0, 2.0, 3.0])).all() + + aabb = pysplashsurf.Aabb3d.from_points( + np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 0.5, 4.2]]) + ) + + print("AABB min:", aabb.min) + print("AABB max:", aabb.max) + + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([2.0, 1.0, 4.2])).all() + + assert aabb.contains_point([1.0, 0.9, 4.1]) + assert aabb.contains_point([0.0, 0.0, 0.0]) + assert not aabb.contains_point([2.0, 1.0, 4.2]) + assert not aabb.contains_point([1.0, -1.0, 5.0]) + + +def impl_basic_test(dtype): + particles = np.array(meshio.read(VTK_PATH).points, dtype=dtype) + + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=1.0, + iso_surface_threshold=0.6, + mesh_smoothing_iters=5, + output_mesh_smoothing_weights=True, + ) + + assert type(mesh_with_data) is pysplashsurf.MeshWithData + assert type(reconstruction) is 
pysplashsurf.SurfaceReconstruction + assert type(mesh_with_data.mesh) is pysplashsurf.TriMesh3d + + mesh = mesh_with_data.mesh + + assert mesh_with_data.dtype == mesh.dtype + assert mesh_with_data.dtype == dtype + + assert type(mesh_with_data.mesh_type) is pysplashsurf.MeshType + assert mesh_with_data.mesh_type == pysplashsurf.MeshType.Tri3d + + assert mesh.vertices.dtype == dtype + assert mesh.triangles.dtype in [np.uint32, np.uint64] + + assert mesh_with_data.nvertices == len(mesh.vertices) + assert mesh_with_data.ncells == len(mesh.triangles) + + assert mesh_with_data.nvertices in range(21000, 25000) + assert mesh_with_data.ncells in range(45000, 49000) + + assert mesh.vertices.shape == (mesh_with_data.nvertices, 3) + assert mesh.triangles.shape == (mesh_with_data.ncells, 3) + + assert len(mesh_with_data.point_attributes) == 2 + assert len(mesh_with_data.cell_attributes) == 0 + + assert "sw" in mesh_with_data.point_attributes + assert "wnn" in mesh_with_data.point_attributes + + sw = mesh_with_data.point_attributes["sw"] + wnn = mesh_with_data.point_attributes["wnn"] + + assert len(sw) == mesh_with_data.nvertices + assert len(wnn) == mesh_with_data.nvertices + + assert sw.dtype == dtype + assert wnn.dtype == dtype + + assert sw.shape == (mesh_with_data.nvertices,) + assert wnn.shape == (mesh_with_data.nvertices,) + + assert sw.min() >= 0.0 + assert sw.max() <= 1.0 + + assert wnn.min() >= 0.0 + + +def test_pipeline_f32(): + impl_basic_test(np.float32) + + +def test_pipeline_f64(): + impl_basic_test(np.float64) + + +def test_reconstruct(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + reconstruction = pysplashsurf.reconstruct_surface( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0 * 0.025, + cube_size=1.0 * 0.025, + iso_surface_threshold=0.6, + global_neighborhood_list=True, + ) + + assert type(reconstruction) is pysplashsurf.SurfaceReconstruction + assert type(reconstruction.mesh) is 
pysplashsurf.TriMesh3d + assert type(reconstruction.grid) is pysplashsurf.UniformGrid + assert type(reconstruction.particle_densities) is np.ndarray + assert type(reconstruction.particle_inside_aabb) is type(None) + assert type(reconstruction.particle_neighbors) is pysplashsurf.NeighborhoodLists + + mesh = reconstruction.mesh + + assert mesh.dtype == np.float32 + + assert reconstruction.particle_densities.dtype == np.float32 + assert len(reconstruction.particle_densities) == len(particles) + + assert len(mesh.vertices) in range(25000, 30000) + assert len(mesh.triangles) in range(49000, 53000) + + +def test_neighborhood_search(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + reconstruction = pysplashsurf.reconstruct_surface( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0 * 0.025, + cube_size=1.0 * 0.025, + iso_surface_threshold=0.6, + global_neighborhood_list=True, + ) + + neighbors_reconstruct = reconstruction.particle_neighbors.get_neighborhood_lists() + + assert type(neighbors_reconstruct) is list + assert len(neighbors_reconstruct) == len(particles) + + aabb = reconstruction.grid.aabb + + neighbor_lists = pysplashsurf.neighborhood_search_spatial_hashing_parallel( + particles, domain=aabb, search_radius=4.0 * 0.025 + ) + + assert type(neighbor_lists) is pysplashsurf.NeighborhoodLists + + neighbors = neighbor_lists.get_neighborhood_lists() + + assert type(neighbors) is list + assert len(neighbors) == len(particles) + assert len(neighbors) == len(neighbors_reconstruct) + + # TODO: Compare with naive neighbor search + + +def test_check_consistency(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + reconstruction = pysplashsurf.reconstruct_surface( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0 * 0.025, + cube_size=1.0 * 0.025, + iso_surface_threshold=0.6, + global_neighborhood_list=True, + ) + mesh = reconstruction.mesh + + assert 
pysplashsurf.check_mesh_consistency(mesh, reconstruction.grid) is None + + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=1.0, + iso_surface_threshold=0.6, + mesh_smoothing_iters=5, + output_mesh_smoothing_weights=True, + ) + + assert ( + pysplashsurf.check_mesh_consistency(mesh_with_data, reconstruction.grid) is None + ) + + # TODO: Delete some triangles and check for failure + + +def test_tris_to_quads(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=1.0, + iso_surface_threshold=0.6, + mesh_smoothing_iters=5, + output_mesh_smoothing_weights=True, + ) + + mesh_with_data_quads = pysplashsurf.convert_tris_to_quads(mesh_with_data) + + assert type(mesh_with_data_quads.mesh) is pysplashsurf.MixedTriQuadMesh3d + assert mesh_with_data_quads.mesh_type == pysplashsurf.MeshType.MixedTriQuad3d + + assert mesh_with_data_quads.nvertices == mesh_with_data.nvertices + assert mesh_with_data_quads.ncells < mesh_with_data.ncells + + tris = mesh_with_data_quads.mesh.get_triangles() + quads = mesh_with_data_quads.mesh.get_quads() + + assert tris.dtype in [np.uint32, np.uint64] + assert quads.dtype in [np.uint32, np.uint64] + + assert len(tris) + len(quads) == mesh_with_data_quads.ncells + + assert tris.shape == (len(tris), 3) + assert quads.shape == (len(quads), 4) + + assert len(tris) in range(35000, 39000) + assert len(quads) in range(4600, 5000) + + assert len(mesh_with_data.point_attributes) == 2 + assert len(mesh_with_data.cell_attributes) == 0 + + assert "sw" in mesh_with_data.point_attributes + assert "wnn" in mesh_with_data.point_attributes + + +def test_interpolator(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + mesh_with_data, 
reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=1.0, + iso_surface_threshold=0.6, + mesh_smoothing_iters=5, + output_mesh_smoothing_weights=True, + ) + + compact_support = 4.0 * 0.025 + rest_mass = 1000.0 * 0.025**3 + + interpolator = pysplashsurf.SphInterpolator( + particles, reconstruction.particle_densities, rest_mass, compact_support + ) + + assert type(interpolator) is pysplashsurf.SphInterpolator + + mesh = mesh_with_data.mesh + mesh_densities = interpolator.interpolate_quantity( + reconstruction.particle_densities, mesh.vertices + ) + + assert type(mesh_densities) is np.ndarray + assert mesh_densities.dtype == np.float32 + assert mesh_densities.shape == (len(mesh.vertices),) + assert mesh_densities.min() >= 0.0 + + mesh_particles = interpolator.interpolate_quantity(particles, mesh.vertices) + + assert type(mesh_particles) is np.ndarray + assert mesh_particles.dtype == np.float32 + assert mesh_particles.shape == (len(mesh.vertices), 3) + + mesh_sph_normals = interpolator.interpolate_normals(mesh.vertices) + + assert type(mesh_sph_normals) is np.ndarray + assert mesh_sph_normals.dtype == np.float32 + assert mesh_sph_normals.shape == (len(mesh.vertices), 3) + + mesh_with_data.add_point_attribute("density", mesh_densities) + mesh_with_data.add_point_attribute("position", mesh_particles) + mesh_with_data.add_point_attribute("normal", mesh_sph_normals) + + assert "density" in mesh_with_data.point_attributes + assert "position" in mesh_with_data.point_attributes + assert "normal" in mesh_with_data.point_attributes + + assert np.array_equal(mesh_with_data.point_attributes["density"], mesh_densities) + assert np.array_equal(mesh_with_data.point_attributes["position"], mesh_particles) + assert np.array_equal(mesh_with_data.point_attributes["normal"], mesh_sph_normals) diff --git a/pysplashsurf/tests/test_bgeo.py b/pysplashsurf/tests/test_bgeo.py new file mode 100644 
index 00000000..a7e2e024 --- /dev/null +++ b/pysplashsurf/tests/test_bgeo.py @@ -0,0 +1,13 @@ +import pysplashsurf +import numpy as np +import meshio +import pathlib + +DIR = pathlib.Path(__file__).parent.resolve() +BGEO_PATH = DIR.joinpath("ParticleData_Fluid_50.bgeo") + + +def test_bgeo(): + particles = np.array(meshio.read(BGEO_PATH).points, dtype=np.float32) + + assert len(particles) == 4732 diff --git a/pysplashsurf/tests/test_calling.py b/pysplashsurf/tests/test_calling.py index b146c109..0cc75fec 100644 --- a/pysplashsurf/tests/test_calling.py +++ b/pysplashsurf/tests/test_calling.py @@ -1,6 +1,5 @@ import pysplashsurf import numpy as np -import math import meshio import subprocess import time @@ -9,203 +8,244 @@ BINARY_PATH = "splashsurf" DIR = pathlib.Path(__file__).parent.resolve() -BGEO_PATH = DIR.joinpath("ParticleData_Fluid_50.bgeo") VTK_PATH = DIR.joinpath("ParticleData_Fluid_5.vtk") + def now_s(): - return time.process_time_ns() / (10 ** 9) - -def test_bgeo(): - particles = np.array(meshio.read(BGEO_PATH).points, dtype=np.float32) - - assert(len(particles) == 4732) - -def test_aabb_class(): - print("\nTesting AABB class") - - aabb = pysplashsurf.Aabb3dF64.par_from_points(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 0.5, 4.2]])) - - assert(aabb.min() == np.array([0.0, 0.0, 0.0])).all() - assert(aabb.max() == np.array([2.0, 1.0, 4.2])).all() - - aabb.join_with_point([3.0, 2.0, 1.0]) - - assert(aabb.min() == np.array([0.0, 0.0, 0.0])).all() - assert(aabb.max() == np.array([3.0, 2.0, 4.2])).all() - - assert(aabb.contains_point([1.0, 1.0, 4.1])) - assert(aabb.contains_point([0.0, 0.0, 0.0])) - assert(not aabb.contains_point([4.0, 2.0, 1.0])) - assert(not aabb.contains_point([1.0, -1.0, 5.0])) + return time.process_time_ns() / (10**9) + def test_marching_cubes_calls(): print("\nTesting marching cubes calls") - + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) - reconstruction = pysplashsurf.reconstruct_surface(particles, 
particle_radius=0.025, rest_density=1000.0, - smoothing_length=2.0, cube_size=0.5, - iso_surface_threshold=0.6) + reconstruction = pysplashsurf.reconstruct_surface( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=0.5, + iso_surface_threshold=0.6, + ) mesh = reconstruction.mesh - verts_before = len(mesh.get_vertices()) + verts_before = len(mesh.vertices) print("# of vertices before:", verts_before) - - mesh_with_data = pysplashsurf.create_mesh_with_data_object(mesh) + + mesh_with_data = pysplashsurf.MeshWithData(mesh) pysplashsurf.marching_cubes_cleanup(mesh_with_data, reconstruction.grid) - mesh = mesh_with_data.take_mesh() - verts_after = len(mesh.get_vertices()) + mesh = mesh_with_data.mesh + verts_after = len(mesh.vertices) print("# of vertices after:", verts_after) - assert(verts_after < verts_before) - -def test_memory_access(): - print("\nTesting memory copy vs take") - - particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float64) - reconstruction = pysplashsurf.reconstruct_surface(particles, particle_radius=0.025, rest_density=1000.0, - smoothing_length=2.0, cube_size=0.5, - iso_surface_threshold=0.6, - aabb_min=np.array([0.0, 0.0, 0.0]), aabb_max=np.array([2.0, 2.0, 2.0])) - mesh = reconstruction.mesh - - start = now_s() - triangles_copy = mesh.get_triangles() - vertices_copy = mesh.get_vertices() - copy_time = now_s() - start - print("Copy time:", copy_time) - - start = now_s() - vertices, triangles = mesh.take_vertices_and_triangles() - take_time = now_s() - start - print("Take time:", take_time) - - print("Copy time / Take time (Speedup):", copy_time / take_time) - - assert(np.allclose(vertices, vertices_copy)) - assert(np.allclose(triangles, triangles_copy)) - -def reconstruction_pipeline(input_file, output_file, *, attributes_to_interpolate=None, multi_threading=True, particle_radius=0.025, - rest_density=1000.0, smoothing_length=2.0, cube_size=0.5, - iso_surface_threshold=0.6, 
mesh_smoothing_weights=False, output_mesh_smoothing_weights=False, sph_normals=False, - mesh_smoothing_weights_normalization=13.0, mesh_smoothing_iters=5, normals_smoothing_iters=5, - mesh_aabb_min=None, mesh_aabb_max=None, mesh_cleanup=False, decimate_barnacles=False, keep_vertices=False, - compute_normals=False, output_raw_normals=False, output_raw_mesh=False, mesh_aabb_clamp_vertices=False, - check_mesh_closed=False, check_mesh_manifold=False, check_mesh_orientation=False, check_mesh_debug=False, - generate_quads=False, quad_max_edge_diag_ratio=1.75, quad_max_normal_angle=10.0, quad_max_interior_angle=135.0, - subdomain_grid=False, subdomain_num_cubes_per_dim=64): - + assert verts_after < verts_before + + +def reconstruction_pipeline( + input_file, + output_file, + *, + attributes_to_interpolate=None, + multi_threading=True, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=0.5, + iso_surface_threshold=0.6, + mesh_smoothing_weights=False, + output_mesh_smoothing_weights=False, + sph_normals=False, + mesh_smoothing_weights_normalization=13.0, + mesh_smoothing_iters=5, + normals_smoothing_iters=5, + mesh_aabb_min=None, + mesh_aabb_max=None, + mesh_cleanup=False, + decimate_barnacles=False, + keep_vertices=False, + compute_normals=False, + output_raw_normals=False, + output_raw_mesh=False, + mesh_aabb_clamp_vertices=False, + check_mesh_closed=False, + check_mesh_manifold=False, + check_mesh_orientation=False, + check_mesh_debug=False, + generate_quads=False, + quad_max_edge_diag_ratio=1.75, + quad_max_normal_angle=10.0, + quad_max_interior_angle=135.0, + subdomain_grid=False, + subdomain_num_cubes_per_dim=64, +): mesh = meshio.read(input_file) particles = np.array(mesh.points, dtype=np.float64) if attributes_to_interpolate is None: attributes_to_interpolate = [] - # Prepare attributes dictionary + # Prepare attributes dictionary attrs = {} for attr in attributes_to_interpolate: if attr in mesh.point_data: - if 
mesh.point_data[attr].dtype.kind == 'f': + if mesh.point_data[attr].dtype.kind == "f": attrs[attr] = mesh.point_data[attr].astype(np.float64) else: attrs[attr] = mesh.point_data[attr].astype(np.int64) - - mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline(particles, attributes_to_interpolate=attrs, multi_threading=multi_threading, particle_radius=particle_radius, - rest_density=rest_density, smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - mesh_smoothing_weights=mesh_smoothing_weights, sph_normals=sph_normals, - mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, - mesh_smoothing_iters=mesh_smoothing_iters, normals_smoothing_iters=normals_smoothing_iters, - mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_cleanup=mesh_cleanup, decimate_barnacles=decimate_barnacles, - keep_vertices=keep_vertices, compute_normals=compute_normals, output_raw_normals=output_raw_normals, output_raw_mesh=output_raw_mesh, - mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, subdomain_grid=subdomain_grid, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, output_mesh_smoothing_weights=output_mesh_smoothing_weights, - check_mesh_closed=check_mesh_closed, check_mesh_manifold=check_mesh_manifold, check_mesh_orientation=check_mesh_orientation, check_mesh_debug=check_mesh_debug, - generate_quads=generate_quads, quad_max_edge_diag_ratio=quad_max_edge_diag_ratio, quad_max_normal_angle=quad_max_normal_angle, quad_max_interior_angle=quad_max_interior_angle) - pysplashsurf.write_to_file(mesh_with_data, output_file, consume_object=True) + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + attributes_to_interpolate=attrs, + multi_threading=multi_threading, + particle_radius=particle_radius, + rest_density=rest_density, + smoothing_length=smoothing_length, + cube_size=cube_size, + iso_surface_threshold=iso_surface_threshold, + 
mesh_smoothing_weights=mesh_smoothing_weights, + sph_normals=sph_normals, + mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, + mesh_smoothing_iters=mesh_smoothing_iters, + normals_smoothing_iters=normals_smoothing_iters, + mesh_aabb_min=mesh_aabb_min, + mesh_aabb_max=mesh_aabb_max, + mesh_cleanup=mesh_cleanup, + decimate_barnacles=decimate_barnacles, + keep_vertices=keep_vertices, + compute_normals=compute_normals, + output_raw_normals=output_raw_normals, + output_raw_mesh=output_raw_mesh, + mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, + subdomain_grid=subdomain_grid, + subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, + output_mesh_smoothing_weights=output_mesh_smoothing_weights, + check_mesh_closed=check_mesh_closed, + check_mesh_manifold=check_mesh_manifold, + check_mesh_orientation=check_mesh_orientation, + check_mesh_debug=check_mesh_debug, + generate_quads=generate_quads, + quad_max_edge_diag_ratio=quad_max_edge_diag_ratio, + quad_max_normal_angle=quad_max_normal_angle, + quad_max_interior_angle=quad_max_interior_angle, + ) + + mesh_with_data.write_to_file(output_file) def test_no_post_processing(): start = now_s() - subprocess.run([BINARY_PATH] + f"reconstruct {VTK_PATH} -o {DIR.joinpath("test_bin.vtk")} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --mesh-cleanup=off --mesh-smoothing-weights=off --mesh-smoothing-iters=0 --normals=off --normals-smoothing-iters=0".split(), check=True) + subprocess.run( + [BINARY_PATH] + + f"reconstruct {VTK_PATH} -o {DIR.joinpath('test_bin.vtk')} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --mesh-cleanup=off --mesh-smoothing-weights=off --mesh-smoothing-iters=0 --normals=off --normals-smoothing-iters=0".split(), + check=True, + ) print("Binary done in", now_s() - start) - + start = now_s() - reconstruction_pipeline(VTK_PATH, DIR.joinpath("test.vtk"), particle_radius=np.float64(0.025), smoothing_length=np.float64(2.0), - cube_size=np.float64(0.5), 
iso_surface_threshold=np.float64(0.6), mesh_smoothing_weights=False, - mesh_smoothing_iters=0, normals_smoothing_iters=0, mesh_cleanup=False, compute_normals=False, subdomain_grid=True) + reconstruction_pipeline( + VTK_PATH, + DIR.joinpath("test.vtk"), + particle_radius=np.float64(0.025), + smoothing_length=np.float64(2.0), + cube_size=np.float64(0.5), + iso_surface_threshold=np.float64(0.6), + mesh_smoothing_weights=False, + mesh_smoothing_iters=0, + normals_smoothing_iters=0, + mesh_cleanup=False, + compute_normals=False, + subdomain_grid=True, + ) print("Python done in", now_s() - start) - + binary_mesh = meshio.read(DIR.joinpath("test_bin.vtk")) python_mesh = meshio.read(DIR.joinpath("test.vtk")) - + binary_verts = np.array(binary_mesh.points, dtype=np.float64) python_verts = np.array(python_mesh.points, dtype=np.float64) - + print("# of vertices binary:", len(binary_verts)) print("# of vertices python:", len(python_verts)) - - assert(len(binary_verts) == len(python_verts)) - + + assert len(binary_verts) == len(python_verts) + binary_verts.sort(axis=0) python_verts.sort(axis=0) - - assert(np.allclose(binary_verts, python_verts)) - + + assert np.allclose(binary_verts, python_verts) + + def test_with_post_processing(): start = now_s() - subprocess.run([BINARY_PATH] + f"reconstruct {VTK_PATH} -o {DIR.joinpath("test_bin.vtk")} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --interpolate_attribute velocity --decimate-barnacles=on --mesh-cleanup=on --mesh-smoothing-weights=on --mesh-smoothing-iters=25 --normals=on --normals-smoothing-iters=10 --output-smoothing-weights=on --generate-quads=off".split(), check=True) + subprocess.run( + [BINARY_PATH] + + f"reconstruct {VTK_PATH} -o {DIR.joinpath('test_bin.vtk')} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --interpolate_attribute velocity --decimate-barnacles=on --mesh-cleanup=on --mesh-smoothing-weights=on --mesh-smoothing-iters=25 --normals=on --normals-smoothing-iters=10 
--output-smoothing-weights=on --generate-quads=off".split(), + check=True, + ) print("Binary done in", now_s() - start) - + start = now_s() - reconstruction_pipeline(VTK_PATH, DIR.joinpath("test.vtk"), attributes_to_interpolate=["velocity"], particle_radius=np.float64(0.025), smoothing_length=np.float64(2.0), - cube_size=np.float64(0.5), iso_surface_threshold=np.float64(0.6), mesh_smoothing_weights=True, - mesh_smoothing_weights_normalization=np.float64(13.0), mesh_smoothing_iters=25, normals_smoothing_iters=10, - generate_quads=False, mesh_cleanup=True, compute_normals=True, subdomain_grid=True, decimate_barnacles=True, - output_mesh_smoothing_weights=True, output_raw_normals=True) + reconstruction_pipeline( + VTK_PATH, + DIR.joinpath("test.vtk"), + attributes_to_interpolate=["velocity"], + particle_radius=np.float64(0.025), + smoothing_length=np.float64(2.0), + cube_size=np.float64(0.5), + iso_surface_threshold=np.float64(0.6), + mesh_smoothing_weights=True, + mesh_smoothing_weights_normalization=np.float64(13.0), + mesh_smoothing_iters=25, + normals_smoothing_iters=10, + generate_quads=False, + mesh_cleanup=True, + compute_normals=True, + subdomain_grid=True, + decimate_barnacles=True, + output_mesh_smoothing_weights=True, + output_raw_normals=True, + ) print("Python done in", now_s() - start) - + binary_mesh = meshio.read(DIR.joinpath("test_bin.vtk")) python_mesh = meshio.read(DIR.joinpath("test.vtk")) - + # Compare number of vertices binary_verts = np.array(binary_mesh.points, dtype=np.float64) python_verts = np.array(python_mesh.points, dtype=np.float64) - + print("# of vertices binary:", len(binary_verts)) print("# of vertices python:", len(python_verts)) - - assert(len(binary_verts) == len(python_verts)) - + + assert len(binary_verts) == len(python_verts) + # Compare interpolated attribute binary_vels = binary_mesh.point_data["velocity"] python_vels = python_mesh.point_data["velocity"] - + binary_vels.sort(axis=0) python_vels.sort(axis=0) - - 
assert(np.allclose(binary_vels, python_vels)) - + + assert np.allclose(binary_vels, python_vels) + # Trimesh similarity test # TODO: Replace load_mesh call: the function tries to create temporary files which may fail on some CI runners binary_mesh = trimesh.load_mesh(DIR.joinpath("test_bin.vtk"), "vtk") python_mesh = trimesh.load_mesh(DIR.joinpath("test.vtk"), "vtk") - + (_, distance_bin, _) = trimesh.proximity.closest_point(binary_mesh, python_verts) (_, distance_py, _) = trimesh.proximity.closest_point(python_mesh, binary_verts) - distance = (np.sum(distance_bin) + np.sum(distance_py)) / (len(distance_bin) + len(python_verts)) + distance = (np.sum(distance_bin) + np.sum(distance_py)) / ( + len(distance_bin) + len(python_verts) + ) print("Distance:", distance) - assert(distance < 1e-5) - + assert distance < 1e-5 + # Naïve similarity test - + binary_verts.sort(axis=0) python_verts.sort(axis=0) - + print("Binary verts:", binary_verts) print("Python verts:", python_verts) - - assert(np.allclose(binary_verts, python_verts)) - -# test_bgeo() -# test_aabb_class() -# test_marching_cubes_calls() -# test_memory_access() -# test_with_post_processing() + + assert np.allclose(binary_verts, python_verts) diff --git a/pysplashsurf/tests/test_sdf.py b/pysplashsurf/tests/test_sdf.py new file mode 100644 index 00000000..5df83ec2 --- /dev/null +++ b/pysplashsurf/tests/test_sdf.py @@ -0,0 +1,33 @@ +import pysplashsurf +import numpy as np + + +def test_sphere_sdf_mc(): + radius = 1.0 + num_verts = 100 + + grid_size = radius * 2.2 + dx = grid_size / (num_verts - 1) + + translation = -0.5 * grid_size + + def make_sdf(): + coords = np.arange(num_verts, dtype=np.float32) * dx + translation + x, y, z = np.meshgrid(coords, coords, coords, indexing="ij") + sdf = np.sqrt(x**2 + y**2 + z**2) - radius + return sdf + + sdf = make_sdf() + + # Note: Currently this reconstruction assumes that inside the surface values get bigger (like a density function) + mesh, grid = pysplashsurf.marching_cubes( 
+ sdf, iso_surface_threshold=0.0, cube_size=dx, translation=[translation] * 3, return_grid=True + ) + + assert len(mesh.vertices) > 0 + + norms = np.linalg.norm(mesh.vertices, axis=1) + assert norms.min() > radius - 1e-4 + assert norms.max() < radius + 1e-4 + + assert pysplashsurf.check_mesh_consistency(mesh, grid) is None diff --git a/splashsurf/src/reconstruct.rs b/splashsurf/src/reconstruct.rs index b41119e0..643b4b5e 100644 --- a/splashsurf/src/reconstruct.rs +++ b/splashsurf/src/reconstruct.rs @@ -440,8 +440,8 @@ pub struct ReconstructionResult { pub tri_mesh: Option>>, /// Holds the reconstructed mixed triangle/quad mesh (only if [`generate_quads`](ReconstructionPostprocessingParameters::generate_quads) was enabled) pub tri_quad_mesh: Option>>, - /// Holds the initial [`SurfaceReconstruction`] with no post-processing applied (only if [`output_raw_mesh`](ReconstructionPostprocessingParameters::output_raw_mesh) was enabled) - pub raw_reconstruction: Option>, + /// Holds the initial [`SurfaceReconstruction`] with no post-processing applied (the unprocessed mesh is only contained if [`output_raw_mesh`](ReconstructionPostprocessingParameters::output_raw_mesh) was enabled) + pub raw_reconstruction: SurfaceReconstruction, } /// Parameters for the post-processing steps in the reconstruction pipeline @@ -1013,8 +1013,13 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( params: &splashsurf_lib::Parameters, postprocessing: &ReconstructionPostprocessingParameters, ) -> Result, anyhow::Error> { + // Ensure that we get global neighborhood lists if required for post-processing + let mut params = params.clone(); + params.global_neighborhood_list = + params.global_neighborhood_list || postprocessing.mesh_smoothing_weights; + // Perform the surface reconstruction - let reconstruction = splashsurf_lib::reconstruct_surface::(particle_positions, params)?; + let reconstruction = splashsurf_lib::reconstruct_surface::(particle_positions, ¶ms)?; // Filters a particle quantity 
based on an optional mask of particles inside the reconstruction domain fn filtered_quantity<'a, T: Clone>( @@ -1035,12 +1040,6 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( } } - let reconstruction_output = if postprocessing.output_raw_mesh { - Some(reconstruction.clone()) - } else { - None - }; - let grid = reconstruction.grid(); let mut mesh_with_data = MeshWithData::new(Cow::Borrowed(reconstruction.mesh())); @@ -1391,7 +1390,7 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( }; // Convert triangles to quads - let (mut tri_mesh, tri_quad_mesh) = if postprocessing.generate_quads { + let (tri_mesh, mut tri_quad_mesh) = if postprocessing.generate_quads { info!("Post-processing: Convert triangles to quads..."); let non_squareness_limit = R::from_float(postprocessing.quad_max_edge_diag_ratio); let normal_angle_limit = R::from_float(postprocessing.quad_max_normal_angle.to_radians()); @@ -1427,7 +1426,7 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( // TODO: Option to continue processing sequences even if checks fail. Maybe return special error type? 
if postprocessing.check_mesh_closed || postprocessing.check_mesh_manifold { - if let Err(err) = match (&tri_mesh, &tri_quad_mesh) { + if let Err(err) = match (&tri_mesh, &mut tri_quad_mesh) { (Some(mesh), None) => splashsurf_lib::marching_cubes::check_mesh_consistency( grid, &mesh.mesh, @@ -1435,12 +1434,12 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( postprocessing.check_mesh_manifold, postprocessing.check_mesh_debug, ), - (None, Some(_mesh)) => { + (None, Some(mesh)) => { info!("Checking for mesh consistency not implemented for quad mesh at the moment."); return Ok(ReconstructionResult { tri_mesh: None, - tri_quad_mesh: Some(_mesh.to_owned()), - raw_reconstruction: reconstruction_output, + tri_quad_mesh: Some(std::mem::take(mesh)), + raw_reconstruction: reconstruction, }); } _ => unreachable!(), @@ -1524,23 +1523,45 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( } } - match (&mut tri_mesh, &tri_quad_mesh) { + match (tri_mesh, tri_quad_mesh) { (Some(mesh), None) => { - let mut res: MeshWithData> = - MeshWithData::new(mesh.to_owned().mesh.into_owned()); - res.point_attributes = std::mem::take(&mut mesh.point_attributes); - res.cell_attributes = std::mem::take(&mut mesh.cell_attributes); + let MeshWithData { + mesh, + point_attributes, + cell_attributes, + } = mesh; + + // Avoid copy if original mesh was not modified + let (mesh, take_mesh) = if std::ptr::eq(mesh.as_ref(), reconstruction.mesh()) + && !postprocessing.output_raw_mesh + { + // Ensure that borrow of reconstruction is dropped + (Default::default(), true) + } else { + (mesh.into_owned(), false) + }; + + let mut reconstruction = reconstruction; + let mesh = if take_mesh { + std::mem::take(&mut reconstruction.mesh) + } else { + mesh + }; Ok(ReconstructionResult { - tri_mesh: Some(res), + tri_mesh: Some(MeshWithData { + mesh, + point_attributes, + cell_attributes, + }), tri_quad_mesh: None, - raw_reconstruction: reconstruction_output, + raw_reconstruction: reconstruction, }) } - 
(None, Some(_mesh)) => Ok(ReconstructionResult { + (None, Some(mesh)) => Ok(ReconstructionResult { tri_mesh: None, - tri_quad_mesh: Some(_mesh.to_owned()), - raw_reconstruction: reconstruction_output, + tri_quad_mesh: Some(mesh), + raw_reconstruction: reconstruction, }), _ => unreachable!(), } @@ -1581,9 +1602,6 @@ pub(crate) fn reconstruction_pipeline_from_path( if postprocessing.output_raw_mesh { profile!("write surface mesh to file"); - let reconstruction = reconstruction.expect( - "reconstruction_pipeline_from_data did not return a SurfaceReconstruction object", - ); let mesh = reconstruction.mesh(); let output_path = paths diff --git a/splashsurf/tests/test_pipeline.rs b/splashsurf/tests/test_pipeline.rs index 9bbd8482..6003e54f 100644 --- a/splashsurf/tests/test_pipeline.rs +++ b/splashsurf/tests/test_pipeline.rs @@ -27,11 +27,7 @@ fn test_basic_pipeline() -> Result<(), Box> { .as_ref() .expect("reconstruction should produce a triangle mesh") .mesh; - let raw_mesh = reconstruction - .raw_reconstruction - .as_ref() - .expect("raw surface should be present") - .mesh(); + let raw_mesh = reconstruction.raw_reconstruction.mesh(); vtk_format::write_vtk(mesh, "../out/bunny_test_basic_pipeline.vtk", "mesh")?; // Compare raw and final mesh @@ -207,11 +203,7 @@ fn test_basic_pipeline_postprocessing() -> Result<(), Box .as_ref() .expect("reconstruction should produce a triangle mesh") .mesh; - let raw_mesh = reconstruction - .raw_reconstruction - .as_ref() - .expect("raw surface should be present") - .mesh(); + let raw_mesh = reconstruction.raw_reconstruction.mesh(); vtk_format::write_vtk( mesh, "../out/bunny_test_basic_pipeline_postprocessing.vtk", @@ -294,11 +286,7 @@ fn test_basic_pipeline_postprocessing_with_aabb() -> Result<(), Box for MarchingCubesGrid { ]; if let Some(index) = self.grid.get_point(ijk) { - let index = self.grid.flatten_point_index(&index); + let index: usize = self.grid.flatten_point_index(&index).try_into().unwrap(); if self.values[index] < 
self.threshold { LevelSetSign::Outside } else { @@ -196,6 +196,7 @@ impl MarchingCubesLevelSet for MarchingCubesGrid { .get_point(ijk) .map(|p| self.grid.flatten_point_index(&p)) { + let index: usize = index.try_into().unwrap(); self.values[index] - self.threshold } else { f32::MIN // or some other value indicating outside @@ -278,8 +279,9 @@ fn reconstruct() -> Result<(), anyhow::Error> { densities }; - let mut function_values = vec![0.0; grid.points_per_dim().iter().product()]; - let mut function_values_vol_frac = vec![0.0; grid.points_per_dim().iter().product()]; + let n_grid_points = grid.points_per_dim().iter().product::() as usize; + let mut function_values = vec![0.0; n_grid_points]; + let mut function_values_vol_frac = vec![0.0; n_grid_points]; { profile!("evaluate_levelset_function"); @@ -326,7 +328,10 @@ fn reconstruct() -> Result<(), anyhow::Error> { let r = dx.norm(); if r <= kernel_evaluation_radius { - let index = grid.flatten_point_index(&point_index); + let index: usize = grid + .flatten_point_index(&point_index) + .try_into() + .unwrap(); //let vol = particle_rest_volume; let vol = @@ -370,7 +375,7 @@ fn reconstruct() -> Result<(), anyhow::Error> { let [ni, nj, nk] = grid.points_per_dim().clone(); - let mut points_flat = Vec::with_capacity(3 * ni * nj * nk); + let mut points_flat = Vec::with_capacity(3 * (ni * nj * nk) as usize); for i in 0..ni { for j in 0..nj { for k in 0..nk { diff --git a/splashsurf_lib/src/aabb.rs b/splashsurf_lib/src/aabb.rs index 8545b950..8313c807 100644 --- a/splashsurf_lib/src/aabb.rs +++ b/splashsurf_lib/src/aabb.rs @@ -212,12 +212,12 @@ where self.min + (self.extents() / (R::one() + R::one())) } - /// Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate + /// Checks if the given AABB is inside the AABB, the AABB is considered to be half-open to its max coordinate pub fn contains_aabb(&self, other: &Self) -> bool { self.contains_point(&other.min) || 
self.contains_point(&other.max) } - /// Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate + /// Checks if the given point is inside the AABB, the AABB is considered to be half-open to its max coordinate pub fn contains_point(&self, point: &SVector) -> bool { point >= &self.min && point < &self.max } diff --git a/splashsurf_lib/src/dense_subdomains.rs b/splashsurf_lib/src/dense_subdomains.rs index 5ad587f6..cc57c87d 100644 --- a/splashsurf_lib/src/dense_subdomains.rs +++ b/splashsurf_lib/src/dense_subdomains.rs @@ -19,6 +19,7 @@ use crate::neighborhood_search::{ FlatNeighborhoodList, neighborhood_search_spatial_hashing_flat_filtered, neighborhood_search_spatial_hashing_parallel, }; +use crate::topology::Direction; use crate::uniform_grid::{EdgeIndex, GridConstructionError, UniformCartesianCubeGrid3d}; use crate::{ Aabb3d, MapType, Parameters, RealConvert, SpatialDecomposition, SurfaceReconstruction, new_map, @@ -28,7 +29,7 @@ use crate::{Index, Real}; // TODO: Implement single-threaded processing -type GlobalIndex = u64; +type GlobalIndex = i64; pub(crate) struct ParametersSubdomainGrid { /// SPH particle radius (in simulation units) @@ -842,6 +843,8 @@ pub(crate) fn reconstruction( .copied() .zip(subdomain_particle_densities.iter().copied()) { + // Note: this loop assumes that enclosing_cell can return negative indices for ghost particles + // Get grid cell containing particle let particle_cell = mc_grid.enclosing_cell(&p_i); @@ -849,9 +852,9 @@ pub(crate) fn reconstruction( // We want to loop over the vertices of the enclosing cells plus all points in `cube_radius` distance from the cell let lower = [ - (particle_cell[0] - cube_radius).max(I::zero()), - (particle_cell[1] - cube_radius).max(I::zero()), - (particle_cell[2] - cube_radius).max(I::zero()), + particle_cell[0].saturating_sub(&cube_radius).max(I::zero()), + particle_cell[1].saturating_sub(&cube_radius).max(I::zero()), + 
particle_cell[2].saturating_sub(&cube_radius).max(I::zero()), ]; let upper = [ @@ -870,6 +873,8 @@ pub(crate) fn reconstruction( let point_ijk = [i, j, k]; let local_point = mc_grid .get_point(point_ijk) + // TODO: Can this fail if the ghost margin is too large such that upper + // falls outside of the subdomain grid? .expect("point has to be part of the subdomain grid"); //let point_coordinates = mc_grid.point_coordinates(&point); @@ -1095,6 +1100,8 @@ pub(crate) fn reconstruction( .copied() .zip(subdomain_particle_densities.iter().copied()) { + // Note: this loop assumes that enclosing_cell can return negative indices for ghost particles + // Get grid cell containing particle let particle_cell = mc_grid.enclosing_cell(&p_i); @@ -1102,9 +1109,9 @@ pub(crate) fn reconstruction( // We want to loop over the vertices of the enclosing cells plus all points in `cube_radius` distance from the cell let lower = [ - (particle_cell[0] - cube_radius).max(I::zero()), - (particle_cell[1] - cube_radius).max(I::zero()), - (particle_cell[2] - cube_radius).max(I::zero()), + particle_cell[0].saturating_sub(&cube_radius).max(I::zero()), + particle_cell[1].saturating_sub(&cube_radius).max(I::zero()), + particle_cell[2].saturating_sub(&cube_radius).max(I::zero()), ]; let upper = [ @@ -1570,7 +1577,27 @@ pub(crate) mod subdomain_classification { && is_in_ghost_margin_single_dim(z_step, 2) }; - // Loop over all 27 subdomains around and including the owning subdomain + let checked_apply_step = |index: I, step: i8| -> Option { + let direction = match step { + -1 => Some(Direction::Negative), + 0 => None, + 1 => Some(Direction::Positive), + _ => unsafe { std::hint::unreachable_unchecked() }, + }; + direction + .map(|d| d.checked_apply_step(index, I::one())) + .unwrap_or(Some(index)) + }; + + let checked_apply_step_ijk = + |ijk: [I; 3], x_step: i8, y_step: i8, z_step: i8| -> Option<[I; 3]> { + Some([ + checked_apply_step(ijk[0], x_step)?, + checked_apply_step(ijk[1], y_step)?, + 
checked_apply_step(ijk[2], z_step)?, + ]) + }; + for &i in &[-1, 0, 1] { for &j in &[-1, 0, 1] { for &k in &[-1, 0, 1] { @@ -1578,14 +1605,10 @@ pub(crate) mod subdomain_classification { let in_ghost_margin = is_in_ghost_margin(i, j, k); if in_ghost_margin { - let neighbor_subdomain_ijk = [ - subdomain_ijk[0] + I::from(i).unwrap(), - subdomain_ijk[1] + I::from(j).unwrap(), - subdomain_ijk[2] + I::from(k).unwrap(), - ]; - // The potential neighbor subdomain might not even be part of our computation domain - if let Some(cell) = subdomain_grid.get_cell(neighbor_subdomain_ijk) { - // If it is, it can be added as a subdomain of the particle + if let Some(neighbor_subdomain_ijk) = + checked_apply_step_ijk(subdomain_ijk, i, j, k) + && let Some(cell) = subdomain_grid.get_cell(neighbor_subdomain_ijk) + { subdomains.push(subdomain_grid.flatten_cell_index(&cell)); } } diff --git a/splashsurf_lib/src/density_map.rs b/splashsurf_lib/src/density_map.rs index 3f3ca08f..9b82a70a 100644 --- a/splashsurf_lib/src/density_map.rs +++ b/splashsurf_lib/src/density_map.rs @@ -3,7 +3,6 @@ //! This module provides functions for the computation of per-particle densities and the discretization //! of the resulting fluid density field by mapping onto a discrete background grid. //! -//! Currently, only sparse density maps are implemented. //! //! ## Sparse density maps //! The [`DensityMap`] stores fluid density values for each point of an implicit background grid @@ -12,9 +11,15 @@ //! In case of a sparse density map, the values are stored in a hashmap. The keys are so called //! "flat point indices". These are computed from the background grid point coordinates `(i,j,k)` //! analogous to multidimensional array index flattening. That means for a grid with dimensions -//! `[n_x, n_y, n_z]`, the flat point index is given by the expression `i*n_x + j*n_y + k*n_z`. +//! `[n_x, n_y, n_z]`, the flat point index is given by the expression `i*n_y*n_z + j*n_z + k`. //! 
For these point index operations, the [`UniformGrid`] is used. //! +//! ## Dense density maps +//! For some applications, it might be desirable to allocate the storage for all grid points +//! in a contiguous array. This is supported by the [`DensityMap::Dense`] variant. The values +//! can either be borrowed (a slice) or owned (a vector). Background grid coordinates are mapped +//! to indices in this array (and vice versa) using the same flattening scheme as for the sparse maps. +//! //! Note that all density mapping functions always use the global background grid for flat point //! indices, even if the density map is only generated for a smaller subdomain. @@ -29,6 +34,7 @@ use dashmap::ReadOnlyView as ReadDashMap; use log::{info, trace, warn}; use nalgebra::Vector3; use rayon::prelude::*; +use std::borrow::Cow; use std::cell::RefCell; use thiserror::Error as ThisError; use thread_local::ThreadLocal; @@ -217,37 +223,59 @@ pub fn parallel_compute_particle_densities( /// A sparse density map /// /// The density map contains values for all points of the background grid where the density is not -/// trivially zero (which is the case when a point is outside of the compact support of any particles). +/// trivially zero (which is the case when a point is outside the compact support of any particles). 
#[derive(Clone, Debug)] -pub enum DensityMap { +pub enum DensityMap<'a, I: Index, R: Real> { Standard(MapType), DashMap(ReadDashMap), + Dense(Cow<'a, [R]>), } -impl Default for DensityMap { +/// Owned version of [`DensityMap`] (with static lifetime) +pub type OwnedDensityMap = DensityMap<'static, I, R>; + +impl Default for OwnedDensityMap { fn default() -> Self { DensityMap::Standard(MapType::default()) } } -impl From> for DensityMap { +impl From> for OwnedDensityMap { fn from(map: MapType) -> Self { Self::Standard(map) } } -impl From> for DensityMap { +impl From> for OwnedDensityMap { fn from(map: ParallelMapType) -> Self { Self::DashMap(map.into_read_only()) } } -impl DensityMap { +impl From> for DensityMap<'static, I, R> { + fn from(values: Vec) -> Self { + Self::Dense(values.into()) + } +} + +impl<'a, I: Index, R: Real> From<&'a [R]> for DensityMap<'a, I, R> { + fn from(values: &'a [R]) -> Self { + Self::Dense(values.into()) + } +} + +impl<'a, I: Index, R: Real> DensityMap<'a, I, R> { /// Converts the contained map into a vector of tuples of (flat_point_index, density) pub fn to_vec(&self) -> Vec<(I, R)> { match self { DensityMap::Standard(map) => map.iter().map(|(&i, &r)| (i, r)).collect(), DensityMap::DashMap(map) => map.iter().map(|(&i, &r)| (i, r)).collect(), + DensityMap::Dense(values) => values + .iter() + .copied() + .enumerate() + .map(|(i, r)| (I::from_usize(i).unwrap(), r)) + .collect(), } } @@ -256,6 +284,7 @@ impl DensityMap { match self { DensityMap::Standard(map) => map.len(), DensityMap::DashMap(map) => map.len(), + DensityMap::Dense(values) => values.len(), } } @@ -264,6 +293,7 @@ impl DensityMap { match self { DensityMap::Standard(map) => map.get(&flat_point_index).copied(), DensityMap::DashMap(map) => map.get(&flat_point_index).copied(), + DensityMap::Dense(values) => values.get(flat_point_index.to_usize()?).copied(), } } @@ -273,6 +303,9 @@ impl DensityMap { match self { DensityMap::Standard(map) => map.iter().for_each(|(&i, &r)| f(i, r)), 
DensityMap::DashMap(map) => map.iter().for_each(|(&i, &r)| f(i, r)), + DensityMap::Dense(values) => values.iter().copied().enumerate().for_each(|(i, r)| { + f(I::from_usize(i).unwrap(), r); + }), } } } @@ -339,7 +372,7 @@ pub fn sequential_generate_sparse_density_map( particle_rest_mass: R, compact_support_radius: R, cube_size: R, -) -> Result, DensityMapError> { +) -> Result, DensityMapError> { profile!("sequential_generate_sparse_density_map"); let mut sparse_densities = new_map(); @@ -386,7 +419,7 @@ pub fn parallel_generate_sparse_density_map( particle_rest_mass: R, compact_support_radius: R, cube_size: R, -) -> Result, DensityMapError> { +) -> Result, DensityMapError> { profile!("parallel_generate_sparse_density_map"); // Each thread will write to its own local density map @@ -518,6 +551,12 @@ pub(crate) fn compute_kernel_evaluation_radius( compact_support_radius: R, cube_size: R, ) -> GridKernelExtents { + assert!( + compact_support_radius >= R::zero(), + "compact support radius must be non-negative" + ); + assert!(cube_size > R::zero(), "cube size must be positive"); + // The number of cells in each direction from a particle that can be affected by its compact support let half_supported_cells_real = (compact_support_radius / cube_size).ceil(); // Convert to index type for cell and point indexing diff --git a/splashsurf_lib/src/lib.rs b/splashsurf_lib/src/lib.rs index ac132613..ce193c19 100644 --- a/splashsurf_lib/src/lib.rs +++ b/splashsurf_lib/src/lib.rs @@ -239,15 +239,15 @@ impl Parameters { #[derive(Clone, Debug)] pub struct SurfaceReconstruction { /// Background grid that was used as a basis for generating the density map for marching cubes - grid: UniformGrid, + pub grid: UniformGrid, /// Per particle densities (contains only data of particles inside the domain) - particle_densities: Option>, + pub particle_densities: Option>, /// If an AABB was specified to restrict the reconstruction, this stores per input particle whether they were inside - 
particle_inside_aabb: Option>, + pub particle_inside_aabb: Option>, /// Per particles neighbor lists - particle_neighbors: Option>>, + pub particle_neighbors: Option>>, /// Surface mesh that is the result of the surface reconstruction - mesh: TriMesh3d, + pub mesh: TriMesh3d, /// Workspace with allocated memory for subsequent surface reconstructions workspace: ReconstructionWorkspace, } @@ -266,6 +266,7 @@ impl Default for SurfaceReconstruction { } } +// TODO: Remove these functions impl SurfaceReconstruction { /// Returns a reference to the surface mesh that is the result of the reconstruction pub fn mesh(&self) -> &TriMesh3d { diff --git a/splashsurf_lib/src/mesh.rs b/splashsurf_lib/src/mesh.rs index dfefa09d..a12df31f 100644 --- a/splashsurf_lib/src/mesh.rs +++ b/splashsurf_lib/src/mesh.rs @@ -232,7 +232,7 @@ impl TriangleOrQuadCell { pub struct MixedTriQuadMesh3d { /// Coordinates of all vertices of the mesh pub vertices: Vec>, - /// All triangle cells of the mesh + /// All triangle and quad cells of the mesh pub cells: Vec, } diff --git a/splashsurf_lib/src/postprocessing.rs b/splashsurf_lib/src/postprocessing.rs index fe2a2c38..a103fe73 100644 --- a/splashsurf_lib/src/postprocessing.rs +++ b/splashsurf_lib/src/postprocessing.rs @@ -53,31 +53,43 @@ pub fn par_laplacian_smoothing_inplace( /// Laplacian smoothing of a normal field pub fn par_laplacian_smoothing_normals_inplace( - normals: &mut Vec>, + normals: &mut [Vector3], vertex_connectivity: &[Vec], iterations: usize, ) { profile!("par_laplacian_smoothing_normals_inplace"); - let mut normal_buffer = normals.clone(); + let mut normals_buffer_vec = vec![Vector3::zeros(); normals.len()]; + let mut normals_old = normals_buffer_vec.as_mut_slice(); + let mut normals_smoothed = normals; + let mut buffer_contains_output = false; for _ in 0..iterations { profile!("smoothing iteration"); - std::mem::swap(&mut normal_buffer, normals); + std::mem::swap(&mut normals_old, &mut normals_smoothed); + buffer_contains_output 
= !buffer_contains_output; - normals + // After the first swap, normals_smoothed points to the temporary buffer which will be used + // to store the smoothed normals below. This alternates every iteration. + + normals_smoothed .par_iter_mut() - .enumerate() - .for_each(|(i, normal_i)| { + .zip(vertex_connectivity.par_iter()) + .for_each(|(normal_i, connectivity_i)| { *normal_i = Vector3::zeros(); - for j in vertex_connectivity[i].iter().copied() { - let normal_j = normal_buffer[j]; + for j in connectivity_i.iter().copied() { + let normal_j = normals_old[j]; *normal_i += normal_j; } normal_i.normalize_mut(); }); } + + if buffer_contains_output { + // normals_smoothed points to temporary buffer, copy back to original slice + normals_old.copy_from_slice(normals_smoothed); + } } /// Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren @@ -677,7 +689,7 @@ pub fn merge_double_barnacle_configurations_he(mesh: &mut HalfEdgeTriMe pub fn convert_tris_to_quads( mesh: &TriMesh3d, non_squareness_limit: R, - normal_angle_limit_rad: R, + normal_angle_limit: R, max_interior_angle: R, ) -> MixedTriQuadMesh3d { profile!("tri_to_quad"); @@ -694,7 +706,7 @@ pub fn convert_tris_to_quads( }) .collect::>(); - let min_dot = normal_angle_limit_rad.cos(); + let min_dot = normal_angle_limit.cos(); let max_non_squareness = non_squareness_limit; let sqrt_two = R::from_float(2.0_f64.sqrt()); diff --git a/splashsurf_lib/src/sph_interpolation.rs b/splashsurf_lib/src/sph_interpolation.rs index c99e77ab..6fe432e0 100644 --- a/splashsurf_lib/src/sph_interpolation.rs +++ b/splashsurf_lib/src/sph_interpolation.rs @@ -73,6 +73,11 @@ impl SphInterpolator { } } + /// Returns the number of particles stored in the interpolator + pub fn size(&self) -> usize { + self.tree.size() + } + /// Interpolates surface normals (i.e. 
normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation, appends to the given vector pub fn interpolate_normals_inplace( &self, diff --git a/splashsurf_lib/src/traits.rs b/splashsurf_lib/src/traits.rs index 5458091e..28a08096 100644 --- a/splashsurf_lib/src/traits.rs +++ b/splashsurf_lib/src/traits.rs @@ -2,7 +2,8 @@ use bytemuck::Pod; use nalgebra::{RealField, SMatrix}; use num_integer::Integer; use num_traits::{ - Bounded, CheckedAdd, CheckedMul, CheckedSub, FromPrimitive, NumCast, SaturatingSub, ToPrimitive, + Bounded, CheckedAdd, CheckedMul, CheckedSub, FromPrimitive, NumCast, SaturatingSub, Signed, + ToPrimitive, }; use simba::scalar::SupersetOf; use std::fmt::{Debug, Display}; @@ -44,6 +45,7 @@ pub trait Index: Copy + Hash + Integer + + Signed + Bounded + CheckedAdd + CheckedSub @@ -153,6 +155,7 @@ impl Index for I where I: Copy + Hash + Integer + + Signed + Bounded + CheckedAdd + CheckedSub diff --git a/splashsurf_lib/src/uniform_grid.rs b/splashsurf_lib/src/uniform_grid.rs index 299fd6af..eb4d3be5 100644 --- a/splashsurf_lib/src/uniform_grid.rs +++ b/splashsurf_lib/src/uniform_grid.rs @@ -230,7 +230,7 @@ impl UniformCartesianCubeGrid3d { } /// Constructs a degenerate grid with zero extents, zero cells and zero points - pub(crate) fn new_zero() -> Self { + pub fn new_zero() -> Self { Self { aabb: Aabb3d::new(Vector3::zeros(), Vector3::zeros()), cell_size: R::zero(), @@ -435,6 +435,9 @@ impl UniformCartesianCubeGrid3d { } /// Returns the grid cell index triplet of the cell enclosing a point with the given coordinates in space + /// + /// Note that this function does not check if the point is part of the grid and thus might also + /// return negative indices or indices larger than the number of cells per dimension. #[inline(always)] pub fn enclosing_cell(&self, coord: &Vector3) -> [I; 3] { let normalized_coord = (coord - self.aabb.min()) / self.cell_size;