diff --git a/CLAUDE.md b/CLAUDE.md index 3bb51a2410..1be55e7fce 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -177,6 +177,11 @@ go test -run TestLightweight -timeout 15m End-to-end tests for the off-chain tree maintenance service. ```bash +# Using just (preferred, from forester/ directory): +just -f forester/justfile local # Run e2e test without rebuilding SBF programs +just -f forester/justfile test # Build SBF test deps first, then run e2e test + +# Or directly: TEST_MODE=local cargo test --package forester e2e_test -- --nocapture ``` diff --git a/Cargo.lock b/Cargo.lock index dfc954f985..51c3103bd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -520,7 +520,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", - "anstyle-parse", + "anstyle-parse 0.2.7", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstream" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" +dependencies = [ + "anstyle", + "anstyle-parse 1.0.0", "anstyle-query", "anstyle-wincon", "colorchoice", @@ -530,9 +545,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" [[package]] name = "anstyle-parse" @@ -543,6 +558,15 @@ dependencies = [ "utf8parse", ] +[[package]] +name = "anstyle-parse" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" +dependencies = [ + "utf8parse", +] + [[package]] name = "anstyle-query" version = "1.1.5" @@ 
-1109,11 +1133,12 @@ dependencies = [ [[package]] name = "borsh" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +checksum = "cfd1e3f8955a5d7de9fab72fc8373fade9fb8a703968cb200ae3dc6cf08e185a" dependencies = [ - "borsh-derive 1.6.0", + "borsh-derive 1.6.1", + "bytes", "cfg_aliases", ] @@ -1132,9 +1157,9 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" +checksum = "bfcfdc083699101d5a7965e49925975f2f55060f94f9a05e7187be95d530ca59" dependencies = [ "once_cell", "proc-macro-crate 3.5.0", @@ -1273,9 +1298,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.56" +version = "1.2.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" +checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423" dependencies = [ "find-msvc-tools", "jobserver", @@ -1353,9 +1378,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.60" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" +checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351" dependencies = [ "clap_builder", "clap_derive", @@ -1363,11 +1388,11 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.60" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" dependencies = [ - "anstream", + "anstream 1.0.0", "anstyle", "clap_lex", "strsim 0.11.1", @@ -1375,9 
+1400,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.55" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" +checksum = "1110bd8a634a1ab8cb04345d8d878267d57c3cf1b38d91b71af6686408bbca6a" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1387,9 +1412,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" [[package]] name = "client-test" @@ -1440,9 +1465,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" [[package]] name = "combine" @@ -1806,8 +1831,18 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.21.3", + "darling_macro 0.21.3", +] + +[[package]] +name = "darling" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" +dependencies = [ + "darling_core 0.23.0", + "darling_macro 0.23.0", ] [[package]] @@ -1824,13 +1859,37 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "darling_core" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" +dependencies = [ + "ident_case", + "proc-macro2", + "quote", + "strsim 
0.11.1", + "syn 2.0.117", +] + [[package]] name = "darling_macro" version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "darling_core", + "darling_core 0.21.3", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "darling_macro" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" +dependencies = [ + "darling_core 0.23.0", "quote", "syn 2.0.117", ] @@ -1903,9 +1962,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", ] @@ -2230,7 +2289,7 @@ version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" dependencies = [ - "anstream", + "anstream 0.6.21", "anstyle", "env_filter", "jiff", @@ -2404,7 +2463,7 @@ dependencies = [ "base64 0.13.1", "borsh 0.10.4", "bs58", - "clap 4.5.60", + "clap 4.6.0", "create-address-test-program", "csdk-anchor-full-derived-test", "dashmap 6.1.0", @@ -3402,9 +3461,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" [[package]] name = "jiff" @@ -4016,7 +4075,7 @@ name = "light-instruction-decoder-derive" version = "0.4.0" dependencies = [ "bs58", - "darling", + "darling 0.21.3", "heck 0.5.0", "proc-macro2", "quote", @@ -4199,6 +4258,7 @@ dependencies = [ 
"account-compression", "aligned-sized", "anchor-lang", + "bitvec", "borsh 0.10.4", "light-account-checks", "light-batched-merkle-tree", @@ -4256,7 +4316,7 @@ name = "light-sdk-macros" version = "0.23.0" dependencies = [ "borsh 0.10.4", - "darling", + "darling 0.21.3", "light-account-checks", "light-compressed-account", "light-hasher", @@ -4964,9 +5024,9 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +checksum = "5d0bca838442ec211fa11de3a8b0e0e8f3a4522575b5c4c06ed722e005036f26" dependencies = [ "num_enum_derive", "rustversion", @@ -4974,9 +5034,9 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +checksum = "680998035259dcfcafe653688bf2aa6d3e2dc05e98be6ab46afb089dc84f1df8" dependencies = [ "proc-macro-crate 3.5.0", "proc-macro2", @@ -5001,9 +5061,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.3" +version = "1.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" [[package]] name = "once_cell_polyfill" @@ -5030,9 +5090,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.75" +version = "0.10.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +checksum = "951c002c75e16ea2c65b8c7e4d3d51d5530d8dfa7d060b4776828c88cfb18ecf" dependencies = [ "bitflags 2.11.0", "cfg-if", @@ -5071,9 +5131,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.111" +version = "0.9.112" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb" dependencies = [ "cc", "libc", @@ -5247,9 +5307,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pinocchio" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b971851087bc3699b001954ad02389d50c41405ece3548cbcafc88b3e20017a" +checksum = "b8afe4f39c0e25cc471b35b89963312791a5162d45a86578cbeaad9e5e7d1b3b" [[package]] name = "pinocchio-light-program-test" @@ -5404,9 +5464,9 @@ checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "portable-atomic-util" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" +checksum = "091397be61a01d4be58e7841595bd4bfedb15f1cd54977d79b8271e94ed799a3" dependencies = [ "portable-atomic", ] @@ -5466,7 +5526,7 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" dependencies = [ - "toml_edit 0.25.4+spec-1.1.0", + "toml_edit 0.25.5+spec-1.1.0", ] [[package]] @@ -6751,9 +6811,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.17.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381b283ce7bc6b476d903296fb59d0d36633652b633b27f64db4fb46dcbfc3b9" +checksum = "dd5414fad8e6907dbdd5bc441a50ae8d6e26151a03b1de04d89a5576de61d01f" dependencies = [ "serde_core", "serde_with_macros", @@ -6761,11 +6821,11 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.17.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a6d4e30573c8cb306ed6ab1dca8423eec9a463ea0e155f45399455e0368b27e0" +checksum = "d3db8978e608f1fe7357e211969fd9abdcae80bac1ba7a3369bb7eb6b404eb65" dependencies = [ - "darling", + "darling 0.23.0", "proc-macro2", "quote", "syn 2.0.117", @@ -7226,7 +7286,7 @@ version = "2.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68548570c38a021c724b5aa0112f45a54bdf7ff1b041a042848e034a95a96994" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "futures", "solana-account", "solana-banks-interface", @@ -7340,7 +7400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "718333bcd0a1a7aed6655aa66bef8d7fb047944922b2d3a18f49cbc13e73d004" dependencies = [ "borsh 0.10.4", - "borsh 1.6.0", + "borsh 1.6.1", ] [[package]] @@ -7655,7 +7715,7 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8432d2c4c22d0499aa06d62e4f7e333f81777b3d7c96050ae9e5cb71a8c3aee4" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "serde", "serde_derive", "solana-instruction", @@ -7964,7 +8024,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b96e9f0300fa287b545613f007dfe20043d7812bee255f418c1eb649c93b63" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "bytemuck", "bytemuck_derive", "five8", @@ -7993,7 +8053,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bab5682934bd1f65f8d2c16f21cb532526fcc1a09f796e2cacdb091eee5774ad" dependencies = [ "bincode", - "borsh 1.6.0", + "borsh 1.6.1", "getrandom 0.2.17", "js-sys", "num-traits", @@ -8410,7 +8470,7 @@ dependencies = [ "bincode", "blake3", "borsh 0.10.4", - "borsh 1.6.0", + "borsh 1.6.1", "bs58", "bytemuck", "console_error_panic_hook", @@ -8499,7 +8559,7 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ee2e0217d642e2ea4bee237f37bd61bb02aec60da3647c48ff88f6556ade775" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", 
"num-traits", "serde", "serde_derive", @@ -8583,7 +8643,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b62adb9c3261a052ca1f999398c388f1daf558a1b492f60a6d9e64857db4ff1" dependencies = [ "borsh 0.10.4", - "borsh 1.6.0", + "borsh 1.6.1", "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", @@ -9009,7 +9069,7 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baa3120b6cdaa270f39444f5093a90a7b03d296d362878f7a6991d6de3bbe496" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "libsecp256k1", "solana-define-syscall 2.3.0", "thiserror 2.0.18", @@ -9187,7 +9247,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5269e89fde216b4d7e1d1739cf5303f8398a1ff372a81232abbee80e554a838c" dependencies = [ "borsh 0.10.4", - "borsh 1.6.0", + "borsh 1.6.1", "num-traits", "serde", "serde_derive", @@ -9588,7 +9648,7 @@ dependencies = [ "agave-reserved-account-keys", "base64 0.22.1", "bincode", - "borsh 1.6.0", + "borsh 1.6.1", "bs58", "log", "serde", @@ -9869,7 +9929,7 @@ version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76fee7d65013667032d499adc3c895e286197a35a0d3a4643c80e7fd3e9969e3" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-derive 0.4.2", "num-traits", "solana-program", @@ -9885,7 +9945,7 @@ version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae179d4a26b3c7a20c839898e6aed84cb4477adf108a366c95532f058aea041b" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-derive 0.4.2", "num-traits", "solana-program", @@ -10007,7 +10067,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d994afaf86b779104b4a95ba9ca75b8ced3fdb17ee934e38cb69e72afbe17799" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "bytemuck", "bytemuck_derive", "num-derive 0.4.2", @@ -10445,7 +10505,7 @@ version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dfb9c89dbc877abd735f05547dcf9e6e12c00c11d6d74d8817506cab4c99fdbb" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-derive 0.4.2", "num-traits", "solana-borsh", @@ -10466,7 +10526,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "304d6e06f0de0c13a621464b1fd5d4b1bebf60d15ca71a44d3839958e0da16ee" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-derive 0.4.2", "num-traits", "solana-borsh", @@ -10844,9 +10904,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.26.0" +version = "3.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand 2.3.0", "getrandom 0.4.2", @@ -10933,30 +10993,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" dependencies = [ "num-conv", "time-core", @@ 
-10993,9 +11053,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" dependencies = [ "tinyvec_macros", ] @@ -11173,17 +11233,17 @@ dependencies = [ [[package]] name = "toml" -version = "1.0.6+spec-1.1.0" +version = "1.0.7+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399b1124a3c9e16766831c6bba21e50192572cdd98706ea114f9502509686ffc" +checksum = "dd28d57d8a6f6e458bc0b8784f8fdcc4b99a437936056fa122cb234f18656a96" dependencies = [ "indexmap", "serde_core", "serde_spanned 1.0.4", - "toml_datetime 1.0.0+spec-1.1.0", + "toml_datetime 1.0.1+spec-1.1.0", "toml_parser", "toml_writer", - "winnow", + "winnow 1.0.0", ] [[package]] @@ -11197,9 +11257,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "1.0.0+spec-1.1.0" +version = "1.0.1+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" +checksum = "9b320e741db58cac564e26c607d3cc1fdc4a88fd36c879568c07856ed83ff3e9" dependencies = [ "serde_core", ] @@ -11215,28 +11275,28 @@ dependencies = [ "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", - "winnow", + "winnow 0.7.15", ] [[package]] name = "toml_edit" -version = "0.25.4+spec-1.1.0" +version = "0.25.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7193cbd0ce53dc966037f54351dbbcf0d5a642c7f0038c382ef9e677ce8c13f2" +checksum = "8ca1a40644a28bce036923f6a431df0b34236949d111cc07cb6dca830c9ef2e1" dependencies = [ "indexmap", - "toml_datetime 1.0.0+spec-1.1.0", + "toml_datetime 1.0.1+spec-1.1.0", "toml_parser", - "winnow", + "winnow 1.0.0", ] [[package]] name = "toml_parser" -version = "1.0.9+spec-1.1.0" +version = 
"1.0.10+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420" dependencies = [ - "winnow", + "winnow 1.0.0", ] [[package]] @@ -11247,9 +11307,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.6+spec-1.1.0" +version = "1.0.7+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" +checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d" [[package]] name = "tower" @@ -11382,9 +11442,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.22" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" dependencies = [ "matchers", "nu-ansi-term", @@ -11419,7 +11479,7 @@ dependencies = [ "serde_json", "target-triple", "termcolor", - "toml 1.0.6+spec-1.1.0", + "toml 1.0.7+spec-1.1.0", ] [[package]] @@ -12303,6 +12363,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -12468,7 +12537,7 @@ dependencies = [ "base64 0.13.1", "bs58", "chrono", - "clap 4.5.60", + "clap 4.6.0", "dirs", "groth16-solana", "light-batched-merkle-tree", @@ -12536,18 +12605,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.42" +version = "0.8.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f2578b716f8a7a858b7f02d5bd870c14bf4ddbbcf3a4c05414ba6503640505e3" +checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.42" +version = "0.8.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e6cc098ea4d3bd6246687de65af3f920c430e236bee1e3bf2e441463f08a02f" +checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 54f0c4e1ef..110bcc81b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,6 +120,9 @@ version = "0.1.0" edition = "2021" [workspace.dependencies] +# Pin time <0.3.46 -- 0.3.46+ pulls time-core 0.1.8 which uses edition2024, +# incompatible with Solana platform-tools Cargo 1.84. +time = ">=0.3, <0.3.46" solana-banks-client = { version = "2.3" } solana-banks-interface = { version = "2.3" } solana-program = "2.3" diff --git a/external/photon b/external/photon index 7a649f9c45..a52fd36570 160000 --- a/external/photon +++ b/external/photon @@ -1 +1 @@ -Subproject commit 7a649f9c45a138ef47b090445163abe84775145c +Subproject commit a52fd365706e235f538689c3afdf94c8371db80f diff --git a/forester/justfile b/forester/justfile index ad7798ecae..95b5c938b3 100644 --- a/forester/justfile +++ b/forester/justfile @@ -20,6 +20,10 @@ build-test-deps: test: build-test-deps cargo test --package forester e2e_test -- --nocapture +# Run e2e test without rebuilding SBF programs +local: + cargo test --package forester e2e_test -- --nocapture + # Builds csdk-anchor-full-derived-test program for compressible tests build-compressible-test-deps: cargo build-sbf --manifest-path ../sdk-tests/csdk-anchor-full-derived-test/Cargo.toml diff --git a/forester/src/cli.rs b/forester/src/cli.rs index 467b311604..b6c1769e82 100644 --- a/forester/src/cli.rs +++ b/forester/src/cli.rs @@ -102,8 +102,8 @@ pub struct StartArgs { #[arg( long, env = 
"MAX_CONCURRENT_SENDS", - default_value = "12", - help = "Maximum number of concurrent transaction sends per batch" + default_value = "50", + help = "Maximum number of concurrent transaction sends per batch. Defaults to 50 to match work-item-batch-size." )] pub max_concurrent_sends: usize, @@ -287,6 +287,29 @@ pub struct StartArgs { )] pub lookup_table_address: Option, + #[arg( + long, + env = "MIN_QUEUE_ITEMS", + default_value = "5000", + help = "Minimum queue items before processing V1 state nullifications. Delays processing to allow dedup grouping. Only applies when lookup_table_address is set." + )] + pub min_queue_items: Option, + + #[arg( + long, + env = "ENABLE_V1_MULTI_NULLIFY", + help = "Enable nullify_state_v1_multi instruction for batching 2-4 V1 state nullifications per instruction. Requires --lookup-table-address. Enabled by default.", + default_value = "true" + )] + pub enable_v1_multi_nullify: bool, + + #[arg( + long, + env = "WORK_ITEM_BATCH_SIZE", + help = "Number of queue items to process per batch cycle. Smaller values reduce blockhash expiry risk, larger values reduce per-batch overhead." 
+ )] + pub work_item_batch_size: Option, + #[arg( long, env = "API_SERVER_PORT", diff --git a/forester/src/compressible/ctoken/compressor.rs b/forester/src/compressible/ctoken/compressor.rs index 80e5ca4382..0296ac1747 100644 --- a/forester/src/compressible/ctoken/compressor.rs +++ b/forester/src/compressible/ctoken/compressor.rs @@ -89,26 +89,45 @@ impl CTokenCompressor { let mut rpc = self.rpc_pool.get_connection().await?; - // Pre-check: filter out accounts that no longer exist on-chain + // Pre-check: filter out accounts that no longer exist on-chain or are no longer compressible let all_pubkeys: Vec = account_states.iter().map(|a| a.pubkey).collect(); let on_chain_accounts = rpc .get_multiple_accounts(&all_pubkeys) .await .map_err(|e| anyhow::anyhow!("Failed to pre-check accounts: {:?}", e))?; + let current_slot = rpc + .get_slot() + .await + .map_err(|e| anyhow::anyhow!("Failed to get current slot: {:?}", e))?; + let account_states: Vec<&CTokenAccountState> = account_states .iter() .zip(on_chain_accounts.iter()) .filter_map(|(state, on_chain)| { - if on_chain.is_some() { - Some(state) - } else { - debug!( - "CToken account {} no longer exists on-chain, removing from tracker", - state.pubkey - ); - self.tracker.remove(&state.pubkey); - None + let on_chain = on_chain.as_ref()?; + // Re-validate compressibility with fresh on-chain data + match super::state::revalidate_compressibility( + &on_chain.data, + on_chain.lamports, + current_slot, + ) { + Ok(true) => Some(state), + Ok(false) => { + debug!( + "CToken account {} is no longer compressible, skipping", + state.pubkey + ); + None + } + Err(e) => { + debug!( + "CToken account {} failed compressibility check: {}, removing", + state.pubkey, e + ); + self.tracker.remove(&state.pubkey); + None + } } }) .collect(); diff --git a/forester/src/compressible/ctoken/state.rs b/forester/src/compressible/ctoken/state.rs index 1c71e785f3..dfcf269140 100644 --- a/forester/src/compressible/ctoken/state.rs +++ 
b/forester/src/compressible/ctoken/state.rs @@ -13,6 +13,35 @@ use crate::{ Result, }; +/// Re-validate compressibility using fresh on-chain account data. +/// Returns true if the account is still compressible at the given slot. +pub fn revalidate_compressibility( + account_data: &[u8], + lamports: u64, + current_slot: u64, +) -> Result { + use light_token_interface::state::extensions::ExtensionStruct; + + let account = Token::try_from_slice(account_data) + .map_err(|e| anyhow::anyhow!("Failed to deserialize Token: {:?}", e))?; + + let compression_info = account + .extensions + .as_ref() + .and_then(|exts| { + exts.iter().find_map(|ext| match ext { + ExtensionStruct::Compressible(comp) => Some(&comp.info), + _ => None, + }) + }) + .ok_or_else(|| anyhow::anyhow!("Missing Compressible extension"))?; + + Ok(compression_info + .is_compressible(account_data.len() as u64, current_slot, lamports) + .map_err(|e| anyhow::anyhow!("is_compressible error: {:?}", e))? + .is_some()) +} + fn calculate_compressible_slot(account: &Token, lamports: u64, account_size: usize) -> Result { use light_token_interface::state::extensions::ExtensionStruct; diff --git a/forester/src/config.rs b/forester/src/config.rs index 550828b7c3..ef16f3b5c3 100644 --- a/forester/src/config.rs +++ b/forester/src/config.rs @@ -30,6 +30,14 @@ pub struct ForesterConfig { pub compressible_config: Option, /// Address lookup table for versioned transactions. If None, legacy transactions are used. pub lookup_table_address: Option, + /// Minimum queue items before processing V1 state nullifications. + /// Delays processing to allow dedup grouping. Only applies when lookup_table_address is set. + pub min_queue_items: Option, + /// Enable nullify_state_v1_multi instruction for batching 2-4 V1 state nullifications. + /// Requires lookup_table_address to be set. + pub enable_v1_multi_nullify: bool, + /// Number of queue items to process per batch cycle. Default: 50. 
+ pub work_item_batch_size: usize, } #[derive(Debug, Clone)] @@ -116,7 +124,7 @@ impl Default for GeneralConfig { skip_v2_address_trees: false, tree_ids: vec![], sleep_after_processing_ms: 10_000, - sleep_when_idle_ms: 45_000, + sleep_when_idle_ms: 5_000, queue_polling_mode: QueuePollingMode::Indexer, group_authority: None, helius_rpc: false, @@ -341,7 +349,7 @@ impl ForesterConfig { valid.into_iter().map(|r| r.unwrap()).collect() }, sleep_after_processing_ms: 10_000, - sleep_when_idle_ms: 45_000, + sleep_when_idle_ms: 5_000, queue_polling_mode: args.queue_polling_mode, group_authority: args .group_authority @@ -421,6 +429,9 @@ impl ForesterConfig { }) }) .transpose()?, + min_queue_items: args.min_queue_items, + enable_v1_multi_nullify: args.enable_v1_multi_nullify, + work_item_batch_size: args.work_item_batch_size.unwrap_or(50), }) } @@ -475,6 +486,9 @@ impl ForesterConfig { state_tree_data: vec![], compressible_config: None, lookup_table_address: None, + min_queue_items: None, + enable_v1_multi_nullify: false, + work_item_batch_size: 50, }) } } @@ -495,6 +509,9 @@ impl Clone for ForesterConfig { state_tree_data: self.state_tree_data.clone(), compressible_config: self.compressible_config.clone(), lookup_table_address: self.lookup_table_address, + min_queue_items: self.min_queue_items, + enable_v1_multi_nullify: self.enable_v1_multi_nullify, + work_item_batch_size: self.work_item_batch_size, } } } diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index f52efa1b13..a253b4bad3 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -393,13 +393,19 @@ impl EpochManager { async move { self_clone.check_sol_balance_periodically().await } }); + let queue_metrics_handle = tokio::spawn({ + let self_clone = Arc::clone(&self); + async move { self_clone.update_queue_metrics_periodically().await } + }); + let _guard = scopeguard::guard( ( current_previous_handle, tree_discovery_handle, balance_check_handle, + 
queue_metrics_handle, ), - |(h2, h3, h4)| { + |(h2, h3, h4, h5)| { info!( event = "background_tasks_aborting", run_id = %self.run_id, @@ -408,6 +414,7 @@ impl EpochManager { h2.abort(); h3.abort(); h4.abort(); + h5.abort(); }, ); @@ -491,6 +498,50 @@ impl EpochManager { result } + /// Periodically updates queue_length and queue_capacity Prometheus gauges + /// so Grafana dashboards can show queue trends over time. + async fn update_queue_metrics_periodically(self: Arc) -> Result<()> { + let interval_secs = self.config.general_config.tree_discovery_interval_seconds; + if interval_secs == 0 { + return Ok(()); + } + // Use same interval as tree discovery (default 30s) + let mut interval = tokio::time::interval(Duration::from_secs(interval_secs)); + // Skip first tick — let tree discovery populate the tree list first + interval.tick().await; + + loop { + interval.tick().await; + + let trees = self.trees.lock().await; + let trees_snapshot: Vec<_> = trees.clone(); + drop(trees); + + if trees_snapshot.is_empty() { + continue; + } + + for tree_type in [ + TreeType::StateV1, + TreeType::AddressV1, + TreeType::StateV2, + TreeType::AddressV2, + ] { + if let Err(e) = + crate::run_queue_info(self.config.clone(), &trees_snapshot, tree_type).await + { + debug!( + event = "queue_metrics_update_failed", + run_id = %self.run_id, + tree_type = ?tree_type, + error = ?e, + "Failed to update queue metrics" + ); + } + } + } + } + async fn check_sol_balance_periodically(self: Arc) -> Result<()> { let interval_duration = Duration::from_secs(300); let mut interval = tokio::time::interval(interval_duration); @@ -2147,13 +2198,12 @@ impl EpochManager { } estimated_slot = self.slot_tracker.estimated_current_slot(); - let sleep_duration_ms = if items_processed_this_iteration > 0 { - self.config.general_config.sleep_after_processing_ms - } else { - self.config.general_config.sleep_when_idle_ms - }; - - tokio::time::sleep(Duration::from_millis(sleep_duration_ms)).await; + if 
items_processed_this_iteration == 0 { + // No items processed. Short sleep before re-checking — the queue + // may grow above min_queue_items within this light slot. + tokio::time::sleep(Duration::from_secs(5)).await; + } + // When items were processed, loop immediately to fetch the next batch. } Ok(()) } @@ -3005,12 +3055,25 @@ impl EpochManager { ), confirmation_max_attempts: self.config.transaction_config.confirmation_max_attempts as usize, + min_queue_items: if self.config.enable_v1_multi_nullify + && !self.address_lookup_tables.is_empty() + { + self.config.min_queue_items + } else { + None + }, + enable_presort: self.config.enable_v1_multi_nullify + && !self.address_lookup_tables.is_empty(), + work_item_batch_size: self.config.work_item_batch_size, }; + let alt_snapshot = (*self.address_lookup_tables).clone(); let transaction_builder = Arc::new(EpochManagerTransactions::new( self.rpc_pool.clone(), epoch_info.epoch, self.tx_cache.clone(), + alt_snapshot, + self.config.enable_v1_multi_nullify, )); let num_sent = send_batched_transactions( @@ -3056,6 +3119,7 @@ impl EpochManager { input_queue_hint: Option, output_queue_hint: Option, eligibility_end: Option, + address_lookup_tables: Arc>, ) -> BatchContext { let default_prover_url = "http://127.0.0.1:3001".to_string(); let eligibility_end = eligibility_end.unwrap_or(0); @@ -3105,7 +3169,7 @@ impl EpochManager { output_queue_hint, num_proof_workers: self.config.transaction_config.max_concurrent_batches, forester_eligibility_end_slot: Arc::new(AtomicU64::new(eligibility_end)), - address_lookup_tables: self.address_lookup_tables.clone(), + address_lookup_tables, transaction_policy: self.transaction_policy(), max_batches_per_tree: self.config.transaction_config.max_batches_per_tree, } @@ -3211,7 +3275,14 @@ impl EpochManager { } // No existing processor - create new one - let batch_context = self.build_batch_context(epoch_info, tree_accounts, None, None, None); + let batch_context = self.build_batch_context( + 
epoch_info, + tree_accounts, + None, + None, + None, + self.address_lookup_tables.clone(), + ); let processor = Arc::new(Mutex::new( QueueProcessor::new(batch_context, StateTreeStrategy).await?, )); @@ -3265,7 +3336,14 @@ impl EpochManager { } // No existing processor - create new one - let batch_context = self.build_batch_context(epoch_info, tree_accounts, None, None, None); + let batch_context = self.build_batch_context( + epoch_info, + tree_accounts, + None, + None, + None, + self.address_lookup_tables.clone(), + ); let processor = Arc::new(Mutex::new( QueueProcessor::new(batch_context, AddressTreeStrategy).await?, )); @@ -4459,27 +4537,27 @@ pub async fn run_service( let address_lookup_tables = { if let Some(lut_address) = config.lookup_table_address { let rpc = rpc_pool.get_connection().await?; - match load_lookup_table_async(&*rpc, lut_address).await { - Ok(lut) => { - info!( - event = "lookup_table_loaded", + let lut = load_lookup_table_async(&*rpc, lut_address).await + .map_err(|e| { + error!( + event = "lookup_table_load_failed", run_id = %run_id_for_logs, lookup_table = %lut_address, - address_count = lut.addresses.len(), - "Loaded lookup table" - ); - Arc::new(vec![lut]) - } - Err(e) => { - debug!( - "Lookup table {} not available: {}. Using legacy transactions.", - lut_address, e + error = %e, + "Failed to load lookup table" ); - Arc::new(Vec::new()) - } - } + e + })?; + info!( + event = "lookup_table_loaded", + run_id = %run_id_for_logs, + lookup_table = %lut_address, + address_count = lut.addresses.len(), + "Loaded lookup table" + ); + Arc::new(vec![lut]) } else { - debug!("No lookup table address configured. Using legacy transactions."); + debug!("No lookup table address configured. 
Using v1 state single nullify transactions."); Arc::new(Vec::new()) } }; @@ -4650,6 +4728,9 @@ mod tests { state_tree_data: vec![], compressible_config: None, lookup_table_address: None, + min_queue_items: None, + enable_v1_multi_nullify: false, + work_item_batch_size: 50, } } @@ -4749,6 +4830,7 @@ mod tests { queue_item_data: QueueItemData { hash: [0u8; 32], index: 0, + leaf_index: None, }, }; @@ -4771,6 +4853,7 @@ mod tests { queue_item_data: QueueItemData { hash: [0u8; 32], index: 0, + leaf_index: None, }, }; diff --git a/forester/src/processor/v1/config.rs b/forester/src/processor/v1/config.rs index f2ee05f353..41f1bc0c08 100644 --- a/forester/src/processor/v1/config.rs +++ b/forester/src/processor/v1/config.rs @@ -19,6 +19,16 @@ pub struct SendBatchedTransactionsConfig { pub light_slot_length: u64, pub confirmation_poll_interval: std::time::Duration, pub confirmation_max_attempts: usize, + /// Minimum number of queue items required before processing begins. + /// Only applies to StateV1 trees. When `None`, processing starts immediately. + /// When the timeout deadline is reached, returns 0 (re-scheduled next light slot). + pub min_queue_items: Option, + /// When true, fetch leaf indices from the indexer and sort work items by + /// leaf_index before chunking, so adjacent leaves land in the same batch + /// for better dedup grouping. + pub enable_presort: bool, + /// Number of queue items to process per batch cycle. 
+ pub work_item_batch_size: usize, } #[derive(Debug, Clone, Copy)] diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs index d980b02e32..8af0e9c4ba 100644 --- a/forester/src/processor/v1/helpers.rs +++ b/forester/src/processor/v1/helpers.rs @@ -8,15 +8,20 @@ use account_compression::{ }, }; use forester_utils::{rpc_pool::SolanaRpcPool, utils::wait_for_indexer}; -use light_client::{indexer::Indexer, rpc::Rpc}; +use light_client::{ + indexer::{Indexer, MerkleProof}, + rpc::Rpc, +}; use light_compressed_account::TreeType; use light_registry::account_compression_cpi::sdk::{ - create_nullify_instruction, create_update_address_merkle_tree_instruction, - CreateNullifyInstructionInputs, UpdateAddressMerkleTreeInstructionInputs, + compress_proofs, create_nullify_instruction, create_nullify_state_v1_multi_instruction, + create_update_address_merkle_tree_instruction, CompressedProofs, + CreateNullifyInstructionInputs, CreateNullifyStateV1MultiInstructionInputs, + UpdateAddressMerkleTreeInstructionInputs, }; use solana_program::instruction::Instruction; use tokio::time::Instant; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use crate::{ logging::should_emit_rate_limited_warning, @@ -32,6 +37,16 @@ use crate::{ errors::ForesterError, }; +/// A labeled instruction for logging purposes. +#[derive(Clone)] +pub struct LabeledInstruction { + pub instruction: Instruction, + /// Label for logging, e.g. "StateV1Nullify" or "StateV1MultiNullify(3)" + pub label: String, + /// Number of nullifications in this instruction (1 for single, 2-4 for multi). 
+ pub nullify_count: u32, +} + /// Work items should be of only one type and tree pub async fn fetch_proofs_and_create_instructions( authority: Pubkey, @@ -39,7 +54,8 @@ pub async fn fetch_proofs_and_create_instructions( pool: Arc>, epoch: u64, work_items: &[WorkItem], -) -> crate::Result<(Vec, Vec)> { + use_multi_nullify: bool, +) -> crate::Result<(Vec, Vec)> { let mut proofs = Vec::new(); let mut instructions = vec![]; @@ -79,16 +95,6 @@ pub async fn fetch_proofs_and_create_instructions( None }; - let state_data = if !state_items.is_empty() { - let states: Vec<[u8; 32]> = state_items - .iter() - .map(|item| item.queue_item_data.hash) - .collect(); - Some(states) - } else { - None - }; - let rpc = pool.get_connection().await?; if let Err(e) = wait_for_indexer(&*rpc).await { if should_emit_rate_limited_warning("v1_wait_for_indexer", Duration::from_secs(30)) { @@ -224,6 +230,16 @@ pub async fn fetch_proofs_and_create_instructions( Vec::new() }; + let state_data = if !state_items.is_empty() { + let states: Vec<[u8; 32]> = state_items + .iter() + .map(|item| item.queue_item_data.hash) + .collect(); + Some(states) + } else { + None + }; + let state_proofs = if let Some(states) = state_data { let total_states = states.len(); info!( @@ -360,7 +376,11 @@ pub async fn fetch_proofs_and_create_instructions( }, epoch, ); - instructions.push(instruction); + instructions.push(LabeledInstruction { + instruction, + label: "AddressV1Update".to_string(), + nullify_count: 1, + }); } // Process state proofs and create instructions @@ -372,25 +392,463 @@ pub async fn fetch_proofs_and_create_instructions( )); } - for (item, proof) in state_items.iter().zip(state_proofs.into_iter()) { - proofs.push(MerkleProofType::StateProof(proof.clone())); - - let instruction = create_nullify_instruction( - CreateNullifyInstructionInputs { - nullifier_queue: item.tree_account.queue, - merkle_tree: item.tree_account.merkle_tree, - change_log_indices: vec![proof.root_seq % STATE_MERKLE_TREE_CHANGELOG], 
- leaves_queue_indices: vec![item.queue_item_data.index as u16], - indices: vec![proof.leaf_index], - proofs: vec![proof.proof.clone()], - authority, - derivation, - is_metadata_forester: false, - }, - epoch, + let mut items_with_proofs: Vec<(&WorkItem, MerkleProof)> = state_items + .iter() + .zip(state_proofs.into_iter()) + .map(|(item, proof)| (*item, proof)) + .collect(); + + if use_multi_nullify && items_with_proofs.len() >= 2 { + let groups = group_state_items_for_dedup(&mut items_with_proofs); + + // Push proofs in sorted order (after grouping may have sorted) + for (_, proof) in items_with_proofs.iter() { + proofs.push(MerkleProofType::StateProof(proof.clone())); + } + + let mut count_1 = 0usize; + let mut count_2 = 0usize; + let mut count_3 = 0usize; + let mut count_4 = 0usize; + for g in &groups { + match g.len() { + 1 => count_1 += 1, + 2 => count_2 += 1, + 3 => count_3 += 1, + 4 => count_4 += 1, + _ => {} + } + } + let total_leaves = items_with_proofs.len(); + let total_instructions = groups.len(); + let dedup_savings_pct = if total_leaves > 0 { + ((total_leaves - total_instructions) as f64 / total_leaves as f64 * 100.0) as u32 + } else { + 0 + }; + info!( + event = "v1_nullify_state_v1_multi_grouping", + total_leaves, + groups_of_4 = count_4, + groups_of_3 = count_3, + groups_of_2 = count_2, + singletons = count_1, + total_instructions, + dedup_savings_pct, + "State nullify dedup grouping complete" ); - instructions.push(instruction); + + for group_indices in groups { + if group_indices.len() == 1 { + let (item, proof) = &items_with_proofs[group_indices[0]]; + instructions.push(LabeledInstruction { + instruction: build_nullify_instruction( + item, proof, authority, derivation, epoch, + ), + label: "StateV1Nullify".to_string(), + nullify_count: 1, + }); + } else { + let group_proofs: Vec<[[u8; 32]; 16]> = group_indices + .iter() + .map(|&idx| { + let proof = &items_with_proofs[idx].1.proof; + let arr: [[u8; 32]; 16] = 
proof.as_slice().try_into().map_err(|_| { + anyhow::anyhow!("proof has {} nodes, expected 16", proof.len()) + })?; + Ok(arr) + }) + .collect::>>()?; + let proof_refs: Vec<&[[u8; 32]; 16]> = group_proofs.iter().collect(); + let CompressedProofs { + proof_bitvecs, + nodes, + } = compress_proofs(&proof_refs).ok_or_else(|| { + anyhow::anyhow!( + "compress_proofs failed for group that passed try_compress_group" + ) + })?; + + let first_item = &items_with_proofs[group_indices[0]]; + let change_log_index = (first_item.1.root_seq % STATE_MERKLE_TREE_CHANGELOG) as u16; + + let mut queue_indices = [0u16; 4]; + let mut leaf_indices = [u32::MAX; 4]; + for (slot, &idx) in group_indices.iter().enumerate() { + let (item, proof) = &items_with_proofs[idx]; + queue_indices[slot] = item.queue_item_data.index as u16; + leaf_indices[slot] = proof.leaf_index as u32; + } + + let node_count = nodes.len(); + let instruction = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { + authority, + nullifier_queue: first_item.0.tree_account.queue, + merkle_tree: first_item.0.tree_account.merkle_tree, + change_log_index, + queue_indices, + leaf_indices, + proof_bitvecs, + nodes, + derivation, + is_metadata_forester: false, + }, + epoch, + ); + let group_size = group_indices.len(); + debug!( + event = "v1_nullify_state_v1_multi_instruction", + group_size, + node_count, + ix_data_bytes = instruction.data.len(), + "Created nullify_state_v1_multi instruction" + ); + instructions.push(LabeledInstruction { + instruction, + label: format!("StateV1MultiNullify({})", group_size), + nullify_count: group_size as u32, + }); + } + } + } else { + for (_, proof) in items_with_proofs.iter() { + proofs.push(MerkleProofType::StateProof(proof.clone())); + } + for (item, proof) in items_with_proofs.iter() { + instructions.push(LabeledInstruction { + instruction: build_nullify_instruction(item, proof, authority, derivation, epoch), + label: "StateV1Nullify".to_string(), + 
nullify_count: 1, + }); + } } Ok((proofs, instructions)) } + +fn build_nullify_instruction( + item: &WorkItem, + proof: &MerkleProof, + authority: Pubkey, + derivation: Pubkey, + epoch: u64, +) -> Instruction { + create_nullify_instruction( + CreateNullifyInstructionInputs { + nullifier_queue: item.tree_account.queue, + merkle_tree: item.tree_account.merkle_tree, + change_log_indices: vec![proof.root_seq % STATE_MERKLE_TREE_CHANGELOG], + leaves_queue_indices: vec![item.queue_item_data.index as u16], + indices: vec![proof.leaf_index], + proofs: vec![proof.proof.clone()], + authority, + derivation, + is_metadata_forester: false, + }, + epoch, + ) +} + +/// Groups sorted (WorkItem, MerkleProof) pairs for dedup nullification. +/// Returns a vec of groups: each group is a vec of indices into `items_with_proofs` +/// that can be packed into a single nullify_state_v1_multi instruction (2-4 items), +/// or a singleton for regular nullify. +fn group_state_items_for_dedup( + items_with_proofs: &mut [(&WorkItem, MerkleProof)], +) -> Vec> { + items_with_proofs.sort_by_key(|(_, proof)| proof.leaf_index); + + let n = items_with_proofs.len(); + let mut groups = Vec::new(); + let mut i = 0; + + while i < n { + if i + 4 <= n && try_compress_group(items_with_proofs, i, 4).is_some() { + groups.push((i..i + 4).collect()); + i += 4; + } else if i + 3 <= n && try_compress_group(items_with_proofs, i, 3).is_some() { + groups.push((i..i + 3).collect()); + i += 3; + } else if i + 2 <= n && try_compress_group(items_with_proofs, i, 2).is_some() { + groups.push((i..i + 2).collect()); + i += 2; + } else { + groups.push(vec![i]); + i += 1; + } + } + + groups +} + +/// Attempt to compress a group of proofs starting at `start` with `count` items. +/// Returns the compression result if successful. 
+fn try_compress_group( + items_with_proofs: &[(&WorkItem, MerkleProof)], + start: usize, + count: usize, +) -> Option { + let proof_arrays: Vec<[[u8; 32]; 16]> = (start..start + count) + .map(|idx| items_with_proofs[idx].1.proof.as_slice().try_into().ok()) + .collect::>>()?; + let refs: Vec<&[[u8; 32]; 16]> = proof_arrays.iter().collect(); + compress_proofs(&refs) +} + +#[cfg(test)] +mod tests { + use forester_utils::forester_epoch::TreeAccounts; + use light_compressed_account::TreeType; + use solana_sdk::pubkey::Pubkey; + + use super::*; + use crate::queue_helpers::QueueItemData; + + fn make_work_item() -> WorkItem { + WorkItem { + tree_account: TreeAccounts { + merkle_tree: Pubkey::new_unique(), + queue: Pubkey::new_unique(), + tree_type: TreeType::StateV1, + is_rolledover: false, + owner: Pubkey::new_unique(), + }, + queue_item_data: QueueItemData { + hash: [0u8; 32], + index: 0, + leaf_index: None, + }, + } + } + + /// Create a 16-node proof where all proofs share the same top node (index 15) + /// but lower nodes differ unless leaves are in the same subtree. + fn make_proof(leaf_index: u64, shared_top: [u8; 32]) -> MerkleProof { + let mut proof = [[0u8; 32]; 16]; + // Set unique values per leaf for levels 0..15 + for (level, slot) in proof.iter_mut().enumerate().take(15) { + let mut node = [0u8; 32]; + node[0..8].copy_from_slice(&leaf_index.to_le_bytes()); + node[8] = level as u8; + *slot = node; + } + // All proofs share the same top node + proof[15] = shared_top; + MerkleProof { + hash: [0u8; 32], + leaf_index, + merkle_tree: Pubkey::new_unique(), + proof: proof.to_vec(), + root_seq: 100, + root: [0u8; 32], + } + } + + /// Create proofs that share sibling nodes so compress_proofs succeeds. + /// Adjacent leaves (leaf_index differing only in low bits) share many proof nodes. 
+ fn make_compressible_proofs(leaf_indices: &[u64]) -> Vec { + let shared_top = [0xFFu8; 32]; + let base_proof = { + let mut p = [[0u8; 32]; 16]; + for (level, slot) in p.iter_mut().enumerate().take(15) { + let mut node = [0u8; 32]; + node[0] = level as u8; + node[1] = 0xAA; + *slot = node; + } + p[15] = shared_top; + p + }; + + leaf_indices + .iter() + .map(|&li| { + // All proofs share the same nodes (maximally compressible). + // Only the leaf_index differs. + MerkleProof { + hash: [0u8; 32], + leaf_index: li, + merkle_tree: Pubkey::new_unique(), + proof: base_proof.to_vec(), + root_seq: 100, + root: [0u8; 32], + } + }) + .collect() + } + + /// Describes expected grouping result for assertion. + #[derive(Debug, PartialEq)] + struct GroupingResult { + group_sizes: Vec, + } + + impl GroupingResult { + fn from_groups(groups: &[Vec]) -> Self { + Self { + group_sizes: groups.iter().map(|g| g.len()).collect(), + } + } + } + + #[test] + fn test_group_dedup_empty() { + let mut items: Vec<(&WorkItem, MerkleProof)> = vec![]; + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![] + }, + "Empty input should produce empty grouping" + ); + } + + #[test] + fn test_group_dedup_single_item() { + let work_item = make_work_item(); + let proof = make_proof(0, [0xFFu8; 32]); + let mut items: Vec<(&WorkItem, MerkleProof)> = vec![(&work_item, proof)]; + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![1] + }, + "Single item should produce one singleton group" + ); + } + + #[test] + fn test_group_dedup_2_compressible() { + let work_items: Vec = (0..2).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + 
GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![2] + }, + "2 compressible leaves should form 1 group of 2" + ); + } + + #[test] + fn test_group_dedup_3_compressible() { + let work_items: Vec = (0..3).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![3] + }, + "3 compressible leaves should form 1 group of 3" + ); + } + + #[test] + fn test_group_dedup_4_compressible() { + let work_items: Vec = (0..4).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2, 3]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![4] + }, + "4 compressible leaves should form 1 group of 4" + ); + } + + #[test] + fn test_group_dedup_5_compressible_makes_4_plus_1() { + let work_items: Vec = (0..5).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2, 3, 4]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![4, 1] + }, + "5 compressible leaves should form group of 4 + singleton" + ); + } + + #[test] + fn test_group_dedup_6_compressible_makes_4_plus_2() { + let work_items: Vec = (0..6).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2, 3, 4, 5]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + 
GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![4, 2] + }, + "6 compressible leaves should form group of 4 + group of 2" + ); + } + + #[test] + fn test_group_dedup_incompressible_becomes_singletons() { + let shared_top = [0xFFu8; 32]; + let work_items: Vec = (0..3).map(|_| make_work_item()).collect(); + // Each proof has unique nodes per leaf, so compress_proofs fails when + // total unique nodes exceed NULLIFY_STATE_V1_MULTI_MAX_NODES (26). + // proof_1 contributes 15 nodes; proof_2 has 15 unique => 30 total > 26. + let proofs: Vec = (0..3).map(|i| make_proof(i * 1000, shared_top)).collect(); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + // 15 (proof1) + 15 (proof2 unique) = 30 > 28 max, so pairs fail. + // All 3 become singletons. + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![1, 1, 1] + }, + "Incompressible proofs (30 nodes > 28 max) should all become singletons" + ); + } + + #[test] + fn test_group_dedup_sorts_by_leaf_index() { + let work_items: Vec = (0..4).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[100, 3, 50, 1]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + + let sorted_leaf_indices: Vec = + items.iter().map(|(_, proof)| proof.leaf_index).collect(); + assert_eq!( + sorted_leaf_indices, + vec![1, 3, 50, 100], + "Items should be sorted by leaf_index after grouping" + ); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![4] + }, + "All compressible, should form 1 group of 4" + ); + } + + #[test] + fn test_group_dedup_indices_reference_sorted_positions() { + let work_items: Vec = (0..4).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2, 3]); + let mut items: Vec<(&WorkItem, 
MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + groups, + vec![vec![0, 1, 2, 3]], + "Group indices should reference positions in the sorted items array" + ); + } +} diff --git a/forester/src/processor/v1/send_transaction.rs b/forester/src/processor/v1/send_transaction.rs index b5282bc47a..de762e9cee 100644 --- a/forester/src/processor/v1/send_transaction.rs +++ b/forester/src/processor/v1/send_transaction.rs @@ -9,19 +9,16 @@ use std::{ use forester_utils::{forester_epoch::TreeAccounts, rpc_pool::SolanaRpcPool}; use futures::StreamExt; -use light_client::rpc::Rpc; +use light_client::{indexer::Indexer, rpc::Rpc}; use light_compressed_account::TreeType; use light_registry::utils::get_forester_epoch_pda_from_authority; use solana_sdk::{ - hash::Hash, pubkey::Pubkey, signature::{Keypair, Signature, Signer}, - transaction::Transaction, }; use tokio::time::Instant; -use tracing::{debug, error, info, trace, warn}; +use tracing::{error, info, trace, warn}; -const WORK_ITEM_BATCH_SIZE: usize = 100; use crate::{ epoch_manager::WorkItem, errors::ForesterError, @@ -35,8 +32,6 @@ use crate::{ struct PreparedBatchData { work_items: Vec, - recent_blockhash: Hash, - last_valid_block_height: u64, priority_fee: Option, timeout_deadline: Instant, } @@ -78,7 +73,7 @@ pub async fn send_batched_transactions { + let leaf_index_map: std::collections::HashMap<[u8; 32], u64> = response + .value + .items + .into_iter() + .map(|item| (item.hash, item.leaf_index)) + .collect(); + data.work_items.sort_by_key(|item| { + leaf_index_map + .get(&item.queue_item_data.hash) + .copied() + .unwrap_or(u64::MAX) + }); + info!( + tree = %tree_accounts.merkle_tree, + count = data.work_items.len(), + leaf_indices = leaf_index_map.len(), + "Pre-sorted work items by leaf_index for dedup grouping" + ); + } + Err(e) => { + warn!( + tree = %tree_accounts.merkle_tree, + error = %e, + "Failed to fetch queue leaf indices, proceeding 
without pre-sort" + ); + } + } + } + } + + let build_config = config.build_transaction_batch_config; + let max_concurrent_sends = build_config.max_concurrent_sends.unwrap_or(1).max(1); let effective_max_concurrent_sends = compute_effective_max_concurrent_sends(config, max_concurrent_sends, data.work_items.len()); @@ -109,112 +148,119 @@ pub async fn send_batched_transactions= data.timeout_deadline { - trace!(tree = %tree_accounts.merkle_tree, "Reached global timeout deadline before processing next chunk, stopping."); - break; - } + // Cap total items to stay within the merkle tree changelog capacity. + const MAX_ITEMS_PER_CYCLE: usize = 1400; + let items_to_process = if data.work_items.len() > MAX_ITEMS_PER_CYCLE { + &data.work_items[..MAX_ITEMS_PER_CYCLE] + } else { + &data.work_items + }; - // Refresh blockhash if it's getting stale - if last_blockhash_refresh.elapsed() > BLOCKHASH_REFRESH_INTERVAL { - match pool.get_connection().await { - Ok(mut rpc) => match rpc.get_latest_blockhash().await { - Ok((new_hash, new_height)) => { - recent_blockhash = new_hash; - last_valid_block_height = new_height; - last_blockhash_refresh = Instant::now(); - debug!(tree = %tree_accounts.merkle_tree, "Refreshed blockhash"); - } - Err(e) => { - warn!(tree = %tree_accounts.merkle_tree, "Failed to refresh blockhash: {:?}", e); - } - }, - Err(e) => { - warn!(tree = %tree_accounts.merkle_tree, "Failed to get RPC for blockhash refresh: {:?}", e); + // Process all chunks concurrently: each chunk fetches proofs, builds, and sends in parallel. 
+ let chunks: Vec> = items_to_process + .chunks(work_item_batch_size) + .map(|c| c.to_vec()) + .collect(); + + let num_chunks = chunks.len(); + info!( + tree = %tree_accounts.merkle_tree, + "Processing {} concurrent chunks of up to {} items each", + num_chunks, work_item_batch_size + ); + + let chunk_futures: Vec<_> = chunks + .into_iter() + .map(|work_chunk| { + let pool = Arc::clone(&pool); + let transaction_builder = Arc::clone(&transaction_builder); + let cancel_signal = Arc::clone(&operation_cancel_signal); + let num_sent = Arc::clone(&num_sent_transactions); + let payer = payer.insecure_clone(); + let derivation = *derivation; + let tree_id = tree_accounts.merkle_tree; + let timeout_deadline = data.timeout_deadline; + let confirmation_max_attempts = config.confirmation_max_attempts; + let confirmation_poll_interval = config.confirmation_poll_interval; + + async move { + // Safety margin: stop 3s before deadline to avoid sending txs + // that land after our light slot ends (ForesterNotEligible). 
+ let safe_deadline = timeout_deadline - std::time::Duration::from_secs(3); + if cancel_signal.load(Ordering::SeqCst) || Instant::now() >= safe_deadline { + return Ok(()); } - } - } - trace!(tree = %tree_accounts.merkle_tree, "Processing chunk of size {}", work_chunk.len()); - let build_start_time = Instant::now(); - - let (transactions_to_send, chunk_last_valid_block_height) = match transaction_builder - .build_signed_transaction_batch( - payer, - derivation, - &recent_blockhash, - last_valid_block_height, - data.priority_fee, - work_chunk, - config.build_transaction_batch_config, - ) - .await - { - Ok(res) => res, - Err(e) => { - error!(tree = %tree_accounts.merkle_tree, "Failed to build transaction batch: {:?}", e); - continue; - } - }; - trace!(tree = %tree_accounts.merkle_tree, "Built {} transactions in {:?}", transactions_to_send.len(), build_start_time.elapsed()); + // Each chunk gets a fresh blockhash + let (recent_blockhash, last_valid_block_height) = { + let mut rpc = pool.get_connection().await.map_err(ForesterError::from)?; + rpc.get_latest_blockhash().await.map_err(|e| { + ForesterError::General { error: format!("Failed to get blockhash: {:?}", e) } + })? 
+ }; + + let build_start_time = Instant::now(); + let (transactions_to_send, _) = match transaction_builder + .build_signed_transaction_batch( + &payer, + &derivation, + &recent_blockhash, + last_valid_block_height, + data.priority_fee, + &work_chunk, + build_config, + ) + .await + { + Ok(res) => res, + Err(e) => { + error!(tree = %tree_id, "Failed to build transaction batch: {:?}", e); + return Ok(()); + } + }; + trace!(tree = %tree_id, "Built {} transactions in {:?}", transactions_to_send.len(), build_start_time.elapsed()); - if Instant::now() >= data.timeout_deadline { - trace!(tree = %tree_accounts.merkle_tree, "Reached global timeout deadline after building transactions, stopping."); - break; - } + if transactions_to_send.is_empty() || Instant::now() >= safe_deadline { + return Ok(()); + } - if transactions_to_send.is_empty() { - trace!(tree = %tree_accounts.merkle_tree, "Built batch resulted in 0 transactions, skipping send for this chunk."); - continue; - } + let send_context = ChunkSendContext { + pool: Arc::clone(&pool), + max_concurrent_sends: effective_max_concurrent_sends, + timeout_deadline, + cancel_signal: Arc::clone(&cancel_signal), + num_sent_transactions: Arc::clone(&num_sent), + confirmation: ConfirmationConfig { + max_attempts: confirmation_max_attempts as u32, + poll_interval: confirmation_poll_interval, + }, + }; + + if let Err(e) = execute_transaction_chunk_sending(transactions_to_send, &send_context).await { + if e.is_forester_not_eligible() { + cancel_signal.store(true, Ordering::SeqCst); + return Err(ForesterError::NotEligible); + } + warn!(tree = %tree_id, error = ?e, "Chunk send finished with recoverable errors"); + } - let send_context = ChunkSendContext { - pool: Arc::clone(&pool), - max_concurrent_sends: effective_max_concurrent_sends, - timeout_deadline: data.timeout_deadline, - cancel_signal: Arc::clone(&operation_cancel_signal), - num_sent_transactions: Arc::clone(&num_sent_transactions), - confirmation: ConfirmationConfig { - 
max_attempts: config.confirmation_max_attempts as u32, - poll_interval: config.confirmation_poll_interval, - }, - }; - - if let Err(e) = execute_transaction_chunk_sending( - transactions_to_send, - chunk_last_valid_block_height, - &send_context, - ) - .await - { - if e.is_forester_not_eligible() { - warn!( - tree = %tree_accounts.merkle_tree, - "Detected ForesterNotEligible while sending V1 transactions; stopping batch loop for re-schedule" - ); - return Err(ForesterError::NotEligible); + Ok::<(), ForesterError>(()) } - warn!( - tree = %tree_accounts.merkle_tree, - error = ?e, - "Chunk send finished with recoverable errors" - ); + }) + .collect(); + + let results = futures::future::join_all(chunk_futures).await; + for result in results { + if let Err(ForesterError::NotEligible) = result { + return Err(ForesterError::NotEligible); } } @@ -224,6 +270,7 @@ pub async fn send_batched_transactions( payer_pubkey: &Pubkey, derivation: &Pubkey, @@ -232,6 +279,7 @@ async fn prepare_batch_prerequisites( tree_accounts: TreeAccounts, transaction_builder: &T, start_time: Instant, + min_queue_items: Option, ) -> Result> { let tree_id_str = tree_accounts.merkle_tree.to_string(); @@ -266,22 +314,30 @@ async fn prepare_batch_prerequisites( if queue_item_data.is_empty() { trace!(tree = %tree_id_str, "Queue is empty, no transactions to send."); - return Ok(None); // Return None to indicate no work + return Ok(None); } - let (recent_blockhash, last_valid_block_height, priority_fee) = { - let mut rpc = pool.get_connection().await.map_err(|e| { + if let Some(min) = min_queue_items { + if tree_accounts.tree_type == TreeType::StateV1 && queue_item_data.len() < min { + trace!( + tree = %tree_id_str, + queue_len = queue_item_data.len(), + min_queue_items = min, + "Queue below min_queue_items threshold, skipping" + ); + return Ok(None); + } + } + + let priority_fee = { + let rpc = pool.get_connection().await.map_err(|e| { error!( tree = %tree_id_str, - "Failed to get RPC for 
blockhash/priority fee: {:?}", + "Failed to get RPC for priority fee: {:?}", e ); ForesterError::RpcPool(e) })?; - let r_blockhash = rpc.get_latest_blockhash().await.map_err(|e| { - error!(tree = %tree_id_str, "Failed to get latest blockhash: {:?}", e); - ForesterError::Rpc(e) - })?; let forester_epoch_pda_pubkey = get_forester_epoch_pda_from_authority(derivation, transaction_builder.epoch()).0; let account_keys = vec![ @@ -290,13 +346,12 @@ async fn prepare_batch_prerequisites( tree_accounts.queue, tree_accounts.merkle_tree, ]; - let priority_fee = PriorityFeeConfig { + PriorityFeeConfig { compute_unit_price: config.build_transaction_batch_config.compute_unit_price, enable_priority_fees: config.build_transaction_batch_config.enable_priority_fees, } .resolve(&*rpc, account_keys) - .await?; - (r_blockhash.0, r_blockhash.1, priority_fee) + .await? }; let work_items: Vec = queue_item_data @@ -311,8 +366,6 @@ async fn prepare_batch_prerequisites( Ok(Some(PreparedBatchData { work_items, - recent_blockhash, - last_valid_block_height, priority_fee, timeout_deadline, })) @@ -343,8 +396,7 @@ fn compute_effective_max_concurrent_sends( } async fn execute_transaction_chunk_sending( - transactions: Vec, - last_valid_block_height: u64, + transactions: Vec, context: &ChunkSendContext, ) -> std::result::Result<(), ForesterError> { if transactions.is_empty() { @@ -358,17 +410,20 @@ async fn execute_transaction_chunk_sending( let timeout_deadline = context.timeout_deadline; let max_concurrent_sends = context.max_concurrent_sends; let confirmation = context.confirmation; - let transaction_send_futures = transactions.into_iter().map(|tx| { + let transaction_send_futures = transactions.into_iter().map(|prepared_transaction| { let pool_clone = Arc::clone(&pool); let cancel_signal_clone = Arc::clone(&cancel_signal); let num_sent_transactions_clone = Arc::clone(&num_sent_transactions); + let tx_label = prepared_transaction.label().to_string(); async move { if 
cancel_signal_clone.load(Ordering::SeqCst) || Instant::now() >= timeout_deadline { return TransactionSendResult::Cancelled; // Or Timeout } - let tx_signature = tx.signatures.first().copied().unwrap_or_default(); + let tx_signature = prepared_transaction + .signature() + .unwrap_or_default(); let tx_signature_str = tx_signature.to_string(); match pool_clone.get_connection().await { @@ -379,8 +434,6 @@ async fn execute_transaction_chunk_sending( } let send_time = Instant::now(); - let prepared_transaction = - PreparedTransaction::legacy(tx, last_valid_block_height); match prepared_transaction .send(&mut *rpc, Some(confirmation), Some(timeout_deadline)) .await @@ -388,10 +441,11 @@ async fn execute_transaction_chunk_sending( Ok(signature) => { if !cancel_signal_clone.load(Ordering::SeqCst) { num_sent_transactions_clone.fetch_add(1, Ordering::SeqCst); - trace!( - tx.signature = %signature, - elapsed = ?send_time.elapsed(), - "Transaction sent and confirmed successfully" + info!( + "tx sent: {} type={} e2e={}ms", + signature, + tx_label, + send_time.elapsed().as_millis(), ); TransactionSendResult::Success(signature) } else { diff --git a/forester/src/processor/v1/tx_builder.rs b/forester/src/processor/v1/tx_builder.rs index 463cc0b2bf..36fee1eafc 100644 --- a/forester/src/processor/v1/tx_builder.rs +++ b/forester/src/processor/v1/tx_builder.rs @@ -6,8 +6,9 @@ use forester_utils::rpc_pool::SolanaRpcPool; use light_client::rpc::Rpc; use solana_program::hash::Hash; use solana_sdk::{ + address_lookup_table::AddressLookupTableAccount, + instruction::Instruction, signature::{Keypair, Signer}, - transaction::Transaction, }; use tokio::sync::Mutex; use tracing::{trace, warn}; @@ -18,7 +19,9 @@ use crate::{ tx_cache::ProcessedHashCache, v1::{config::BuildTransactionBatchConfig, helpers::fetch_proofs_and_create_instructions}, }, - smart_transaction::{create_smart_transaction, CreateSmartTransactionConfig}, + smart_transaction::{ + create_smart_transaction, 
CreateSmartTransactionConfig, PreparedTransaction, + }, Result, }; @@ -35,7 +38,7 @@ pub trait TransactionBuilder: Send + Sync { priority_fee: Option, work_items: &[WorkItem], config: BuildTransactionBatchConfig, - ) -> Result<(Vec, u64)>; + ) -> Result<(Vec, u64)>; } pub struct EpochManagerTransactions { @@ -43,6 +46,8 @@ pub struct EpochManagerTransactions { pub epoch: u64, pub phantom: std::marker::PhantomData, pub processed_hash_cache: Arc>, + pub address_lookup_tables: Vec, + pub enable_v1_multi_nullify: bool, } impl EpochManagerTransactions { @@ -50,12 +55,16 @@ impl EpochManagerTransactions { pool: Arc>, epoch: u64, cache: Arc>, + address_lookup_tables: Vec, + enable_v1_multi_nullify: bool, ) -> Self { Self { pool, epoch, phantom: std::marker::PhantomData, processed_hash_cache: cache, + address_lookup_tables, + enable_v1_multi_nullify, } } } @@ -75,7 +84,7 @@ impl TransactionBuilder for EpochManagerTransactions { priority_fee: Option, work_items: &[WorkItem], config: BuildTransactionBatchConfig, - ) -> Result<(Vec, u64)> { + ) -> Result<(Vec, u64)> { let mut cache = self.processed_hash_cache.lock().await; let work_items: Vec<&WorkItem> = work_items @@ -115,6 +124,8 @@ impl TransactionBuilder for EpochManagerTransactions { .map(|&item| item.clone()) .collect::>(); + let use_multi_nullify = + self.enable_v1_multi_nullify && !self.address_lookup_tables.is_empty(); let mut transactions = vec![]; let all_instructions = match fetch_proofs_and_create_instructions( payer.pubkey(), @@ -122,6 +133,7 @@ impl TransactionBuilder for EpochManagerTransactions { self.pool.clone(), self.epoch, work_items.as_slice(), + use_multi_nullify, ) .await { @@ -142,19 +154,44 @@ impl TransactionBuilder for EpochManagerTransactions { } }; - let batch_size = config.batch_size.max(1) as usize; + let batch_size = if !self.address_lookup_tables.is_empty() { + 1 + } else { + config.batch_size.max(1) as usize + }; - for instruction_chunk in all_instructions.chunks(batch_size) { - let 
(transaction, _) = create_smart_transaction(CreateSmartTransactionConfig { + for labeled_chunk in all_instructions.chunks(batch_size) { + let label = labeled_chunk + .iter() + .map(|li| li.label.as_str()) + .collect::>() + .join("+"); + let instructions: Vec = labeled_chunk + .iter() + .map(|li| li.instruction.clone()) + .collect(); + + // Dynamic CU based on number of nullifications in the instruction. + let nullify_count: u32 = labeled_chunk.iter().map(|li| li.nullify_count).sum(); + let dynamic_cu_limit = Some(match nullify_count { + 1 => 300_000, + 2 => 600_000, + 3 => 900_000, + _ => 1_000_000, + }); + + let prepared = create_smart_transaction(CreateSmartTransactionConfig { payer: payer.insecure_clone(), - instructions: instruction_chunk.to_vec(), + instructions, recent_blockhash: *recent_blockhash, compute_unit_price: priority_fee, - compute_unit_limit: config.compute_unit_limit, + compute_unit_limit: dynamic_cu_limit, last_valid_block_height, + address_lookup_tables: self.address_lookup_tables.clone(), }) - .await?; - transactions.push(transaction); + .await? + .with_label(label); + transactions.push(prepared); } if !transactions.is_empty() { diff --git a/forester/src/queue_helpers.rs b/forester/src/queue_helpers.rs index f4a7dac704..d9adcda049 100644 --- a/forester/src/queue_helpers.rs +++ b/forester/src/queue_helpers.rs @@ -164,6 +164,8 @@ pub struct QueueLengthAndCapacity { pub struct QueueItemData { pub hash: [u8; 32], pub index: usize, + /// Leaf index in the Merkle tree. Available when fetched from indexer. + pub leaf_index: Option, } /// Result of fetching V1 queue data, including items and capacity. 
@@ -230,7 +232,11 @@ pub async fn fetch_queue_item_data( .filter(|(index, _, is_pending)| { *index >= start_index as usize && *index < end_index && *is_pending }) - .map(|(index, hash, _)| QueueItemData { hash, index }) + .map(|(index, hash, _)| QueueItemData { + hash, + index, + leaf_index: None, + }) .collect(); tracing::debug!( diff --git a/forester/src/smart_transaction.rs b/forester/src/smart_transaction.rs index 38df9f70be..91149905eb 100644 --- a/forester/src/smart_transaction.rs +++ b/forester/src/smart_transaction.rs @@ -66,6 +66,7 @@ pub struct CreateSmartTransactionConfig { pub compute_unit_limit: Option, pub instructions: Vec, pub last_valid_block_height: u64, + pub address_lookup_tables: Vec, } pub struct SendSmartTransactionConfig<'a> { @@ -201,10 +202,10 @@ fn with_compute_budget_instructions( /// whether it's a legacy or versioned smart transaction. The transaction's send configuration can also be changed, if provided /// /// # Returns -/// An optimized `Transaction` and the `last_valid_block_height` +/// A `PreparedTransaction` (legacy or versioned) and the `last_valid_block_height` pub async fn create_smart_transaction( config: CreateSmartTransactionConfig, -) -> Result<(Transaction, u64), RpcError> { +) -> Result { let payer_pubkey: Pubkey = config.payer.pubkey(); let final_instructions = with_compute_budget_instructions( config.instructions, @@ -214,10 +215,28 @@ pub async fn create_smart_transaction( }, ); - let mut tx = Transaction::new_with_payer(&final_instructions, Some(&payer_pubkey)); - tx.sign(&[&config.payer], config.recent_blockhash); - - Ok((tx, config.last_valid_block_height)) + if config.address_lookup_tables.is_empty() { + let mut tx = Transaction::new_with_payer(&final_instructions, Some(&payer_pubkey)); + tx.sign(&[&config.payer], config.recent_blockhash); + Ok(PreparedTransaction::legacy( + tx, + config.last_valid_block_height, + )) + } else { + let message = v0::Message::try_compile( + &payer_pubkey, + &final_instructions, + 
&config.address_lookup_tables, + config.recent_blockhash, + ) + .map_err(|e| RpcError::CustomError(format!("Failed to compile v0 message: {}", e)))?; + let tx = VersionedTransaction::try_new(VersionedMessage::V0(message), &[&config.payer]) + .map_err(|e| RpcError::SigningError(e.to_string()))?; + Ok(PreparedTransaction::versioned( + tx, + config.last_valid_block_height, + )) + } } pub async fn send_transaction_with_policy( @@ -251,9 +270,11 @@ pub async fn send_transaction_with_policy( .await } -pub(crate) struct PreparedTransaction { +pub struct PreparedTransaction { transaction: PreparedTransactionKind, last_valid_block_height: u64, + /// Optional label for logging (e.g. "StateV1MultiNullify(4)") + label: Option, } enum PreparedTransactionKind { @@ -266,10 +287,31 @@ impl PreparedTransaction { Self { transaction: PreparedTransactionKind::Legacy(transaction), last_valid_block_height, + label: None, } } - fn signature(&self) -> Option { + pub(crate) fn versioned( + transaction: VersionedTransaction, + last_valid_block_height: u64, + ) -> Self { + Self { + transaction: PreparedTransactionKind::Versioned(transaction), + last_valid_block_height, + label: None, + } + } + + pub(crate) fn with_label(mut self, label: String) -> Self { + self.label = Some(label); + self + } + + pub(crate) fn label(&self) -> &str { + self.label.as_deref().unwrap_or("V1Nullify") + } + + pub(crate) fn signature(&self) -> Option { match &self.transaction { PreparedTransactionKind::Legacy(transaction) => transaction.signatures.first().copied(), PreparedTransactionKind::Versioned(transaction) => { @@ -360,10 +402,10 @@ async fn prepare_transaction( transaction .try_sign(signers, blockhash) .map_err(|e| RpcError::SigningError(e.to_string()))?; - Ok(PreparedTransaction { - transaction: PreparedTransactionKind::Legacy(transaction), + Ok(PreparedTransaction::legacy( + transaction, last_valid_block_height, - }) + )) } else { let message = v0::Message::try_compile(payer, &final_instructions, 
address_lookup_tables, blockhash) @@ -372,10 +414,10 @@ async fn prepare_transaction( })?; let transaction = VersionedTransaction::try_new(VersionedMessage::V0(message), signers) .map_err(|e| RpcError::SigningError(e.to_string()))?; - Ok(PreparedTransaction { - transaction: PreparedTransactionKind::Versioned(transaction), + Ok(PreparedTransaction::versioned( + transaction, last_valid_block_height, - }) + )) } } diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index 1727ed108b..c11500f29a 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -38,6 +38,9 @@ use light_compressed_token::process_transfer::{ use light_hasher::Poseidon; use light_program_test::accounts::test_accounts::TestAccounts; use light_prover_client::prover::spawn_prover; +use light_registry::account_compression_cpi::sdk::{ + forester_lookup_table_accounts, ForesterLookupTableParams, +}; use light_test_utils::{ actions::{create_compressible_token_account, CreateCompressibleTokenAccountInputs}, conversions::sdk_to_program_token_data, @@ -189,13 +192,52 @@ fn is_v2_address_test_enabled() -> bool { env::var("TEST_V2_ADDRESS").unwrap_or_else(|_| "true".to_string()) == "true" } +/// Creates an on-chain Address Lookup Table populated with the accounts +/// needed for nullify_state_v1_multi instructions. Returns the ALT address. 
+async fn create_forester_alt(rpc: &mut R, payer: &Keypair, env: &TestAccounts) -> Pubkey { + use light_client::rpc::lut::instruction::{create_lookup_table, extend_lookup_table}; + + let slot = rpc.get_slot().await.unwrap(); + let (create_ix, alt_address) = create_lookup_table(payer.pubkey(), payer.pubkey(), slot); + rpc.create_and_send_transaction(&[create_ix], &payer.pubkey(), &[payer]) + .await + .unwrap(); + + let params = ForesterLookupTableParams { + v1_state_trees: env + .v1_state_trees + .iter() + .map(|t| (t.merkle_tree, t.nullifier_queue)) + .collect(), + v1_address_trees: env + .v1_address_trees + .iter() + .map(|t| (t.merkle_tree, t.queue)) + .collect(), + v2_state_trees: env + .v2_state_trees + .iter() + .map(|t| (t.merkle_tree, t.output_queue)) + .collect(), + v2_address_trees: env.v2_address_trees.clone(), + }; + let addresses = forester_lookup_table_accounts(¶ms); + let extend_ix = + extend_lookup_table(alt_address, payer.pubkey(), Some(payer.pubkey()), addresses); + rpc.create_and_send_transaction(&[extend_ix], &payer.pubkey(), &[payer]) + .await + .unwrap(); + + alt_address +} + #[tokio::test(flavor = "multi_thread", worker_threads = 16)] #[serial] async fn e2e_test() { let state_tree_params = InitStateTreeAccountsInstructionData::test_default(); let env = TestAccounts::get_local_test_validator_accounts(); println!("env {:?}", env); - let config = ForesterConfig { + let mut config = ForesterConfig { external_services: ExternalServicesConfig { rpc_url: get_rpc_url(), ws_rpc_url: Some(get_ws_rpc_url()), @@ -259,6 +301,9 @@ async fn e2e_test() { max_concurrent_batches: 10, pda_programs: vec![], }), + min_queue_items: None, + enable_v1_multi_nullify: false, + work_item_batch_size: 50, }; let test_mode = TestMode::from_env(); @@ -296,6 +341,17 @@ async fn e2e_test() { .await; } + // Create unified ALT for all forester operations. + // v0::Message::try_compile selects relevant entries per instruction automatically. 
+ let alt_addr = create_forester_alt(&mut rpc, &env.protocol.forester, &env).await; + println!("Created forester ALT: {}", alt_addr); + config.lookup_table_address = Some(alt_addr); + + if is_v1_state_test_enabled() { + config.min_queue_items = Some(10); + config.enable_v1_multi_nullify = true; + } + // Get initial state for V1 state tree if enabled let pre_state_v1_root = if is_v1_state_test_enabled() { let (_, _, root) = get_initial_merkle_tree_state( @@ -490,6 +546,31 @@ async fn e2e_test() { ) .await; + // Spawn a slot advancement task so the forester doesn't get stuck waiting + // for epoch registration windows (surfpool offline mode doesn't auto-advance slots). + let slot_advance_rpc_url = config.external_services.rpc_url.clone(); + let slot_advance_handle = tokio::spawn(async move { + let advance_rpc = LightClient::new(LightClientConfig { + url: slot_advance_rpc_url, + commitment_config: None, + photon_url: None, + fetch_active_tree: false, + }) + .await + .unwrap(); + loop { + let current_slot = match advance_rpc.get_slot().await { + Ok(s) => s, + Err(_) => break, + }; + let target = current_slot + 50; + if advance_rpc.warp_to_slot(target).await.is_err() { + break; + } + tokio::time::sleep(Duration::from_secs(2)).await; + } + }); + wait_for_work_report( &mut work_report_receiver, &state_tree_params, @@ -497,6 +578,8 @@ async fn e2e_test() { ) .await; + slot_advance_handle.abort(); + // Verify root changes based on enabled tests if is_v1_state_test_enabled() { if let Some(pre_root) = pre_state_v1_root { diff --git a/forester/tests/legacy/priority_fee_test.rs b/forester/tests/legacy/priority_fee_test.rs index 46f5b2afe9..37f2ed010a 100644 --- a/forester/tests/legacy/priority_fee_test.rs +++ b/forester/tests/legacy/priority_fee_test.rs @@ -87,6 +87,8 @@ async fn test_priority_fee_request() { tree_ids: vec![], enable_compressible: false, lookup_table_address: None, + min_queue_items: None, + enable_v1_multi_nullify: false, api_server_port: 8080, 
api_server_public_bind: false, group_authority: None, diff --git a/forester/tests/legacy/test_utils.rs b/forester/tests/legacy/test_utils.rs index d535665d71..fdac97ba34 100644 --- a/forester/tests/legacy/test_utils.rs +++ b/forester/tests/legacy/test_utils.rs @@ -122,6 +122,10 @@ pub fn forester_config() -> ForesterConfig { address_tree_data: vec![], state_tree_data: vec![], compressible_config: None, + lookup_table_address: None, + min_queue_items: None, + enable_v1_multi_nullify: false, + work_item_batch_size: 50, } } diff --git a/forester/tests/priority_fee_test.rs b/forester/tests/priority_fee_test.rs index 976898455a..5067342d2f 100644 --- a/forester/tests/priority_fee_test.rs +++ b/forester/tests/priority_fee_test.rs @@ -92,6 +92,8 @@ async fn test_priority_fee_request() { tree_ids: vec![], enable_compressible: true, lookup_table_address: None, + min_queue_items: None, + enable_v1_multi_nullify: false, api_server_port: 8080, group_authority: None, light_pda_programs: vec![], @@ -101,6 +103,7 @@ async fn test_priority_fee_request() { fallback_indexer_url: None, rpc_pool_failure_threshold: 3, rpc_pool_primary_probe_interval_secs: 30, + work_item_batch_size: None, }; let config = ForesterConfig::new_for_start(&args).expect("Failed to create config"); diff --git a/forester/tests/test_nullify_state_v1_multi_tx_size.rs b/forester/tests/test_nullify_state_v1_multi_tx_size.rs new file mode 100644 index 0000000000..68a8713afe --- /dev/null +++ b/forester/tests/test_nullify_state_v1_multi_tx_size.rs @@ -0,0 +1,121 @@ +use light_registry::account_compression_cpi::sdk::{ + create_nullify_state_v1_multi_instruction, nullify_state_v1_multi_lookup_table_accounts, + CreateNullifyStateV1MultiInstructionInputs, NULLIFY_STATE_V1_MULTI_MAX_NODES, +}; +use solana_sdk::{ + compute_budget::ComputeBudgetInstruction, + hash::Hash, + message::{v0, AddressLookupTableAccount, VersionedMessage}, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + 
transaction::VersionedTransaction, +}; + +/// Validates that a nullify_state_v1_multi v0 transaction with ALT and SetComputeUnitLimit +/// fits within the 1232-byte Solana transaction size limit. +/// +/// This is a pure serialization check -- no validator needed. +#[test] +fn test_nullify_state_v1_multi_v0_transaction_size() { + let authority = Keypair::new(); + let merkle_tree = Pubkey::new_unique(); + let nullifier_queue = Pubkey::new_unique(); + + // Worst case: MAX_NODES unique nodes + let nullify_ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { + authority: authority.pubkey(), + nullifier_queue, + merkle_tree, + change_log_index: 1400, + queue_indices: [100, 200, 300, 400], + leaf_indices: [67_000_000, 67_000_001, 67_000_002, 67_000_003], + proof_bitvecs: [0; 4], + nodes: vec![[0xAA; 32]; NULLIFY_STATE_V1_MULTI_MAX_NODES], + derivation: authority.pubkey(), + is_metadata_forester: false, + }, + 0, + ); + + // Compute budget instructions (both are added by the forester's smart_transaction logic) + let compute_limit_ix = ComputeBudgetInstruction::set_compute_unit_limit(600_000); + let compute_price_ix = ComputeBudgetInstruction::set_compute_unit_price(1); + + // Build synthetic ALT with the known accounts (includes ComputeBudget program ID) + let alt_accounts = nullify_state_v1_multi_lookup_table_accounts(merkle_tree, nullifier_queue); + let alt_address = Pubkey::new_unique(); + let alt = AddressLookupTableAccount { + key: alt_address, + addresses: alt_accounts, + }; + + // Compile v0 message with all instructions + let blockhash = Hash::default(); + let message = v0::Message::try_compile( + &authority.pubkey(), + &[compute_price_ix, compute_limit_ix, nullify_ix], + &[alt], + blockhash, + ) + .expect("Failed to compile v0 message"); + + // Create signed transaction + let versioned_message = VersionedMessage::V0(message); + let tx = VersionedTransaction::try_new(versioned_message, &[&authority]) + .expect("Failed to 
create versioned transaction"); + + let serialized = tx.message.serialize(); + // Full tx = compact-u16 sig count (1) + signatures (64 * n) + serialized message + let tx_size = 1 + tx.signatures.len() * 64 + serialized.len(); + + let ix_data_size = 8 + 2 + 8 + 16 + 16 + 4 + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32; + + println!( + "nullify_state_v1_multi v0 transaction size: {} bytes (limit: 1232)", + tx_size + ); + println!( + " nullify_state_v1_multi instruction data: {} bytes", + ix_data_size + ); + println!( + " max_nodes: {} ({} bytes payload)", + NULLIFY_STATE_V1_MULTI_MAX_NODES, + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32 + ); + println!(" margin: {} bytes", 1232_i64 - tx_size as i64); + + // Breakdown + println!("\nTransaction breakdown:"); + println!(" signatures: {}", tx.signatures.len() * 64 + 1); + let static_keys = tx.message.static_account_keys(); + println!(" static account keys: {}", static_keys.len()); + for (i, key) in static_keys.iter().enumerate() { + let label = if *key == authority.pubkey() { + "authority (signer)" + } else if *key == light_registry::ID { + "registry program" + } else if *key == solana_sdk::compute_budget::ID { + "compute budget program" + } else { + "unknown" + }; + println!(" [{}] {} ({})", i, key, label); + } + if let VersionedMessage::V0(m) = &tx.message { + println!(" address table lookups: {}", m.address_table_lookups.len()); + for alt_lookup in &m.address_table_lookups { + println!(" writable indices: {:?}", alt_lookup.writable_indexes); + println!(" readonly indices: {:?}", alt_lookup.readonly_indexes); + } + }; + + assert!( + tx_size <= 1232, + "nullify_state_v1_multi v0 transaction is {} bytes, exceeds 1232 byte limit by {} bytes", + tx_size, + tx_size - 1232 + ); +} diff --git a/forester/tests/test_utils.rs b/forester/tests/test_utils.rs index 4225503a19..4ae9352482 100644 --- a/forester/tests/test_utils.rs +++ b/forester/tests/test_utils.rs @@ -136,6 +136,9 @@ pub fn forester_config() -> ForesterConfig { state_tree_data: 
vec![], compressible_config: None, lookup_table_address: None, + min_queue_items: None, + enable_v1_multi_nullify: false, + work_item_batch_size: 50, } } diff --git a/program-tests/registry-test/tests/nullify_state_v1_multi.rs b/program-tests/registry-test/tests/nullify_state_v1_multi.rs new file mode 100644 index 0000000000..eea55b4b7e --- /dev/null +++ b/program-tests/registry-test/tests/nullify_state_v1_multi.rs @@ -0,0 +1,616 @@ +use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; +use forester_utils::account_zero_copy::{get_concurrent_merkle_tree, get_hash_set}; +use light_client::rpc::Rpc; +use light_compressed_account::TreeType; +use light_hasher::Poseidon; +use light_program_test::{program_test::LightProgramTest, ProgramTestConfig}; +use light_registry::account_compression_cpi::sdk::{ + compress_proofs, create_nullify_state_v1_multi_instruction, CompressedProofs, + CreateNullifyStateV1MultiInstructionInputs, +}; +use light_test_utils::e2e_test_env::init_program_test_env; +use serial_test::serial; +use solana_sdk::signature::{Keypair, Signer}; + +#[serial] +#[tokio::test] +async fn test_nullify_state_v1_multi_4_leaves() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let (mut state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + 
Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + for _ in 0..4 { + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic(&forester_keypair, &Keypair::new().pubkey(), None) + .await + .unwrap(); + } + + (e2e_env.indexer.state_merkle_trees[0].clone(), e2e_env.rpc) + }; + + // Read on-chain state + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let pre_root = onchain_tree.root(); + let change_log_index = onchain_tree.changelog_index(); + + // Collect 4 unmarked items + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!( + items_to_nullify.len() >= 4, + "Need at least 4 items in nullifier queue, got {}", + items_to_nullify.len() + ); + + // Get proofs + let mut leaf_indices = Vec::new(); + let mut proofs = Vec::new(); + for (_, leaf) in items_to_nullify.iter().take(4) { + let leaf_index = state_tree_bundle.merkle_tree.get_leaf_index(leaf).unwrap(); + leaf_indices.push(leaf_index); + let proof: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + let proof_arr: [[u8; 32]; 16] = proof.try_into().unwrap(); + proofs.push(proof_arr); + } + + let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); + let CompressedProofs { + proof_bitvecs, + nodes, + } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 4 leaves"); + + let queue_indices: [u16; 4] = [ + items_to_nullify[0].0 as u16, + items_to_nullify[1].0 as u16, + items_to_nullify[2].0 as u16, + 
items_to_nullify[3].0 as u16, + ]; + let leaf_indices_arr: [u32; 4] = [ + leaf_indices[0] as u32, + leaf_indices[1] as u32, + leaf_indices[2] as u32, + leaf_indices[3] as u32, + ]; + + let ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { + authority: forester_keypair.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_indices, + leaf_indices: leaf_indices_arr, + proof_bitvecs, + nodes, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await + .unwrap(); + + // Verify all 4 queue items marked + let nullifier_queue_post = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + for (idx, (queue_idx, _)) in items_to_nullify.iter().take(4).enumerate() { + let bucket = nullifier_queue_post + .get_bucket(*queue_idx) + .unwrap() + .unwrap(); + assert!( + bucket.sequence_number.is_some(), + "Queue item {} should be marked after nullify_state_v1_multi", + idx + ); + } + + // Verify root changed + let onchain_tree_post = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + assert_ne!( + pre_root, + onchain_tree_post.root(), + "Root should have changed after nullify_state_v1_multi" + ); + + // Locally update and verify root match + for &li in &leaf_indices { + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], li) + .unwrap(); + } + assert_eq!( + onchain_tree_post.root(), + state_tree_bundle.merkle_tree.root(), + "On-chain root should match local tree after nullifying all 4 leaves" + ); +} + +#[serial] +#[tokio::test] +async fn test_nullify_state_v1_multi_3_leaves() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + 
.await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let (mut state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + for _ in 0..3 { + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic(&forester_keypair, &Keypair::new().pubkey(), None) + .await + .unwrap(); + } + + (e2e_env.indexer.state_merkle_trees[0].clone(), e2e_env.rpc) + }; + + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let change_log_index = onchain_tree.changelog_index(); + + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!(items_to_nullify.len() >= 3); + + let mut leaf_indices = Vec::new(); + let mut proofs = Vec::new(); + for (_, leaf) in items_to_nullify.iter().take(3) { + let leaf_index = state_tree_bundle.merkle_tree.get_leaf_index(leaf).unwrap(); + leaf_indices.push(leaf_index); + let proof: Vec<[u8; 32]> = state_tree_bundle + 
.merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + proofs.push(<[[u8; 32]; 16]>::try_from(proof).unwrap()); + } + + let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); + let CompressedProofs { + proof_bitvecs, + nodes, + } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 3 leaves"); + + let ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { + authority: forester_keypair.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_indices: [ + items_to_nullify[0].0 as u16, + items_to_nullify[1].0 as u16, + items_to_nullify[2].0 as u16, + 0, + ], + leaf_indices: [ + leaf_indices[0] as u32, + leaf_indices[1] as u32, + leaf_indices[2] as u32, + u32::MAX, + ], + proof_bitvecs, + nodes, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await + .unwrap(); + + // Verify 3 queue items marked + let nullifier_queue_post = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + for (idx, (queue_idx, _)) in items_to_nullify.iter().take(3).enumerate() { + let bucket = nullifier_queue_post + .get_bucket(*queue_idx) + .unwrap() + .unwrap(); + assert!( + bucket.sequence_number.is_some(), + "Queue item {} should be marked", + idx + ); + } + + // Locally update and verify root match + for &li in &leaf_indices { + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], li) + .unwrap(); + } + let onchain_tree_post = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + assert_eq!( + onchain_tree_post.root(), + state_tree_bundle.merkle_tree.root(), + ); +} + +#[serial] +#[tokio::test] +async fn test_nullify_state_v1_multi_2_leaves() { + 
let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let (mut state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + for _ in 0..2 { + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic(&forester_keypair, &Keypair::new().pubkey(), None) + .await + .unwrap(); + } + + (e2e_env.indexer.state_merkle_trees[0].clone(), e2e_env.rpc) + }; + + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let change_log_index = onchain_tree.changelog_index(); + + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!(items_to_nullify.len() >= 2); + + let mut leaf_indices = Vec::new(); + let mut proofs = Vec::new(); + for (_, leaf) in items_to_nullify.iter().take(2) { + let leaf_index = 
state_tree_bundle.merkle_tree.get_leaf_index(leaf).unwrap(); + leaf_indices.push(leaf_index); + let proof: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + proofs.push(<[[u8; 32]; 16]>::try_from(proof).unwrap()); + } + + let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); + let CompressedProofs { + proof_bitvecs, + nodes, + } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 2 leaves"); + + let ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { + authority: forester_keypair.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_indices: [ + items_to_nullify[0].0 as u16, + items_to_nullify[1].0 as u16, + 0, + 0, + ], + leaf_indices: [ + leaf_indices[0] as u32, + leaf_indices[1] as u32, + u32::MAX, + u32::MAX, + ], + proof_bitvecs, + nodes, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await + .unwrap(); + + // Verify 2 queue items marked + let nullifier_queue_post = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + for (idx, (queue_idx, _)) in items_to_nullify.iter().take(2).enumerate() { + let bucket = nullifier_queue_post + .get_bucket(*queue_idx) + .unwrap() + .unwrap(); + assert!( + bucket.sequence_number.is_some(), + "Queue item {} should be marked", + idx + ); + } + + // Locally update and verify root match + for &li in &leaf_indices { + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], li) + .unwrap(); + } + let onchain_tree_post = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + assert_eq!( + onchain_tree_post.root(), + 
state_tree_bundle.merkle_tree.root(), + ); +} + +#[serial] +#[tokio::test] +async fn test_nullify_state_v1_multi_1_leaf_fails() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let (state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic(&forester_keypair, &Keypair::new().pubkey(), None) + .await + .unwrap(); + + (e2e_env.indexer.state_merkle_trees[0].clone(), e2e_env.rpc) + }; + + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let change_log_index = onchain_tree.changelog_index(); + + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!(!items_to_nullify.is_empty()); + + let leaf_index = state_tree_bundle + .merkle_tree + 
.get_leaf_index(&items_to_nullify[0].1) + .unwrap(); + let proof: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + let proof_arr: [[u8; 32]; 16] = proof.try_into().unwrap(); + + let nodes: Vec<[u8; 32]> = proof_arr.to_vec(); + + let ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { + authority: forester_keypair.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_indices: [items_to_nullify[0].0 as u16, 0, 0, 0], + leaf_indices: [leaf_index as u32, u32::MAX, u32::MAX, u32::MAX], + proof_bitvecs: [0; 4], + nodes, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + let result = rpc + .create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await; + + assert!( + result.is_err(), + "nullify_state_v1_multi with 1 leaf should fail with InvalidProofEncoding" + ); +} diff --git a/programs/registry/Cargo.toml b/programs/registry/Cargo.toml index 0423906a08..c173733030 100644 --- a/programs/registry/Cargo.toml +++ b/programs/registry/Cargo.toml @@ -31,6 +31,7 @@ light-system-program-anchor = { workspace = true, features = ["cpi"] } light-account-checks = { workspace = true, features = ["solana", "std", "msg"] } light-program-profiler = { workspace = true } light-macros = { workspace = true } +bitvec = { workspace = true } borsh = { workspace = true } solana-account-info = { workspace = true } solana-instruction = { workspace = true } diff --git a/programs/registry/src/account_compression_cpi/nullify.rs b/programs/registry/src/account_compression_cpi/nullify.rs index 818e2b43a8..89a16c4594 100644 --- a/programs/registry/src/account_compression_cpi/nullify.rs +++ b/programs/registry/src/account_compression_cpi/nullify.rs @@ -2,8 +2,9 @@ use account_compression::{ 
program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED, StateMerkleTreeAccount, }; use anchor_lang::prelude::*; +use bitvec::prelude::*; -use crate::epoch::register_epoch::ForesterEpochPda; +use crate::{epoch::register_epoch::ForesterEpochPda, errors::RegistryError}; #[derive(Accounts)] pub struct NullifyLeaves<'info> { @@ -61,3 +62,102 @@ pub fn process_nullify( proofs, ) } + +/// Issues a single nullify_leaves CPI for one leaf. +#[inline(always)] +fn nullify_single_leaf_cpi( + ctx: &Context<NullifyLeaves>, + signer_seeds: &[&[&[u8]]], + change_log_index: u64, + queue_index: u16, + leaf_index: u64, + proof: Vec<[u8; 32]>, +) -> Result<()> { + let accounts = account_compression::cpi::accounts::NullifyLeaves { + authority: ctx.accounts.cpi_authority.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.to_account_info()), + log_wrapper: ctx.accounts.log_wrapper.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + nullifier_queue: ctx.accounts.nullifier_queue.to_account_info(), + fee_payer: Some(ctx.accounts.authority.to_account_info()), + }; + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + account_compression::cpi::nullify_leaves( + cpi_ctx, + vec![change_log_index], + vec![queue_index], + vec![leaf_index], + vec![proof], + ) +} + +/// Determines proof count from leaf_indices sentinel values. +/// Returns Err(InvalidProofEncoding) if fewer than 2 leaves are specified.
+pub fn count_from_leaf_indices(leaf_indices: &[u32; 4]) -> Result<usize> { + match *leaf_indices { + [a, b, u32::MAX, u32::MAX] if a != u32::MAX && b != u32::MAX => Ok(2), + [a, b, c, u32::MAX] if a != u32::MAX && b != u32::MAX && c != u32::MAX => Ok(3), + [a, b, c, d] if a != u32::MAX && b != u32::MAX && c != u32::MAX && d != u32::MAX => Ok(4), + _ => err!(RegistryError::InvalidProofEncoding), + } +} + +/// Reconstructs a 16-node Merkle proof by selecting nodes from a +/// deduplicated pool. The bitvec selects which pool nodes belong to +/// this proof (exactly 16 bits must be set). +fn reconstruct_proof(nodes: &[[u8; 32]], bits: u32) -> Result<[[u8; 32]; 16]> { + let bv = bits.view_bits::<Lsb0>(); + let mut proof = [[0u8; 32]; 16]; + let mut proof_idx = 0; + for i in 0..nodes.len() { + if bv[i] { + if proof_idx >= 16 { + return err!(RegistryError::InvalidProofEncoding); + } + proof[proof_idx] = nodes[i]; + proof_idx += 1; + } + } + if proof_idx != 16 { + return err!(RegistryError::InvalidProofEncoding); + } + Ok(proof) +} + +pub fn process_nullify_state_v1_multi( + ctx: &Context<NullifyLeaves>, + count: usize, + change_log_index: u16, + queue_indices: [u16; 4], + leaf_indices: [u32; 4], + proof_bitvecs: [u32; 4], + nodes: Vec<[u8; 32]>, +) -> Result<()> { + if nodes.len() > 32 { + return err!(RegistryError::InvalidProofEncoding); + } + + let bump = ctx.bumps.cpi_authority; + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + let change_log_index_u64 = change_log_index as u64; + + for i in 0..count { + let proof = reconstruct_proof(&nodes, proof_bitvecs[i])?; + nullify_single_leaf_cpi( + ctx, + signer_seeds, + change_log_index_u64, + queue_indices[i], + leaf_indices[i] as u64, + proof.to_vec(), + )?; + } + + Ok(()) +} diff --git a/programs/registry/src/account_compression_cpi/sdk.rs b/programs/registry/src/account_compression_cpi/sdk.rs index f002c35499..bc29789c0b 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++
b/programs/registry/src/account_compression_cpi/sdk.rs @@ -62,6 +62,189 @@ pub fn create_nullify_instruction( } } +/// Returns the common accounts shared by all forester lookup tables. +fn common_lookup_table_accounts() -> Vec<Pubkey> { + let (cpi_authority, _) = get_cpi_authority_pda(); + let registered_program_pda = get_registered_program_pda(&crate::ID); + vec![ + cpi_authority, + registered_program_pda, + account_compression::ID, + Pubkey::new_from_array(NOOP_PUBKEY), + crate::ID, + solana_sdk::compute_budget::ID, + ] +} + +/// Max number of 32-byte nodes in the dedup encoding vec. +/// Verified by tx size test (forester/tests/test_nullify_state_v1_multi_tx_size.rs). +/// With ALT, SetComputeUnitLimit + SetComputeUnitPrice ixs, and worst-case nodes, +/// the tx fits within the 1232 byte limit. +pub const NULLIFY_STATE_V1_MULTI_MAX_NODES: usize = 27; + +#[derive(Clone, Debug, PartialEq)] +pub struct CreateNullifyStateV1MultiInstructionInputs { + pub authority: Pubkey, + pub nullifier_queue: Pubkey, + pub merkle_tree: Pubkey, + pub change_log_index: u16, + pub queue_indices: [u16; 4], + pub leaf_indices: [u32; 4], + pub proof_bitvecs: [u32; 4], + pub nodes: Vec<[u8; 32]>, + pub derivation: Pubkey, + pub is_metadata_forester: bool, +} + +pub fn create_nullify_state_v1_multi_instruction( + inputs: CreateNullifyStateV1MultiInstructionInputs, + epoch: u64, +) -> Instruction { + let register_program_pda = get_registered_program_pda(&crate::ID); + let registered_forester_pda = if inputs.is_metadata_forester { + None + } else { + Some(get_forester_epoch_pda_from_authority(&inputs.derivation, epoch).0) + }; + let (cpi_authority, _bump) = get_cpi_authority_pda(); + let instruction_data = crate::instruction::NullifyStateV1Multi { + change_log_index: inputs.change_log_index, + queue_indices: inputs.queue_indices, + leaf_indices: inputs.leaf_indices, + proof_bitvecs: inputs.proof_bitvecs, + nodes: inputs.nodes, + }; + + let accounts = crate::accounts::NullifyLeaves { + authority:
inputs.authority, + registered_forester_pda, + registered_program_pda: register_program_pda, + nullifier_queue: inputs.nullifier_queue, + merkle_tree: inputs.merkle_tree, + log_wrapper: NOOP_PUBKEY.into(), + cpi_authority, + account_compression_program: account_compression::ID, + }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} + +/// Result of compressing 2-4 Merkle proofs into a deduplicated node pool. +pub struct CompressedProofs { + /// Bitvecs for proofs 2-4, each selecting 16 nodes from the pool. + /// proof_1 is always nodes[0..16]. + pub proof_bitvecs: [u32; 4], + pub nodes: Vec<[u8; 32]>, +} + +/// Compresses 2-4 full 16-node Merkle proofs into a deduplicated node pool. +/// The pool is built level-by-level so that iterating set bits in ascending +/// order produces nodes in proof-level order. +/// Proof 1 is always nodes[0..16]. Proofs 2-4 each have a bitvec selecting +/// which pool nodes form that proof. +/// Returns `None` if fewer than 2, more than 4 proofs, or too many unique nodes. +pub fn compress_proofs(proofs: &[&[[u8; 32]; 16]]) -> Option<CompressedProofs> { + use bitvec::prelude::*; + + if proofs.len() < 2 || proofs.len() > 4 { + return None; + } + + // Build level-ordered deduplicated pool. For each level, add unique + // nodes across all proofs. Ascending pool index == ascending level.
+ let mut nodes: Vec<[u8; 32]> = Vec::new(); + let mut pool_indices = [[0usize; 16]; 4]; + + for level in 0..16 { + for (proof_idx, proof) in proofs.iter().enumerate() { + if let Some(idx) = nodes.iter().position(|n| *n == proof[level]) { + pool_indices[proof_idx][level] = idx; + } else { + pool_indices[proof_idx][level] = nodes.len(); + nodes.push(proof[level]); + } + } + } + + if nodes.len() > NULLIFY_STATE_V1_MULTI_MAX_NODES || nodes.len() > 32 { + return None; + } + + let mut proof_bitvecs = [0u32; 4]; + for (proof_idx, _) in proofs.iter().enumerate() { + let bv = proof_bitvecs[proof_idx].view_bits_mut::<Lsb0>(); + for level in 0..16 { + bv.set(pool_indices[proof_idx][level], true); + } + } + + Some(CompressedProofs { + proof_bitvecs, + nodes, + }) +} + +/// Returns the known accounts for populating an address lookup table +/// for nullify_state_v1_multi v0 transactions. Includes ComputeBudget program ID +/// since nullify_state_v1_multi transactions also include a SetComputeUnitLimit instruction. +pub fn nullify_state_v1_multi_lookup_table_accounts( + merkle_tree: Pubkey, + nullifier_queue: Pubkey, +) -> Vec<Pubkey> { + let mut accounts = common_lookup_table_accounts(); + accounts.push(merkle_tree); + accounts.push(nullifier_queue); + accounts +} + +/// Parameters for creating a unified forester address lookup table +/// that covers all tree types. +pub struct ForesterLookupTableParams { + /// (merkle_tree, nullifier_queue) + pub v1_state_trees: Vec<(Pubkey, Pubkey)>, + /// (merkle_tree, queue) + pub v1_address_trees: Vec<(Pubkey, Pubkey)>, + /// (merkle_tree, output_queue) + pub v2_state_trees: Vec<(Pubkey, Pubkey)>, + /// merkle_tree (== queue for v2 address trees) + pub v2_address_trees: Vec<Pubkey>, +} + +/// Returns a deduplicated list of accounts for a unified forester ALT +/// that covers all tree types. `v0::Message::try_compile` automatically +/// selects which ALT entries to reference per instruction, so unused +/// entries cost nothing.
+pub fn forester_lookup_table_accounts(params: &ForesterLookupTableParams) -> Vec<Pubkey> { + let mut accounts = common_lookup_table_accounts(); + + for (merkle_tree, nullifier_queue) in &params.v1_state_trees { + push_if_absent(&mut accounts, *merkle_tree); + push_if_absent(&mut accounts, *nullifier_queue); + } + for (merkle_tree, queue) in &params.v1_address_trees { + push_if_absent(&mut accounts, *merkle_tree); + push_if_absent(&mut accounts, *queue); + } + for (merkle_tree, output_queue) in &params.v2_state_trees { + push_if_absent(&mut accounts, *merkle_tree); + push_if_absent(&mut accounts, *output_queue); + } + for merkle_tree in &params.v2_address_trees { + push_if_absent(&mut accounts, *merkle_tree); + } + + accounts +} + +fn push_if_absent(accounts: &mut Vec<Pubkey>, key: Pubkey) { + if !accounts.contains(&key) { + accounts.push(key); + } +} + #[derive(Clone, Debug, PartialEq)] pub struct CreateMigrateStateInstructionInputs { pub authority: Pubkey, @@ -545,3 +728,215 @@ pub fn create_rollover_batch_address_tree_instruction( data: instruction_data.data(), } } + +#[cfg(test)] +mod tests { + use bitvec::prelude::*; + + use super::*; + + /// Simulates on-chain reconstruction for testing round-trips.
+ fn reconstruct_proof(nodes: &[[u8; 32]], bits: u32) -> [[u8; 32]; 16] { + let bv = bits.view_bits::<Lsb0>(); + let mut proof = [[0u8; 32]; 16]; + let mut proof_idx = 0; + for (i, node) in nodes.iter().enumerate() { + if bv[i] { + proof[proof_idx] = *node; + proof_idx += 1; + } + } + assert_eq!(proof_idx, 16, "bitvec must select exactly 16 nodes"); + proof + } + + #[test] + fn test_nullify_state_v1_multi_instruction_data_size() { + let instruction_data = crate::instruction::NullifyStateV1Multi { + change_log_index: 0, + queue_indices: [0; 4], + leaf_indices: [0; 4], + proof_bitvecs: [0; 4], + nodes: vec![[0u8; 32]; NULLIFY_STATE_V1_MULTI_MAX_NODES], + }; + let data = instruction_data.data(); + // 8 disc + 2 changelog + 8 queue_indices + 16 leaf_indices + 16 proof_bitvecs + // + 4 vec_prefix + N*32 nodes + let expected = 8 + 2 + 8 + 16 + 16 + 4 + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32; + assert_eq!( + data.len(), + expected, + "nullify_state_v1_multi instruction data must be exactly {} bytes, got {}", + expected, + data.len() + ); + } + + #[test] + fn test_nullify_state_v1_multi_instruction_accounts() { + let authority = Pubkey::new_unique(); + let inputs = CreateNullifyStateV1MultiInstructionInputs { + authority, + nullifier_queue: Pubkey::new_unique(), + merkle_tree: Pubkey::new_unique(), + change_log_index: 0, + queue_indices: [0, 1, 2, 3], + leaf_indices: [0, 1, 2, 3], + proof_bitvecs: [0; 4], + nodes: vec![[0u8; 32]; 16], + derivation: authority, + is_metadata_forester: false, + }; + let ix = create_nullify_state_v1_multi_instruction(inputs, 0); + assert_eq!(ix.accounts.len(), 8, "expected 8 accounts"); + } + + #[test] + fn test_compress_proofs_round_trip() { + let mut proof_1 = [[0u8; 32]; 16]; + let mut proof_2 = [[0u8; 32]; 16]; + let mut proof_3 = [[0u8; 32]; 16]; + let mut proof_4 = [[0u8; 32]; 16]; + + for (i, elem) in proof_1.iter_mut().enumerate() { + *elem = [i as u8 + 1; 32]; + } + + // proof_2: differs at levels 0-3, shares 4-15 (total: 16 + 4 = 20) + for
i in 0..16 { + if i < 4 { + proof_2[i] = [i as u8 + 100; 32]; + } else { + proof_2[i] = proof_1[i]; + } + } + + // proof_3: differs at levels 0-2, shares 3-15 (total: 20 + 3 = 23) + for i in 0..16 { + if i < 3 { + proof_3[i] = [i as u8 + 200; 32]; + } else { + proof_3[i] = proof_1[i]; + } + } + + // proof_4: differs at levels 0-1, shares 2-15 (total: 23 + 2 = 25) + for i in 0..16 { + if i < 2 { + proof_4[i] = [(i as u8).wrapping_add(250); 32]; + } else { + proof_4[i] = proof_1[i]; + } + } + + let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2, &proof_3, &proof_4]; + let result = compress_proofs(&proofs); + assert!(result.is_some(), "compress_proofs should succeed"); + let compressed = result.unwrap(); + + let r_proof_1 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[0]); + assert_eq!(r_proof_1, proof_1); + + let r_proof_2 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[1]); + assert_eq!(r_proof_2, proof_2); + + let r_proof_3 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[2]); + assert_eq!(r_proof_3, proof_3); + + let r_proof_4 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[3]); + assert_eq!(r_proof_4, proof_4); + } + + #[test] + fn test_compress_proofs_returns_none_when_too_many_nodes() { + let make_proof = |base: u8| -> [[u8; 32]; 16] { + let mut p = [[0u8; 32]; 16]; + for (i, slot) in p.iter_mut().enumerate() { + *slot = [base.wrapping_add(i as u8); 32]; + } + p + }; + let p1 = make_proof(1); + let p2 = make_proof(50); + let p3 = make_proof(100); + let p4 = make_proof(150); + + let proofs: Vec<&[[u8; 32]; 16]> = vec![&p1, &p2, &p3, &p4]; + let result = compress_proofs(&proofs); + assert!( + result.is_none(), + "should return None when no sharing leads to > MAX_NODES" + ); + } + + #[test] + fn test_compress_proofs_2_proofs() { + let mut proof_1 = [[0u8; 32]; 16]; + let mut proof_2 = [[0u8; 32]; 16]; + for i in 0..16 { + proof_1[i] = [i as u8 + 1; 32]; + if i % 2 == 0 { + proof_2[i] = 
proof_1[i]; + } else { + proof_2[i] = [i as u8 + 100; 32]; + } + } + + let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2]; + let result = compress_proofs(&proofs); + assert!(result.is_some(), "2 proofs should compress"); + let compressed = result.unwrap(); + + // Unused bitvecs should be 0 + assert_eq!(compressed.proof_bitvecs[2], 0); + assert_eq!(compressed.proof_bitvecs[3], 0); + + // 16 for proof_1 + 8 unique for proof_2 (odd indices) + assert_eq!(compressed.nodes.len(), 16 + 8); + + // Round-trip + let r_proof_1 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[0]); + assert_eq!(r_proof_1, proof_1); + + let r_proof_2 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[1]); + assert_eq!(r_proof_2, proof_2); + } + + #[test] + fn test_compress_proofs_3_proofs() { + let mut proof_1 = [[0u8; 32]; 16]; + let mut proof_2 = [[0u8; 32]; 16]; + let mut proof_3 = [[0u8; 32]; 16]; + for i in 0..16 { + proof_1[i] = [i as u8 + 1; 32]; + if i % 2 == 0 { + proof_2[i] = proof_1[i]; + } else { + proof_2[i] = [i as u8 + 50; 32]; + } + if i % 3 == 0 { + proof_3[i] = proof_1[i]; + } else { + proof_3[i] = proof_2[i]; + } + } + + let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2, &proof_3]; + let result = compress_proofs(&proofs); + assert!(result.is_some(), "3 proofs should compress"); + let compressed = result.unwrap(); + assert_eq!( + compressed.proof_bitvecs[3], 0, + "proof_4 bitvec should be 0 for 3 proofs" + ); + + let r_proof_1 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[0]); + assert_eq!(r_proof_1, proof_1); + + let r_proof_2 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[1]); + assert_eq!(r_proof_2, proof_2); + + let r_proof_3 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[2]); + assert_eq!(r_proof_3, proof_3); + } +} diff --git a/programs/registry/src/errors.rs b/programs/registry/src/errors.rs index 7c445d2ca3..ce2c5699b0 100644 --- a/programs/registry/src/errors.rs +++ 
b/programs/registry/src/errors.rs @@ -42,4 +42,6 @@ pub enum RegistryError { BorrowAccountDataFailed, #[msg("Failed to serialize instruction data")] SerializationFailed, + #[msg("Invalid proof dedup encoding")] + InvalidProofEncoding, } diff --git a/programs/registry/src/lib.rs b/programs/registry/src/lib.rs index a21b58cd4b..b862870de6 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -420,6 +420,40 @@ pub mod light_registry { ) } + /// Nullifies 2-4 leaves in a single instruction via sequential CPIs. + /// Uses proof deduplication: nearby leaves share Merkle proof nodes at + /// common ancestor levels. The `nodes` vec is a deduplicated pool of + /// unique nodes, and each proof's bitvec selects which 16 nodes from + /// the pool form that proof. + pub fn nullify_state_v1_multi<'info>( + ctx: Context<'_, '_, '_, 'info, NullifyLeaves<'info>>, + change_log_index: u16, + queue_indices: [u16; 4], + leaf_indices: [u32; 4], + proof_bitvecs: [u32; 4], + nodes: Vec<[u8; 32]>, + ) -> Result<()> { + let metadata = ctx.accounts.merkle_tree.load()?.metadata; + let count = account_compression_cpi::nullify::count_from_leaf_indices(&leaf_indices)?; + check_forester( + &metadata, + ctx.accounts.authority.key(), + ctx.accounts.nullifier_queue.key(), + &mut ctx.accounts.registered_forester_pda, + count as u64 * DEFAULT_WORK_V1, + )?; + + process_nullify_state_v1_multi( + &ctx, + count, + change_log_index, + queue_indices, + leaf_indices, + proof_bitvecs, + nodes, + ) + } + #[allow(clippy::too_many_arguments)] pub fn update_address_merkle_tree( ctx: Context, diff --git a/sdk-libs/client/src/indexer/indexer_trait.rs b/sdk-libs/client/src/indexer/indexer_trait.rs index b051ab3c1d..8def890fd5 100644 --- a/sdk-libs/client/src/indexer/indexer_trait.rs +++ b/sdk-libs/client/src/indexer/indexer_trait.rs @@ -11,7 +11,7 @@ use super::{ GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, IndexerError, IndexerRpcConfig, MerkleProof, 
NewAddressProofWithContext, PaginatedOptions, QueueElementsV2Options, RetryConfig, }; -use crate::indexer::QueueElementsResult; +use crate::indexer::{QueueElementsResult, QueueLeafIndex}; // TODO: remove all references in input types. #[async_trait] pub trait Indexer: std::marker::Send + std::marker::Sync { @@ -181,6 +181,16 @@ pub trait Indexer: std::marker::Send + std::marker::Sync { config: Option, ) -> Result, IndexerError>; + /// Returns lightweight (hash, queue_index, leaf_index) tuples for nullifier queue items. + /// Used by the forester to sort queue items before grouping for multi-nullify. + async fn get_queue_leaf_indices( + &self, + merkle_tree_pubkey: [u8; 32], + limit: u16, + start_index: Option, + config: Option, + ) -> Result>, IndexerError>; + /// Returns information about all queues in the system. /// Includes tree pubkey, queue pubkey, queue type, and queue size for each queue. async fn get_queue_info( diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs index cc3167459c..999f76895a 100644 --- a/sdk-libs/client/src/indexer/mod.rs +++ b/sdk-libs/client/src/indexer/mod.rs @@ -18,8 +18,8 @@ pub use types::{ AddressQueueData, AddressWithTree, ColdContext, ColdData, CompressedAccount, CompressedTokenAccount, Hash, InputQueueData, InterfaceTreeInfo, MerkleProof, MerkleProofWithContext, NewAddressProofWithContext, NextTreeInfo, OutputQueueData, - OwnerBalance, ProofOfLeaf, QueueElementsResult, QueueInfo, QueueInfoResult, RootIndex, - SignatureWithMetadata, SolanaAccountData, StateMerkleTreeAccounts, StateQueueData, + OwnerBalance, ProofOfLeaf, QueueElementsResult, QueueInfo, QueueInfoResult, QueueLeafIndex, + RootIndex, SignatureWithMetadata, SolanaAccountData, StateMerkleTreeAccounts, StateQueueData, TokenAccountInterface, TokenBalance, TreeInfo, ValidityProofWithContext, }; mod options; diff --git a/sdk-libs/client/src/indexer/photon_indexer.rs b/sdk-libs/client/src/indexer/photon_indexer.rs index 26d16ae235..bd0a14fbb4 
100644 --- a/sdk-libs/client/src/indexer/photon_indexer.rs +++ b/sdk-libs/client/src/indexer/photon_indexer.rs @@ -1701,6 +1701,60 @@ impl Indexer for PhotonIndexer { .await } + async fn get_queue_leaf_indices( + &self, + merkle_tree_pubkey: [u8; 32], + limit: u16, + start_index: Option, + config: Option, + ) -> Result>, IndexerError> { + let config = config.unwrap_or_default(); + self.retry(config.retry_config, || async { + let tree_hash = + photon_api::types::Hash(bs58::encode(&merkle_tree_pubkey).into_string()); + + let params = photon_api::types::PostGetQueueLeafIndicesBodyParams { + tree: tree_hash, + limit, + start_index, + }; + let request = photon_api::apis::default_api::make_get_queue_leaf_indices_body(params); + + let result = photon_api::apis::default_api::get_queue_leaf_indices_post( + &self.configuration, + request, + ) + .await?; + + Self::check_api_error("get_queue_leaf_indices", result.error)?; + let api_response = Self::extract_result("get_queue_leaf_indices", result.result)?; + + if api_response.context.slot < config.slot { + return Err(IndexerError::IndexerNotSyncedToSlot); + } + + let items = api_response + .value + .into_iter() + .map(|item| { + Ok(super::QueueLeafIndex { + hash: super::base58::decode_base58_to_fixed_array(&item.hash.0)?, + queue_index: item.queue_index, + leaf_index: item.leaf_index, + }) + }) + .collect::, IndexerError>>()?; + + Ok(Response { + context: super::response::Context { + slot: api_response.context.slot, + }, + value: Items { items }, + }) + }) + .await + } + async fn get_subtrees( &self, _merkle_tree_pubkey: [u8; 32], diff --git a/sdk-libs/client/src/indexer/types/mod.rs b/sdk-libs/client/src/indexer/types/mod.rs index f91504c8e3..1b5f34c590 100644 --- a/sdk-libs/client/src/indexer/types/mod.rs +++ b/sdk-libs/client/src/indexer/types/mod.rs @@ -16,7 +16,8 @@ pub use proof::{ NewAddressProofWithContext, RootIndex, ValidityProofWithContext, }; pub use queue::{ - AddressQueueData, InputQueueData, OutputQueueData, 
QueueElementsResult, StateQueueData, + AddressQueueData, InputQueueData, OutputQueueData, QueueElementsResult, QueueLeafIndex, + StateQueueData, }; pub use signature::SignatureWithMetadata; pub use token::{CompressedTokenAccount, OwnerBalance, TokenBalance}; diff --git a/sdk-libs/client/src/indexer/types/queue.rs b/sdk-libs/client/src/indexer/types/queue.rs index 40e7cc0f6e..f9a863cecb 100644 --- a/sdk-libs/client/src/indexer/types/queue.rs +++ b/sdk-libs/client/src/indexer/types/queue.rs @@ -138,6 +138,14 @@ impl AddressQueueData { } } +/// Lightweight queue leaf index entry (hash, queue_index, leaf_index) +#[derive(Debug, Clone, PartialEq, Default)] +pub struct QueueLeafIndex { + pub hash: [u8; 32], + pub queue_index: u64, + pub leaf_index: u64, +} + /// V2 Queue Elements Result with deduplicated node data #[derive(Debug, Clone, PartialEq, Default)] pub struct QueueElementsResult { diff --git a/sdk-libs/client/src/rpc/indexer.rs b/sdk-libs/client/src/rpc/indexer.rs index 55c6b069e0..08fbea4748 100644 --- a/sdk-libs/client/src/rpc/indexer.rs +++ b/sdk-libs/client/src/rpc/indexer.rs @@ -7,8 +7,8 @@ use crate::indexer::{ GetCompressedAccountsByOwnerConfig, GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext, OwnerBalance, PaginatedOptions, QueueElementsResult, - QueueElementsV2Options, QueueInfoResult, Response, RetryConfig, SignatureWithMetadata, - TokenBalance, ValidityProofWithContext, + QueueElementsV2Options, QueueInfoResult, QueueLeafIndex, Response, RetryConfig, + SignatureWithMetadata, TokenBalance, ValidityProofWithContext, }; #[async_trait] @@ -200,6 +200,21 @@ impl Indexer for LightClient { .await?) } + async fn get_queue_leaf_indices( + &self, + merkle_tree_pubkey: [u8; 32], + limit: u16, + start_index: Option, + config: Option, + ) -> Result>, IndexerError> { + Ok(self + .indexer + .as_ref() + .ok_or(IndexerError::NotInitialized)? 
+ .get_queue_leaf_indices(merkle_tree_pubkey, limit, start_index, config) + .await?) + } + async fn get_queue_info( &self, config: Option, diff --git a/sdk-libs/photon-api/src/codegen.rs b/sdk-libs/photon-api/src/codegen.rs index 4dd88bda05..0936da0ea0 100644 --- a/sdk-libs/photon-api/src/codegen.rs +++ b/sdk-libs/photon-api/src/codegen.rs @@ -1550,6 +1550,96 @@ All endpoints return AccountV2.*/ Default::default() } } + /**Parameters for requesting input queue leaf indices. +Returns (hash, queue_index, leaf_index) for nullifier queue items.*/ + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "Parameters for requesting input queue leaf indices.\nReturns (hash, queue_index, leaf_index) for nullifier queue items.", + /// "type": "object", + /// "required": [ + /// "limit", + /// "tree" + /// ], + /// "properties": { + /// "limit": { + /// "type": "integer", + /// "format": "uint16", + /// "minimum": 0.0 + /// }, + /// "startIndex": { + /// "type": [ + /// "integer", + /// "null" + /// ], + /// "format": "uint64", + /// "minimum": 0.0 + /// }, + /// "tree": { + /// "$ref": "#/components/schemas/Hash" + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct GetQueueLeafIndicesRequest { + pub limit: u16, + #[serde( + rename = "startIndex", + default, + skip_serializing_if = "::std::option::Option::is_none" + )] + pub start_index: ::std::option::Option, + pub tree: Hash, + } + impl GetQueueLeafIndicesRequest { + pub fn builder() -> builder::GetQueueLeafIndicesRequest { + Default::default() + } + } + ///Response containing queue leaf indices + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "Response containing queue leaf indices", + /// "type": "object", + /// "required": [ + /// "context", + /// "value" + /// ], + /// "properties": { + /// "context": { + /// "$ref": "#/components/schemas/Context" + /// }, + /// "value": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/QueueLeafIndex" + /// } + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct GetQueueLeafIndicesResponse { + pub context: Context, + pub value: ::std::vec::Vec, + } + impl GetQueueLeafIndicesResponse { + pub fn builder() -> builder::GetQueueLeafIndicesResponse { + Default::default() + } + } ///A 32-byte hash represented as a base58 string. /// ///
JSON schema @@ -24678,7 +24768,7 @@ All endpoints return AccountV2.*/ Default::default() } } - ///`PostGetTransactionWithCompressionInfoBody` + ///`PostGetQueueLeafIndicesBody` /// ///
JSON schema /// @@ -24710,17 +24800,32 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getTransactionWithCompressionInfo" + /// "getQueueLeafIndices" /// ] /// }, /// "params": { + /// "description": "Parameters for requesting input queue leaf indices.\nReturns (hash, queue_index, leaf_index) for nullifier queue items.", /// "type": "object", /// "required": [ - /// "signature" + /// "limit", + /// "tree" /// ], /// "properties": { - /// "signature": { - /// "$ref": "#/components/schemas/SerializableSignature" + /// "limit": { + /// "type": "integer", + /// "format": "uint16", + /// "minimum": 0.0 + /// }, + /// "startIndex": { + /// "type": [ + /// "integer", + /// "null" + /// ], + /// "format": "uint64", + /// "minimum": 0.0 + /// }, + /// "tree": { + /// "$ref": "#/components/schemas/Hash" /// } /// }, /// "additionalProperties": false @@ -24730,17 +24835,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoBody { + pub struct PostGetQueueLeafIndicesBody { ///An ID to identify the request. - pub id: PostGetTransactionWithCompressionInfoBodyId, + pub id: PostGetQueueLeafIndicesBodyId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetTransactionWithCompressionInfoBodyJsonrpc, + pub jsonrpc: PostGetQueueLeafIndicesBodyJsonrpc, ///The name of the method to invoke. - pub method: PostGetTransactionWithCompressionInfoBodyMethod, - pub params: PostGetTransactionWithCompressionInfoBodyParams, + pub method: PostGetQueueLeafIndicesBodyMethod, + pub params: PostGetQueueLeafIndicesBodyParams, } - impl PostGetTransactionWithCompressionInfoBody { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoBody { + impl PostGetQueueLeafIndicesBody { + pub fn builder() -> builder::PostGetQueueLeafIndicesBody { Default::default() } } @@ -24770,18 +24875,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoBodyId { + pub enum PostGetQueueLeafIndicesBodyId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyId { + impl ::std::fmt::Display for PostGetQueueLeafIndicesBodyId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyId { + impl ::std::str::FromStr for PostGetQueueLeafIndicesBodyId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -24792,7 +24897,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetTransactionWithCompressionInfoBodyId { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesBodyId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -24801,7 
+24906,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoBodyId { + for PostGetQueueLeafIndicesBodyId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -24810,7 +24915,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoBodyId { + for PostGetQueueLeafIndicesBodyId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -24844,18 +24949,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoBodyJsonrpc { + pub enum PostGetQueueLeafIndicesBodyJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyJsonrpc { + impl ::std::fmt::Display for PostGetQueueLeafIndicesBodyJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyJsonrpc { + impl ::std::str::FromStr for PostGetQueueLeafIndicesBodyJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -24866,8 +24971,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoBodyJsonrpc { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -24876,7 +24980,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoBodyJsonrpc { + for PostGetQueueLeafIndicesBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -24885,7 +24989,7 @@ All endpoints return AccountV2.*/ } } impl 
::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoBodyJsonrpc { + for PostGetQueueLeafIndicesBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -24902,7 +25006,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getTransactionWithCompressionInfo" + /// "getQueueLeafIndices" /// ] ///} /// ``` @@ -24919,34 +25023,29 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoBodyMethod { - #[serde(rename = "getTransactionWithCompressionInfo")] - GetTransactionWithCompressionInfo, + pub enum PostGetQueueLeafIndicesBodyMethod { + #[serde(rename = "getQueueLeafIndices")] + GetQueueLeafIndices, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyMethod { + impl ::std::fmt::Display for PostGetQueueLeafIndicesBodyMethod { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { - Self::GetTransactionWithCompressionInfo => { - f.write_str("getTransactionWithCompressionInfo") - } + Self::GetQueueLeafIndices => f.write_str("getQueueLeafIndices"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyMethod { + impl ::std::str::FromStr for PostGetQueueLeafIndicesBodyMethod { type Err = self::error::ConversionError; fn from_str( value: &str, ) -> ::std::result::Result { match value { - "getTransactionWithCompressionInfo" => { - Ok(Self::GetTransactionWithCompressionInfo) - } + "getQueueLeafIndices" => Ok(Self::GetQueueLeafIndices), _ => Err("invalid value".into()), } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoBodyMethod { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -24955,7 +25054,7 @@ All endpoints return AccountV2.*/ } } impl 
::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoBodyMethod { + for PostGetQueueLeafIndicesBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -24964,7 +25063,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoBodyMethod { + for PostGetQueueLeafIndicesBodyMethod { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -24972,19 +25071,35 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetTransactionWithCompressionInfoBodyParams` + /**Parameters for requesting input queue leaf indices. +Returns (hash, queue_index, leaf_index) for nullifier queue items.*/ /// ///
JSON schema /// /// ```json ///{ + /// "description": "Parameters for requesting input queue leaf indices.\nReturns (hash, queue_index, leaf_index) for nullifier queue items.", /// "type": "object", /// "required": [ - /// "signature" + /// "limit", + /// "tree" /// ], /// "properties": { - /// "signature": { - /// "$ref": "#/components/schemas/SerializableSignature" + /// "limit": { + /// "type": "integer", + /// "format": "uint16", + /// "minimum": 0.0 + /// }, + /// "startIndex": { + /// "type": [ + /// "integer", + /// "null" + /// ], + /// "format": "uint64", + /// "minimum": 0.0 + /// }, + /// "tree": { + /// "$ref": "#/components/schemas/Hash" /// } /// }, /// "additionalProperties": false @@ -24993,15 +25108,22 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetTransactionWithCompressionInfoBodyParams { - pub signature: SerializableSignature, + pub struct PostGetQueueLeafIndicesBodyParams { + pub limit: u16, + #[serde( + rename = "startIndex", + default, + skip_serializing_if = "::std::option::Option::is_none" + )] + pub start_index: ::std::option::Option, + pub tree: Hash, } - impl PostGetTransactionWithCompressionInfoBodyParams { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoBodyParams { + impl PostGetQueueLeafIndicesBodyParams { + pub fn builder() -> builder::PostGetQueueLeafIndicesBodyParams { Default::default() } } - ///`PostGetTransactionWithCompressionInfoResponse` + ///`PostGetQueueLeafIndicesResponse` /// ///
JSON schema /// @@ -25039,62 +25161,46 @@ All endpoints return AccountV2.*/ /// ] /// }, /// "result": { - /// "description": "A Solana transaction with additional compression information", + /// "description": "Response containing queue leaf indices", /// "type": "object", + /// "required": [ + /// "context", + /// "value" + /// ], /// "properties": { - /// "compression_info": { - /// "type": "object", - /// "required": [ - /// "closedAccounts", - /// "openedAccounts" - /// ], - /// "properties": { - /// "closedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } - /// }, - /// "openedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } - /// } - /// }, - /// "additionalProperties": false + /// "context": { + /// "$ref": "#/components/schemas/Context" /// }, - /// "transaction": { - /// "description": "An encoded confirmed transaction with status meta", - /// "type": "object" + /// "value": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/QueueLeafIndex" + /// } /// } - /// } + /// }, + /// "additionalProperties": false /// } /// } ///} /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoResponse { + pub struct PostGetQueueLeafIndicesResponse { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub error: ::std::option::Option< - PostGetTransactionWithCompressionInfoResponseError, - >, + pub error: ::std::option::Option, ///An ID to identify the response. - pub id: PostGetTransactionWithCompressionInfoResponseId, + pub id: PostGetQueueLeafIndicesResponseId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetTransactionWithCompressionInfoResponseJsonrpc, + pub jsonrpc: PostGetQueueLeafIndicesResponseJsonrpc, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub result: ::std::option::Option< - PostGetTransactionWithCompressionInfoResponseResult, - >, + pub result: ::std::option::Option, } - impl PostGetTransactionWithCompressionInfoResponse { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponse { + impl PostGetQueueLeafIndicesResponse { + pub fn builder() -> builder::PostGetQueueLeafIndicesResponse { Default::default() } } - ///`PostGetTransactionWithCompressionInfoResponseError` + ///`PostGetQueueLeafIndicesResponseError` /// ///
JSON schema /// @@ -25113,13 +25219,13 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoResponseError { + pub struct PostGetQueueLeafIndicesResponseError { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub code: ::std::option::Option, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub message: ::std::option::Option<::std::string::String>, } - impl ::std::default::Default for PostGetTransactionWithCompressionInfoResponseError { + impl ::std::default::Default for PostGetQueueLeafIndicesResponseError { fn default() -> Self { Self { code: Default::default(), @@ -25127,8 +25233,8 @@ All endpoints return AccountV2.*/ } } } - impl PostGetTransactionWithCompressionInfoResponseError { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseError { + impl PostGetQueueLeafIndicesResponseError { + pub fn builder() -> builder::PostGetQueueLeafIndicesResponseError { Default::default() } } @@ -25158,18 +25264,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoResponseId { + pub enum PostGetQueueLeafIndicesResponseId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoResponseId { + impl ::std::fmt::Display for PostGetQueueLeafIndicesResponseId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoResponseId { + impl ::std::str::FromStr for PostGetQueueLeafIndicesResponseId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25180,8 +25286,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoResponseId { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesResponseId { type Error = 
self::error::ConversionError; fn try_from( value: &str, @@ -25190,7 +25295,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoResponseId { + for PostGetQueueLeafIndicesResponseId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25199,7 +25304,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoResponseId { + for PostGetQueueLeafIndicesResponseId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25233,18 +25338,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoResponseJsonrpc { + pub enum PostGetQueueLeafIndicesResponseJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoResponseJsonrpc { + impl ::std::fmt::Display for PostGetQueueLeafIndicesResponseJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoResponseJsonrpc { + impl ::std::str::FromStr for PostGetQueueLeafIndicesResponseJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25255,8 +25360,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoResponseJsonrpc { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25265,7 +25369,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoResponseJsonrpc { + for PostGetQueueLeafIndicesResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: 
&::std::string::String, @@ -25274,7 +25378,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoResponseJsonrpc { + for PostGetQueueLeafIndicesResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25282,91 +25386,26 @@ All endpoints return AccountV2.*/ value.parse() } } - ///A Solana transaction with additional compression information - /// - ///
JSON schema - /// - /// ```json - ///{ - /// "description": "A Solana transaction with additional compression information", - /// "type": "object", - /// "properties": { - /// "compression_info": { - /// "type": "object", - /// "required": [ - /// "closedAccounts", - /// "openedAccounts" - /// ], - /// "properties": { - /// "closedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } - /// }, - /// "openedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } - /// } - /// }, - /// "additionalProperties": false - /// }, - /// "transaction": { - /// "description": "An encoded confirmed transaction with status meta", - /// "type": "object" - /// } - /// } - ///} - /// ``` - ///
- #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoResponseResult { - #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub compression_info: ::std::option::Option< - PostGetTransactionWithCompressionInfoResponseResultCompressionInfo, - >, - ///An encoded confirmed transaction with status meta - #[serde(default, skip_serializing_if = "::serde_json::Map::is_empty")] - pub transaction: ::serde_json::Map<::std::string::String, ::serde_json::Value>, - } - impl ::std::default::Default - for PostGetTransactionWithCompressionInfoResponseResult { - fn default() -> Self { - Self { - compression_info: Default::default(), - transaction: Default::default(), - } - } - } - impl PostGetTransactionWithCompressionInfoResponseResult { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseResult { - Default::default() - } - } - ///`PostGetTransactionWithCompressionInfoResponseResultCompressionInfo` + ///Response containing queue leaf indices /// ///
JSON schema /// /// ```json ///{ + /// "description": "Response containing queue leaf indices", /// "type": "object", /// "required": [ - /// "closedAccounts", - /// "openedAccounts" + /// "context", + /// "value" /// ], /// "properties": { - /// "closedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } + /// "context": { + /// "$ref": "#/components/schemas/Context" /// }, - /// "openedAccounts": { + /// "value": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" + /// "$ref": "#/components/schemas/QueueLeafIndex" /// } /// } /// }, @@ -25376,18 +25415,16 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { - #[serde(rename = "closedAccounts")] - pub closed_accounts: ::std::vec::Vec, - #[serde(rename = "openedAccounts")] - pub opened_accounts: ::std::vec::Vec, + pub struct PostGetQueueLeafIndicesResponseResult { + pub context: Context, + pub value: ::std::vec::Vec, } - impl PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { + impl PostGetQueueLeafIndicesResponseResult { + pub fn builder() -> builder::PostGetQueueLeafIndicesResponseResult { Default::default() } } - ///`PostGetTransactionWithCompressionInfoV2Body` + ///`PostGetTransactionWithCompressionInfoBody` /// ///
JSON schema /// @@ -25419,7 +25456,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getTransactionWithCompressionInfoV2" + /// "getTransactionWithCompressionInfo" /// ] /// }, /// "params": { @@ -25439,17 +25476,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoV2Body { + pub struct PostGetTransactionWithCompressionInfoBody { ///An ID to identify the request. - pub id: PostGetTransactionWithCompressionInfoV2BodyId, + pub id: PostGetTransactionWithCompressionInfoBodyId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetTransactionWithCompressionInfoV2BodyJsonrpc, + pub jsonrpc: PostGetTransactionWithCompressionInfoBodyJsonrpc, ///The name of the method to invoke. - pub method: PostGetTransactionWithCompressionInfoV2BodyMethod, - pub params: PostGetTransactionWithCompressionInfoV2BodyParams, + pub method: PostGetTransactionWithCompressionInfoBodyMethod, + pub params: PostGetTransactionWithCompressionInfoBodyParams, } - impl PostGetTransactionWithCompressionInfoV2Body { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2Body { + impl PostGetTransactionWithCompressionInfoBody { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoBody { Default::default() } } @@ -25479,18 +25516,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2BodyId { + pub enum PostGetTransactionWithCompressionInfoBodyId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyId { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyId { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25501,8 +25538,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoV2BodyId 
{ + impl ::std::convert::TryFrom<&str> for PostGetTransactionWithCompressionInfoBodyId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25511,7 +25547,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyId { + for PostGetTransactionWithCompressionInfoBodyId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25520,7 +25556,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyId { + for PostGetTransactionWithCompressionInfoBodyId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25554,18 +25590,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + pub enum PostGetTransactionWithCompressionInfoBodyJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25577,7 +25613,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + for PostGetTransactionWithCompressionInfoBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25586,7 +25622,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + 
for PostGetTransactionWithCompressionInfoBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25595,7 +25631,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + for PostGetTransactionWithCompressionInfoBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25612,7 +25648,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getTransactionWithCompressionInfoV2" + /// "getTransactionWithCompressionInfo" /// ] ///} /// ``` @@ -25629,34 +25665,34 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2BodyMethod { - #[serde(rename = "getTransactionWithCompressionInfoV2")] - GetTransactionWithCompressionInfoV2, + pub enum PostGetTransactionWithCompressionInfoBodyMethod { + #[serde(rename = "getTransactionWithCompressionInfo")] + GetTransactionWithCompressionInfo, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyMethod { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyMethod { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { - Self::GetTransactionWithCompressionInfoV2 => { - f.write_str("getTransactionWithCompressionInfoV2") + Self::GetTransactionWithCompressionInfo => { + f.write_str("getTransactionWithCompressionInfo") } } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyMethod { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyMethod { type Err = self::error::ConversionError; fn from_str( value: &str, ) -> ::std::result::Result { match value { - "getTransactionWithCompressionInfoV2" => { - Ok(Self::GetTransactionWithCompressionInfoV2) + "getTransactionWithCompressionInfo" => { + 
Ok(Self::GetTransactionWithCompressionInfo) } _ => Err("invalid value".into()), } } } impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoV2BodyMethod { + for PostGetTransactionWithCompressionInfoBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25665,7 +25701,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyMethod { + for PostGetTransactionWithCompressionInfoBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25674,7 +25710,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyMethod { + for PostGetTransactionWithCompressionInfoBodyMethod { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25682,7 +25718,7 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetTransactionWithCompressionInfoV2BodyParams` + ///`PostGetTransactionWithCompressionInfoBodyParams` /// ///
JSON schema /// @@ -25703,15 +25739,15 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetTransactionWithCompressionInfoV2BodyParams { + pub struct PostGetTransactionWithCompressionInfoBodyParams { pub signature: SerializableSignature, } - impl PostGetTransactionWithCompressionInfoV2BodyParams { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2BodyParams { + impl PostGetTransactionWithCompressionInfoBodyParams { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoBodyParams { Default::default() } } - ///`PostGetTransactionWithCompressionInfoV2Response` + ///`PostGetTransactionWithCompressionInfoResponse` /// ///
JSON schema /// @@ -25762,13 +25798,13 @@ All endpoints return AccountV2.*/ /// "closedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// }, /// "openedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// } /// }, @@ -25785,26 +25821,26 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoV2Response { + pub struct PostGetTransactionWithCompressionInfoResponse { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub error: ::std::option::Option< - PostGetTransactionWithCompressionInfoV2ResponseError, + PostGetTransactionWithCompressionInfoResponseError, >, ///An ID to identify the response. - pub id: PostGetTransactionWithCompressionInfoV2ResponseId, + pub id: PostGetTransactionWithCompressionInfoResponseId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetTransactionWithCompressionInfoV2ResponseJsonrpc, + pub jsonrpc: PostGetTransactionWithCompressionInfoResponseJsonrpc, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub result: ::std::option::Option< - PostGetTransactionWithCompressionInfoV2ResponseResult, + PostGetTransactionWithCompressionInfoResponseResult, >, } - impl PostGetTransactionWithCompressionInfoV2Response { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2Response { + impl PostGetTransactionWithCompressionInfoResponse { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponse { Default::default() } } - ///`PostGetTransactionWithCompressionInfoV2ResponseError` + ///`PostGetTransactionWithCompressionInfoResponseError` /// ///
JSON schema /// @@ -25823,14 +25859,13 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoV2ResponseError { + pub struct PostGetTransactionWithCompressionInfoResponseError { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub code: ::std::option::Option, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub message: ::std::option::Option<::std::string::String>, } - impl ::std::default::Default - for PostGetTransactionWithCompressionInfoV2ResponseError { + impl ::std::default::Default for PostGetTransactionWithCompressionInfoResponseError { fn default() -> Self { Self { code: Default::default(), @@ -25838,8 +25873,8 @@ All endpoints return AccountV2.*/ } } } - impl PostGetTransactionWithCompressionInfoV2ResponseError { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseError { + impl PostGetTransactionWithCompressionInfoResponseError { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseError { Default::default() } } @@ -25869,18 +25904,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2ResponseId { + pub enum PostGetTransactionWithCompressionInfoResponseId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2ResponseId { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoResponseId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2ResponseId { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoResponseId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25892,7 +25927,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&str> - for 
PostGetTransactionWithCompressionInfoV2ResponseId { + for PostGetTransactionWithCompressionInfoResponseId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25901,7 +25936,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2ResponseId { + for PostGetTransactionWithCompressionInfoResponseId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25910,7 +25945,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2ResponseId { + for PostGetTransactionWithCompressionInfoResponseId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25944,18 +25979,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + pub enum PostGetTransactionWithCompressionInfoResponseJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoResponseJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoResponseJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25967,7 +26002,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + for PostGetTransactionWithCompressionInfoResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25976,7 +26011,7 @@ All endpoints return AccountV2.*/ } } impl 
::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + for PostGetTransactionWithCompressionInfoResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25985,7 +26020,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + for PostGetTransactionWithCompressionInfoResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26012,13 +26047,13 @@ All endpoints return AccountV2.*/ /// "closedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// }, /// "openedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// } /// }, @@ -26033,17 +26068,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoV2ResponseResult { + pub struct PostGetTransactionWithCompressionInfoResponseResult { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub compression_info: ::std::option::Option< - PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo, + PostGetTransactionWithCompressionInfoResponseResultCompressionInfo, >, ///An encoded confirmed transaction with status meta #[serde(default, skip_serializing_if = "::serde_json::Map::is_empty")] pub transaction: ::serde_json::Map<::std::string::String, ::serde_json::Value>, } impl ::std::default::Default - for PostGetTransactionWithCompressionInfoV2ResponseResult { + for PostGetTransactionWithCompressionInfoResponseResult { fn default() -> Self { Self { compression_info: Default::default(), @@ -26051,12 +26086,12 @@ All endpoints return AccountV2.*/ } } } - impl PostGetTransactionWithCompressionInfoV2ResponseResult { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseResult { + impl PostGetTransactionWithCompressionInfoResponseResult { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseResult { Default::default() } } - ///`PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo` + ///`PostGetTransactionWithCompressionInfoResponseResultCompressionInfo` /// ///
JSON schema /// @@ -26071,13 +26106,13 @@ All endpoints return AccountV2.*/ /// "closedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// }, /// "openedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// } /// }, @@ -26087,18 +26122,18 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { + pub struct PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { #[serde(rename = "closedAccounts")] - pub closed_accounts: ::std::vec::Vec, + pub closed_accounts: ::std::vec::Vec, #[serde(rename = "openedAccounts")] - pub opened_accounts: ::std::vec::Vec, + pub opened_accounts: ::std::vec::Vec, } - impl PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { + impl PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { Default::default() } } - ///`PostGetValidityProofBody` + ///`PostGetTransactionWithCompressionInfoV2Body` /// ///
JSON schema /// @@ -26130,23 +26165,17 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getValidityProof" + /// "getTransactionWithCompressionInfoV2" /// ] /// }, /// "params": { /// "type": "object", + /// "required": [ + /// "signature" + /// ], /// "properties": { - /// "hashes": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/Hash" - /// } - /// }, - /// "newAddressesWithTrees": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AddressWithTree" - /// } + /// "signature": { + /// "$ref": "#/components/schemas/SerializableSignature" /// } /// }, /// "additionalProperties": false @@ -26156,17 +26185,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetValidityProofBody { + pub struct PostGetTransactionWithCompressionInfoV2Body { ///An ID to identify the request. - pub id: PostGetValidityProofBodyId, + pub id: PostGetTransactionWithCompressionInfoV2BodyId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetValidityProofBodyJsonrpc, + pub jsonrpc: PostGetTransactionWithCompressionInfoV2BodyJsonrpc, ///The name of the method to invoke. - pub method: PostGetValidityProofBodyMethod, - pub params: PostGetValidityProofBodyParams, + pub method: PostGetTransactionWithCompressionInfoV2BodyMethod, + pub params: PostGetTransactionWithCompressionInfoV2BodyParams, } - impl PostGetValidityProofBody { - pub fn builder() -> builder::PostGetValidityProofBody { + impl PostGetTransactionWithCompressionInfoV2Body { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2Body { Default::default() } } @@ -26196,18 +26225,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofBodyId { + pub enum PostGetTransactionWithCompressionInfoV2BodyId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetValidityProofBodyId { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetValidityProofBodyId { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26218,7 +26247,8 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyId { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2BodyId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26226,7 +26256,8 @@ All 
endpoints return AccountV2.*/ value.parse() } } - impl ::std::convert::TryFrom<&::std::string::String> for PostGetValidityProofBodyId { + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetTransactionWithCompressionInfoV2BodyId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26234,7 +26265,8 @@ All endpoints return AccountV2.*/ value.parse() } } - impl ::std::convert::TryFrom<::std::string::String> for PostGetValidityProofBodyId { + impl ::std::convert::TryFrom<::std::string::String> + for PostGetTransactionWithCompressionInfoV2BodyId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26268,18 +26300,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofBodyJsonrpc { + pub enum PostGetTransactionWithCompressionInfoV2BodyJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetValidityProofBodyJsonrpc { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetValidityProofBodyJsonrpc { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26290,7 +26322,8 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyJsonrpc { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26299,7 +26332,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofBodyJsonrpc { + for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: 
&::std::string::String, @@ -26308,7 +26341,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofBodyJsonrpc { + for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26325,7 +26358,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getValidityProof" + /// "getTransactionWithCompressionInfoV2" /// ] ///} /// ``` @@ -26342,29 +26375,34 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofBodyMethod { - #[serde(rename = "getValidityProof")] - GetValidityProof, + pub enum PostGetTransactionWithCompressionInfoV2BodyMethod { + #[serde(rename = "getTransactionWithCompressionInfoV2")] + GetTransactionWithCompressionInfoV2, } - impl ::std::fmt::Display for PostGetValidityProofBodyMethod { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyMethod { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { - Self::GetValidityProof => f.write_str("getValidityProof"), + Self::GetTransactionWithCompressionInfoV2 => { + f.write_str("getTransactionWithCompressionInfoV2") + } } } } - impl ::std::str::FromStr for PostGetValidityProofBodyMethod { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyMethod { type Err = self::error::ConversionError; fn from_str( value: &str, ) -> ::std::result::Result { match value { - "getValidityProof" => Ok(Self::GetValidityProof), + "getTransactionWithCompressionInfoV2" => { + Ok(Self::GetTransactionWithCompressionInfoV2) + } _ => Err("invalid value".into()), } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyMethod { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2BodyMethod { type Error = self::error::ConversionError; fn try_from( value: 
&str, @@ -26373,7 +26411,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofBodyMethod { + for PostGetTransactionWithCompressionInfoV2BodyMethod { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26382,7 +26420,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofBodyMethod { + for PostGetTransactionWithCompressionInfoV2BodyMethod { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26390,25 +26428,19 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetValidityProofBodyParams` + ///`PostGetTransactionWithCompressionInfoV2BodyParams` /// ///
JSON schema /// /// ```json ///{ /// "type": "object", + /// "required": [ + /// "signature" + /// ], /// "properties": { - /// "hashes": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/Hash" - /// } - /// }, - /// "newAddressesWithTrees": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AddressWithTree" - /// } + /// "signature": { + /// "$ref": "#/components/schemas/SerializableSignature" /// } /// }, /// "additionalProperties": false @@ -26417,30 +26449,15 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetValidityProofBodyParams { - #[serde(default, skip_serializing_if = "::std::vec::Vec::is_empty")] - pub hashes: ::std::vec::Vec, - #[serde( - rename = "newAddressesWithTrees", - default, - skip_serializing_if = "::std::vec::Vec::is_empty" - )] - pub new_addresses_with_trees: ::std::vec::Vec, - } - impl ::std::default::Default for PostGetValidityProofBodyParams { - fn default() -> Self { - Self { - hashes: Default::default(), - new_addresses_with_trees: Default::default(), - } - } + pub struct PostGetTransactionWithCompressionInfoV2BodyParams { + pub signature: SerializableSignature, } - impl PostGetValidityProofBodyParams { - pub fn builder() -> builder::PostGetValidityProofBodyParams { + impl PostGetTransactionWithCompressionInfoV2BodyParams { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2BodyParams { Default::default() } } - ///`PostGetValidityProofResponse` + ///`PostGetTransactionWithCompressionInfoV2Response` /// ///
JSON schema /// @@ -26478,42 +26495,62 @@ All endpoints return AccountV2.*/ /// ] /// }, /// "result": { + /// "description": "A Solana transaction with additional compression information", /// "type": "object", - /// "required": [ - /// "context", - /// "value" - /// ], /// "properties": { - /// "context": { - /// "$ref": "#/components/schemas/Context" + /// "compression_info": { + /// "type": "object", + /// "required": [ + /// "closedAccounts", + /// "openedAccounts" + /// ], + /// "properties": { + /// "closedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// } + /// }, + /// "openedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// } + /// } + /// }, + /// "additionalProperties": false /// }, - /// "value": { - /// "$ref": "#/components/schemas/CompressedProofWithContext" + /// "transaction": { + /// "description": "An encoded confirmed transaction with status meta", + /// "type": "object" /// } - /// }, - /// "additionalProperties": false + /// } /// } /// } ///} /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetValidityProofResponse { + pub struct PostGetTransactionWithCompressionInfoV2Response { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub error: ::std::option::Option, + pub error: ::std::option::Option< + PostGetTransactionWithCompressionInfoV2ResponseError, + >, ///An ID to identify the response. - pub id: PostGetValidityProofResponseId, + pub id: PostGetTransactionWithCompressionInfoV2ResponseId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetValidityProofResponseJsonrpc, + pub jsonrpc: PostGetTransactionWithCompressionInfoV2ResponseJsonrpc, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub result: ::std::option::Option, + pub result: ::std::option::Option< + PostGetTransactionWithCompressionInfoV2ResponseResult, + >, } - impl PostGetValidityProofResponse { - pub fn builder() -> builder::PostGetValidityProofResponse { + impl PostGetTransactionWithCompressionInfoV2Response { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2Response { Default::default() } } - ///`PostGetValidityProofResponseError` + ///`PostGetTransactionWithCompressionInfoV2ResponseError` /// ///
JSON schema /// @@ -26532,13 +26569,14 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetValidityProofResponseError { + pub struct PostGetTransactionWithCompressionInfoV2ResponseError { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub code: ::std::option::Option, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub message: ::std::option::Option<::std::string::String>, } - impl ::std::default::Default for PostGetValidityProofResponseError { + impl ::std::default::Default + for PostGetTransactionWithCompressionInfoV2ResponseError { fn default() -> Self { Self { code: Default::default(), @@ -26546,8 +26584,8 @@ All endpoints return AccountV2.*/ } } } - impl PostGetValidityProofResponseError { - pub fn builder() -> builder::PostGetValidityProofResponseError { + impl PostGetTransactionWithCompressionInfoV2ResponseError { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseError { Default::default() } } @@ -26577,18 +26615,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofResponseId { + pub enum PostGetTransactionWithCompressionInfoV2ResponseId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetValidityProofResponseId { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2ResponseId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetValidityProofResponseId { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2ResponseId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26599,7 +26637,8 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofResponseId { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2ResponseId { type Error = 
self::error::ConversionError; fn try_from( value: &str, @@ -26608,7 +26647,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofResponseId { + for PostGetTransactionWithCompressionInfoV2ResponseId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26617,7 +26656,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofResponseId { + for PostGetTransactionWithCompressionInfoV2ResponseId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26651,18 +26690,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofResponseJsonrpc { + pub enum PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetValidityProofResponseJsonrpc { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetValidityProofResponseJsonrpc { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26673,7 +26712,8 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofResponseJsonrpc { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26682,7 +26722,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofResponseJsonrpc { + for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: 
&::std::string::String, @@ -26691,7 +26731,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofResponseJsonrpc { + for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26699,7 +26739,70 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetValidityProofResponseResult` + ///A Solana transaction with additional compression information + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "A Solana transaction with additional compression information", + /// "type": "object", + /// "properties": { + /// "compression_info": { + /// "type": "object", + /// "required": [ + /// "closedAccounts", + /// "openedAccounts" + /// ], + /// "properties": { + /// "closedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// } + /// }, + /// "openedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// } + /// } + /// }, + /// "additionalProperties": false + /// }, + /// "transaction": { + /// "description": "An encoded confirmed transaction with status meta", + /// "type": "object" + /// } + /// } + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + pub struct PostGetTransactionWithCompressionInfoV2ResponseResult { + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub compression_info: ::std::option::Option< + PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo, + >, + ///An encoded confirmed transaction with status meta + #[serde(default, skip_serializing_if = "::serde_json::Map::is_empty")] + pub transaction: ::serde_json::Map<::std::string::String, ::serde_json::Value>, + } + impl ::std::default::Default + for PostGetTransactionWithCompressionInfoV2ResponseResult { + fn default() -> Self { + Self { + compression_info: Default::default(), + transaction: Default::default(), + } + } + } + impl PostGetTransactionWithCompressionInfoV2ResponseResult { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseResult { + Default::default() + } + } + ///`PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo` /// ///
JSON schema /// @@ -26707,15 +26810,21 @@ All endpoints return AccountV2.*/ ///{ /// "type": "object", /// "required": [ - /// "context", - /// "value" + /// "closedAccounts", + /// "openedAccounts" /// ], /// "properties": { - /// "context": { - /// "$ref": "#/components/schemas/Context" + /// "closedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// } /// }, - /// "value": { - /// "$ref": "#/components/schemas/CompressedProofWithContext" + /// "openedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// } /// } /// }, /// "additionalProperties": false @@ -26724,16 +26833,18 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetValidityProofResponseResult { - pub context: Context, - pub value: CompressedProofWithContext, + pub struct PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { + #[serde(rename = "closedAccounts")] + pub closed_accounts: ::std::vec::Vec, + #[serde(rename = "openedAccounts")] + pub opened_accounts: ::std::vec::Vec, } - impl PostGetValidityProofResponseResult { - pub fn builder() -> builder::PostGetValidityProofResponseResult { + impl PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { Default::default() } } - ///`PostGetValidityProofV2Body` + ///`PostGetValidityProofBody` /// ///
JSON schema /// @@ -26765,7 +26876,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getValidityProofV2" + /// "getValidityProof" /// ] /// }, /// "params": { @@ -26791,17 +26902,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetValidityProofV2Body { + pub struct PostGetValidityProofBody { ///An ID to identify the request. - pub id: PostGetValidityProofV2BodyId, + pub id: PostGetValidityProofBodyId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetValidityProofV2BodyJsonrpc, + pub jsonrpc: PostGetValidityProofBodyJsonrpc, ///The name of the method to invoke. - pub method: PostGetValidityProofV2BodyMethod, - pub params: PostGetValidityProofV2BodyParams, + pub method: PostGetValidityProofBodyMethod, + pub params: PostGetValidityProofBodyParams, } - impl PostGetValidityProofV2Body { - pub fn builder() -> builder::PostGetValidityProofV2Body { + impl PostGetValidityProofBody { + pub fn builder() -> builder::PostGetValidityProofBody { Default::default() } } @@ -26831,18 +26942,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofV2BodyId { + pub enum PostGetValidityProofBodyId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetValidityProofV2BodyId { + impl ::std::fmt::Display for PostGetValidityProofBodyId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetValidityProofV2BodyId { + impl ::std::str::FromStr for PostGetValidityProofBodyId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26853,7 +26964,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyId { + impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26861,8 +26972,7 @@ All endpoints return AccountV2.*/ value.parse() } } - impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofV2BodyId { + impl 
::std::convert::TryFrom<&::std::string::String> for PostGetValidityProofBodyId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26870,8 +26980,7 @@ All endpoints return AccountV2.*/ value.parse() } } - impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofV2BodyId { + impl ::std::convert::TryFrom<::std::string::String> for PostGetValidityProofBodyId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26905,18 +27014,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofV2BodyJsonrpc { + pub enum PostGetValidityProofBodyJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetValidityProofV2BodyJsonrpc { + impl ::std::fmt::Display for PostGetValidityProofBodyJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetValidityProofV2BodyJsonrpc { + impl ::std::str::FromStr for PostGetValidityProofBodyJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26927,7 +27036,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyJsonrpc { + impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26936,7 +27045,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofV2BodyJsonrpc { + for PostGetValidityProofBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26945,7 +27054,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofV2BodyJsonrpc { + for PostGetValidityProofBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( 
value: ::std::string::String, @@ -26962,7 +27071,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getValidityProofV2" + /// "getValidityProof" /// ] ///} /// ``` @@ -26979,29 +27088,29 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofV2BodyMethod { - #[serde(rename = "getValidityProofV2")] - GetValidityProofV2, + pub enum PostGetValidityProofBodyMethod { + #[serde(rename = "getValidityProof")] + GetValidityProof, } - impl ::std::fmt::Display for PostGetValidityProofV2BodyMethod { + impl ::std::fmt::Display for PostGetValidityProofBodyMethod { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { - Self::GetValidityProofV2 => f.write_str("getValidityProofV2"), + Self::GetValidityProof => f.write_str("getValidityProof"), } } } - impl ::std::str::FromStr for PostGetValidityProofV2BodyMethod { + impl ::std::str::FromStr for PostGetValidityProofBodyMethod { type Err = self::error::ConversionError; fn from_str( value: &str, ) -> ::std::result::Result { match value { - "getValidityProofV2" => Ok(Self::GetValidityProofV2), + "getValidityProof" => Ok(Self::GetValidityProof), _ => Err("invalid value".into()), } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyMethod { + impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -27010,7 +27119,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofV2BodyMethod { + for PostGetValidityProofBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -27019,7 +27128,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofV2BodyMethod { + for PostGetValidityProofBodyMethod { type Error = 
self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -27027,7 +27136,7 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetValidityProofV2BodyParams` + ///`PostGetValidityProofBodyParams` /// ///
JSON schema /// @@ -27054,7 +27163,7 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetValidityProofV2BodyParams { + pub struct PostGetValidityProofBodyParams { #[serde(default, skip_serializing_if = "::std::vec::Vec::is_empty")] pub hashes: ::std::vec::Vec, #[serde( @@ -27064,7 +27173,7 @@ All endpoints return AccountV2.*/ )] pub new_addresses_with_trees: ::std::vec::Vec, } - impl ::std::default::Default for PostGetValidityProofV2BodyParams { + impl ::std::default::Default for PostGetValidityProofBodyParams { fn default() -> Self { Self { hashes: Default::default(), @@ -27072,12 +27181,649 @@ All endpoints return AccountV2.*/ } } } - impl PostGetValidityProofV2BodyParams { - pub fn builder() -> builder::PostGetValidityProofV2BodyParams { + impl PostGetValidityProofBodyParams { + pub fn builder() -> builder::PostGetValidityProofBodyParams { Default::default() } } - ///`PostGetValidityProofV2Response` + ///`PostGetValidityProofResponse` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "required": [ + /// "id", + /// "jsonrpc" + /// ], + /// "properties": { + /// "error": { + /// "type": "object", + /// "properties": { + /// "code": { + /// "type": "integer" + /// }, + /// "message": { + /// "type": "string" + /// } + /// } + /// }, + /// "id": { + /// "description": "An ID to identify the response.", + /// "type": "string", + /// "enum": [ + /// "test-account" + /// ] + /// }, + /// "jsonrpc": { + /// "description": "The version of the JSON-RPC protocol.", + /// "type": "string", + /// "enum": [ + /// "2.0" + /// ] + /// }, + /// "result": { + /// "type": "object", + /// "required": [ + /// "context", + /// "value" + /// ], + /// "properties": { + /// "context": { + /// "$ref": "#/components/schemas/Context" + /// }, + /// "value": { + /// "$ref": "#/components/schemas/CompressedProofWithContext" + /// } + /// }, + /// "additionalProperties": false + /// } + /// } + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + pub struct PostGetValidityProofResponse { + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub error: ::std::option::Option, + ///An ID to identify the response. + pub id: PostGetValidityProofResponseId, + ///The version of the JSON-RPC protocol. + pub jsonrpc: PostGetValidityProofResponseJsonrpc, + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub result: ::std::option::Option, + } + impl PostGetValidityProofResponse { + pub fn builder() -> builder::PostGetValidityProofResponse { + Default::default() + } + } + ///`PostGetValidityProofResponseError` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "properties": { + /// "code": { + /// "type": "integer" + /// }, + /// "message": { + /// "type": "string" + /// } + /// } + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + pub struct PostGetValidityProofResponseError { + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub code: ::std::option::Option, + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub message: ::std::option::Option<::std::string::String>, + } + impl ::std::default::Default for PostGetValidityProofResponseError { + fn default() -> Self { + Self { + code: Default::default(), + message: Default::default(), + } + } + } + impl PostGetValidityProofResponseError { + pub fn builder() -> builder::PostGetValidityProofResponseError { + Default::default() + } + } + ///An ID to identify the response. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "An ID to identify the response.", + /// "type": "string", + /// "enum": [ + /// "test-account" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofResponseId { + #[serde(rename = "test-account")] + TestAccount, + } + impl ::std::fmt::Display for PostGetValidityProofResponseId { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::TestAccount => f.write_str("test-account"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofResponseId { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "test-account" => Ok(Self::TestAccount), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofResponseId { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofResponseId { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofResponseId { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///The version of the JSON-RPC protocol. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "The version of the JSON-RPC protocol.", + /// "type": "string", + /// "enum": [ + /// "2.0" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofResponseJsonrpc { + #[serde(rename = "2.0")] + X20, + } + impl ::std::fmt::Display for PostGetValidityProofResponseJsonrpc { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::X20 => f.write_str("2.0"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofResponseJsonrpc { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "2.0" => Ok(Self::X20), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofResponseJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofResponseJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofResponseJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///`PostGetValidityProofResponseResult` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "required": [ + /// "context", + /// "value" + /// ], + /// "properties": { + /// "context": { + /// "$ref": "#/components/schemas/Context" + /// }, + /// "value": { + /// "$ref": "#/components/schemas/CompressedProofWithContext" + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct PostGetValidityProofResponseResult { + pub context: Context, + pub value: CompressedProofWithContext, + } + impl PostGetValidityProofResponseResult { + pub fn builder() -> builder::PostGetValidityProofResponseResult { + Default::default() + } + } + ///`PostGetValidityProofV2Body` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "required": [ + /// "id", + /// "jsonrpc", + /// "method", + /// "params" + /// ], + /// "properties": { + /// "id": { + /// "description": "An ID to identify the request.", + /// "type": "string", + /// "enum": [ + /// "test-account" + /// ] + /// }, + /// "jsonrpc": { + /// "description": "The version of the JSON-RPC protocol.", + /// "type": "string", + /// "enum": [ + /// "2.0" + /// ] + /// }, + /// "method": { + /// "description": "The name of the method to invoke.", + /// "type": "string", + /// "enum": [ + /// "getValidityProofV2" + /// ] + /// }, + /// "params": { + /// "type": "object", + /// "properties": { + /// "hashes": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/Hash" + /// } + /// }, + /// "newAddressesWithTrees": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AddressWithTree" + /// } + /// } + /// }, + /// "additionalProperties": false + /// } + /// } + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + pub struct PostGetValidityProofV2Body { + ///An ID to identify the request. + pub id: PostGetValidityProofV2BodyId, + ///The version of the JSON-RPC protocol. + pub jsonrpc: PostGetValidityProofV2BodyJsonrpc, + ///The name of the method to invoke. + pub method: PostGetValidityProofV2BodyMethod, + pub params: PostGetValidityProofV2BodyParams, + } + impl PostGetValidityProofV2Body { + pub fn builder() -> builder::PostGetValidityProofV2Body { + Default::default() + } + } + ///An ID to identify the request. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "An ID to identify the request.", + /// "type": "string", + /// "enum": [ + /// "test-account" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofV2BodyId { + #[serde(rename = "test-account")] + TestAccount, + } + impl ::std::fmt::Display for PostGetValidityProofV2BodyId { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::TestAccount => f.write_str("test-account"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofV2BodyId { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "test-account" => Ok(Self::TestAccount), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyId { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofV2BodyId { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofV2BodyId { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///The version of the JSON-RPC protocol. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "The version of the JSON-RPC protocol.", + /// "type": "string", + /// "enum": [ + /// "2.0" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofV2BodyJsonrpc { + #[serde(rename = "2.0")] + X20, + } + impl ::std::fmt::Display for PostGetValidityProofV2BodyJsonrpc { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::X20 => f.write_str("2.0"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofV2BodyJsonrpc { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "2.0" => Ok(Self::X20), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofV2BodyJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofV2BodyJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///The name of the method to invoke. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "The name of the method to invoke.", + /// "type": "string", + /// "enum": [ + /// "getValidityProofV2" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofV2BodyMethod { + #[serde(rename = "getValidityProofV2")] + GetValidityProofV2, + } + impl ::std::fmt::Display for PostGetValidityProofV2BodyMethod { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::GetValidityProofV2 => f.write_str("getValidityProofV2"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofV2BodyMethod { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "getValidityProofV2" => Ok(Self::GetValidityProofV2), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyMethod { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofV2BodyMethod { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofV2BodyMethod { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///`PostGetValidityProofV2BodyParams` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "properties": { + /// "hashes": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/Hash" + /// } + /// }, + /// "newAddressesWithTrees": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AddressWithTree" + /// } + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct PostGetValidityProofV2BodyParams { + #[serde(default, skip_serializing_if = "::std::vec::Vec::is_empty")] + pub hashes: ::std::vec::Vec, + #[serde( + rename = "newAddressesWithTrees", + default, + skip_serializing_if = "::std::vec::Vec::is_empty" + )] + pub new_addresses_with_trees: ::std::vec::Vec, + } + impl ::std::default::Default for PostGetValidityProofV2BodyParams { + fn default() -> Self { + Self { + hashes: Default::default(), + new_addresses_with_trees: Default::default(), + } + } + } + impl PostGetValidityProofV2BodyParams { + pub fn builder() -> builder::PostGetValidityProofV2BodyParams { + Default::default() + } + } + ///`PostGetValidityProofV2Response` /// ///
JSON schema /// @@ -27418,6 +28164,52 @@ All endpoints return AccountV2.*/ Default::default() } } + ///A lightweight queue leaf index entry + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "A lightweight queue leaf index entry", + /// "type": "object", + /// "required": [ + /// "hash", + /// "leafIndex", + /// "queueIndex" + /// ], + /// "properties": { + /// "hash": { + /// "$ref": "#/components/schemas/Hash" + /// }, + /// "leafIndex": { + /// "type": "integer", + /// "format": "uint64", + /// "minimum": 0.0 + /// }, + /// "queueIndex": { + /// "type": "integer", + /// "format": "uint64", + /// "minimum": 0.0 + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct QueueLeafIndex { + pub hash: Hash, + #[serde(rename = "leafIndex")] + pub leaf_index: u64, + #[serde(rename = "queueIndex")] + pub queue_index: u64, + } + impl QueueLeafIndex { + pub fn builder() -> builder::QueueLeafIndex { + Default::default() + } + } ///Parameters for requesting queue elements /// ///
JSON schema @@ -30890,6 +31682,148 @@ All endpoints return AccountV2.*/ } } #[derive(Clone, Debug)] + pub struct GetQueueLeafIndicesRequest { + limit: ::std::result::Result, + start_index: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + tree: ::std::result::Result, + } + impl ::std::default::Default for GetQueueLeafIndicesRequest { + fn default() -> Self { + Self { + limit: Err("no value supplied for limit".to_string()), + start_index: Ok(Default::default()), + tree: Err("no value supplied for tree".to_string()), + } + } + } + impl GetQueueLeafIndicesRequest { + pub fn limit(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.limit = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for limit: {e}") + }); + self + } + pub fn start_index(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::option::Option>, + T::Error: ::std::fmt::Display, + { + self.start_index = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for start_index: {e}") + }); + self + } + pub fn tree(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.tree = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for tree: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::GetQueueLeafIndicesRequest { + type Error = super::error::ConversionError; + fn try_from( + value: GetQueueLeafIndicesRequest, + ) -> ::std::result::Result { + Ok(Self { + limit: value.limit?, + start_index: value.start_index?, + tree: value.tree?, + }) + } + } + impl ::std::convert::From + for GetQueueLeafIndicesRequest { + fn from(value: super::GetQueueLeafIndicesRequest) -> Self { + Self { + limit: Ok(value.limit), + start_index: Ok(value.start_index), + tree: Ok(value.tree), + } + } + } + #[derive(Clone, Debug)] + pub struct GetQueueLeafIndicesResponse { + 
context: ::std::result::Result, + value: ::std::result::Result< + ::std::vec::Vec, + ::std::string::String, + >, + } + impl ::std::default::Default for GetQueueLeafIndicesResponse { + fn default() -> Self { + Self { + context: Err("no value supplied for context".to_string()), + value: Err("no value supplied for value".to_string()), + } + } + } + impl GetQueueLeafIndicesResponse { + pub fn context(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.context = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for context: {e}") + }); + self + } + pub fn value(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::vec::Vec>, + T::Error: ::std::fmt::Display, + { + self.value = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for value: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::GetQueueLeafIndicesResponse { + type Error = super::error::ConversionError; + fn try_from( + value: GetQueueLeafIndicesResponse, + ) -> ::std::result::Result { + Ok(Self { + context: value.context?, + value: value.value?, + }) + } + } + impl ::std::convert::From + for GetQueueLeafIndicesResponse { + fn from(value: super::GetQueueLeafIndicesResponse) -> Self { + Self { + context: Ok(value.context), + value: Ok(value.value), + } + } + } + #[derive(Clone, Debug)] pub struct InputQueueData { account_hashes: ::std::result::Result< ::std::vec::Vec, @@ -46275,6 +47209,424 @@ All endpoints return AccountV2.*/ } } #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesBody { + id: ::std::result::Result< + super::PostGetQueueLeafIndicesBodyId, + ::std::string::String, + >, + jsonrpc: ::std::result::Result< + super::PostGetQueueLeafIndicesBodyJsonrpc, + ::std::string::String, + >, + method: ::std::result::Result< + super::PostGetQueueLeafIndicesBodyMethod, + ::std::string::String, + >, + params: ::std::result::Result< + 
super::PostGetQueueLeafIndicesBodyParams, + ::std::string::String, + >, + } + impl ::std::default::Default for PostGetQueueLeafIndicesBody { + fn default() -> Self { + Self { + id: Err("no value supplied for id".to_string()), + jsonrpc: Err("no value supplied for jsonrpc".to_string()), + method: Err("no value supplied for method".to_string()), + params: Err("no value supplied for params".to_string()), + } + } + } + impl PostGetQueueLeafIndicesBody { + pub fn id(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.id = value + .try_into() + .map_err(|e| format!("error converting supplied value for id: {e}")); + self + } + pub fn jsonrpc(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.jsonrpc = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for jsonrpc: {e}") + }); + self + } + pub fn method(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.method = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for method: {e}") + }); + self + } + pub fn params(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.params = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for params: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesBody { + type Error = super::error::ConversionError; + fn try_from( + value: PostGetQueueLeafIndicesBody, + ) -> ::std::result::Result { + Ok(Self { + id: value.id?, + jsonrpc: value.jsonrpc?, + method: value.method?, + params: value.params?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesBody { + fn from(value: super::PostGetQueueLeafIndicesBody) -> Self { + Self { + id: Ok(value.id), + jsonrpc: Ok(value.jsonrpc), + method: Ok(value.method), + params: 
Ok(value.params), + } + } + } + #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesBodyParams { + limit: ::std::result::Result, + start_index: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + tree: ::std::result::Result, + } + impl ::std::default::Default for PostGetQueueLeafIndicesBodyParams { + fn default() -> Self { + Self { + limit: Err("no value supplied for limit".to_string()), + start_index: Ok(Default::default()), + tree: Err("no value supplied for tree".to_string()), + } + } + } + impl PostGetQueueLeafIndicesBodyParams { + pub fn limit(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.limit = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for limit: {e}") + }); + self + } + pub fn start_index(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::option::Option>, + T::Error: ::std::fmt::Display, + { + self.start_index = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for start_index: {e}") + }); + self + } + pub fn tree(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.tree = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for tree: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesBodyParams { + type Error = super::error::ConversionError; + fn try_from( + value: PostGetQueueLeafIndicesBodyParams, + ) -> ::std::result::Result { + Ok(Self { + limit: value.limit?, + start_index: value.start_index?, + tree: value.tree?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesBodyParams { + fn from(value: super::PostGetQueueLeafIndicesBodyParams) -> Self { + Self { + limit: Ok(value.limit), + start_index: Ok(value.start_index), + tree: Ok(value.tree), + } + } + } + #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesResponse { 
+ error: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + id: ::std::result::Result< + super::PostGetQueueLeafIndicesResponseId, + ::std::string::String, + >, + jsonrpc: ::std::result::Result< + super::PostGetQueueLeafIndicesResponseJsonrpc, + ::std::string::String, + >, + result: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + } + impl ::std::default::Default for PostGetQueueLeafIndicesResponse { + fn default() -> Self { + Self { + error: Ok(Default::default()), + id: Err("no value supplied for id".to_string()), + jsonrpc: Err("no value supplied for jsonrpc".to_string()), + result: Ok(Default::default()), + } + } + } + impl PostGetQueueLeafIndicesResponse { + pub fn error(mut self, value: T) -> Self + where + T: ::std::convert::TryInto< + ::std::option::Option, + >, + T::Error: ::std::fmt::Display, + { + self.error = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for error: {e}") + }); + self + } + pub fn id(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.id = value + .try_into() + .map_err(|e| format!("error converting supplied value for id: {e}")); + self + } + pub fn jsonrpc(mut self, value: T) -> Self + where + T: ::std::convert::TryInto< + super::PostGetQueueLeafIndicesResponseJsonrpc, + >, + T::Error: ::std::fmt::Display, + { + self.jsonrpc = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for jsonrpc: {e}") + }); + self + } + pub fn result(mut self, value: T) -> Self + where + T: ::std::convert::TryInto< + ::std::option::Option, + >, + T::Error: ::std::fmt::Display, + { + self.result = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for result: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesResponse { + type Error = super::error::ConversionError; + fn try_from( + value: 
PostGetQueueLeafIndicesResponse, + ) -> ::std::result::Result { + Ok(Self { + error: value.error?, + id: value.id?, + jsonrpc: value.jsonrpc?, + result: value.result?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesResponse { + fn from(value: super::PostGetQueueLeafIndicesResponse) -> Self { + Self { + error: Ok(value.error), + id: Ok(value.id), + jsonrpc: Ok(value.jsonrpc), + result: Ok(value.result), + } + } + } + #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesResponseError { + code: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + message: ::std::result::Result< + ::std::option::Option<::std::string::String>, + ::std::string::String, + >, + } + impl ::std::default::Default for PostGetQueueLeafIndicesResponseError { + fn default() -> Self { + Self { + code: Ok(Default::default()), + message: Ok(Default::default()), + } + } + } + impl PostGetQueueLeafIndicesResponseError { + pub fn code(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::option::Option>, + T::Error: ::std::fmt::Display, + { + self.code = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for code: {e}") + }); + self + } + pub fn message(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::option::Option<::std::string::String>>, + T::Error: ::std::fmt::Display, + { + self.message = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for message: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesResponseError { + type Error = super::error::ConversionError; + fn try_from( + value: PostGetQueueLeafIndicesResponseError, + ) -> ::std::result::Result { + Ok(Self { + code: value.code?, + message: value.message?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesResponseError { + fn from(value: super::PostGetQueueLeafIndicesResponseError) -> Self { + Self { + code: Ok(value.code), + 
message: Ok(value.message), + } + } + } + #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesResponseResult { + context: ::std::result::Result, + value: ::std::result::Result< + ::std::vec::Vec, + ::std::string::String, + >, + } + impl ::std::default::Default for PostGetQueueLeafIndicesResponseResult { + fn default() -> Self { + Self { + context: Err("no value supplied for context".to_string()), + value: Err("no value supplied for value".to_string()), + } + } + } + impl PostGetQueueLeafIndicesResponseResult { + pub fn context(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.context = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for context: {e}") + }); + self + } + pub fn value(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::vec::Vec>, + T::Error: ::std::fmt::Display, + { + self.value = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for value: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesResponseResult { + type Error = super::error::ConversionError; + fn try_from( + value: PostGetQueueLeafIndicesResponseResult, + ) -> ::std::result::Result { + Ok(Self { + context: value.context?, + value: value.value?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesResponseResult { + fn from(value: super::PostGetQueueLeafIndicesResponseResult) -> Self { + Self { + context: Ok(value.context), + value: Ok(value.value), + } + } + } + #[derive(Clone, Debug)] pub struct PostGetTransactionWithCompressionInfoBody { id: ::std::result::Result< super::PostGetTransactionWithCompressionInfoBodyId, @@ -48206,6 +49558,80 @@ All endpoints return AccountV2.*/ } } #[derive(Clone, Debug)] + pub struct QueueLeafIndex { + hash: ::std::result::Result, + leaf_index: ::std::result::Result, + queue_index: ::std::result::Result, + } + impl ::std::default::Default for 
QueueLeafIndex { + fn default() -> Self { + Self { + hash: Err("no value supplied for hash".to_string()), + leaf_index: Err("no value supplied for leaf_index".to_string()), + queue_index: Err("no value supplied for queue_index".to_string()), + } + } + } + impl QueueLeafIndex { + pub fn hash(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.hash = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for hash: {e}") + }); + self + } + pub fn leaf_index(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.leaf_index = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for leaf_index: {e}") + }); + self + } + pub fn queue_index(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.queue_index = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for queue_index: {e}") + }); + self + } + } + impl ::std::convert::TryFrom for super::QueueLeafIndex { + type Error = super::error::ConversionError; + fn try_from( + value: QueueLeafIndex, + ) -> ::std::result::Result { + Ok(Self { + hash: value.hash?, + leaf_index: value.leaf_index?, + queue_index: value.queue_index?, + }) + } + } + impl ::std::convert::From for QueueLeafIndex { + fn from(value: super::QueueLeafIndex) -> Self { + Self { + hash: Ok(value.hash), + leaf_index: Ok(value.leaf_index), + queue_index: Ok(value.queue_index), + } + } + } + #[derive(Clone, Debug)] pub struct QueueRequest { limit: ::std::result::Result, start_index: ::std::result::Result< @@ -50080,6 +51506,17 @@ let response = client.post_get_queue_info() pub fn post_get_queue_info(&self) -> builder::PostGetQueueInfo<'_> { builder::PostGetQueueInfo::new(self) } + /**Sends a `POST` request to `/getQueueLeafIndices` + +```ignore +let response = client.post_get_queue_leaf_indices() + .body(body) + .send() 
+ .await; +```*/ + pub fn post_get_queue_leaf_indices(&self) -> builder::PostGetQueueLeafIndices<'_> { + builder::PostGetQueueLeafIndices::new(self) + } /**Sends a `POST` request to `/getTransactionWithCompressionInfo` ```ignore @@ -50447,15 +51884,221 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_account_proof`] + /**Builder for [`Client::post_get_compressed_account_proof`] + +[`Client::post_get_compressed_account_proof`]: super::Client::post_get_compressed_account_proof*/ + #[derive(Debug, Clone)] + pub struct PostGetCompressedAccountProof<'a> { + client: &'a super::Client, + body: Result, + } + impl<'a> PostGetCompressedAccountProof<'a> { + pub fn new(client: &'a super::Client) -> Self { + Self { + client: client, + body: Ok(::std::default::Default::default()), + } + } + pub fn body(mut self, value: V) -> Self + where + V: std::convert::TryInto, + >::Error: std::fmt::Display, + { + self.body = value + .try_into() + .map(From::from) + .map_err(|s| { + format!( + "conversion to `PostGetCompressedAccountProofBody` for body failed: {}", + s + ) + }); + self + } + pub fn body_map(mut self, f: F) -> Self + where + F: std::ops::FnOnce( + types::builder::PostGetCompressedAccountProofBody, + ) -> types::builder::PostGetCompressedAccountProofBody, + { + self.body = self.body.map(f); + self + } + ///Sends a `POST` request to `/getCompressedAccountProof` + pub async fn send( + self, + ) -> Result< + ResponseValue, + Error, + > { + let Self { client, body } = self; + let body = body + .and_then(|v| { + types::PostGetCompressedAccountProofBody::try_from(v) + .map_err(|e| e.to_string()) + }) + .map_err(Error::InvalidRequest)?; + let url = format!("{}/getCompressedAccountProof", client.baseurl,); + let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); + header_map + .append( + ::reqwest::header::HeaderName::from_static("api-version"), + ::reqwest::header::HeaderValue::from_static( + super::Client::api_version(), + ), + ); + 
#[allow(unused_mut)] + let mut request = client + .client + .post(url) + .header( + ::reqwest::header::ACCEPT, + ::reqwest::header::HeaderValue::from_static("application/json"), + ) + .json(&body) + .headers(header_map) + .build()?; + let info = OperationInfo { + operation_id: "post_get_compressed_account_proof", + }; + client.pre(&mut request, &info).await?; + let result = client.exec(request, &info).await; + client.post(&result, &info).await?; + let response = result?; + match response.status().as_u16() { + 200u16 => ResponseValue::from_response(response).await, + 429u16 => { + Err( + Error::ErrorResponse( + ResponseValue::from_response(response).await?, + ), + ) + } + 500u16 => { + Err( + Error::ErrorResponse( + ResponseValue::from_response(response).await?, + ), + ) + } + _ => Err(Error::UnexpectedResponse(response)), + } + } + } + /**Builder for [`Client::post_get_compressed_account_proof_v2`] + +[`Client::post_get_compressed_account_proof_v2`]: super::Client::post_get_compressed_account_proof_v2*/ + #[derive(Debug, Clone)] + pub struct PostGetCompressedAccountProofV2<'a> { + client: &'a super::Client, + body: Result, + } + impl<'a> PostGetCompressedAccountProofV2<'a> { + pub fn new(client: &'a super::Client) -> Self { + Self { + client: client, + body: Ok(::std::default::Default::default()), + } + } + pub fn body(mut self, value: V) -> Self + where + V: std::convert::TryInto, + >::Error: std::fmt::Display, + { + self.body = value + .try_into() + .map(From::from) + .map_err(|s| { + format!( + "conversion to `PostGetCompressedAccountProofV2Body` for body failed: {}", + s + ) + }); + self + } + pub fn body_map(mut self, f: F) -> Self + where + F: std::ops::FnOnce( + types::builder::PostGetCompressedAccountProofV2Body, + ) -> types::builder::PostGetCompressedAccountProofV2Body, + { + self.body = self.body.map(f); + self + } + ///Sends a `POST` request to `/getCompressedAccountProofV2` + pub async fn send( + self, + ) -> Result< + ResponseValue, + Error, + > { + 
let Self { client, body } = self; + let body = body + .and_then(|v| { + types::PostGetCompressedAccountProofV2Body::try_from(v) + .map_err(|e| e.to_string()) + }) + .map_err(Error::InvalidRequest)?; + let url = format!("{}/getCompressedAccountProofV2", client.baseurl,); + let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); + header_map + .append( + ::reqwest::header::HeaderName::from_static("api-version"), + ::reqwest::header::HeaderValue::from_static( + super::Client::api_version(), + ), + ); + #[allow(unused_mut)] + let mut request = client + .client + .post(url) + .header( + ::reqwest::header::ACCEPT, + ::reqwest::header::HeaderValue::from_static("application/json"), + ) + .json(&body) + .headers(header_map) + .build()?; + let info = OperationInfo { + operation_id: "post_get_compressed_account_proof_v2", + }; + client.pre(&mut request, &info).await?; + let result = client.exec(request, &info).await; + client.post(&result, &info).await?; + let response = result?; + match response.status().as_u16() { + 200u16 => ResponseValue::from_response(response).await, + 429u16 => { + Err( + Error::ErrorResponse( + ResponseValue::from_response(response).await?, + ), + ) + } + 500u16 => { + Err( + Error::ErrorResponse( + ResponseValue::from_response(response).await?, + ), + ) + } + _ => Err(Error::UnexpectedResponse(response)), + } + } + } + /**Builder for [`Client::post_get_compressed_account_v2`] -[`Client::post_get_compressed_account_proof`]: super::Client::post_get_compressed_account_proof*/ +[`Client::post_get_compressed_account_v2`]: super::Client::post_get_compressed_account_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedAccountProof<'a> { + pub struct PostGetCompressedAccountV2<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountProof<'a> { + impl<'a> PostGetCompressedAccountV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50464,9 +52107,9 @@ pub mod 
builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50474,7 +52117,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedAccountProofBody` for body failed: {}", + "conversion to `PostGetCompressedAccountV2Body` for body failed: {}", s ) }); @@ -50483,27 +52126,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountProofBody, - ) -> types::builder::PostGetCompressedAccountProofBody, + types::builder::PostGetCompressedAccountV2Body, + ) -> types::builder::PostGetCompressedAccountV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountProof` + ///Sends a `POST` request to `/getCompressedAccountV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountProofBody::try_from(v) + types::PostGetCompressedAccountV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountProof", client.baseurl,); + let url = format!("{}/getCompressedAccountV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50524,7 +52167,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_account_proof", + operation_id: "post_get_compressed_account_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50550,15 +52193,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_account_proof_v2`] + /**Builder for [`Client::post_get_compressed_accounts_by_owner`] -[`Client::post_get_compressed_account_proof_v2`]: 
super::Client::post_get_compressed_account_proof_v2*/ +[`Client::post_get_compressed_accounts_by_owner`]: super::Client::post_get_compressed_accounts_by_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedAccountProofV2<'a> { + pub struct PostGetCompressedAccountsByOwner<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountProofV2<'a> { + impl<'a> PostGetCompressedAccountsByOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50567,9 +52210,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50577,7 +52220,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedAccountProofV2Body` for body failed: {}", + "conversion to `PostGetCompressedAccountsByOwnerBody` for body failed: {}", s ) }); @@ -50586,27 +52229,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountProofV2Body, - ) -> types::builder::PostGetCompressedAccountProofV2Body, + types::builder::PostGetCompressedAccountsByOwnerBody, + ) -> types::builder::PostGetCompressedAccountsByOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountProofV2` + ///Sends a `POST` request to `/getCompressedAccountsByOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountProofV2Body::try_from(v) + types::PostGetCompressedAccountsByOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountProofV2", client.baseurl,); + let url = format!("{}/getCompressedAccountsByOwner", client.baseurl,); let mut header_map = 
::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50627,7 +52270,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_account_proof_v2", + operation_id: "post_get_compressed_accounts_by_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50653,15 +52296,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_account_v2`] + /**Builder for [`Client::post_get_compressed_accounts_by_owner_v2`] -[`Client::post_get_compressed_account_v2`]: super::Client::post_get_compressed_account_v2*/ +[`Client::post_get_compressed_accounts_by_owner_v2`]: super::Client::post_get_compressed_accounts_by_owner_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedAccountV2<'a> { + pub struct PostGetCompressedAccountsByOwnerV2<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountV2<'a> { + impl<'a> PostGetCompressedAccountsByOwnerV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50670,9 +52313,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50680,7 +52323,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedAccountV2Body` for body failed: {}", + "conversion to `PostGetCompressedAccountsByOwnerV2Body` for body failed: {}", s ) }); @@ -50689,27 +52332,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountV2Body, - ) -> types::builder::PostGetCompressedAccountV2Body, + types::builder::PostGetCompressedAccountsByOwnerV2Body, + ) -> types::builder::PostGetCompressedAccountsByOwnerV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountV2` + ///Sends a 
`POST` request to `/getCompressedAccountsByOwnerV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountV2Body::try_from(v) + types::PostGetCompressedAccountsByOwnerV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountV2", client.baseurl,); + let url = format!("{}/getCompressedAccountsByOwnerV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50730,7 +52373,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_account_v2", + operation_id: "post_get_compressed_accounts_by_owner_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50756,15 +52399,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_accounts_by_owner`] + /**Builder for [`Client::post_get_compressed_balance_by_owner`] -[`Client::post_get_compressed_accounts_by_owner`]: super::Client::post_get_compressed_accounts_by_owner*/ +[`Client::post_get_compressed_balance_by_owner`]: super::Client::post_get_compressed_balance_by_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedAccountsByOwner<'a> { + pub struct PostGetCompressedBalanceByOwner<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountsByOwner<'a> { + impl<'a> PostGetCompressedBalanceByOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50773,9 +52416,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50783,7 +52426,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to 
`PostGetCompressedAccountsByOwnerBody` for body failed: {}", + "conversion to `PostGetCompressedBalanceByOwnerBody` for body failed: {}", s ) }); @@ -50792,27 +52435,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountsByOwnerBody, - ) -> types::builder::PostGetCompressedAccountsByOwnerBody, + types::builder::PostGetCompressedBalanceByOwnerBody, + ) -> types::builder::PostGetCompressedBalanceByOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountsByOwner` + ///Sends a `POST` request to `/getCompressedBalanceByOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountsByOwnerBody::try_from(v) + types::PostGetCompressedBalanceByOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountsByOwner", client.baseurl,); + let url = format!("{}/getCompressedBalanceByOwner", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50833,7 +52476,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_accounts_by_owner", + operation_id: "post_get_compressed_balance_by_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50859,15 +52502,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_accounts_by_owner_v2`] + /**Builder for [`Client::post_get_compressed_mint_token_holders`] -[`Client::post_get_compressed_accounts_by_owner_v2`]: super::Client::post_get_compressed_accounts_by_owner_v2*/ +[`Client::post_get_compressed_mint_token_holders`]: super::Client::post_get_compressed_mint_token_holders*/ #[derive(Debug, Clone)] - pub struct 
PostGetCompressedAccountsByOwnerV2<'a> { + pub struct PostGetCompressedMintTokenHolders<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountsByOwnerV2<'a> { + impl<'a> PostGetCompressedMintTokenHolders<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50876,9 +52519,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50886,7 +52529,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedAccountsByOwnerV2Body` for body failed: {}", + "conversion to `PostGetCompressedMintTokenHoldersBody` for body failed: {}", s ) }); @@ -50895,27 +52538,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountsByOwnerV2Body, - ) -> types::builder::PostGetCompressedAccountsByOwnerV2Body, + types::builder::PostGetCompressedMintTokenHoldersBody, + ) -> types::builder::PostGetCompressedMintTokenHoldersBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountsByOwnerV2` + ///Sends a `POST` request to `/getCompressedMintTokenHolders` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountsByOwnerV2Body::try_from(v) + types::PostGetCompressedMintTokenHoldersBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountsByOwnerV2", client.baseurl,); + let url = format!("{}/getCompressedMintTokenHolders", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50936,7 +52579,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: 
"post_get_compressed_accounts_by_owner_v2", + operation_id: "post_get_compressed_mint_token_holders", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50962,15 +52605,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_balance_by_owner`] + /**Builder for [`Client::post_get_compressed_token_account_balance`] -[`Client::post_get_compressed_balance_by_owner`]: super::Client::post_get_compressed_balance_by_owner*/ +[`Client::post_get_compressed_token_account_balance`]: super::Client::post_get_compressed_token_account_balance*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedBalanceByOwner<'a> { + pub struct PostGetCompressedTokenAccountBalance<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedBalanceByOwner<'a> { + impl<'a> PostGetCompressedTokenAccountBalance<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50979,9 +52622,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50989,7 +52632,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedBalanceByOwnerBody` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountBalanceBody` for body failed: {}", s ) }); @@ -50998,27 +52641,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedBalanceByOwnerBody, - ) -> types::builder::PostGetCompressedBalanceByOwnerBody, + types::builder::PostGetCompressedTokenAccountBalanceBody, + ) -> types::builder::PostGetCompressedTokenAccountBalanceBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedBalanceByOwner` + ///Sends a `POST` request to `/getCompressedTokenAccountBalance` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + 
ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedBalanceByOwnerBody::try_from(v) + types::PostGetCompressedTokenAccountBalanceBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedBalanceByOwner", client.baseurl,); + let url = format!("{}/getCompressedTokenAccountBalance", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51039,7 +52682,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_balance_by_owner", + operation_id: "post_get_compressed_token_account_balance", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51065,15 +52708,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_mint_token_holders`] + /**Builder for [`Client::post_get_compressed_token_accounts_by_delegate`] -[`Client::post_get_compressed_mint_token_holders`]: super::Client::post_get_compressed_mint_token_holders*/ +[`Client::post_get_compressed_token_accounts_by_delegate`]: super::Client::post_get_compressed_token_accounts_by_delegate*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedMintTokenHolders<'a> { + pub struct PostGetCompressedTokenAccountsByDelegate<'a> { client: &'a super::Client, - body: Result, + body: Result< + types::builder::PostGetCompressedTokenAccountsByDelegateBody, + String, + >, } - impl<'a> PostGetCompressedMintTokenHolders<'a> { + impl<'a> PostGetCompressedTokenAccountsByDelegate<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51082,9 +52728,11 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto< + types::PostGetCompressedTokenAccountsByDelegateBody, + >, >::Error: std::fmt::Display, { self.body = value @@ -51092,7 +52740,7 @@ 
pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedMintTokenHoldersBody` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountsByDelegateBody` for body failed: {}", s ) }); @@ -51101,27 +52749,29 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedMintTokenHoldersBody, - ) -> types::builder::PostGetCompressedMintTokenHoldersBody, + types::builder::PostGetCompressedTokenAccountsByDelegateBody, + ) -> types::builder::PostGetCompressedTokenAccountsByDelegateBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedMintTokenHolders` + ///Sends a `POST` request to `/getCompressedTokenAccountsByDelegate` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedMintTokenHoldersBody::try_from(v) + types::PostGetCompressedTokenAccountsByDelegateBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedMintTokenHolders", client.baseurl,); + let url = format!( + "{}/getCompressedTokenAccountsByDelegate", client.baseurl, + ); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51142,7 +52792,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_mint_token_holders", + operation_id: "post_get_compressed_token_accounts_by_delegate", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51168,15 +52818,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_account_balance`] + /**Builder for [`Client::post_get_compressed_token_accounts_by_delegate_v2`] -[`Client::post_get_compressed_token_account_balance`]: 
super::Client::post_get_compressed_token_account_balance*/ +[`Client::post_get_compressed_token_accounts_by_delegate_v2`]: super::Client::post_get_compressed_token_accounts_by_delegate_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountBalance<'a> { + pub struct PostGetCompressedTokenAccountsByDelegateV2<'a> { client: &'a super::Client, - body: Result, + body: Result< + types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, + String, + >, } - impl<'a> PostGetCompressedTokenAccountBalance<'a> { + impl<'a> PostGetCompressedTokenAccountsByDelegateV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51185,9 +52838,11 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto< + types::PostGetCompressedTokenAccountsByDelegateV2Body, + >, >::Error: std::fmt::Display, { self.body = value @@ -51195,7 +52850,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenAccountBalanceBody` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountsByDelegateV2Body` for body failed: {}", s ) }); @@ -51204,27 +52859,29 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountBalanceBody, - ) -> types::builder::PostGetCompressedTokenAccountBalanceBody, + types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, + ) -> types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountBalance` + ///Sends a `POST` request to `/getCompressedTokenAccountsByDelegateV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountBalanceBody::try_from(v) + 
types::PostGetCompressedTokenAccountsByDelegateV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenAccountBalance", client.baseurl,); + let url = format!( + "{}/getCompressedTokenAccountsByDelegateV2", client.baseurl, + ); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51245,7 +52902,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_account_balance", + operation_id: "post_get_compressed_token_accounts_by_delegate_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51271,18 +52928,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_accounts_by_delegate`] + /**Builder for [`Client::post_get_compressed_token_accounts_by_owner`] -[`Client::post_get_compressed_token_accounts_by_delegate`]: super::Client::post_get_compressed_token_accounts_by_delegate*/ +[`Client::post_get_compressed_token_accounts_by_owner`]: super::Client::post_get_compressed_token_accounts_by_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountsByDelegate<'a> { + pub struct PostGetCompressedTokenAccountsByOwner<'a> { client: &'a super::Client, - body: Result< - types::builder::PostGetCompressedTokenAccountsByDelegateBody, - String, - >, + body: Result, } - impl<'a> PostGetCompressedTokenAccountsByDelegate<'a> { + impl<'a> PostGetCompressedTokenAccountsByOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51291,11 +52945,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto< - types::PostGetCompressedTokenAccountsByDelegateBody, - >, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51303,7 +52955,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to 
`PostGetCompressedTokenAccountsByDelegateBody` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountsByOwnerBody` for body failed: {}", s ) }); @@ -51312,29 +52964,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountsByDelegateBody, - ) -> types::builder::PostGetCompressedTokenAccountsByDelegateBody, + types::builder::PostGetCompressedTokenAccountsByOwnerBody, + ) -> types::builder::PostGetCompressedTokenAccountsByOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountsByDelegate` + ///Sends a `POST` request to `/getCompressedTokenAccountsByOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountsByDelegateBody::try_from(v) + types::PostGetCompressedTokenAccountsByOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!( - "{}/getCompressedTokenAccountsByDelegate", client.baseurl, - ); + let url = format!("{}/getCompressedTokenAccountsByOwner", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51355,7 +53005,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_accounts_by_delegate", + operation_id: "post_get_compressed_token_accounts_by_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51381,18 +53031,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_accounts_by_delegate_v2`] + /**Builder for [`Client::post_get_compressed_token_accounts_by_owner_v2`] -[`Client::post_get_compressed_token_accounts_by_delegate_v2`]: super::Client::post_get_compressed_token_accounts_by_delegate_v2*/ 
+[`Client::post_get_compressed_token_accounts_by_owner_v2`]: super::Client::post_get_compressed_token_accounts_by_owner_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountsByDelegateV2<'a> { + pub struct PostGetCompressedTokenAccountsByOwnerV2<'a> { client: &'a super::Client, body: Result< - types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, + types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, String, >, } - impl<'a> PostGetCompressedTokenAccountsByDelegateV2<'a> { + impl<'a> PostGetCompressedTokenAccountsByOwnerV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51401,11 +53051,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto< - types::PostGetCompressedTokenAccountsByDelegateV2Body, - >, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51413,7 +53061,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenAccountsByDelegateV2Body` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountsByOwnerV2Body` for body failed: {}", s ) }); @@ -51422,29 +53070,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, - ) -> types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, + types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, + ) -> types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountsByDelegateV2` + ///Sends a `POST` request to `/getCompressedTokenAccountsByOwnerV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountsByDelegateV2Body::try_from(v) + 
types::PostGetCompressedTokenAccountsByOwnerV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!( - "{}/getCompressedTokenAccountsByDelegateV2", client.baseurl, - ); + let url = format!("{}/getCompressedTokenAccountsByOwnerV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51465,7 +53111,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_accounts_by_delegate_v2", + operation_id: "post_get_compressed_token_accounts_by_owner_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51491,15 +53137,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_accounts_by_owner`] + /**Builder for [`Client::post_get_compressed_token_balances_by_owner`] -[`Client::post_get_compressed_token_accounts_by_owner`]: super::Client::post_get_compressed_token_accounts_by_owner*/ +[`Client::post_get_compressed_token_balances_by_owner`]: super::Client::post_get_compressed_token_balances_by_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountsByOwner<'a> { + pub struct PostGetCompressedTokenBalancesByOwner<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedTokenAccountsByOwner<'a> { + impl<'a> PostGetCompressedTokenBalancesByOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51508,9 +53154,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51518,7 +53164,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenAccountsByOwnerBody` for body failed: {}", + "conversion to `PostGetCompressedTokenBalancesByOwnerBody` for body failed: {}", s ) }); @@ -51527,27 
+53173,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountsByOwnerBody, - ) -> types::builder::PostGetCompressedTokenAccountsByOwnerBody, + types::builder::PostGetCompressedTokenBalancesByOwnerBody, + ) -> types::builder::PostGetCompressedTokenBalancesByOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountsByOwner` + ///Sends a `POST` request to `/getCompressedTokenBalancesByOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountsByOwnerBody::try_from(v) + types::PostGetCompressedTokenBalancesByOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenAccountsByOwner", client.baseurl,); + let url = format!("{}/getCompressedTokenBalancesByOwner", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51568,7 +53214,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_accounts_by_owner", + operation_id: "post_get_compressed_token_balances_by_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51594,18 +53240,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_accounts_by_owner_v2`] + /**Builder for [`Client::post_get_compressed_token_balances_by_owner_v2`] -[`Client::post_get_compressed_token_accounts_by_owner_v2`]: super::Client::post_get_compressed_token_accounts_by_owner_v2*/ +[`Client::post_get_compressed_token_balances_by_owner_v2`]: super::Client::post_get_compressed_token_balances_by_owner_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountsByOwnerV2<'a> { + pub struct 
PostGetCompressedTokenBalancesByOwnerV2<'a> { client: &'a super::Client, body: Result< - types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, + types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, String, >, } - impl<'a> PostGetCompressedTokenAccountsByOwnerV2<'a> { + impl<'a> PostGetCompressedTokenBalancesByOwnerV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51614,9 +53260,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51624,7 +53270,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenAccountsByOwnerV2Body` for body failed: {}", + "conversion to `PostGetCompressedTokenBalancesByOwnerV2Body` for body failed: {}", s ) }); @@ -51633,27 +53279,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, - ) -> types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, + types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, + ) -> types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountsByOwnerV2` + ///Sends a `POST` request to `/getCompressedTokenBalancesByOwnerV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountsByOwnerV2Body::try_from(v) + types::PostGetCompressedTokenBalancesByOwnerV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenAccountsByOwnerV2", client.baseurl,); + let url = format!("{}/getCompressedTokenBalancesByOwnerV2", client.baseurl,); let mut header_map = 
::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51674,7 +53320,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_accounts_by_owner_v2", + operation_id: "post_get_compressed_token_balances_by_owner_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51700,15 +53346,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_balances_by_owner`] + /**Builder for [`Client::post_get_compression_signatures_for_account`] -[`Client::post_get_compressed_token_balances_by_owner`]: super::Client::post_get_compressed_token_balances_by_owner*/ +[`Client::post_get_compression_signatures_for_account`]: super::Client::post_get_compression_signatures_for_account*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenBalancesByOwner<'a> { + pub struct PostGetCompressionSignaturesForAccount<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedTokenBalancesByOwner<'a> { + impl<'a> PostGetCompressionSignaturesForAccount<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51717,9 +53363,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51727,7 +53373,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenBalancesByOwnerBody` for body failed: {}", + "conversion to `PostGetCompressionSignaturesForAccountBody` for body failed: {}", s ) }); @@ -51736,27 +53382,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenBalancesByOwnerBody, - ) -> types::builder::PostGetCompressedTokenBalancesByOwnerBody, + types::builder::PostGetCompressionSignaturesForAccountBody, + ) -> 
types::builder::PostGetCompressionSignaturesForAccountBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenBalancesByOwner` + ///Sends a `POST` request to `/getCompressionSignaturesForAccount` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenBalancesByOwnerBody::try_from(v) + types::PostGetCompressionSignaturesForAccountBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenBalancesByOwner", client.baseurl,); + let url = format!("{}/getCompressionSignaturesForAccount", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51777,7 +53423,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_balances_by_owner", + operation_id: "post_get_compression_signatures_for_account", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51803,18 +53449,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_balances_by_owner_v2`] + /**Builder for [`Client::post_get_compression_signatures_for_address`] -[`Client::post_get_compressed_token_balances_by_owner_v2`]: super::Client::post_get_compressed_token_balances_by_owner_v2*/ +[`Client::post_get_compression_signatures_for_address`]: super::Client::post_get_compression_signatures_for_address*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenBalancesByOwnerV2<'a> { + pub struct PostGetCompressionSignaturesForAddress<'a> { client: &'a super::Client, - body: Result< - types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, - String, - >, + body: Result, } - impl<'a> PostGetCompressedTokenBalancesByOwnerV2<'a> { + impl<'a> PostGetCompressionSignaturesForAddress<'a> { pub 
fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51823,9 +53466,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51833,7 +53476,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenBalancesByOwnerV2Body` for body failed: {}", + "conversion to `PostGetCompressionSignaturesForAddressBody` for body failed: {}", s ) }); @@ -51842,27 +53485,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, - ) -> types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, + types::builder::PostGetCompressionSignaturesForAddressBody, + ) -> types::builder::PostGetCompressionSignaturesForAddressBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenBalancesByOwnerV2` + ///Sends a `POST` request to `/getCompressionSignaturesForAddress` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenBalancesByOwnerV2Body::try_from(v) + types::PostGetCompressionSignaturesForAddressBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenBalancesByOwnerV2", client.baseurl,); + let url = format!("{}/getCompressionSignaturesForAddress", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51883,7 +53526,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_balances_by_owner_v2", + operation_id: "post_get_compression_signatures_for_address", }; client.pre(&mut request, &info).await?; let result = client.exec(request, 
&info).await; @@ -51909,15 +53552,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compression_signatures_for_account`] + /**Builder for [`Client::post_get_compression_signatures_for_owner`] -[`Client::post_get_compression_signatures_for_account`]: super::Client::post_get_compression_signatures_for_account*/ +[`Client::post_get_compression_signatures_for_owner`]: super::Client::post_get_compression_signatures_for_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressionSignaturesForAccount<'a> { + pub struct PostGetCompressionSignaturesForOwner<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressionSignaturesForAccount<'a> { + impl<'a> PostGetCompressionSignaturesForOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51926,9 +53569,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51936,7 +53579,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressionSignaturesForAccountBody` for body failed: {}", + "conversion to `PostGetCompressionSignaturesForOwnerBody` for body failed: {}", s ) }); @@ -51945,27 +53588,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressionSignaturesForAccountBody, - ) -> types::builder::PostGetCompressionSignaturesForAccountBody, + types::builder::PostGetCompressionSignaturesForOwnerBody, + ) -> types::builder::PostGetCompressionSignaturesForOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressionSignaturesForAccount` + ///Sends a `POST` request to `/getCompressionSignaturesForOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - 
types::PostGetCompressionSignaturesForAccountBody::try_from(v) + types::PostGetCompressionSignaturesForOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressionSignaturesForAccount", client.baseurl,); + let url = format!("{}/getCompressionSignaturesForOwner", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51986,7 +53629,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compression_signatures_for_account", + operation_id: "post_get_compression_signatures_for_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52012,15 +53655,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compression_signatures_for_address`] + /**Builder for [`Client::post_get_compression_signatures_for_token_owner`] -[`Client::post_get_compression_signatures_for_address`]: super::Client::post_get_compression_signatures_for_address*/ +[`Client::post_get_compression_signatures_for_token_owner`]: super::Client::post_get_compression_signatures_for_token_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressionSignaturesForAddress<'a> { + pub struct PostGetCompressionSignaturesForTokenOwner<'a> { client: &'a super::Client, - body: Result, + body: Result< + types::builder::PostGetCompressionSignaturesForTokenOwnerBody, + String, + >, } - impl<'a> PostGetCompressionSignaturesForAddress<'a> { + impl<'a> PostGetCompressionSignaturesForTokenOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52029,9 +53675,11 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto< + types::PostGetCompressionSignaturesForTokenOwnerBody, + >, >::Error: std::fmt::Display, { self.body = value @@ -52039,7 +53687,7 @@ pub mod builder { .map(From::from) 
.map_err(|s| { format!( - "conversion to `PostGetCompressionSignaturesForAddressBody` for body failed: {}", + "conversion to `PostGetCompressionSignaturesForTokenOwnerBody` for body failed: {}", s ) }); @@ -52048,27 +53696,29 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressionSignaturesForAddressBody, - ) -> types::builder::PostGetCompressionSignaturesForAddressBody, + types::builder::PostGetCompressionSignaturesForTokenOwnerBody, + ) -> types::builder::PostGetCompressionSignaturesForTokenOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressionSignaturesForAddress` + ///Sends a `POST` request to `/getCompressionSignaturesForTokenOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressionSignaturesForAddressBody::try_from(v) + types::PostGetCompressionSignaturesForTokenOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressionSignaturesForAddress", client.baseurl,); + let url = format!( + "{}/getCompressionSignaturesForTokenOwner", client.baseurl, + ); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52089,7 +53739,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compression_signatures_for_address", + operation_id: "post_get_compression_signatures_for_token_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52115,15 +53765,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compression_signatures_for_owner`] + /**Builder for [`Client::post_get_indexer_health`] -[`Client::post_get_compression_signatures_for_owner`]: super::Client::post_get_compression_signatures_for_owner*/ 
+[`Client::post_get_indexer_health`]: super::Client::post_get_indexer_health*/ #[derive(Debug, Clone)] - pub struct PostGetCompressionSignaturesForOwner<'a> { + pub struct PostGetIndexerHealth<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressionSignaturesForOwner<'a> { + impl<'a> PostGetIndexerHealth<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52132,9 +53782,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52142,8 +53792,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressionSignaturesForOwnerBody` for body failed: {}", - s + "conversion to `PostGetIndexerHealthBody` for body failed: {}", s ) }); self @@ -52151,27 +53800,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressionSignaturesForOwnerBody, - ) -> types::builder::PostGetCompressionSignaturesForOwnerBody, + types::builder::PostGetIndexerHealthBody, + ) -> types::builder::PostGetIndexerHealthBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressionSignaturesForOwner` + ///Sends a `POST` request to `/getIndexerHealth` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressionSignaturesForOwnerBody::try_from(v) + types::PostGetIndexerHealthBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressionSignaturesForOwner", client.baseurl,); + let url = format!("{}/getIndexerHealth", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52192,7 +53841,7 @@ pub mod builder { .headers(header_map) .build()?; let 
info = OperationInfo { - operation_id: "post_get_compression_signatures_for_owner", + operation_id: "post_get_indexer_health", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52218,18 +53867,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compression_signatures_for_token_owner`] + /**Builder for [`Client::post_get_indexer_slot`] -[`Client::post_get_compression_signatures_for_token_owner`]: super::Client::post_get_compression_signatures_for_token_owner*/ +[`Client::post_get_indexer_slot`]: super::Client::post_get_indexer_slot*/ #[derive(Debug, Clone)] - pub struct PostGetCompressionSignaturesForTokenOwner<'a> { + pub struct PostGetIndexerSlot<'a> { client: &'a super::Client, - body: Result< - types::builder::PostGetCompressionSignaturesForTokenOwnerBody, - String, - >, + body: Result, } - impl<'a> PostGetCompressionSignaturesForTokenOwner<'a> { + impl<'a> PostGetIndexerSlot<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52238,11 +53884,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto< - types::PostGetCompressionSignaturesForTokenOwnerBody, - >, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52250,8 +53894,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressionSignaturesForTokenOwnerBody` for body failed: {}", - s + "conversion to `PostGetIndexerSlotBody` for body failed: {}", s ) }); self @@ -52259,29 +53902,26 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressionSignaturesForTokenOwnerBody, - ) -> types::builder::PostGetCompressionSignaturesForTokenOwnerBody, + types::builder::PostGetIndexerSlotBody, + ) -> types::builder::PostGetIndexerSlotBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressionSignaturesForTokenOwner` + ///Sends a 
`POST` request to `/getIndexerSlot` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressionSignaturesForTokenOwnerBody::try_from(v) - .map_err(|e| e.to_string()) + types::PostGetIndexerSlotBody::try_from(v).map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!( - "{}/getCompressionSignaturesForTokenOwner", client.baseurl, - ); + let url = format!("{}/getIndexerSlot", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52302,7 +53942,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compression_signatures_for_token_owner", + operation_id: "post_get_indexer_slot", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52328,15 +53968,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_indexer_health`] + /**Builder for [`Client::post_get_latest_compression_signatures`] -[`Client::post_get_indexer_health`]: super::Client::post_get_indexer_health*/ +[`Client::post_get_latest_compression_signatures`]: super::Client::post_get_latest_compression_signatures*/ #[derive(Debug, Clone)] - pub struct PostGetIndexerHealth<'a> { + pub struct PostGetLatestCompressionSignatures<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetIndexerHealth<'a> { + impl<'a> PostGetLatestCompressionSignatures<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52345,9 +53985,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52355,7 +53995,8 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetIndexerHealthBody` for body failed: {}", s + 
"conversion to `PostGetLatestCompressionSignaturesBody` for body failed: {}", + s ) }); self @@ -52363,27 +54004,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetIndexerHealthBody, - ) -> types::builder::PostGetIndexerHealthBody, + types::builder::PostGetLatestCompressionSignaturesBody, + ) -> types::builder::PostGetLatestCompressionSignaturesBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getIndexerHealth` + ///Sends a `POST` request to `/getLatestCompressionSignatures` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetIndexerHealthBody::try_from(v) + types::PostGetLatestCompressionSignaturesBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getIndexerHealth", client.baseurl,); + let url = format!("{}/getLatestCompressionSignatures", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52404,7 +54045,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_indexer_health", + operation_id: "post_get_latest_compression_signatures", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52430,15 +54071,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_indexer_slot`] + /**Builder for [`Client::post_get_latest_non_voting_signatures`] -[`Client::post_get_indexer_slot`]: super::Client::post_get_indexer_slot*/ +[`Client::post_get_latest_non_voting_signatures`]: super::Client::post_get_latest_non_voting_signatures*/ #[derive(Debug, Clone)] - pub struct PostGetIndexerSlot<'a> { + pub struct PostGetLatestNonVotingSignatures<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetIndexerSlot<'a> { + 
impl<'a> PostGetLatestNonVotingSignatures<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52447,9 +54088,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52457,7 +54098,8 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetIndexerSlotBody` for body failed: {}", s + "conversion to `PostGetLatestNonVotingSignaturesBody` for body failed: {}", + s ) }); self @@ -52465,26 +54107,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetIndexerSlotBody, - ) -> types::builder::PostGetIndexerSlotBody, + types::builder::PostGetLatestNonVotingSignaturesBody, + ) -> types::builder::PostGetLatestNonVotingSignaturesBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getIndexerSlot` + ///Sends a `POST` request to `/getLatestNonVotingSignatures` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetIndexerSlotBody::try_from(v).map_err(|e| e.to_string()) + types::PostGetLatestNonVotingSignaturesBody::try_from(v) + .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getIndexerSlot", client.baseurl,); + let url = format!("{}/getLatestNonVotingSignatures", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52505,7 +54148,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_indexer_slot", + operation_id: "post_get_latest_non_voting_signatures", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52531,15 +54174,15 @@ pub mod builder { } } } - /**Builder for 
[`Client::post_get_latest_compression_signatures`] + /**Builder for [`Client::post_get_multiple_account_interfaces`] -[`Client::post_get_latest_compression_signatures`]: super::Client::post_get_latest_compression_signatures*/ +[`Client::post_get_multiple_account_interfaces`]: super::Client::post_get_multiple_account_interfaces*/ #[derive(Debug, Clone)] - pub struct PostGetLatestCompressionSignatures<'a> { + pub struct PostGetMultipleAccountInterfaces<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetLatestCompressionSignatures<'a> { + impl<'a> PostGetMultipleAccountInterfaces<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52548,9 +54191,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52558,7 +54201,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetLatestCompressionSignaturesBody` for body failed: {}", + "conversion to `PostGetMultipleAccountInterfacesBody` for body failed: {}", s ) }); @@ -52567,27 +54210,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetLatestCompressionSignaturesBody, - ) -> types::builder::PostGetLatestCompressionSignaturesBody, + types::builder::PostGetMultipleAccountInterfacesBody, + ) -> types::builder::PostGetMultipleAccountInterfacesBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getLatestCompressionSignatures` + ///Sends a `POST` request to `/getMultipleAccountInterfaces` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetLatestCompressionSignaturesBody::try_from(v) + types::PostGetMultipleAccountInterfacesBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; 
- let url = format!("{}/getLatestCompressionSignatures", client.baseurl,); + let url = format!("{}/getMultipleAccountInterfaces", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52608,7 +54251,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_latest_compression_signatures", + operation_id: "post_get_multiple_account_interfaces", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52634,15 +54277,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_latest_non_voting_signatures`] + /**Builder for [`Client::post_get_multiple_compressed_account_proofs`] -[`Client::post_get_latest_non_voting_signatures`]: super::Client::post_get_latest_non_voting_signatures*/ +[`Client::post_get_multiple_compressed_account_proofs`]: super::Client::post_get_multiple_compressed_account_proofs*/ #[derive(Debug, Clone)] - pub struct PostGetLatestNonVotingSignatures<'a> { + pub struct PostGetMultipleCompressedAccountProofs<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetLatestNonVotingSignatures<'a> { + impl<'a> PostGetMultipleCompressedAccountProofs<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52651,9 +54294,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52661,7 +54304,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetLatestNonVotingSignaturesBody` for body failed: {}", + "conversion to `PostGetMultipleCompressedAccountProofsBody` for body failed: {}", s ) }); @@ -52670,27 +54313,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetLatestNonVotingSignaturesBody, - ) -> 
types::builder::PostGetLatestNonVotingSignaturesBody, + types::builder::PostGetMultipleCompressedAccountProofsBody, + ) -> types::builder::PostGetMultipleCompressedAccountProofsBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getLatestNonVotingSignatures` + ///Sends a `POST` request to `/getMultipleCompressedAccountProofs` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetLatestNonVotingSignaturesBody::try_from(v) + types::PostGetMultipleCompressedAccountProofsBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getLatestNonVotingSignatures", client.baseurl,); + let url = format!("{}/getMultipleCompressedAccountProofs", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52711,7 +54354,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_latest_non_voting_signatures", + operation_id: "post_get_multiple_compressed_account_proofs", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52737,15 +54380,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_account_interfaces`] + /**Builder for [`Client::post_get_multiple_compressed_account_proofs_v2`] -[`Client::post_get_multiple_account_interfaces`]: super::Client::post_get_multiple_account_interfaces*/ +[`Client::post_get_multiple_compressed_account_proofs_v2`]: super::Client::post_get_multiple_compressed_account_proofs_v2*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleAccountInterfaces<'a> { + pub struct PostGetMultipleCompressedAccountProofsV2<'a> { client: &'a super::Client, - body: Result, + body: Result< + types::builder::PostGetMultipleCompressedAccountProofsV2Body, + String, + >, } - impl<'a> 
PostGetMultipleAccountInterfaces<'a> { + impl<'a> PostGetMultipleCompressedAccountProofsV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52754,9 +54400,11 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto< + types::PostGetMultipleCompressedAccountProofsV2Body, + >, >::Error: std::fmt::Display, { self.body = value @@ -52764,7 +54412,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleAccountInterfacesBody` for body failed: {}", + "conversion to `PostGetMultipleCompressedAccountProofsV2Body` for body failed: {}", s ) }); @@ -52773,27 +54421,29 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleAccountInterfacesBody, - ) -> types::builder::PostGetMultipleAccountInterfacesBody, + types::builder::PostGetMultipleCompressedAccountProofsV2Body, + ) -> types::builder::PostGetMultipleCompressedAccountProofsV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleAccountInterfaces` + ///Sends a `POST` request to `/getMultipleCompressedAccountProofsV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleAccountInterfacesBody::try_from(v) + types::PostGetMultipleCompressedAccountProofsV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleAccountInterfaces", client.baseurl,); + let url = format!( + "{}/getMultipleCompressedAccountProofsV2", client.baseurl, + ); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52814,7 +54464,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_account_interfaces", + operation_id: 
"post_get_multiple_compressed_account_proofs_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52840,15 +54490,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_compressed_account_proofs`] + /**Builder for [`Client::post_get_multiple_compressed_accounts`] -[`Client::post_get_multiple_compressed_account_proofs`]: super::Client::post_get_multiple_compressed_account_proofs*/ +[`Client::post_get_multiple_compressed_accounts`]: super::Client::post_get_multiple_compressed_accounts*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleCompressedAccountProofs<'a> { + pub struct PostGetMultipleCompressedAccounts<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleCompressedAccountProofs<'a> { + impl<'a> PostGetMultipleCompressedAccounts<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52857,9 +54507,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52867,7 +54517,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleCompressedAccountProofsBody` for body failed: {}", + "conversion to `PostGetMultipleCompressedAccountsBody` for body failed: {}", s ) }); @@ -52876,27 +54526,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleCompressedAccountProofsBody, - ) -> types::builder::PostGetMultipleCompressedAccountProofsBody, + types::builder::PostGetMultipleCompressedAccountsBody, + ) -> types::builder::PostGetMultipleCompressedAccountsBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleCompressedAccountProofs` + ///Sends a `POST` request to `/getMultipleCompressedAccounts` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + 
Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleCompressedAccountProofsBody::try_from(v) + types::PostGetMultipleCompressedAccountsBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleCompressedAccountProofs", client.baseurl,); + let url = format!("{}/getMultipleCompressedAccounts", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52917,7 +54567,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_compressed_account_proofs", + operation_id: "post_get_multiple_compressed_accounts", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52943,18 +54593,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_compressed_account_proofs_v2`] + /**Builder for [`Client::post_get_multiple_compressed_accounts_v2`] -[`Client::post_get_multiple_compressed_account_proofs_v2`]: super::Client::post_get_multiple_compressed_account_proofs_v2*/ +[`Client::post_get_multiple_compressed_accounts_v2`]: super::Client::post_get_multiple_compressed_accounts_v2*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleCompressedAccountProofsV2<'a> { + pub struct PostGetMultipleCompressedAccountsV2<'a> { client: &'a super::Client, - body: Result< - types::builder::PostGetMultipleCompressedAccountProofsV2Body, - String, - >, + body: Result, } - impl<'a> PostGetMultipleCompressedAccountProofsV2<'a> { + impl<'a> PostGetMultipleCompressedAccountsV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52963,11 +54610,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto< - types::PostGetMultipleCompressedAccountProofsV2Body, - >, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52975,7 +54620,7 
@@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleCompressedAccountProofsV2Body` for body failed: {}", + "conversion to `PostGetMultipleCompressedAccountsV2Body` for body failed: {}", s ) }); @@ -52984,29 +54629,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleCompressedAccountProofsV2Body, - ) -> types::builder::PostGetMultipleCompressedAccountProofsV2Body, + types::builder::PostGetMultipleCompressedAccountsV2Body, + ) -> types::builder::PostGetMultipleCompressedAccountsV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleCompressedAccountProofsV2` + ///Sends a `POST` request to `/getMultipleCompressedAccountsV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleCompressedAccountProofsV2Body::try_from(v) + types::PostGetMultipleCompressedAccountsV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!( - "{}/getMultipleCompressedAccountProofsV2", client.baseurl, - ); + let url = format!("{}/getMultipleCompressedAccountsV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53027,7 +54670,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_compressed_account_proofs_v2", + operation_id: "post_get_multiple_compressed_accounts_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53053,15 +54696,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_compressed_accounts`] + /**Builder for [`Client::post_get_multiple_new_address_proofs`] -[`Client::post_get_multiple_compressed_accounts`]: super::Client::post_get_multiple_compressed_accounts*/ 
+[`Client::post_get_multiple_new_address_proofs`]: super::Client::post_get_multiple_new_address_proofs*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleCompressedAccounts<'a> { + pub struct PostGetMultipleNewAddressProofs<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleCompressedAccounts<'a> { + impl<'a> PostGetMultipleNewAddressProofs<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53070,9 +54713,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53080,7 +54723,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleCompressedAccountsBody` for body failed: {}", + "conversion to `PostGetMultipleNewAddressProofsBody` for body failed: {}", s ) }); @@ -53089,27 +54732,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleCompressedAccountsBody, - ) -> types::builder::PostGetMultipleCompressedAccountsBody, + types::builder::PostGetMultipleNewAddressProofsBody, + ) -> types::builder::PostGetMultipleNewAddressProofsBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleCompressedAccounts` + ///Sends a `POST` request to `/getMultipleNewAddressProofs` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleCompressedAccountsBody::try_from(v) + types::PostGetMultipleNewAddressProofsBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleCompressedAccounts", client.baseurl,); + let url = format!("{}/getMultipleNewAddressProofs", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( 
@@ -53130,7 +54773,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_compressed_accounts", + operation_id: "post_get_multiple_new_address_proofs", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53156,15 +54799,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_compressed_accounts_v2`] + /**Builder for [`Client::post_get_multiple_new_address_proofs_v2`] -[`Client::post_get_multiple_compressed_accounts_v2`]: super::Client::post_get_multiple_compressed_accounts_v2*/ +[`Client::post_get_multiple_new_address_proofs_v2`]: super::Client::post_get_multiple_new_address_proofs_v2*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleCompressedAccountsV2<'a> { + pub struct PostGetMultipleNewAddressProofsV2<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleCompressedAccountsV2<'a> { + impl<'a> PostGetMultipleNewAddressProofsV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53173,9 +54816,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53183,7 +54826,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleCompressedAccountsV2Body` for body failed: {}", + "conversion to `PostGetMultipleNewAddressProofsV2Body` for body failed: {}", s ) }); @@ -53192,27 +54835,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleCompressedAccountsV2Body, - ) -> types::builder::PostGetMultipleCompressedAccountsV2Body, + types::builder::PostGetMultipleNewAddressProofsV2Body, + ) -> types::builder::PostGetMultipleNewAddressProofsV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleCompressedAccountsV2` + 
///Sends a `POST` request to `/getMultipleNewAddressProofsV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleCompressedAccountsV2Body::try_from(v) + types::PostGetMultipleNewAddressProofsV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleCompressedAccountsV2", client.baseurl,); + let url = format!("{}/getMultipleNewAddressProofsV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53233,7 +54876,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_compressed_accounts_v2", + operation_id: "post_get_multiple_new_address_proofs_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53259,15 +54902,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_new_address_proofs`] + /**Builder for [`Client::post_get_queue_elements`] -[`Client::post_get_multiple_new_address_proofs`]: super::Client::post_get_multiple_new_address_proofs*/ +[`Client::post_get_queue_elements`]: super::Client::post_get_queue_elements*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleNewAddressProofs<'a> { + pub struct PostGetQueueElements<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleNewAddressProofs<'a> { + impl<'a> PostGetQueueElements<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53276,9 +54919,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53286,8 +54929,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleNewAddressProofsBody` for body 
failed: {}", - s + "conversion to `PostGetQueueElementsBody` for body failed: {}", s ) }); self @@ -53295,27 +54937,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleNewAddressProofsBody, - ) -> types::builder::PostGetMultipleNewAddressProofsBody, + types::builder::PostGetQueueElementsBody, + ) -> types::builder::PostGetQueueElementsBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleNewAddressProofs` + ///Sends a `POST` request to `/getQueueElements` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleNewAddressProofsBody::try_from(v) + types::PostGetQueueElementsBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleNewAddressProofs", client.baseurl,); + let url = format!("{}/getQueueElements", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53336,7 +54978,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_new_address_proofs", + operation_id: "post_get_queue_elements", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53362,15 +55004,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_new_address_proofs_v2`] + /**Builder for [`Client::post_get_queue_info`] -[`Client::post_get_multiple_new_address_proofs_v2`]: super::Client::post_get_multiple_new_address_proofs_v2*/ +[`Client::post_get_queue_info`]: super::Client::post_get_queue_info*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleNewAddressProofsV2<'a> { + pub struct PostGetQueueInfo<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleNewAddressProofsV2<'a> { + 
impl<'a> PostGetQueueInfo<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53379,9 +55021,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53389,8 +55031,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleNewAddressProofsV2Body` for body failed: {}", - s + "conversion to `PostGetQueueInfoBody` for body failed: {}", s ) }); self @@ -53398,27 +55039,26 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleNewAddressProofsV2Body, - ) -> types::builder::PostGetMultipleNewAddressProofsV2Body, + types::builder::PostGetQueueInfoBody, + ) -> types::builder::PostGetQueueInfoBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleNewAddressProofsV2` + ///Sends a `POST` request to `/getQueueInfo` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleNewAddressProofsV2Body::try_from(v) - .map_err(|e| e.to_string()) + types::PostGetQueueInfoBody::try_from(v).map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleNewAddressProofsV2", client.baseurl,); + let url = format!("{}/getQueueInfo", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53439,7 +55079,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_new_address_proofs_v2", + operation_id: "post_get_queue_info", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53465,15 +55105,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_queue_elements`] + /**Builder for 
[`Client::post_get_queue_leaf_indices`] -[`Client::post_get_queue_elements`]: super::Client::post_get_queue_elements*/ +[`Client::post_get_queue_leaf_indices`]: super::Client::post_get_queue_leaf_indices*/ #[derive(Debug, Clone)] - pub struct PostGetQueueElements<'a> { + pub struct PostGetQueueLeafIndices<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetQueueElements<'a> { + impl<'a> PostGetQueueLeafIndices<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53482,9 +55122,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53492,7 +55132,8 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetQueueElementsBody` for body failed: {}", s + "conversion to `PostGetQueueLeafIndicesBody` for body failed: {}", + s ) }); self @@ -53500,128 +55141,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetQueueElementsBody, - ) -> types::builder::PostGetQueueElementsBody, + types::builder::PostGetQueueLeafIndicesBody, + ) -> types::builder::PostGetQueueLeafIndicesBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getQueueElements` + ///Sends a `POST` request to `/getQueueLeafIndices` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetQueueElementsBody::try_from(v) + types::PostGetQueueLeafIndicesBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getQueueElements", client.baseurl,); - let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); - header_map - .append( - ::reqwest::header::HeaderName::from_static("api-version"), - ::reqwest::header::HeaderValue::from_static( - 
super::Client::api_version(), - ), - ); - #[allow(unused_mut)] - let mut request = client - .client - .post(url) - .header( - ::reqwest::header::ACCEPT, - ::reqwest::header::HeaderValue::from_static("application/json"), - ) - .json(&body) - .headers(header_map) - .build()?; - let info = OperationInfo { - operation_id: "post_get_queue_elements", - }; - client.pre(&mut request, &info).await?; - let result = client.exec(request, &info).await; - client.post(&result, &info).await?; - let response = result?; - match response.status().as_u16() { - 200u16 => ResponseValue::from_response(response).await, - 429u16 => { - Err( - Error::ErrorResponse( - ResponseValue::from_response(response).await?, - ), - ) - } - 500u16 => { - Err( - Error::ErrorResponse( - ResponseValue::from_response(response).await?, - ), - ) - } - _ => Err(Error::UnexpectedResponse(response)), - } - } - } - /**Builder for [`Client::post_get_queue_info`] - -[`Client::post_get_queue_info`]: super::Client::post_get_queue_info*/ - #[derive(Debug, Clone)] - pub struct PostGetQueueInfo<'a> { - client: &'a super::Client, - body: Result, - } - impl<'a> PostGetQueueInfo<'a> { - pub fn new(client: &'a super::Client) -> Self { - Self { - client: client, - body: Ok(::std::default::Default::default()), - } - } - pub fn body(mut self, value: V) -> Self - where - V: std::convert::TryInto, - >::Error: std::fmt::Display, - { - self.body = value - .try_into() - .map(From::from) - .map_err(|s| { - format!( - "conversion to `PostGetQueueInfoBody` for body failed: {}", s - ) - }); - self - } - pub fn body_map(mut self, f: F) -> Self - where - F: std::ops::FnOnce( - types::builder::PostGetQueueInfoBody, - ) -> types::builder::PostGetQueueInfoBody, - { - self.body = self.body.map(f); - self - } - ///Sends a `POST` request to `/getQueueInfo` - pub async fn send( - self, - ) -> Result< - ResponseValue, - Error, - > { - let Self { client, body } = self; - let body = body - .and_then(|v| { - 
types::PostGetQueueInfoBody::try_from(v).map_err(|e| e.to_string()) - }) - .map_err(Error::InvalidRequest)?; - let url = format!("{}/getQueueInfo", client.baseurl,); + let url = format!("{}/getQueueLeafIndices", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53642,7 +55182,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_queue_info", + operation_id: "post_get_queue_leaf_indices", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; diff --git a/sdk-libs/photon-api/src/lib.rs b/sdk-libs/photon-api/src/lib.rs index 15f469d40f..480d524ae2 100644 --- a/sdk-libs/photon-api/src/lib.rs +++ b/sdk-libs/photon-api/src/lib.rs @@ -358,6 +358,17 @@ pub mod apis { } } + pub fn make_get_queue_leaf_indices_body( + params: types::PostGetQueueLeafIndicesBodyParams, + ) -> types::PostGetQueueLeafIndicesBody { + types::PostGetQueueLeafIndicesBody { + id: types::PostGetQueueLeafIndicesBodyId::TestAccount, + jsonrpc: types::PostGetQueueLeafIndicesBodyJsonrpc::X20, + method: types::PostGetQueueLeafIndicesBodyMethod::GetQueueLeafIndices, + params, + } + } + pub fn make_get_queue_info_body( params: types::PostGetQueueInfoBodyParams, ) -> types::PostGetQueueInfoBody { @@ -576,6 +587,12 @@ pub mod apis { types::PostGetQueueElementsBody, types::PostGetQueueElementsResponse ); + api_call!( + get_queue_leaf_indices_post, + "getQueueLeafIndices", + types::PostGetQueueLeafIndicesBody, + types::PostGetQueueLeafIndicesResponse + ); api_call!( get_queue_info_post, "getQueueInfo", diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs index 0b5b0583a3..5485c21afb 100644 --- a/sdk-libs/program-test/src/indexer/test_indexer.rs +++ b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -28,8 +28,8 @@ use light_client::{ GetCompressedAccountsByOwnerConfig, 
GetCompressedTokenAccountsByOwnerOrDelegateOptions, Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext, OwnerBalance, PaginatedOptions, QueueElementsResult, - QueueElementsV2Options, Response, RetryConfig, RootIndex, SignatureWithMetadata, - StateMerkleTreeAccounts, TokenBalance, ValidityProofWithContext, + QueueElementsV2Options, QueueLeafIndex, Response, RetryConfig, RootIndex, + SignatureWithMetadata, StateMerkleTreeAccounts, TokenBalance, ValidityProofWithContext, }, }; use light_compressed_account::{ @@ -896,6 +896,16 @@ impl Indexer for TestIndexer { } } + async fn get_queue_leaf_indices( + &self, + _merkle_tree_pubkey: [u8; 32], + _limit: u16, + _start_index: Option, + _config: Option, + ) -> Result>, IndexerError> { + unimplemented!("get_queue_leaf_indices") + } + async fn get_queue_info( &self, _config: Option, diff --git a/sdk-libs/program-test/src/program_test/indexer.rs b/sdk-libs/program-test/src/program_test/indexer.rs index a1a80113ce..d0bfda4c95 100644 --- a/sdk-libs/program-test/src/program_test/indexer.rs +++ b/sdk-libs/program-test/src/program_test/indexer.rs @@ -4,8 +4,8 @@ use light_client::indexer::{ GetCompressedAccountsByOwnerConfig, GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext, OwnerBalance, PaginatedOptions, QueueElementsResult, - QueueElementsV2Options, Response, RetryConfig, SignatureWithMetadata, TokenBalance, - ValidityProofWithContext, + QueueElementsV2Options, QueueLeafIndex, Response, RetryConfig, SignatureWithMetadata, + TokenBalance, ValidityProofWithContext, }; use solana_sdk::pubkey::Pubkey; @@ -196,6 +196,21 @@ impl Indexer for LightProgramTest { .await?) 
} + async fn get_queue_leaf_indices( + &self, + merkle_tree_pubkey: [u8; 32], + limit: u16, + start_index: Option, + config: Option, + ) -> Result>, IndexerError> { + Ok(self + .indexer + .as_ref() + .ok_or(IndexerError::NotInitialized)? + .get_queue_leaf_indices(merkle_tree_pubkey, limit, start_index, config) + .await?) + } + async fn get_queue_info( &self, config: Option,