diff --git a/Cargo.lock b/Cargo.lock index 57e186d6f5..68769acbec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -176,9 +176,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.4" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -287,15 +287,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "ar_archive_writer" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c269894b6fe5e9d7ada0cf69b5bf847ff35bc25fc271f08e1d080fce80339a" -dependencies = [ - "object 0.32.2", -] - [[package]] name = "arbitrary" version = "1.4.2" @@ -314,6 +305,15 @@ dependencies = [ "arbitrary", ] +[[package]] +name = "arc-swap" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +dependencies = [ + "rustversion", +] + [[package]] name = "argon2" version = "0.5.3" @@ -1240,7 +1240,7 @@ dependencies = [ "serde_urlencoded", "thiserror 1.0.69", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "url", "winapi", ] @@ -1268,9 +1268,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.12.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" dependencies = [ "memchr", "serde", @@ -1421,9 +1421,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.44" +version = "1.2.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37521ac7aabe3d13122dc382493e20c9416f299d2ccd5b3a5340a2570cdeb0f3" +checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" dependencies = [ "find-msvc-tools", "jobserver", @@ -1572,9 +1572,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.51" +version = "4.5.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" +checksum = "0c2cfd7bf8a6017ddaa4e32ffe7403d547790db06bd171c1c53926faab501623" dependencies = [ "clap_builder", "clap_derive", @@ -1582,9 +1582,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.51" +version = "4.5.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" +checksum = "0a4c05b9e80c5ccd3a7ef080ad7b6ba7d6fc00a985b8b157197075677c82c7a0" dependencies = [ "anstream", "anstyle", @@ -1594,11 +1594,11 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.5.60" +version = "4.5.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e602857739c5a4291dfa33b5a298aeac9006185229a700e5810a3ef7272d971" +checksum = "2348487adcd4631696ced64ccdb40d38ac4d31cae7f2eec8817fcea1b9d1c43c" dependencies = [ - "clap 4.5.51", + "clap 4.5.50", ] [[package]] @@ -2346,9 +2346,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.5" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" dependencies = [ 
"powerfmt", "serde_core", @@ -3137,7 +3137,7 @@ checksum = "3a82608ee96ce76aeab659e9b8d3c2b787bffd223199af88c674923d861ada10" dependencies = [ "execute-command-macro", "execute-command-tokens", - "generic-array 1.3.5", + "generic-array 1.3.4", ] [[package]] @@ -3380,6 +3380,7 @@ dependencies = [ "fendermint_vm_snapshot", "fendermint_vm_topdown", "fendermint_vm_topdown_proof_service", + "filecoin-f3-gpbft", "fs-err", "fvm", "fvm_ipld_blockstore 0.3.1", @@ -3399,6 +3400,7 @@ dependencies = [ "libp2p-bitswap", "literally", "multiaddr", + "num-bigint", "num-traits", "openssl", "paste", @@ -3417,7 +3419,7 @@ dependencies = [ "tendermint-proto 0.31.1", "tendermint-rpc", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "toml 0.8.23", "tower 0.4.13", "tower-abci", @@ -3434,7 +3436,7 @@ dependencies = [ "anyhow", "bytes", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.50", "ethers", "fendermint_materializer", "fendermint_vm_actor_interface", @@ -3462,6 +3464,7 @@ dependencies = [ "dirs", "fendermint_vm_encoding", "fendermint_vm_topdown", + "fendermint_vm_topdown_proof_service", "fvm_ipld_encoding 0.5.3", "fvm_shared", "ipc-api", @@ -3537,7 +3540,7 @@ dependencies = [ "async-trait", "axum", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.50", "erased-serde", "ethers", "ethers-contract", @@ -3658,7 +3661,7 @@ dependencies = [ "tendermint-rpc", "text-tables", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "toml 0.8.23", "tracing", "url", @@ -3690,7 +3693,7 @@ dependencies = [ "base64 0.21.7", "bytes", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.50", "ethers", "fendermint_crypto", "fendermint_vm_actor_interface", @@ -3832,6 +3835,17 @@ dependencies = [ "strum 0.26.3", ] +[[package]] +name = "fendermint_vm_evm_event_utils" +version = "0.1.0" +dependencies = [ + "anyhow", + "ethers", + "hex", + "ipc_actors_abis", + "proofs", +] + [[package]] name = "fendermint_vm_genesis" version = "0.1.0" @@ -3868,10 +3882,12 @@ dependencies = [ name = "fendermint_vm_interpreter" version = "0.1.0" dependencies = [ + "actors-builtin-car", "actors-custom-api", "actors-custom-car", "anyhow", "arbitrary", + "arc-swap", "async-stm", "async-trait", "base64 0.21.7", @@ -3892,11 +3908,13 @@ dependencies = [ "fendermint_vm_core", "fendermint_vm_encoding", "fendermint_vm_event", + "fendermint_vm_evm_event_utils", "fendermint_vm_genesis", "fendermint_vm_interpreter", "fendermint_vm_message", "fendermint_vm_resolver", "fendermint_vm_topdown", + "fendermint_vm_topdown_proof_service", "fil_actor_eam", "fil_actor_evm", "futures-core", @@ -3914,9 +3932,11 @@ dependencies = [ "merkle-tree-rs", "multihash 0.18.1", "multihash-codetable", + "num-bigint", "num-traits", "pin-project", "prometheus", + "proofs", "quickcheck", "quickcheck_macros", "rand 0.8.5", @@ -3931,7 +3951,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tokio-stream", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "tracing", ] @@ -3950,6 +3970,7 @@ dependencies = [ "fendermint_vm_actor_interface", "fendermint_vm_encoding", "fendermint_vm_message", + "fendermint_vm_topdown_proof_service", "fvm_ipld_encoding 0.5.3", "fvm_shared", "hex", @@ -3957,6 +3978,7 @@ dependencies = [ "lazy_static", "multihash-codetable", "num-traits", + "proofs", "quickcheck", "quickcheck_macros", "rand 0.8.5", @@ -4017,7 +4039,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tokio-stream", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "tracing", "unsigned-varint 0.7.2", ] @@ -4032,7 +4054,7 @@ dependencies = [ "async-trait", "bytes", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.50", 
"ethers", "fendermint_crypto", "fendermint_testing", @@ -4070,8 +4092,11 @@ dependencies = [ "base64 0.21.7", "chrono", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.50", + "contracts-artifacts", + "ethers", "fendermint_actor_f3_light_client", + "fendermint_vm_evm_event_utils", "fendermint_vm_genesis", "filecoin-f3-certs", "filecoin-f3-gpbft", @@ -4081,10 +4106,11 @@ dependencies = [ "fvm_ipld_bitfield", "fvm_ipld_encoding 0.5.3", "fvm_shared", + "hex", "humantime-serde", "ipc-api", "ipc-observability", - "ipc-provider", + "ipc_actors_abis", "keccak-hash", "multihash 0.18.1", "multihash-codetable", @@ -4139,7 +4165,7 @@ dependencies = [ "anyhow", "async-std", "cid 0.10.1", - "clap 4.5.51", + "clap 4.5.50", "futures", "fvm_ipld_blockstore 0.2.1", "fvm_ipld_car 0.7.1", @@ -4161,7 +4187,7 @@ dependencies = [ "fvm_ipld_blockstore 0.3.1", "fvm_ipld_encoding 0.5.3", "fvm_shared", - "hex-literal 1.1.0", + "hex-literal 1.0.0", "log", "multihash 0.19.3", "num-derive 0.4.2", @@ -4186,7 +4212,7 @@ dependencies = [ "fvm_ipld_kamt", "fvm_shared", "hex", - "hex-literal 1.1.0", + "hex-literal 1.0.0", "log", "multihash-codetable", "num-derive 0.4.2", @@ -4227,7 +4253,7 @@ dependencies = [ "fvm_sdk", "fvm_shared", "hex", - "integer-encoding 4.1.0", + "integer-encoding 4.0.2", "itertools 0.14.0", "k256 0.13.4", "lazy_static", @@ -4251,7 +4277,7 @@ dependencies = [ [[package]] name = "filecoin-f3-blssig" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "blake2 0.11.0-rc.2", "bls-signatures", @@ -4267,7 +4293,7 @@ dependencies = [ [[package]] name = "filecoin-f3-certs" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "ahash 0.8.12", "filecoin-f3-gpbft", @@ -4278,7 +4304,7 @@ dependencies = [ [[package]] name = "filecoin-f3-gpbft" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "ahash 0.8.12", "anyhow", @@ -4301,7 +4327,7 @@ dependencies = [ [[package]] name = "filecoin-f3-lightclient" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "anyhow", "base64 0.22.1", @@ -4317,7 +4343,7 @@ dependencies = [ [[package]] name = "filecoin-f3-merkle" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "anyhow", "sha3", @@ -4326,7 +4352,7 @@ dependencies = [ [[package]] name = "filecoin-f3-rpc" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = 
"git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "anyhow", "filecoin-f3-gpbft", @@ -4443,9 +4469,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" dependencies = [ "crc32fast", "miniz_oxide 0.8.9", @@ -4951,7 +4977,7 @@ dependencies = [ "serde", "serde_ipld_dagcbor 0.6.4", "serde_repr", - "serde_tuple 1.1.3", + "serde_tuple 1.1.2", "thiserror 2.0.17", ] @@ -5064,9 +5090,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "1.3.5" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf57c49a95fd1fe24b90b3033bee6dc7e8f1288d51494cb44e627c295e38542" +checksum = "985a5578ebdb02351d484a77fb27e7cb79272f1ba9bc24692d8243c3cfe40660" dependencies = [ "rustversion", "typenum", @@ -5208,7 +5234,7 @@ dependencies = [ "indexmap 2.12.0", "slab", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "tracing", ] @@ -5227,7 +5253,7 @@ dependencies = [ "indexmap 2.12.0", "slab", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "tracing", ] @@ -5375,9 +5401,9 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hex-literal" -version = "1.1.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e712f64ec3850b98572bffac52e2c6f282b29fe6c5fa6d42334b30be438d95c1" +checksum = "bcaaec4551594c969335c98c903c1397853d4198408ea609190f420500f6be71" [[package]] name = "hex_fmt" @@ -5472,11 +5498,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.12" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -5690,7 +5716,7 @@ dependencies = [ "tokio", "tokio-rustls 0.26.4", "tower-service", - "webpki-roots 1.0.4", + "webpki-roots 1.0.3", ] [[package]] @@ -5787,9 +5813,9 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", @@ -5800,9 +5826,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -5813,10 +5839,11 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ + "displaydoc", "icu_collections", 
"icu_normalizer_data", "icu_properties", @@ -5827,38 +5854,42 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "2.1.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ + "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", + "potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "2.1.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", "icu_locale_core", + "stable_deref_trait", + "tinystr", "writeable", "yoke", "zerofrom", @@ -5948,9 +5979,9 @@ dependencies = [ [[package]] name = "ignore" -version = "0.4.25" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3d782a365a015e0f5c04902246139249abf769125006fbe7649e2ee88169b4a" +checksum = "81776e6f9464432afcc28d03e52eb101c93b6f0566f52aef2427663e700f0403" dependencies = [ "crossbeam-deque", "globset", @@ -6098,9 +6129,9 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "integer-encoding" -version = "4.1.0" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c00403deb17c3221a1fe4fb571b9ed0370b3dcd116553c77fa294a3d918699" +checksum = "0d762194228a2f1c11063e46e32e5acb96e66e906382b9eb5441f2e0504bbd5a" [[package]] name = "io-lifetimes" @@ -6164,7 +6195,7 @@ dependencies = [ "bytes", "chrono", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.50", "clap_complete", "contracts-artifacts", "env_logger 0.10.2", @@ -6219,7 +6250,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tokio-tungstenite 0.18.0", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "toml 0.7.8", "tracing", "tracing-subscriber 0.3.20", @@ -6343,7 +6374,7 @@ dependencies = [ "ethers", "fs-err", "fvm_shared", - "generic-array 1.3.5", + "generic-array 1.3.4", "hex", "ipc-types", "libc", @@ -6460,13 +6491,13 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.17" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" +checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -6578,9 +6609,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.82" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", @@ -6642,7 +6673,7 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tokio-rustls 0.26.4", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "tracing", "url", ] @@ -7546,9 +7577,9 @@ checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "literally" @@ -8853,9 +8884,9 @@ dependencies = [ [[package]] name = "potential_utf" -version = "0.1.4" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ "zerovec", ] @@ -8980,9 +9011,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "8e0f6df8eaa422d97d72edcd152e1451618fed47fabbdbd5a8864167b1d4aff7" dependencies = [ "unicode-ident", ] @@ -9099,11 +9130,12 @@ dependencies = [ [[package]] name = "proptest" -version = "1.9.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" +checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce" dependencies = [ "bitflags 2.10.0", + "lazy_static", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", @@ -9205,11 +9237,10 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psm" -version = "0.1.28" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11f2fedc3b7dafdc2851bc52f277377c5473d378859be234bc7ebb593144d01" +checksum = "e66fcd288453b748497d8fb18bccc83a16b0518e3906d4b8df0a8d42d93dbb1c" dependencies = [ - "ar_archive_writer", "cc", ] @@ -9683,7 +9714,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.26.4", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "tower 0.5.2", "tower-http 0.6.6", "tower-service", @@ -9692,7 +9723,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.4", + "webpki-roots 1.0.3", ] [[package]] @@ -10005,7 +10036,7 @@ dependencies = [ "once_cell", "ring 0.17.14", "rustls-pki-types", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.7", "subtle", "zeroize", ] @@ -10057,9 +10088,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ "web-time", "zeroize", @@ -10079,7 +10110,7 @@ dependencies = [ "rustls 0.23.34", "rustls-native-certs 0.8.2", "rustls-platform-verifier-android", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.7", "security-framework 3.5.1", "security-framework-sys", "webpki-root-certs 0.26.11", @@ -10104,9 +10135,9 @@ 
dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" dependencies = [ "ring 0.17.14", "rustls-pki-types", @@ -10219,9 +10250,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.0.5" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1317c3bf3e7df961da95b0a56a172a02abead31276215a0497241a7624b487ce" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" dependencies = [ "dyn-clone", "ref-cast", @@ -10513,12 +10544,12 @@ dependencies = [ [[package]] name = "serde_tuple" -version = "1.1.3" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af196b9c06f0aa5555ab980c01a2527b0f67517da8d68b1731b9d4764846a6f" +checksum = "52569c5296679bd28e2457f067f97d270077df67da0340647da5412c8eac8d9e" dependencies = [ "serde", - "serde_tuple_macros 1.1.3", + "serde_tuple_macros 1.1.2", ] [[package]] @@ -10534,9 +10565,9 @@ dependencies = [ [[package]] name = "serde_tuple_macros" -version = "1.1.3" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3a1e7d2eadec84deabd46ae061bf480a91a6bce74d25dad375bd656f2e19d8" +checksum = "2f46c707781471741d5f2670edb36476479b26e94cf43efe21ca3c220b97ef2e" dependencies = [ "proc-macro2", "quote", @@ -10583,7 +10614,7 @@ dependencies = [ "indexmap 1.9.3", "indexmap 2.12.0", "schemars 0.9.0", - "schemars 1.0.5", + "schemars 1.0.4", "serde_core", "serde_json", "time", @@ -11384,9 +11415,9 @@ checksum = "df7f62577c25e07834649fc3b39fafdc597c0a3527dc1c60129201ccfcbaa50c" [[package]] name = "target-triple" -version = "1.0.0" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" +checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790" [[package]] name = "tempfile" @@ -11708,9 +11739,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -11879,9 +11910,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -12032,7 +12063,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "tower-layer", "tower-service", "tracing", @@ -12267,9 +12298,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.113" +version = "1.0.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "559b6a626c0815c942ac98d434746138b4f89ddd6a1b8cbb168c6845fb3376c5" +checksum = "4d66678374d835fe847e0dc8348fde2ceb5be4a7ec204437d8367f0d8df266a5" dependencies = [ "glob", "serde", @@ -12391,9 +12422,9 @@ checksum = 
"75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" [[package]] name = "unicode-segmentation" @@ -12620,7 +12651,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-tungstenite 0.21.0", - "tokio-util 0.7.17", + "tokio-util 0.7.16", "tower-service", "tracing", ] @@ -12642,9 +12673,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.105" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", @@ -12653,11 +12684,25 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.108", + "wasm-bindgen-shared", +] + [[package]] name = "wasm-bindgen-futures" -version = "0.4.55" +version = "0.4.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" dependencies = [ "cfg-if", "js-sys", @@ -12668,9 +12713,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.105" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -12678,22 +12723,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.105" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ - "bumpalo", "proc-macro2", "quote", "syn 2.0.108", + "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.105" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] @@ -12941,9 +12986,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.82" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" dependencies = [ "js-sys", "wasm-bindgen", @@ -12985,14 +13030,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.4", + "webpki-root-certs 
1.0.3", ] [[package]] name = "webpki-root-certs" -version = "1.0.4" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3e3b5f5e80bc89f30ce8d0343bf4e5f12341c51f3e26cbeecbc7c85443e85b" +checksum = "05d651ec480de84b762e7be71e6efa7461699c19d9e2c272c8d93455f567786e" dependencies = [ "rustls-pki-types", ] @@ -13014,9 +13059,9 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "1.0.4" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" dependencies = [ "rustls-pki-types", ] @@ -13554,9 +13599,9 @@ checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" -version = "0.6.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "ws_stream_wasm" @@ -13627,9 +13672,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.28" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae8337f8a065cfc972643663ea4279e04e7256de865aa66fe25cec5fb912d3f" +checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" [[package]] name = "xmltree" @@ -13725,10 +13770,11 @@ dependencies = [ [[package]] name = "yoke" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ + "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -13736,9 +13782,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", @@ -13809,9 +13855,9 @@ dependencies = [ [[package]] name = "zerotrie" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ "displaydoc", "yoke", @@ -13820,9 +13866,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.5" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ "yoke", "zerofrom", @@ -13831,9 +13877,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index c5156b3619..12a93b364e 100644 
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -35,7 +35,6 @@ members = [
     "fendermint/testing/*-test",
     "fendermint/tracing",
     "fendermint/vm/*",
-    "fendermint/vm/topdown/proof-service",
     "fendermint/actors",
     "fendermint/actors-custom-car",
     "fendermint/actors-builtin-car",
@@ -95,6 +94,7 @@ gcra = "0.6.0"
 hex = "0.4"
 hex-literal = "0.4.1"
 http = "0.2.12"
+humantime-serde = "1.1"
 im = "15.1.0"
 integer-encoding = { version = "3.0.3", default-features = false }
 jsonrpc-v2 = { version = "0.11", default-features = false, features = [
@@ -137,6 +137,7 @@ num-bigint = "0.4"
 num-derive = "0.4"
 num-traits = "0.2"
 num_enum = "0.7.2"
+parking_lot = "0.12"
 paste = "1"
 pin-project = "1.1.2"
 prometheus = { version = "0.13", features = ["process"] }
@@ -185,8 +186,6 @@ tracing-appender = "0.2.3"
 text-tables = "0.3.1"
 url = { version = "2.4.1", features = ["serde"] }
 zeroize = "1.6"
-parking_lot = "0.12"
-humantime-serde = "1.1"

 # Vendored for cross-compilation, see https://github.com/cross-rs/cross/wiki/Recipes#openssl
 # Make sure every top level build target actually imports this dependency, and don't end up
diff --git a/contracts/contracts/lib/LibGateway.sol b/contracts/contracts/lib/LibGateway.sol
index 0d217c4532..ccc794ab02 100644
--- a/contracts/contracts/lib/LibGateway.sol
+++ b/contracts/contracts/lib/LibGateway.sol
@@ -29,7 +29,6 @@ library LibGateway {
     event MembershipUpdated(Membership);

     /// @dev subnet refers to the next "down" subnet that the `envelope.message.to` should be forwarded to.
-    // Keep in sync with the event signature in the proof-service: fendermint/vm/topdown/proof-service/src/assembler.rs:NEW_TOPDOWN_MESSAGE_SIGNATURE
     event NewTopDownMessage(address indexed subnet, IpcEnvelope message, bytes32 indexed id);
     /// @dev event emitted when there is a new bottom-up message added to the batch.
     /// @dev there is no need to emit the message itself, as the message is included in batch.
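The "Keep in sync" comments removed in this file and the next point at event-signature constants kept by the proof-service (see the removed references to assembler.rs). As a hedged illustration only, and not part of this change: one common way such a constant is used is as a topic-0 filter, i.e. the keccak-256 of the event's canonical signature string. The sketch below uses the `keccak-hash` and `hex` crates already present in the dependency tree; `Transfer(address,address,uint256)` is only a stand-in, since the canonical signature for `NewTopDownMessage` depends on the ABI tuple layout of `IpcEnvelope`.

    // Hedged sketch: deriving a topic-0 (event signature hash) constant in Rust.
    // The signature string is a placeholder, not the real NewTopDownMessage signature.
    use keccak_hash::keccak;

    fn topic0(signature: &str) -> [u8; 32] {
        // keccak-256 over the canonical signature string, e.g. "Event(type1,type2,...)"
        keccak(signature.as_bytes()).to_fixed_bytes()
    }

    fn main() {
        let topic = topic0("Transfer(address,address,uint256)");
        println!("0x{}", hex::encode(topic));
    }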
diff --git a/contracts/contracts/lib/LibPowerChangeLog.sol b/contracts/contracts/lib/LibPowerChangeLog.sol
index 96f9d1cd19..ce143567aa 100644
--- a/contracts/contracts/lib/LibPowerChangeLog.sol
+++ b/contracts/contracts/lib/LibPowerChangeLog.sol
@@ -5,7 +5,6 @@ import {PowerChangeLog, PowerChange, PowerOperation} from "../structs/Subnet.sol

 /// The util library for `PowerChangeLog`
 library LibPowerChangeLog {
-    // Keep in sync with the event signature in the proof-service: fendermint/vm/topdown/proof-service/src/assembler.rs:NEW_POWER_CHANGE_REQUEST_SIGNATURE
     event NewPowerChangeRequest(PowerOperation op, address validator, bytes payload, uint64 configurationNumber);

     /// @notice Validator request to update its metadata
diff --git a/fendermint/actors/f3-light-client/src/lib.rs b/fendermint/actors/f3-light-client/src/lib.rs
index 0898c8243c..f61023e987 100644
--- a/fendermint/actors/f3-light-client/src/lib.rs
+++ b/fendermint/actors/f3-light-client/src/lib.rs
@@ -1,8 +1,8 @@
 // Copyright 2021-2024 Protocol Labs
 // SPDX-License-Identifier: Apache-2.0, MIT

-use crate::state::State;
-use crate::types::{ConstructorParams, GetStateResponse, UpdateStateParams};
+use crate::state::{PowerEntryValue, State};
+use crate::types::{ConstructorParams, GetStateResponse, PowerEntry, UpdateStateParams};
 use fil_actors_runtime::builtin::singletons::SYSTEM_ACTOR_ADDR;
 use fil_actors_runtime::runtime::{ActorCode, Runtime};
 use fil_actors_runtime::{actor_dispatch, actor_error, ActorError};
@@ -39,11 +39,7 @@ impl F3LightClientActor {
     pub fn constructor(rt: &impl Runtime, params: ConstructorParams) -> Result<(), ActorError> {
         rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;

-        let state = State::new(
-            params.instance_id,
-            params.power_table,
-            params.finalized_epochs,
-        )?;
+        let state = State::new(rt.store(), params.processed_instance_id, params.power_table)?;

         rt.create(&state)?;
         Ok(())
@@ -57,7 +53,28 @@ impl F3LightClient for F3LightClientActor {
         rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;

         rt.transaction(|st: &mut State, rt| {
-            st.update_state(rt, params.state)?;
+            // Basic monotonicity check to prevent accidental rewinds.
+            //
+            // Note: multiple epochs can be proven under the same certificate instance, so
+            // `processed_instance_id` may stay the same across updates, but it must never go
+            // backwards.
+            //
+            // We intentionally allow forward jumps: intermediate F3 instances may be "base-only"
+            // (empty suffix), meaning there is no epoch proof/execution point at which to update
+            // the actor. In that case, the executor may update the actor directly to a later
+            // instance in a single atomic state transition.
+            //
+            // Also, we allow re-applying the same update (idempotency) by permitting equality.
+            if params.processed_instance_id < st.light_client_state.processed_instance_id {
+                return Err(actor_error!(
+                    illegal_argument,
+                    "processed_instance_id went backwards: {} < {}",
+                    params.processed_instance_id,
+                    st.light_client_state.processed_instance_id
+                ));
+            }
+
+            st.update_state(rt, params.processed_instance_id, params.power_table)?;
             Ok(())
         })
     }
 }
@@ -69,10 +86,31 @@
         let state = rt.state::<State>()?;
         let lc = &state.light_client_state;

+        // Materialize the current power table for convenience.
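+        // The entries live in the HAMT referenced by `power_table_root`; they are read back
+        // and returned sorted by validator ID so the response is deterministic.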
+        let power_table = {
+            let m = fil_actors_runtime::Map2::<_, u64, PowerEntryValue>::load(
+                rt.store(),
+                &lc.power_table_root,
+                fil_actors_runtime::DEFAULT_HAMT_CONFIG,
+                "f3_power_table",
+            )?;
+            let mut out = Vec::new();
+            m.for_each(|id, v| {
+                out.push(PowerEntry {
+                    id,
+                    public_key: v.public_key.clone(),
+                    power_be: v.power_be.clone(),
+                });
+                Ok(())
+            })?;
+            out.sort_by_key(|e| e.id);
+            out
+        };
+
         Ok(GetStateResponse {
-            instance_id: lc.instance_id,
-            finalized_epochs: lc.finalized_epochs.clone(),
-            power_table: lc.power_table.clone(),
+            processed_instance_id: lc.processed_instance_id,
+            power_table_root: lc.power_table_root.clone(),
+            power_table,
         })
     }
 }
@@ -94,45 +132,42 @@ impl ActorCode for F3LightClientActor {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::types::{LightClientState, PowerEntry};
+    use crate::types::PowerEntry;
     use fil_actors_runtime::test_utils::{expect_empty, MockRuntime, SYSTEM_ACTOR_CODE_ID};
     use fil_actors_runtime::SYSTEM_ACTOR_ADDR;
     use fvm_ipld_encoding::ipld_block::IpldBlock;
     use fvm_shared::address::Address;
     use fvm_shared::error::ExitCode;

-    /// Helper function to create test light client state
-    fn create_test_state(
-        instance_id: u64,
-        finalized_epochs: Vec<ChainEpoch>,
-        power_table: Vec<PowerEntry>,
-    ) -> LightClientState {
-        LightClientState {
-            instance_id,
-            finalized_epochs,
-            power_table,
-        }
-    }
-
     /// Helper function to create test power entries
     fn create_test_power_entries() -> Vec<PowerEntry> {
+        fn u64_to_power_be(x: u64) -> Vec<u8> {
+            if x == 0 {
+                return Vec::new();
+            }
+            let bytes = x.to_be_bytes();
+            let first = bytes.iter().position(|b| *b != 0).unwrap_or(bytes.len());
+            bytes[first..].to_vec()
+        }
+
         vec![
             PowerEntry {
+                id: 1,
                 public_key: vec![1, 2, 3],
-                power: 100,
+                power_be: u64_to_power_be(100),
             },
             PowerEntry {
+                id: 2,
                 public_key: vec![4, 5, 6],
-                power: 200,
+                power_be: u64_to_power_be(200),
             },
         ]
     }

     /// Construct the actor and verify initialization
     pub fn construct_and_verify(
-        instance_id: u64,
+        current_instance_id: u64,
         power_table: Vec<PowerEntry>,
-        finalized_epochs: Vec<ChainEpoch>,
     ) -> MockRuntime {
         let rt = MockRuntime {
             receiver: Address::new_id(10),
@@ -144,9 +179,8 @@ mod tests {
         rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);

         let constructor_params = ConstructorParams {
-            instance_id,
+            processed_instance_id: current_instance_id,
             power_table,
-            finalized_epochs,
         };

         let result = rt
@@ -165,35 +199,28 @@ mod tests {
     #[test]
     fn test_constructor_empty_power_table() {
-        let _rt = construct_and_verify(0, vec![], vec![]);
+        let _rt = construct_and_verify(0, vec![]);
         // Constructor test passed if we get here without panicking
     }

     #[test]
     fn test_constructor_with_power_table() {
         let power_entries = create_test_power_entries();
-        let _rt = construct_and_verify(1, power_entries, vec![]);
+        let _rt = construct_and_verify(1, power_entries);
         // Constructor test passed if we get here without panicking
     }

-    #[test]
-    fn test_constructor_with_finalized_epochs() {
-        let power_entries = create_test_power_entries();
-        let _rt = construct_and_verify(1, power_entries, vec![100, 101, 102]);
-        // Constructor test passed if we get here without panicking
-    }
-
     #[test]
     fn test_update_state_success() {
-        let rt = construct_and_verify(1, create_test_power_entries(), vec![]);
+        let rt = construct_and_verify(1, create_test_power_entries());

         // Set caller to system actor
         rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
         rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
-        let new_state = create_test_state(1, vec![100, 101, 102], create_test_power_entries());
         let update_params = UpdateStateParams {
-            state: new_state.clone(),
+            processed_instance_id: 1,
+            power_table: create_test_power_entries(),
         };

         let result = rt
@@ -207,17 +234,53 @@ mod tests {
         rt.verify();
     }

+    #[test]
+    fn test_update_state_idempotent_allowed() {
+        let rt = construct_and_verify(1, create_test_power_entries());
+
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let initial_params = UpdateStateParams {
+            processed_instance_id: 1,
+            power_table: create_test_power_entries(),
+        };
+        rt.call::<F3LightClientActor>(
+            Method::UpdateState as u64,
+            IpldBlock::serialize_cbor(&initial_params).unwrap(),
+        )
+        .unwrap();
+        rt.reset();
+
+        // Try to update with same instance
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let update_params = UpdateStateParams {
+            processed_instance_id: 1,
+            power_table: create_test_power_entries(),
+        };
+
+        let result = rt.call::<F3LightClientActor>(
+            Method::UpdateState as u64,
+            IpldBlock::serialize_cbor(&update_params).unwrap(),
+        );
+
+        // Allowed (idempotency): equality is ok, only rewinds are rejected.
+        assert!(result.is_ok());
+    }
+
     #[test]
     fn test_update_state_unauthorized_caller() {
-        let rt = construct_and_verify(1, create_test_power_entries(), vec![]);
+        let rt = construct_and_verify(1, create_test_power_entries());

         // Set caller to non-system actor
         let unauthorized_caller = Address::new_id(999);
         rt.set_caller(*SYSTEM_ACTOR_CODE_ID, unauthorized_caller);
         rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);

-        let new_state = create_test_state(1, vec![100, 101, 102], create_test_power_entries());
-        let update_params = UpdateStateParams { state: new_state };
+        let update_params = UpdateStateParams {
+            processed_instance_id: 1,
+            power_table: create_test_power_entries(),
+        };

         let result = rt.call::<F3LightClientActor>(
             Method::UpdateState as u64,
@@ -233,13 +296,15 @@ mod tests {
     #[test]
     fn test_get_state() {
         let power_entries = create_test_power_entries();
-        let rt = construct_and_verify(42, power_entries.clone(), vec![]);
+        let rt = construct_and_verify(42, power_entries.clone());

         // Update state first
         rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
         rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
-        let new_state = create_test_state(42, vec![100, 101, 102], power_entries.clone());
-        let update_params = UpdateStateParams { state: new_state };
+        let update_params = UpdateStateParams {
+            processed_instance_id: 42,
+            power_table: power_entries.clone(),
+        };
         rt.call::<F3LightClientActor>(
             Method::UpdateState as u64,
             IpldBlock::serialize_cbor(&update_params).unwrap(),
         )
@@ -255,20 +320,84 @@ mod tests {
         .unwrap();

         let response = result.deserialize::<GetStateResponse>().unwrap();
-        assert_eq!(response.instance_id, 42);
-        assert_eq!(response.finalized_epochs, vec![100, 101, 102]);
+        assert_eq!(response.processed_instance_id, 42);
         assert_eq!(response.power_table, power_entries);
     }

+    #[test]
+    fn test_power_table_root_changes_on_update() {
+        let rt = construct_and_verify(42, create_test_power_entries());
+
+        // Read initial state.
+        rt.expect_validate_caller_any();
+        let initial = rt
+            .call::<F3LightClientActor>(Method::GetState as u64, None)
+            .unwrap()
+            .unwrap()
+            .deserialize::<GetStateResponse>()
+            .unwrap();
+
+        // Update with a different power table.
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        fn u64_to_power_be(x: u64) -> Vec<u8> {
+            if x == 0 {
+                return Vec::new();
+            }
+            let bytes = x.to_be_bytes();
+            let first = bytes.iter().position(|b| *b != 0).unwrap_or(bytes.len());
+            bytes[first..].to_vec()
+        }
+        let new_power_table = vec![
+            PowerEntry {
+                id: 1,
+                public_key: vec![1, 2, 3],
+                power_be: u64_to_power_be(999),
+            },
+            PowerEntry {
+                id: 3,
+                public_key: vec![7, 8, 9],
+                power_be: u64_to_power_be(333),
+            },
+        ];
+        let update_params = UpdateStateParams {
+            processed_instance_id: 42,
+            power_table: new_power_table.clone(),
+        };
+        rt.call::<F3LightClientActor>(
+            Method::UpdateState as u64,
+            IpldBlock::serialize_cbor(&update_params).unwrap(),
+        )
+        .unwrap();
+        rt.reset();
+
+        // Read updated state.
+        rt.expect_validate_caller_any();
+        let updated = rt
+            .call::<F3LightClientActor>(Method::GetState as u64, None)
+            .unwrap()
+            .unwrap()
+            .deserialize::<GetStateResponse>()
+            .unwrap();
+
+        assert_ne!(
+            initial.power_table_root, updated.power_table_root,
+            "power table root CID should change when table changes"
+        );
+        assert_eq!(updated.power_table, new_power_table);
+    }
+
     #[test]
     fn test_state_progression() {
-        let rt = construct_and_verify(1, create_test_power_entries(), vec![]);
+        let rt = construct_and_verify(1, create_test_power_entries());

         // Update with first state
         rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
         rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
-        let state1 = create_test_state(1, vec![100, 101, 102], create_test_power_entries());
-        let params1 = UpdateStateParams { state: state1 };
+        let params1 = UpdateStateParams {
+            processed_instance_id: 1,
+            power_table: create_test_power_entries(),
+        };
         rt.call::<F3LightClientActor>(
             Method::UpdateState as u64,
             IpldBlock::serialize_cbor(&params1).unwrap(),
         )
         .unwrap();
         rt.reset();

-        // Update with second state (higher height)
+        // Update with same instance again (idempotent allowed)
         rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
         rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
-        let state2 = create_test_state(1, vec![200, 201, 202], create_test_power_entries());
-        let params2 = UpdateStateParams { state: state2 };
+        let params2 = UpdateStateParams {
+            processed_instance_id: 1,
+            power_table: create_test_power_entries(),
+        };

         let result = rt.call::<F3LightClientActor>(
             Method::UpdateState as u64,
             IpldBlock::serialize_cbor(&params2).unwrap(),
         );
         assert!(result.is_ok());
     }
+
+    #[test]
+    fn test_instance_id_progression_next_instance() {
+        let rt = construct_and_verify(100, create_test_power_entries());
+
+        // First state at instance 100
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let initial_params = UpdateStateParams {
+            processed_instance_id: 100,
+            power_table: create_test_power_entries(),
+        };
+        rt.call::<F3LightClientActor>(
+            Method::UpdateState as u64,
+            IpldBlock::serialize_cbor(&initial_params).unwrap(),
+        )
+        .unwrap();
+        rt.reset();
+
+        // Update to next instance (100 -> 101) should succeed
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let update_params = UpdateStateParams {
+            processed_instance_id: 101,
+            power_table: create_test_power_entries(),
+        };
+
+        let result = rt.call::<F3LightClientActor>(
+            Method::UpdateState as u64,
+            IpldBlock::serialize_cbor(&update_params).unwrap(),
+        );
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn test_instance_id_skip_allowed_but_rewind_rejected() {
+        let rt = construct_and_verify(100, create_test_power_entries());
+
+        // First state at instance 100
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let initial_params = UpdateStateParams {
+            processed_instance_id: 100,
+            power_table: create_test_power_entries(),
+        };
+        rt.call::<F3LightClientActor>(
+            Method::UpdateState as u64,
+            IpldBlock::serialize_cbor(&initial_params).unwrap(),
+        )
+        .unwrap();
+        rt.reset();
+
+        // Skipping forward instances is allowed (base-only instances may have no epoch execution point).
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let update_params = UpdateStateParams {
+            processed_instance_id: 102,
+            power_table: create_test_power_entries(),
+        };
+
+        let result = rt.call::<F3LightClientActor>(
+            Method::UpdateState as u64,
+            IpldBlock::serialize_cbor(&update_params).unwrap(),
+        );
+        assert!(result.is_ok());
+        rt.reset();
+
+        // Rewinding is still forbidden.
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let rewind_params = UpdateStateParams {
+            processed_instance_id: 101,
+            power_table: create_test_power_entries(),
+        };
+        let result = rt.call::<F3LightClientActor>(
+            Method::UpdateState as u64,
+            IpldBlock::serialize_cbor(&rewind_params).unwrap(),
+        );
+        assert!(result.is_err());
+        let err = result.unwrap_err();
+        assert_eq!(err.exit_code(), ExitCode::USR_ILLEGAL_ARGUMENT);
+    }
 }
diff --git a/fendermint/actors/f3-light-client/src/state.rs b/fendermint/actors/f3-light-client/src/state.rs
index 64497e9d5c..6ab67bca1e 100644
--- a/fendermint/actors/f3-light-client/src/state.rs
+++ b/fendermint/actors/f3-light-client/src/state.rs
@@ -8,7 +8,9 @@
 use crate::types::{LightClientState, PowerEntry};

 use fil_actors_runtime::runtime::Runtime;
-use fil_actors_runtime::ActorError;
+use fil_actors_runtime::{ActorError, Map2, DEFAULT_HAMT_CONFIG};
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple};
 use serde::{Deserialize, Serialize};

 /// State of the F3 light client actor.
@@ -22,34 +24,72 @@ pub struct State {
     pub light_client_state: LightClientState,
 }

+/// Stored HAMT value for power table entries.
+///
+/// The key of the HAMT is the validator ID, so storing `id` in the value would be redundant.
+#[derive(Deserialize_tuple, Serialize_tuple, Debug, Clone, PartialEq, Eq)]
+pub(crate) struct PowerEntryValue {
+    pub public_key: Vec<u8>,
+    pub power_be: Vec<u8>,
+}
+
+pub(crate) type PowerTable<BS> = Map2<BS, u64, PowerEntryValue>;
+
 impl State {
     /// Create a new F3 light client state
-    pub fn new(
-        instance_id: u64,
+    pub fn new<BS: Blockstore>(
+        store: &BS,
+        processed_instance_id: u64,
         power_table: Vec<PowerEntry>,
-        finalized_epochs: Vec<ChainEpoch>,
     ) -> Result<Self, ActorError> {
+        let power_table_root = {
+            let mut m = PowerTable::empty(store, DEFAULT_HAMT_CONFIG, "f3_power_table");
+            for pe in power_table {
+                let id = pe.id;
+                m.set(
+                    &id,
+                    PowerEntryValue {
+                        public_key: pe.public_key,
+                        power_be: pe.power_be,
+                    },
+                )?;
+            }
+            m.flush()?
+        };
+
         let state = State {
             light_client_state: LightClientState {
-                instance_id,
-                finalized_epochs,
-                power_table,
+                processed_instance_id,
+                power_table_root,
             },
         };

         Ok(state)
     }

     /// Update light client state
-    ///
-    /// This method should only be called from consensus code path which
-    /// contains the lightclient verifier. No additional validation is
-    /// performed here as it's expected to be done by the verifier.
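+    ///
+    /// Rebuilds the power table HAMT from the provided entries and records the new
+    /// `processed_instance_id`. Monotonicity of the instance ID is enforced by the caller
+    /// (the actor's `UpdateState` method), not here.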
     pub fn update_state(
         &mut self,
-        _rt: &impl Runtime,
-        new_state: LightClientState,
+        rt: &impl Runtime,
+        processed_instance_id: u64,
+        power_table: Vec<PowerEntry>,
     ) -> Result<(), ActorError> {
-        self.light_client_state = new_state;
+        let power_table_root = {
+            let mut m = PowerTable::empty(rt.store(), DEFAULT_HAMT_CONFIG, "f3_power_table");
+            for pe in power_table {
+                let id = pe.id;
+                m.set(
+                    &id,
+                    PowerEntryValue {
+                        public_key: pe.public_key,
+                        power_be: pe.power_be,
+                    },
+                )?;
+            }
+            m.flush()?
+        };
+
+        self.light_client_state.processed_instance_id = processed_instance_id;
+        self.light_client_state.power_table_root = power_table_root;
         Ok(())
     }
 }
diff --git a/fendermint/actors/f3-light-client/src/types.rs b/fendermint/actors/f3-light-client/src/types.rs
index 7065f700d4..68eeee4bea 100644
--- a/fendermint/actors/f3-light-client/src/types.rs
+++ b/fendermint/actors/f3-light-client/src/types.rs
@@ -6,65 +6,77 @@
 //! including the light client state structure that tracks F3 finality
 //! from the parent chain.

+use cid::Cid;
 use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple};
-use fvm_shared::clock::ChainEpoch;

 /// F3 Light Client State - maintains verifiable parent finality from the parent chain.
 ///
 /// This structure represents the essential state needed to track F3 finality:
-/// - Instance ID: The current F3 instance (can increment during protocol upgrades)
-/// - Finalized Epochs: Complete chain of finalized epochs (not just the latest)
+/// - Processed Instance ID: The latest F3 instance that has been fully processed on-chain
 /// - Power Table: Current validator power table (can change between instances)
 ///
 /// This state is extracted from F3 certificates received from the parent chain
 /// and stored by the actor for use in finality proofs.
 #[derive(Deserialize_tuple, Serialize_tuple, Debug, Clone, PartialEq, Eq)]
 pub struct LightClientState {
-    /// Current F3 instance ID
-    pub instance_id: u64,
-    /// Finalized chain - full list of finalized epochs
-    /// Matches ECChain from F3 certificates
-    /// Empty initially at genesis until first update
-    pub finalized_epochs: Vec<ChainEpoch>,
-    /// Current power table for this instance
-    /// Power table can change between instances
-    pub power_table: Vec<PowerEntry>,
+    /// Latest F3 instance ID that has been fully processed on-chain.
+    ///
+    /// This MUST only be advanced once the corresponding certificate is fully processed, i.e.
+    /// after executing the certificate's *last provable epoch* (the parent tipset of the last
+    /// `(parent, child)` proof window).
+    pub processed_instance_id: u64,
+    /// Root CID of the on-chain power table (HAMT).
+    ///
+    /// The actual entries are stored in the actor's blockstore and reachable from this root.
+    pub power_table_root: Cid,
 }

 /// Power table entry for F3 consensus
 #[derive(Deserialize_tuple, Serialize_tuple, Debug, Clone, PartialEq, Eq)]
 pub struct PowerEntry {
+    /// Validator ID (from F3 power table)
+    pub id: u64,
     /// Public key of the validator
     pub public_key: Vec<u8>,
-    /// Voting power of the validator
-    pub power: u64,
+    /// Voting power of the validator, encoded as unsigned big-endian bytes.
+    ///
+    /// Filecoin power values can exceed 64 bits; storing bytes avoids lossy conversions.
+    /// `[]` represents zero.
+    pub power_be: Vec<u8>,
 }

 /// Constructor parameters for the F3 light client actor
 #[derive(Deserialize_tuple, Serialize_tuple, Debug, Clone, PartialEq, Eq)]
 pub struct ConstructorParams {
-    /// Initial F3 instance ID (from genesis)
-    pub instance_id: u64,
+    /// Initial processed F3 instance ID (from genesis)
+    pub processed_instance_id: u64,
     /// Initial power table (from genesis)
     pub power_table: Vec<PowerEntry>,
-    /// Initial finalized epochs (from genesis certificate)
-    pub finalized_epochs: Vec<ChainEpoch>,
 }

 /// Parameters for updating the light client state
 #[derive(Deserialize_tuple, Serialize_tuple, Debug, Clone, PartialEq, Eq)]
 pub struct UpdateStateParams {
-    /// New light client state to store
-    pub state: LightClientState,
+    /// Latest processed F3 instance ID
+    pub processed_instance_id: u64,
+    /// New power table entries for this instance (authoritative).
+    pub power_table: Vec<PowerEntry>,
 }

 /// Response containing the current light client state
 #[derive(Deserialize_tuple, Serialize_tuple, Debug, Clone, PartialEq, Eq)]
 pub struct GetStateResponse {
-    /// Current F3 instance ID
-    pub instance_id: u64,
-    /// Finalized chain - full list of finalized epochs (ordered)
-    pub finalized_epochs: Vec<ChainEpoch>,
-    /// Current power table
+    /// Latest processed F3 instance ID
+    pub processed_instance_id: u64,
+    /// Root CID of the on-chain power table (HAMT).
+    ///
+    /// Note: this is **not** the same CID as the power table CID carried by F3 certificates.
+    /// In FIP-0086 `SupplementalData`, the power table CID is the
+    /// DagCBOR-blake2b256 CID of the CBOR-encoded power-table *array* ordered by
+    /// (power descending, participant ascending), not a HAMT root.
+    pub power_table_root: Cid,
+    /// Current power table (materialized).
+    ///
+    /// This is derived from `power_table_root` for convenience.
     pub power_table: Vec<PowerEntry>,
 }
diff --git a/fendermint/app/Cargo.toml b/fendermint/app/Cargo.toml
index 01c8a95803..9cc88f0349 100644
--- a/fendermint/app/Cargo.toml
+++ b/fendermint/app/Cargo.toml
@@ -22,6 +22,7 @@ libp2p = { workspace = true }
 libp2p-bitswap = { workspace = true }
 multiaddr = { workspace = true }
 num-traits = { workspace = true }
+num-bigint = { workspace = true }
 openssl = { workspace = true }
 paste = { workspace = true }
 prometheus = { workspace = true }
@@ -45,8 +46,10 @@ tracing-appender = { workspace = true }
 tracing-subscriber = { workspace = true }
 literally = { workspace = true }
 url = { workspace = true }
+ethers = { workspace = true }

 fendermint_abci = { path = "../abci" }
+ipc_actors_abis = { path = "../../contract-bindings" }
 actors-custom-api = { path = "../actors/api" }
 fendermint_actor_f3_light_client = { path = "../actors/f3-light-client" }
 fendermint_app_options = { path = "./options" }
@@ -73,8 +76,8 @@ fendermint_vm_snapshot = { path = "../vm/snapshot" }
 fendermint_vm_topdown = { path = "../vm/topdown" }
 fendermint_vm_topdown_proof_service = { path = "../vm/topdown/proof-service" }

-ipc_actors_abis = { path = "../../contract-bindings" }
-ethers = {workspace = true}
+# F3 certificate handling
+filecoin-f3-gpbft = { git = "https://github.com/moshababo/rust-f3", branch = "cargo-git-compat" }

 # .car file wrapped in a crate
 actors-builtin-car = { path = "../actors-builtin-car" }
diff --git a/fendermint/app/config/default.toml b/fendermint/app/config/default.toml
index 1aa0174248..b965e93ac7 100644
--- a/fendermint/app/config/default.toml
+++ b/fendermint/app/config/default.toml
@@ -273,6 +273,51 @@ vote_interval = 1
 # pausing the syncer, preventing new events to trigger votes.
vote_timeout = 60 +# # Top-down checkpoint configuration (uncomment to enable parent syncing) +# [ipc.topdown] +# # Number of blocks to delay before considering a parent block final +# chain_head_delay = 10 +# # Additional delay on top of chain_head_delay before proposing finality +# proposal_delay = 5 +# # Maximum number of blocks to propose in a single checkpoint +# max_proposal_range = 100 +# # Maximum number of blocks to cache (optional) +# # max_cache_blocks = 1000 +# # Parent syncing cron period, in seconds +# polling_interval = 30 +# # Exponential backoff retry base, in seconds +# exponential_back_off = 5 +# # Maximum number of retries before giving up +# exponential_retry_limit = 5 +# # Parent HTTP RPC endpoint +# parent_http_endpoint = "http://api.calibration.node.glif.io/rpc/v1" +# # Parent HTTP timeout (optional), in seconds +# # parent_http_timeout = 60 +# # Bearer token for Authorization header (optional) +# # parent_http_auth_token = "your-token-here" +# # Parent registry address +# parent_registry = "0x74539671a1d2f1c8f200826baba665179f53a1b7" +# # Parent gateway address +# parent_gateway = "0x77aa40b105843728088c0132e43fc44348881da8" +# +# # F3 proof service configuration (optional - for proof-based parent finality) +# # Requires genesis to have F3 parameters configured +# [ipc.topdown.proof_service] +# # Enable F3 proof-based parent finality (default: false) +# enabled = false +# # F3 network name - must match parent chain ("calibrationnet", "mainnet") +# f3_network_name = "calibrationnet" +# # How often to poll parent chain for new F3 certificates, in seconds +# polling_interval = 30 +# # How many F3 instances ahead to pre-generate proofs (lookahead window) +# lookahead_instances = 5 +# # How many old instances to keep after commitment (retention window) +# retention_instances = 2 +# # Gateway actor ID on parent chain (optional - derived from genesis if not set) +# # gateway_actor_id = 176609 +# # Or use Ethereum address (will be resolved to actor ID) +# # gateway_eth_address = "0xE4c61299c16323C4B58376b60A77F68Aa59afC8b" + # # Setting which are only allowed if the `--network` CLI parameter is `testnet`. 
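One plausible reading of the lookahead/retention windows configured above, sketched as a pure Rust function (illustrative only; the proof service's actual bookkeeping may differ):

// Window of F3 instances the proof cache would cover around the last committed instance.
fn instance_window(last_committed: u64, lookahead: u64, retention: u64) -> (u64, u64) {
    let oldest_retained = last_committed.saturating_sub(retention);
    let newest_pregenerated = last_committed.saturating_add(lookahead);
    (oldest_retained, newest_pregenerated)
}

// e.g. with the defaults above: instance_window(100, 5, 2) == (98, 105)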
# [testing] diff --git a/fendermint/app/settings/Cargo.toml b/fendermint/app/settings/Cargo.toml index 20aaeee513..db90508c24 100644 --- a/fendermint/app/settings/Cargo.toml +++ b/fendermint/app/settings/Cargo.toml @@ -32,3 +32,4 @@ ipc-observability = { path = "../../../ipc/observability" } fendermint_vm_encoding = { path = "../../vm/encoding" } fendermint_vm_topdown = { path = "../../vm/topdown" } +fendermint_vm_topdown_proof_service = { path = "../../vm/topdown/proof-service" } diff --git a/fendermint/app/settings/src/lib.rs b/fendermint/app/settings/src/lib.rs index ab738dfa75..2521f8aefe 100644 --- a/fendermint/app/settings/src/lib.rs +++ b/fendermint/app/settings/src/lib.rs @@ -27,6 +27,8 @@ use self::resolver::ResolverSettings; use ipc_observability::config::TracingSettings; use ipc_provider::config::deserialize::deserialize_eth_address_from_str; +use fendermint_vm_topdown_proof_service::ProofServiceConfig; + pub mod eth; pub mod fvm; pub mod resolver; @@ -226,6 +228,57 @@ pub struct TopDownSettings { /// The parent gateway address #[serde(deserialize_with = "deserialize_eth_address_from_str")] pub parent_gateway: Address, + /// F3 configuration (optional - for proof-based finality) + /// If Some, F3 proof-based finality is enabled; if None, use legacy voting-based finality + #[serde(default)] + pub f3: Option, +} + +/// F3 proof-based finality configuration +/// When present, F3 proof-based finality is enabled +#[serde_as] +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct F3 { + /// F3 proof service configuration (mandatory when F3 is configured) + pub proof_service: ProofServiceConfig, + + /// Retry policy for executing `ParentFinalityWithCert` when the local proof cache is missing. + /// + /// This affects catch-up: if a node did not have the cache entry during attestation (so it + /// didn't vote), it may still need to execute a committed block later once the proof-service + /// fills the cache. + #[serde(default)] + pub execution_cache_retry: F3ExecutionCacheRetrySettings, +} + +#[serde_as] +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct F3ExecutionCacheRetrySettings { + /// Initial backoff between retries. + #[serde_as(as = "DurationSeconds")] + pub backoff_initial: Duration, + /// Maximum backoff between retries. + #[serde_as(as = "DurationSeconds")] + pub backoff_max: Duration, + /// After this much waiting for a local cache entry during block execution, emit an + /// error-severity signal (but keep retrying). + #[serde_as(as = "DurationSeconds")] + #[serde(alias = "max_wait")] + pub critical_after: Duration, + /// Emit an `error!` log after this much waiting (and then periodically thereafter). 
+ #[serde_as(as = "DurationSeconds")] + pub error_after: Duration, +} + +impl Default for F3ExecutionCacheRetrySettings { + fn default() -> Self { + Self { + backoff_initial: Duration::from_millis(200), + backoff_max: Duration::from_secs(5), + critical_after: Duration::from_secs(10 * 60), + error_after: Duration::from_secs(2 * 60), + } + } } #[serde_as] diff --git a/fendermint/app/src/app.rs b/fendermint/app/src/app.rs index 747f79b130..73954fc22f 100644 --- a/fendermint/app/src/app.rs +++ b/fendermint/app/src/app.rs @@ -23,8 +23,8 @@ use fendermint_storage::{ }; use fendermint_vm_core::Timestamp; use fendermint_vm_interpreter::fvm::state::{ - empty_state_tree, CheckStateRef, FvmExecState, FvmQueryState, FvmStateParams, - FvmUpdatableParams, + empty_state_tree, ipc::F3LightClientCaller, CheckStateRef, FvmExecState, FvmQueryState, + FvmStateParams, FvmUpdatableParams, }; use fendermint_vm_interpreter::fvm::store::ReadOnlyBlockstore; use fendermint_vm_interpreter::genesis::{read_genesis_car, GenesisAppState}; @@ -218,6 +218,77 @@ where } } +/// Create a read-only execution state for querying actors before app initialization. +/// +/// This allows querying state (e.g., F3 Light Client) before the full App is created. +/// Returns `None` if the state hasn't been initialized by genesis yet. +pub fn create_read_only_exec_state( + db: &DB, + state_store: &BS, + namespace: KV::Namespace, +) -> Result>>>> +where + KV: KVStore + Codec + Encode, + DB: KVReadable + 'static, + BS: Blockstore + Clone + 'static + Send + Sync, +{ + // Read committed state from database (same pattern as get_committed_state) + let tx = db.read(); + let state: Option = tx + .get(&namespace, &AppStoreKey::State) + .context("get failed")?; + + let state = match state { + Some(s) => s, + None => return Ok(None), + }; + + let block_height = state.app_state.block_height; + let state_params = state.app_state.state_params; + + // Check if state is queryable (genesis has been initialized) + // It's really the empty state tree that would be the best indicator. + if block_height == 0 + && state_params.timestamp.0 == 0 + && state_params.network_version == NetworkVersion::V0 + { + return Ok(None); + } + + // Create MultiEngine (same as in App::new) + let multi_engine = Arc::new(MultiEngine::new(1)); + + // Create read-only execution state + let exec_state = FvmExecState::new( + ReadOnlyBlockstore::new(Arc::new(state_store.clone())), + multi_engine.as_ref(), + block_height as ChainEpoch, + state_params, + ) + .context("error creating execution state")?; + + Ok(Some(exec_state)) +} + +/// Query the F3 Light Client Actor state from a read-only execution state. +/// Returns the actor state if F3 is initialized, None otherwise. +pub fn query_f3_state( + exec_state: &mut FvmExecState>, +) -> Result> +where + BS: Blockstore + Clone + 'static + Send + Sync, +{ + let f3_caller = F3LightClientCaller::new(); + match f3_caller.get_state(exec_state) { + Ok(state) => Ok(Some(state)), + Err(e) => { + // F3 actor might not be deployed (non-Filecoin parent) + tracing::debug!("F3 Light Client Actor not found or not accessible: {}", e); + Ok(None) + } + } +} + impl App where KV: KVStore @@ -481,6 +552,12 @@ where .context("Validator cache is not available")? .get_validator(id) } + + /// Get access to the messages interpreter + /// Used to access the TopDownManager for updating the proof cache + pub fn interpreter(&self) -> &Arc { + &self.messages_interpreter + } } // NOTE: The `Application` interface doesn't allow failures at the moment. 
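Taken together, the two helpers above are used roughly like this at node startup (mirroring the topdown service wiring later in this change; `db`, `state_store` and `ns` are the handles opened during startup):

// Probe the committed state for F3 light-client data before constructing the App.
let exec_state =
    crate::app::create_read_only_exec_state::<_, _, AppStore>(&db, &state_store, ns.app.clone())
        .context("failed to create read-only exec state")?;

if let Some(mut state) = exec_state {
    if let Some(f3) = crate::app::query_f3_state(&mut state)? {
        tracing::info!(
            instance = f3.processed_instance_id,
            "F3 light client state found in committed state"
        );
    }
}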
The protobuf diff --git a/fendermint/app/src/cmd/genesis.rs b/fendermint/app/src/cmd/genesis.rs index d0365eeb85..1bcb3a1104 100644 --- a/fendermint/app/src/cmd/genesis.rs +++ b/fendermint/app/src/cmd/genesis.rs @@ -350,8 +350,9 @@ pub async fn seal_genesis(genesis_file: &PathBuf, args: &SealGenesisArgs) -> any builder.write_to(args.output_path.clone()).await } -/// Fetches F3 parameters for a specific instance ID from the parent Filecoin chain +/// Fetches F3 parameters from the parent Filecoin chain async fn fetch_f3_params_from_parent( + subnet_id: &SubnetID, parent_endpoint: &url::Url, parent_auth_token: Option<&String>, instance_id: u64, @@ -359,7 +360,7 @@ async fn fetch_f3_params_from_parent( tracing::info!( "Fetching F3 parameters for instance {} from parent chain at {}", instance_id, - parent_endpoint + parent_endpoint, ); let jsonrpc_client = JsonRpcClientImpl::new( @@ -367,24 +368,66 @@ async fn fetch_f3_params_from_parent( parent_auth_token.map(|s| s.as_str()), ); - // We use a dummy subnet ID here since F3 data is at the chain level, not subnet-specific + // We use a dummy subnet ID for the Lotus client since these RPC calls are chain-level, + // but the F3 network name derivation (for certificate fetch) uses the real subnet root. let lotus_client = LotusJsonRPCClient::new(jsonrpc_client, SubnetID::default()); + // Fetch the F3 certificate for the specific instance so we can deterministically + // derive the last finalized epoch for this instance (and its ETH block hash). + let cert = fendermint_vm_topdown_proof_service::fetch_certificate( + &parent_endpoint.to_string(), + subnet_id, + instance_id, + ) + .await + .context("failed to fetch F3 certificate for instance")?; + if cert.gpbft_instance != instance_id { + anyhow::bail!( + "F3 certificate instance mismatch: requested {}, got {}", + instance_id, + cert.gpbft_instance + ); + } + // Genesis treats the configured `instance_id` certificate as already committed. + // Use the cert's last provable parent tipset, or the base tipset if the ECChain has no provable window: + // - base-only ECChain (len=1): use the base tipset + // - otherwise: use the last provable parent tipset (second-to-last) + let last_provable_tipset = + fendermint_vm_topdown_proof_service::types::last_provable_or_base_tipset(&cert.ec_chain) + .with_context(|| { + format!( + "failed to derive genesis base_epoch from cert {}", + instance_id + ) + })?; + let base_epoch = last_provable_tipset.epoch; + let base_epoch_eth_block_hash = + fendermint_vm_topdown_proof_service::types::eth_hash_from_tipset_key_bytes( + &last_provable_tipset.key, + ) + .context("failed to derive base_epoch_eth_block_hash from ECChain last provable tipset")?; + // Get base power table for the specified instance let power_table_response = lotus_client.f3_get_power_table(instance_id).await?; - // Convert power entries + // Convert power entries (power can exceed 64 bits; store as big-endian bytes). let power_table: anyhow::Result> = power_table_response .iter() .map(|entry| { // Decode base64 public key let public_key_bytes = base64::Engine::decode(&base64::engine::general_purpose::STANDARD, &entry.pub_key)?; - // Parse the power string to u64 - let power = entry.power.parse::()?; + // Parse the power string as BigInt (decimal) and encode as unsigned big-endian bytes. 
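For reference, the `power_be` convention round-trips with the num-bigint crate roughly as follows (an illustrative sketch; these helper names are not part of this change):

use num_bigint::{BigInt, Sign};

// Decimal string (as returned by the Lotus power table RPC) -> unsigned big-endian bytes.
fn encode_power_be(decimal: &str) -> anyhow::Result<Vec<u8>> {
    let power = BigInt::parse_bytes(decimal.as_bytes(), 10)
        .ok_or_else(|| anyhow::anyhow!("invalid power string '{decimal}'"))?;
    let (sign, be) = power.to_bytes_be();
    anyhow::ensure!(sign != Sign::Minus, "negative power");
    Ok(be)
}

// Unsigned big-endian bytes -> BigInt; the empty slice decodes to zero, matching the `[]` convention.
fn decode_power_be(be: &[u8]) -> BigInt {
    BigInt::from_bytes_be(Sign::Plus, be)
}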
+ let power = num_bigint::BigInt::parse_bytes(entry.power.as_bytes(), 10) + .ok_or_else(|| anyhow::anyhow!("invalid power string '{}'", entry.power))?; + let (sign, power_be) = power.to_bytes_be(); + if sign == num_bigint::Sign::Minus { + anyhow::bail!("negative power for participant id {}", entry.id); + } Ok(types::PowerEntry { + id: entry.id, public_key: public_key_bytes, - power, + power_be, }) }) .collect(); @@ -396,6 +439,8 @@ async fn fetch_f3_params_from_parent( ); Ok(Some(ipc::F3Params { instance_id, + base_epoch, + base_epoch_eth_block_hash, power_table, })) } @@ -440,7 +485,9 @@ pub async fn new_genesis_from_parent( ) })?; + tracing::info!("Fetching F3 data from parent Filecoin chain"); fetch_f3_params_from_parent( + &args.subnet_id, parent_rpc, args.parent_filecoin_auth_token.as_ref(), f3_instance_id, diff --git a/fendermint/app/src/cmd/proof_cache.rs b/fendermint/app/src/cmd/proof_cache.rs index 38aa40e6a0..dcb4f861fe 100644 --- a/fendermint/app/src/cmd/proof_cache.rs +++ b/fendermint/app/src/cmd/proof_cache.rs @@ -5,6 +5,7 @@ use crate::cmd; use crate::options::proof_cache::{ProofCacheArgs, ProofCacheCommands}; use fendermint_vm_topdown_proof_service::persistence::ProofCachePersistence; use std::path::Path; +use std::path::PathBuf; cmd! { ProofCacheArgs(self) { @@ -24,7 +25,7 @@ fn handle_proof_cache_command(args: &ProofCacheArgs) -> anyhow::Result<()> { } } -fn inspect_cache(db_path: &Path) -> anyhow::Result<()> { +fn inspect_cache(db_path: &PathBuf) -> anyhow::Result<()> { println!("=== Proof Cache Inspection ==="); println!("Database: {}", db_path.display()); println!(); @@ -69,7 +70,7 @@ fn inspect_cache(db_path: &Path) -> anyhow::Result<()> { Ok(()) } -fn show_stats(db_path: &Path) -> anyhow::Result<()> { +fn show_stats(db_path: &PathBuf) -> anyhow::Result<()> { println!("=== Proof Cache Statistics ==="); println!("Database: {}", db_path.display()); println!(); @@ -125,7 +126,7 @@ fn show_stats(db_path: &Path) -> anyhow::Result<()> { Ok(()) } -fn get_proof(db_path: &Path, instance_id: u64) -> anyhow::Result<()> { +fn get_proof(db_path: &PathBuf, instance_id: u64) -> anyhow::Result<()> { println!("=== Get Proof for Instance {} ===", instance_id); println!("Database: {}", db_path.display()); println!(); diff --git a/fendermint/app/src/service/mod.rs b/fendermint/app/src/service/mod.rs index a0af14723f..9604dea451 100644 --- a/fendermint/app/src/service/mod.rs +++ b/fendermint/app/src/service/mod.rs @@ -3,3 +3,4 @@ pub mod eth_api; pub mod node; +mod topdown; diff --git a/fendermint/app/src/service/node.rs b/fendermint/app/src/service/node.rs index d2baffacd8..ffa4af6ffa 100644 --- a/fendermint/app/src/service/node.rs +++ b/fendermint/app/src/service/node.rs @@ -2,41 +2,26 @@ // SPDX-License-Identifier: Apache-2.0, MIT use anyhow::{anyhow, bail, Context}; -use async_stm::atomically_or_err; use fendermint_abci::ApplicationService; -use fendermint_crypto::SecretKey; use fendermint_rocksdb::{blockstore::NamespaceBlockstore, namespaces, RocksDb, RocksDbConfig}; -use fendermint_vm_actor_interface::eam::EthAddress; use fendermint_vm_interpreter::fvm::interpreter::FvmMessagesInterpreter; use fendermint_vm_interpreter::fvm::observe::register_metrics as register_interpreter_metrics; -use fendermint_vm_interpreter::fvm::topdown::TopDownManager; use fendermint_vm_interpreter::fvm::upgrades::UpgradeScheduler; use fendermint_vm_snapshot::{SnapshotManager, SnapshotParams}; use fendermint_vm_topdown::observe::register_metrics as register_topdown_metrics; -use 
fendermint_vm_topdown::proxy::{IPCProviderProxy, IPCProviderProxyWithLatency}; -use fendermint_vm_topdown::sync::launch_polling_syncer; -use fendermint_vm_topdown::voting::{publish_vote_loop, Error as VoteError, VoteTally}; -use fendermint_vm_topdown::{CachedFinalityProvider, IPCParentFinality, Toggle}; use fvm_shared::address::{current_network, Address, Network}; -use ipc_ipld_resolver::{Event as ResolverEvent, VoteRecord}; use ipc_observability::observe::register_metrics as register_default_metrics; -use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig}; -use ipc_provider::IpcProvider; -use libp2p::identity::secp256k1; -use libp2p::identity::Keypair; -use std::sync::Arc; use tokio::select; -use tokio::sync::broadcast::error::RecvError; use tokio_util::sync::CancellationToken; use tower::ServiceBuilder; use tracing::info; use crate::cmd::key::read_secret_key; -use crate::ipc::{AppParentFinalityQuery, AppVote}; use crate::observe::register_metrics as register_consensus_metrics; -use crate::{App, AppConfig, AppStore, BitswapBlockstore}; +use crate::{App, AppConfig, AppStore}; use fendermint_app_settings::{AccountKind, Settings}; +use super::topdown::start_topdown_if_enabled; use fendermint_vm_interpreter::fvm::end_block_hook::EndBlockManager; // Database collection names. @@ -121,102 +106,16 @@ pub async fn run( let state_store = NamespaceBlockstore::new(db.clone(), ns.state_store).context("error creating state DB")?; - let parent_finality_votes = VoteTally::empty(); - - let topdown_enabled = settings.topdown_enabled(); - - // If enabled, start a resolver that communicates with the application through the resolve pool. - if settings.resolver_enabled() { - let mut service = - make_resolver_service(&settings, db.clone(), state_store.clone(), ns.bit_store)?; - - // Register all metrics from the IPLD resolver stack - if let Some(ref registry) = metrics_registry { - service - .register_metrics(registry) - .context("failed to register IPLD resolver metrics")?; - } - - let client = service.client(); - - let own_subnet_id = settings.ipc.subnet_id.clone(); - - client - .add_provided_subnet(own_subnet_id.clone()) - .context("error adding own provided subnet.")?; - - if topdown_enabled { - if let Some(key) = validator_keypair { - let parent_finality_votes = parent_finality_votes.clone(); - - tracing::info!("starting the parent finality vote gossip loop..."); - tokio::spawn(async move { - publish_vote_loop( - parent_finality_votes, - settings.ipc.vote_interval, - settings.ipc.vote_timeout, - key, - own_subnet_id, - client, - |height, block_hash| { - AppVote::ParentFinality(IPCParentFinality { height, block_hash }) - }, - ) - .await - }); - } - } else { - tracing::info!("parent finality vote gossip disabled"); - } - - tracing::info!("subscribing to gossip..."); - let rx = service.subscribe(); - let parent_finality_votes = parent_finality_votes.clone(); - tokio::spawn(async move { - dispatch_resolver_events(rx, parent_finality_votes, topdown_enabled).await; - }); - - tracing::info!("starting the IPLD Resolver Service..."); - tokio::spawn(async move { - if let Err(e) = service.run().await { - tracing::error!("IPLD Resolver Service failed: {e:#}") - } - }); - } else { - tracing::info!("IPLD Resolver disabled.") - } - - let (parent_finality_provider, ipc_tuple) = if topdown_enabled { - info!("topdown finality enabled"); - let topdown_config = settings.ipc.topdown_config()?; - let mut config = fendermint_vm_topdown::Config::new( - topdown_config.chain_head_delay, - topdown_config.polling_interval, - 
topdown_config.exponential_back_off, - topdown_config.exponential_retry_limit, - ) - .with_proposal_delay(topdown_config.proposal_delay) - .with_max_proposal_range(topdown_config.max_proposal_range); - - if let Some(v) = topdown_config.max_cache_blocks { - info!(value = v, "setting max cache blocks"); - config = config.with_max_cache_blocks(v); - } - - let ipc_provider = { - let p = make_ipc_provider_proxy(&settings)?; - Arc::new(IPCProviderProxyWithLatency::new(p)) - }; - - let finality_provider = - CachedFinalityProvider::uninitialized(config.clone(), ipc_provider.clone()).await?; - - let p = Arc::new(Toggle::enabled(finality_provider)); - (p, Some((ipc_provider, config))) - } else { - info!("topdown finality disabled"); - (Arc::new(Toggle::disabled()), None) - }; + let topdown = start_topdown_if_enabled( + &settings, + &db, + &state_store, + ns.app.clone(), + ns.bit_store.clone(), + validator_keypair, + metrics_registry.as_ref(), + ) + .await?; // Start a snapshot manager in the background. let snapshots = if settings.snapshots.enabled { @@ -245,14 +144,9 @@ pub async fn run( }; let end_block_manager = EndBlockManager::new(); - let top_down_manager = TopDownManager::new( - parent_finality_provider.clone(), - parent_finality_votes.clone(), - ); - let interpreter = FvmMessagesInterpreter::new( end_block_manager, - top_down_manager, + topdown.manager(), UpgradeScheduler::new(), testing_settings.is_none_or(|t| t.push_chain_meta), settings.abci.block_max_msgs, @@ -274,24 +168,9 @@ pub async fn run( snapshots, )?; - if let Some((agent_proxy, config)) = ipc_tuple { - let app_parent_finality_query = AppParentFinalityQuery::new(app.clone()); - tokio::spawn(async move { - match launch_polling_syncer( - app_parent_finality_query, - config, - parent_finality_provider, - parent_finality_votes, - agent_proxy, - tendermint_client, - ) - .await - { - Ok(_) => {} - Err(e) => tracing::error!("cannot launch polling syncer: {e}"), - } - }); - } + topdown + .spawn_legacy_polling_syncer_if_needed(app.clone(), tendermint_client.clone()) + .await?; // Start the metrics on a background thread. if let Some(registry) = metrics_registry { @@ -370,173 +249,12 @@ fn open_db(settings: &Settings, ns: &Namespaces) -> anyhow::Result { Ok(db) } -fn make_resolver_service( - settings: &Settings, - db: RocksDb, - state_store: NamespaceBlockstore, - bit_store_ns: String, -) -> anyhow::Result> { - // Blockstore for Bitswap. - let bit_store = NamespaceBlockstore::new(db, bit_store_ns).context("error creating bit DB")?; - - // Blockstore for Bitswap with a fallback on the actor store for reads. 
- let bitswap_store = BitswapBlockstore::new(state_store, bit_store); - - let config = to_resolver_config(settings).context("error creating resolver config")?; - - let service = ipc_ipld_resolver::Service::new(config, bitswap_store) - .context("error creating IPLD Resolver Service")?; - - Ok(service) -} - -fn make_ipc_provider_proxy(settings: &Settings) -> anyhow::Result { - let topdown_config = settings.ipc.topdown_config()?; - let subnet = ipc_provider::config::Subnet { - id: settings - .ipc - .subnet_id - .parent() - .ok_or_else(|| anyhow!("subnet has no parent"))?, - config: SubnetConfig::Fevm(EVMSubnet { - provider_http: topdown_config - .parent_http_endpoint - .to_string() - .parse() - .unwrap(), - provider_timeout: topdown_config.parent_http_timeout, - auth_token: topdown_config.parent_http_auth_token.as_ref().cloned(), - registry_addr: topdown_config.parent_registry, - gateway_addr: topdown_config.parent_gateway, - }), - }; - info!("init ipc provider with subnet: {}", subnet.id); - - let ipc_provider = IpcProvider::new_with_subnet(None, subnet)?; - IPCProviderProxy::new(ipc_provider, settings.ipc.subnet_id.clone()) -} - -fn to_resolver_config(settings: &Settings) -> anyhow::Result { - use ipc_ipld_resolver::{ - Config, ConnectionConfig, ContentConfig, DiscoveryConfig, MembershipConfig, NetworkConfig, - }; - - let r = &settings.resolver; - - let local_key: Keypair = { - let path = r.network.local_key(settings.home_dir()); - let sk = read_secret_key(&path)?; - let sk = secp256k1::SecretKey::try_from_bytes(sk.serialize())?; - secp256k1::Keypair::from(sk).into() - }; - - let network_name = format!( - "ipld-resolver-{}-{}", - settings.ipc.subnet_id.root_id(), - r.network.network_name - ); - - let config = Config { - connection: ConnectionConfig { - listen_addr: r.connection.listen_addr.clone(), - external_addresses: r.connection.external_addresses.clone(), - expected_peer_count: r.connection.expected_peer_count, - max_incoming: r.connection.max_incoming, - max_peers_per_query: r.connection.max_peers_per_query, - event_buffer_capacity: r.connection.event_buffer_capacity, - }, - network: NetworkConfig { - local_key, - network_name, - }, - discovery: DiscoveryConfig { - static_addresses: r.discovery.static_addresses.clone(), - target_connections: r.discovery.target_connections, - enable_kademlia: r.discovery.enable_kademlia, - }, - membership: MembershipConfig { - static_subnets: r.membership.static_subnets.clone(), - max_subnets: r.membership.max_subnets, - publish_interval: r.membership.publish_interval, - min_time_between_publish: r.membership.min_time_between_publish, - max_provider_age: r.membership.max_provider_age, - }, - content: ContentConfig { - rate_limit_bytes: r.content.rate_limit_bytes, - rate_limit_period: r.content.rate_limit_period, - }, - }; - - Ok(config) -} - -fn to_address(sk: &SecretKey, kind: &AccountKind) -> anyhow::Result
<Address> { +fn to_address(sk: &fendermint_crypto::SecretKey, kind: &AccountKind) -> anyhow::Result<Address>
{ let pk = sk.public_key().serialize(); match kind { AccountKind::Regular => Ok(Address::new_secp256k1(&pk)?), - AccountKind::Ethereum => Ok(Address::from(EthAddress::new_secp256k1(&pk)?)), - } -} - -async fn dispatch_resolver_events( - mut rx: tokio::sync::broadcast::Receiver>, - parent_finality_votes: VoteTally, - topdown_enabled: bool, -) { - loop { - match rx.recv().await { - Ok(event) => match event { - ResolverEvent::ReceivedPreemptive(_, _) => {} - ResolverEvent::ReceivedVote(vote) => { - dispatch_vote(*vote, &parent_finality_votes, topdown_enabled).await; - } - }, - Err(RecvError::Lagged(n)) => { - tracing::warn!("the resolver service skipped {n} gossip events") - } - Err(RecvError::Closed) => { - tracing::error!("the resolver service stopped receiving gossip"); - return; - } - } - } -} - -async fn dispatch_vote( - vote: VoteRecord, - parent_finality_votes: &VoteTally, - topdown_enabled: bool, -) { - match vote.content { - AppVote::ParentFinality(f) => { - if !topdown_enabled { - tracing::debug!("ignoring vote; topdown disabled"); - return; - } - let res = atomically_or_err(|| { - parent_finality_votes.add_vote( - vote.public_key.clone(), - f.height, - f.block_hash.clone(), - ) - }) - .await; - - match res { - Err(e @ VoteError::Equivocation(_, _, _, _)) => { - tracing::warn!(error = e.to_string(), "failed to handle vote"); - } - Err(e @ ( - VoteError::Uninitialized // early vote, we're not ready yet - | VoteError::UnpoweredValidator(_) // maybe arrived too early or too late, or spam - | VoteError::UnexpectedBlock(_, _) // won't happen here - )) => { - tracing::debug!(error = e.to_string(), "failed to handle vote"); - } - _ => { - tracing::debug!("vote handled"); - } - }; - } + AccountKind::Ethereum => Ok(Address::from( + fendermint_vm_actor_interface::eam::EthAddress::new_secp256k1(&pk)?, + )), } } diff --git a/fendermint/app/src/service/topdown.rs b/fendermint/app/src/service/topdown.rs new file mode 100644 index 0000000000..21e5208f57 --- /dev/null +++ b/fendermint/app/src/service/topdown.rs @@ -0,0 +1,585 @@ +// Copyright 2022-2026 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::sync::Arc; + +use anyhow::{anyhow, bail, Context}; +use async_stm::atomically_or_err; +use fendermint_rocksdb::blockstore::NamespaceBlockstore; +use fendermint_rocksdb::RocksDb; +use fendermint_vm_interpreter::fvm::interpreter::FvmMessagesInterpreter; +use fendermint_vm_interpreter::fvm::topdown::TopDownManager; +use fendermint_vm_interpreter::fvm::LegacyTopDownHandler; +use fendermint_vm_topdown::proxy::{IPCProviderProxy, IPCProviderProxyWithLatency}; +use fendermint_vm_topdown::sync::launch_polling_syncer; +use fendermint_vm_topdown::voting::{publish_vote_loop, Error as VoteError, VoteTally}; +use fendermint_vm_topdown::{CachedFinalityProvider, IPCParentFinality, Toggle}; +use ipc_api::subnet_id::SubnetID; +use ipc_ipld_resolver::{Event as ResolverEvent, VoteRecord}; +use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig}; +use ipc_provider::IpcProvider; +use libp2p::identity::secp256k1; +use libp2p::identity::Keypair; +use tokio::sync::broadcast::error::RecvError; + +use crate::cmd::key::read_secret_key; +use crate::ipc::AppParentFinalityQuery; +use crate::ipc::AppVote; +use crate::{App, AppStore, BitswapBlockstore}; +use fendermint_app_settings::{Settings, TopDownSettings}; +use fendermint_storage::KVStore; + +type TopDownFinalityProvider = Arc>>; + +/// Legacy topdown background tasks which require a live `App` instance. 
+struct LegacyPostInit { + agent_proxy: Arc, + config: fendermint_vm_topdown::Config, + parent_finality_provider: TopDownFinalityProvider, + parent_finality_votes: VoteTally, +} + +/// Result of topdown initialization performed before `App::new()`. +pub(super) struct TopDownInit { + manager: TopDownManager, + legacy_post_init: Option, +} + +impl TopDownInit { + pub(super) fn manager(&self) -> TopDownManager { + self.manager.clone() + } + + pub(super) async fn spawn_legacy_polling_syncer_if_needed( + self, + app: App< + RocksDb, + NamespaceBlockstore, + AppStore, + FvmMessagesInterpreter, + >, + tendermint_client: tendermint_rpc::HttpClient, + ) -> anyhow::Result<()> { + if let Some(p) = self.legacy_post_init { + let app_parent_finality_query = AppParentFinalityQuery::new(app); + tokio::spawn(async move { + match launch_polling_syncer( + app_parent_finality_query, + p.config, + p.parent_finality_provider, + p.parent_finality_votes, + p.agent_proxy, + tendermint_client, + ) + .await + { + Ok(_) => {} + Err(e) => tracing::error!("cannot launch polling syncer: {e}"), + } + }); + } + Ok(()) + } +} + +/// Initialize topdown (legacy or F3) before creating the `App`. +/// +/// Returns the `TopDownManager` to be put into the interpreter, and a `TopDownInit` handle +/// with any required post-`App::new()` work. +pub(super) async fn start_topdown_if_enabled( + settings: &Settings, + db: &RocksDb, + state_store: &NamespaceBlockstore, + app_namespace: ::Namespace, + bit_store_namespace: String, + validator_keypair: Option, + metrics_registry: Option<&prometheus::Registry>, +) -> anyhow::Result { + // If topdown is disabled, return a disabled topdown manager and no post-init tasks. + if !settings.topdown_enabled() { + return Ok(TopDownInit { + manager: TopDownManager::disabled(), + legacy_post_init: None, + }); + } + + let topdown_config = settings + .ipc + .topdown_config() + .context("topdown is enabled but topdown config is missing")?; + + let f3_enabled_in_config = topdown_config.f3.is_some(); + let f3_state_in_committed_state = + query_f3_state_in_committed_state(db, state_store, app_namespace.clone())?; + let gateway_finality_in_committed_state = + query_gateway_parent_finality_in_committed_state(db, state_store, app_namespace.clone())?; + let gateway_event_cursor_in_committed_state = + query_gateway_event_cursor_in_committed_state(db, state_store, app_namespace.clone())?; + + // Fail-fast consistency between config and committed state. + // + // - If committed state has F3 state, config must enable F3. + // - If config enables F3, committed state must have initial F3 state. 
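Read as a truth table, the fail-fast rules above reduce to the following (a sketch with an assumed helper name; the production code bails with the messages shown just below):

fn f3_consistency(f3_in_config: bool, f3_in_committed_state: bool) -> Result<(), &'static str> {
    match (f3_in_config, f3_in_committed_state) {
        (false, true) => Err("F3 is enabled in committed state but not in config"),
        (true, false) => Err("F3 is enabled in config but initial F3 state is missing in committed state"),
        _ => Ok(()),
    }
}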
+ if f3_state_in_committed_state.is_some() && !f3_enabled_in_config { + bail!("F3 is enabled in committed state but not in config"); + } + if f3_enabled_in_config && f3_state_in_committed_state.is_none() { + bail!("F3 is enabled in config but initial F3 state is missing in committed state"); + } + + if f3_enabled_in_config { + if gateway_finality_in_committed_state.is_none() { + bail!("F3 is enabled but gateway latest parent finality is missing in committed state"); + } + return start_f3_topdown( + settings, + topdown_config, + f3_state_in_committed_state, + gateway_finality_in_committed_state, + gateway_event_cursor_in_committed_state, + ) + .await; + } + + start_legacy_topdown( + settings, + topdown_config, + validator_keypair, + db.clone(), + state_store.clone(), + bit_store_namespace, + metrics_registry, + ) + .await +} + +#[derive(Debug, Clone, Copy)] +struct GatewayEventCursor { + applied_top_down_nonce: u64, + next_power_change_config_number: u64, +} + +fn query_f3_state_in_committed_state( + db: &RocksDb, + state_store: &NamespaceBlockstore, + app_namespace: ::Namespace, +) -> anyhow::Result> { + // Query F3 state from committed state once (used for fail-fast + F3 cache init). + let exec_state = + crate::app::create_read_only_exec_state::<_, _, AppStore>(db, state_store, app_namespace) + .context("failed to create read-only exec state")?; + + let f3_state_in_committed_state = match exec_state { + Some(mut state) => crate::app::query_f3_state(&mut state) + .context("failed to query F3 state from committed state")?, + None => None, + }; + + Ok(f3_state_in_committed_state) +} + +fn query_gateway_parent_finality_in_committed_state( + db: &RocksDb, + state_store: &NamespaceBlockstore, + app_namespace: ::Namespace, +) -> anyhow::Result> { + type ROStore = fendermint_vm_interpreter::fvm::store::ReadOnlyBlockstore< + std::sync::Arc, + >; + // Query the gateway's latest parent finality from committed/genesis state once. + let exec_state = + crate::app::create_read_only_exec_state::<_, _, AppStore>(db, state_store, app_namespace) + .context("failed to create read-only exec state")?; + + let latest = match exec_state { + Some(mut state) => { + let gw = + fendermint_vm_interpreter::fvm::state::ipc::GatewayCaller::::default(); + Some(gw.get_latest_parent_finality(&mut state)?) 
+ } + None => None, + }; + + Ok(latest) +} + +fn query_gateway_event_cursor_in_committed_state( + db: &RocksDb, + state_store: &NamespaceBlockstore, + app_namespace: ::Namespace, +) -> anyhow::Result> { + type ROStore = fendermint_vm_interpreter::fvm::store::ReadOnlyBlockstore< + std::sync::Arc, + >; + let exec_state = + crate::app::create_read_only_exec_state::<_, _, AppStore>(db, state_store, app_namespace) + .context("failed to create read-only exec state")?; + + let cursor = match exec_state { + Some(mut state) => { + let gw = + fendermint_vm_interpreter::fvm::state::ipc::GatewayCaller::::default(); + let applied_top_down_nonce = gw.applied_top_down_nonce(&mut state)?; + let (next_cfg, _start_cfg) = gw.tracker_configuration_numbers(&mut state)?; + Some(GatewayEventCursor { + applied_top_down_nonce, + next_power_change_config_number: next_cfg, + }) + } + None => None, + }; + + Ok(cursor) +} + +fn make_resolver_service( + settings: &Settings, + db: RocksDb, + state_store: NamespaceBlockstore, + bit_store_ns: String, +) -> anyhow::Result> { + let bit_store = NamespaceBlockstore::new(db, bit_store_ns).context("error creating bit DB")?; + let bitswap_store = BitswapBlockstore::new(state_store, bit_store); + let config = to_resolver_config(settings).context("error creating resolver config")?; + let service = ipc_ipld_resolver::Service::new(config, bitswap_store) + .context("error creating IPLD Resolver Service")?; + Ok(service) +} + +fn make_ipc_provider_proxy(settings: &Settings) -> anyhow::Result { + let topdown_config = settings.ipc.topdown_config()?; + let subnet = ipc_provider::config::Subnet { + id: settings + .ipc + .subnet_id + .parent() + .ok_or_else(|| anyhow!("subnet has no parent"))?, + config: SubnetConfig::Fevm(EVMSubnet { + provider_http: topdown_config + .parent_http_endpoint + .to_string() + .parse() + .unwrap(), + provider_timeout: topdown_config.parent_http_timeout, + auth_token: topdown_config.parent_http_auth_token.as_ref().cloned(), + registry_addr: topdown_config.parent_registry, + gateway_addr: topdown_config.parent_gateway, + }), + }; + + tracing::info!("init ipc provider with subnet: {}", subnet.id); + let ipc_provider = IpcProvider::new_with_subnet(None, subnet)?; + IPCProviderProxy::new(ipc_provider, settings.ipc.subnet_id.clone()) +} + +async fn start_legacy_topdown( + settings: &Settings, + topdown_config: &TopDownSettings, + validator_keypair: Option, + db: RocksDb, + state_store: NamespaceBlockstore, + bit_store_ns: String, + metrics_registry: Option<&prometheus::Registry>, +) -> anyhow::Result { + let parent_finality_votes = VoteTally::empty(); + // Resolver is required for legacy mode (vote gossip + quorum collection). + if !settings.resolver_enabled() { + bail!("IPLD Resolver is disabled but legacy topdown is enabled"); + } + + let mut service = make_resolver_service(settings, db, state_store.clone(), bit_store_ns)?; + + if let Some(registry) = metrics_registry { + service + .register_metrics(registry) + .context("failed to register IPLD resolver metrics")?; + } + + let client = service.client(); + let own_subnet_id = settings.ipc.subnet_id.clone(); + + client + .add_provided_subnet(own_subnet_id.clone()) + .context("error adding own provided subnet.")?; + + // NOTE: Legacy topdown can run in a non-validator mode. + // + // Non-validator nodes should still start up and subscribe to votes (so they can + // observe quorum and execute committed checkpoints), but they cannot *publish* + // votes without a validator keypair. 
+ if let Some(key) = validator_keypair { + let parent_finality_votes_for_votes_loop = parent_finality_votes.clone(); + let vote_interval = settings.ipc.vote_interval; + let vote_timeout = settings.ipc.vote_timeout; + let own_subnet_id_for_votes_loop = own_subnet_id.clone(); + let client_for_votes_loop = client.clone(); + + tracing::info!("starting the parent finality vote gossip loop..."); + tokio::spawn(async move { + publish_vote_loop( + parent_finality_votes_for_votes_loop, + vote_interval, + vote_timeout, + key, + own_subnet_id_for_votes_loop, + client_for_votes_loop, + |height, block_hash| { + AppVote::ParentFinality(IPCParentFinality { height, block_hash }) + }, + ) + .await + }); + } else { + tracing::warn!( + "validator key missing; legacy topdown enabled but vote publishing is disabled (non-validator mode)" + ); + } + + tracing::info!("subscribing to gossip..."); + let rx = service.subscribe(); + let parent_finality_votes_for_resolver = parent_finality_votes.clone(); + tokio::spawn(async move { + dispatch_resolver_events(rx, parent_finality_votes_for_resolver).await; + }); + + tracing::info!("starting the IPLD Resolver Service..."); + tokio::spawn(async move { + if let Err(e) = service.run().await { + tracing::error!("IPLD Resolver Service failed: {e:#}") + } + }); + + tracing::info!("legacy topdown finality enabled"); + + let mut config = fendermint_vm_topdown::Config::new( + topdown_config.chain_head_delay, + topdown_config.polling_interval, + topdown_config.exponential_back_off, + topdown_config.exponential_retry_limit, + ) + .with_proposal_delay(topdown_config.proposal_delay) + .with_max_proposal_range(topdown_config.max_proposal_range); + + if let Some(v) = topdown_config.max_cache_blocks { + tracing::info!(value = v, "setting max cache blocks"); + config = config.with_max_cache_blocks(v); + } + + let ipc_provider = { + let p = make_ipc_provider_proxy(settings)?; + Arc::new(IPCProviderProxyWithLatency::new(p)) + }; + + let finality_provider = + CachedFinalityProvider::uninitialized(config.clone(), ipc_provider.clone()).await?; + + let parent_finality_provider: TopDownFinalityProvider = + Arc::new(Toggle::enabled(finality_provider)); + + let manager = TopDownManager::legacy(LegacyTopDownHandler::new( + parent_finality_provider.clone(), + parent_finality_votes.clone(), + )); + + Ok(TopDownInit { + manager, + legacy_post_init: Some(LegacyPostInit { + agent_proxy: ipc_provider, + config, + parent_finality_provider, + parent_finality_votes: parent_finality_votes.clone(), + }), + }) +} + +async fn start_f3_topdown( + settings: &Settings, + topdown_config: &TopDownSettings, + f3_state_in_committed_state: Option< + fendermint_vm_actor_interface::f3_light_client::GetStateResponse, + >, + gateway_finality_in_committed_state: Option, + gateway_event_cursor_in_committed_state: Option, +) -> anyhow::Result { + let f3_config = topdown_config + .f3 + .as_ref() + .context("F3 is enabled in config but missing F3 config section")?; + + let f3_state = f3_state_in_committed_state + .context("F3 is enabled in config but initial F3 state is missing in committed state")?; + let initial_instance = f3_state.processed_instance_id; + // Epoch cursor comes from the gateway contract (seeded at genesis). + let initial_epoch = gateway_finality_in_committed_state + .context("F3 enabled but gateway latest parent finality missing in committed state")? 
+ .height as fvm_shared::clock::ChainEpoch; + let gateway_cursor = gateway_event_cursor_in_committed_state + .context("F3 enabled but gateway event cursor missing in committed state")?; + + let db_path = Some(settings.data_dir().join("proof-cache")); + let cache = Arc::new( + fendermint_vm_topdown_proof_service::ProofCache::new_with_persistence( + initial_epoch, + initial_instance, + f3_config.proof_service.cache_config.clone(), + db_path.as_ref().expect("db_path always set here"), + )?, + ); + + let handler = fendermint_vm_interpreter::fvm::F3TopDownHandler::new(cache); + let proof_cache = handler.proof_cache().clone(); + + let mut proof_config = f3_config.proof_service.clone(); + proof_config.parent_rpc_url = topdown_config.parent_http_endpoint.to_string(); + + if !proof_config.enabled { + tracing::info!("F3 proof service disabled in configuration"); + } else { + tracing::info!("F3 proof service enabled"); + + use fendermint_vm_topdown_proof_service::ProofGeneratorService; + let subnet_id: SubnetID = settings.ipc.subnet_id.clone(); + let service = ProofGeneratorService::new( + proof_config.clone(), + proof_cache.clone(), + &subnet_id, + initial_instance, + fendermint_vm_topdown_proof_service::power_entries_from_actor(&f3_state.power_table), + gateway_cursor.applied_top_down_nonce, + gateway_cursor.next_power_change_config_number, + ) + .await + .context("Failed to create F3 proof service")?; + + tracing::info!( + f3_network = proof_config.f3_network_name(&subnet_id), + lookahead = proof_config.cache_config.lookahead_instances, + "F3 proof service initialized successfully" + ); + + tokio::spawn(async move { + service.run().await; + }); + } + + Ok(TopDownInit { + manager: TopDownManager::f3_with_retry_config( + handler, + fendermint_vm_interpreter::fvm::topdown::F3ExecutionCacheRetryConfig { + backoff_initial: f3_config.execution_cache_retry.backoff_initial, + backoff_max: f3_config.execution_cache_retry.backoff_max, + critical_after: f3_config.execution_cache_retry.critical_after, + error_after: f3_config.execution_cache_retry.error_after, + }, + ), + legacy_post_init: None, + }) +} + +fn to_resolver_config(settings: &Settings) -> anyhow::Result { + use ipc_ipld_resolver::{ + Config, ConnectionConfig, ContentConfig, DiscoveryConfig, MembershipConfig, NetworkConfig, + }; + + let r = &settings.resolver; + + let local_key: Keypair = { + let path = r.network.local_key(settings.home_dir()); + let sk = read_secret_key(&path)?; + let sk = secp256k1::SecretKey::try_from_bytes(sk.serialize())?; + secp256k1::Keypair::from(sk).into() + }; + + let network_name = format!( + "ipld-resolver-{}-{}", + settings.ipc.subnet_id.root_id(), + r.network.network_name + ); + + Ok(Config { + connection: ConnectionConfig { + listen_addr: r.connection.listen_addr.clone(), + external_addresses: r.connection.external_addresses.clone(), + expected_peer_count: r.connection.expected_peer_count, + max_incoming: r.connection.max_incoming, + max_peers_per_query: r.connection.max_peers_per_query, + event_buffer_capacity: r.connection.event_buffer_capacity, + }, + network: NetworkConfig { + local_key, + network_name, + }, + discovery: DiscoveryConfig { + static_addresses: r.discovery.static_addresses.clone(), + target_connections: r.discovery.target_connections, + enable_kademlia: r.discovery.enable_kademlia, + }, + membership: MembershipConfig { + static_subnets: r.membership.static_subnets.clone(), + max_subnets: r.membership.max_subnets, + publish_interval: r.membership.publish_interval, + min_time_between_publish: 
r.membership.min_time_between_publish, + max_provider_age: r.membership.max_provider_age, + }, + content: ContentConfig { + rate_limit_bytes: r.content.rate_limit_bytes, + rate_limit_period: r.content.rate_limit_period, + }, + }) +} + +async fn dispatch_resolver_events( + mut rx: tokio::sync::broadcast::Receiver>, + parent_finality_votes: VoteTally, +) { + loop { + match rx.recv().await { + Ok(event) => match event { + ResolverEvent::ReceivedPreemptive(_, _) => {} + ResolverEvent::ReceivedVote(vote) => { + dispatch_vote(*vote, &parent_finality_votes).await; + } + }, + Err(RecvError::Lagged(n)) => { + tracing::warn!("the resolver service skipped {n} gossip events") + } + Err(RecvError::Closed) => { + tracing::error!("the resolver service stopped receiving gossip"); + return; + } + } + } +} + +async fn dispatch_vote(vote: VoteRecord, parent_finality_votes: &VoteTally) { + match vote.content { + AppVote::ParentFinality(f) => { + let res = atomically_or_err(|| { + parent_finality_votes.add_vote( + vote.public_key.clone(), + f.height, + f.block_hash.clone(), + ) + }) + .await; + + match res { + Err(e @ VoteError::Equivocation(_, _, _, _)) => { + tracing::warn!(error = e.to_string(), "failed to handle vote"); + } + Err( + e @ ( + VoteError::Uninitialized // early vote, we're not ready yet + | VoteError::UnpoweredValidator(_) // maybe arrived too early or too late, or spam + | VoteError::UnexpectedBlock(_, _) // won't happen here + ), + ) => { + tracing::debug!(error = e.to_string(), "failed to handle vote"); + } + _ => { + tracing::debug!("vote handled"); + } + }; + } + } +} diff --git a/fendermint/testing/contract-test/tests/gas_market.rs b/fendermint/testing/contract-test/tests/gas_market.rs index 8b57b8a16d..0341892b10 100644 --- a/fendermint/testing/contract-test/tests/gas_market.rs +++ b/fendermint/testing/contract-test/tests/gas_market.rs @@ -3,8 +3,6 @@ mod staking; -use std::sync::Arc; - use fendermint_actor_gas_market_eip1559::Constants; use fendermint_contract_test::Tester; use fendermint_crypto::{PublicKey, SecretKey}; @@ -14,14 +12,14 @@ use fendermint_vm_actor_interface::system::SYSTEM_ACTOR_ADDR; use fendermint_vm_core::Timestamp; use fendermint_vm_genesis::{Account, Actor, ActorMeta, Genesis, PermissionMode, SignerAddr}; use fendermint_vm_interpreter::fvm::store::memory::MemoryBlockstore; +use fendermint_vm_interpreter::fvm::topdown::TopDownFinalityHandler; use fendermint_vm_interpreter::fvm::topdown::TopDownManager; use fendermint_vm_interpreter::fvm::upgrades::{Upgrade, UpgradeScheduler}; use fendermint_vm_interpreter::fvm::FvmMessagesInterpreter; use fendermint_vm_message::chain::ChainMessage; use fendermint_vm_message::conv::from_fvm; use fendermint_vm_message::signed::SignedMessage; -use fendermint_vm_topdown::voting::VoteTally; -use fendermint_vm_topdown::Toggle; +// Topdown is disabled for these tests. 
use fvm_shared::chainid::ChainID; use crate::staking::DEFAULT_CHAIN_ID; @@ -64,9 +62,7 @@ async fn tester_with_upgrader( let validator = rand_secret_key().public_key(); let end_block_manager = EndBlockManager::default(); - let finality_provider = Arc::new(Toggle::disabled()); - let vote_tally = VoteTally::empty(); - let top_down_manager = TopDownManager::new(finality_provider, vote_tally); + let top_down_manager = TopDownManager::new(TopDownFinalityHandler::Disabled); let interpreter: FvmMessagesInterpreter = FvmMessagesInterpreter::new( end_block_manager, diff --git a/fendermint/testing/contract-test/tests/run_upgrades.rs b/fendermint/testing/contract-test/tests/run_upgrades.rs index 0a734b426d..892aaefbe6 100644 --- a/fendermint/testing/contract-test/tests/run_upgrades.rs +++ b/fendermint/testing/contract-test/tests/run_upgrades.rs @@ -10,8 +10,6 @@ use fendermint_rpc::response::decode_fevm_return_data; use rand::rngs::StdRng; use rand::SeedableRng; use std::str::FromStr; -use std::sync::Arc; - use ethers::contract::abigen; use fvm_shared::address::Address; use fvm_shared::bigint::Zero; @@ -26,12 +24,12 @@ use fendermint_vm_core::Timestamp; use fendermint_vm_genesis::{Account, Actor, ActorMeta, Genesis, PermissionMode, SignerAddr}; use fendermint_vm_interpreter::fvm::end_block_hook::EndBlockManager; use fendermint_vm_interpreter::fvm::store::memory::MemoryBlockstore; +use fendermint_vm_interpreter::fvm::topdown::TopDownFinalityHandler; use fendermint_vm_interpreter::fvm::topdown::TopDownManager; use fendermint_vm_interpreter::fvm::upgrades::{Upgrade, UpgradeScheduler}; use fendermint_vm_interpreter::fvm::FvmMessagesInterpreter; use fendermint_vm_message::conv::from_fvm; -use fendermint_vm_topdown::voting::VoteTally; -use fendermint_vm_topdown::Toggle; +// Topdown is disabled for these tests. // returns a seeded secret key which is guaranteed to be the same every time fn my_secret_key() -> SecretKey { @@ -206,9 +204,7 @@ async fn test_applying_upgrades() { .unwrap(); let end_block_manager = EndBlockManager::default(); - let finality_provider = Arc::new(Toggle::disabled()); - let vote_tally = VoteTally::empty(); - let top_down_manager = TopDownManager::new(finality_provider, vote_tally); + let top_down_manager = TopDownManager::new(TopDownFinalityHandler::Disabled); let interpreter: FvmMessagesInterpreter = FvmMessagesInterpreter::new( end_block_manager, diff --git a/fendermint/vm/evm-event-utils/Cargo.toml b/fendermint/vm/evm-event-utils/Cargo.toml new file mode 100644 index 0000000000..5f8202bf1d --- /dev/null +++ b/fendermint/vm/evm-event-utils/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "fendermint_vm_evm_event_utils" +description = "Shared helpers for decoding EVM logs (topics/data) from proof bundles" +version = "0.1.0" +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow = { workspace = true } +ethers = { workspace = true } +hex = { workspace = true } +ipc_actors_abis = { path = "../../../contract-bindings" } +proofs = { git = "https://github.com/consensus-shipyard/ipc-filecoin-proofs", branch = "proofs" } diff --git a/fendermint/vm/evm-event-utils/src/lib.rs b/fendermint/vm/evm-event-utils/src/lib.rs new file mode 100644 index 0000000000..17b7568c7a --- /dev/null +++ b/fendermint/vm/evm-event-utils/src/lib.rs @@ -0,0 +1,94 @@ +//! Shared helpers for decoding EVM logs from proof bundles. +//! +//! Both the interpreter and the proof-service need to decode Solidity events embedded in +//! `UnifiedProofBundle` event proofs. 
The proofs library stores event topics/data as hex strings, +//! so these helpers provide: +//! - Hex parsing (`0x`-prefixed strings) +//! - Conversion from `EventProof` -> `RawLog` +//! - Decoding typed events using generated contract bindings + +use anyhow::{anyhow, Context, Result}; +use ethers::abi::RawLog; +use ethers::contract::EthLogDecode; +use ethers::types::H256; +use ipc_actors_abis::{lib_gateway, lib_power_change_log}; +use proofs::proofs::events::bundle::EventProof; + +/// Parse a `0x`-prefixed hex string into bytes. +pub fn parse_0x_bytes(s: &str) -> Result> { + let s = s.strip_prefix("0x").unwrap_or(s); + Ok(hex::decode(s)?) +} + +/// Parse a 32-byte EVM storage word (hex string) and return the low 64 bits as a `u64`. +/// +/// Solidity stores integer values left-padded in a 32-byte word. For `uint64`, the value sits in the +/// low 8 bytes (big-endian). +pub fn parse_u64_from_0x_word_low64(word_0x: &str) -> Result { + let mut b = parse_0x_bytes(word_0x)?; + if b.len() > 32 { + anyhow::bail!("expected <= 32 bytes, got {}", b.len()); + } + if b.len() < 32 { + let mut padded = vec![0u8; 32 - b.len()]; + padded.append(&mut b); + b = padded; + } + // Enforce that the word actually fits in u64 (high 192 bits must be zero). + if b[..24].iter().any(|x| *x != 0) { + anyhow::bail!("value does not fit in u64 (high 192 bits are non-zero)"); + } + let tail: [u8; 8] = b[24..32].try_into().expect("slice is 8 bytes"); + Ok(u64::from_be_bytes(tail)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_u64_accepts_short_hex_quantity() { + assert_eq!(parse_u64_from_0x_word_low64("0x01").unwrap(), 1); + } + + #[test] + fn parse_u64_rejects_overflow() { + // 2^64, i.e. low64=0 but high bits non-zero. + assert!(parse_u64_from_0x_word_low64("0x010000000000000000").is_err()); + } +} + +/// Convert an `EventProof` into an `ethers::abi::RawLog`. +pub fn raw_log_from_event_proof(event_proof: &EventProof) -> Result { + let topics: Result> = event_proof + .event_data + .topics + .iter() + .map(|t| { + let b = parse_0x_bytes(t)?; + if b.len() != 32 { + return Err(anyhow!("topic must be 32 bytes, got {}", b.len())); + } + Ok(H256::from_slice(&b)) + }) + .collect(); + let topics = topics?; + let data = parse_0x_bytes(&event_proof.event_data.data) + .with_context(|| "failed to decode event data hex")?; + + Ok(RawLog { topics, data }) +} + +/// Attempt to decode a `NewTopDownMessage` event. +pub fn decode_new_topdown_message(raw: &RawLog) -> Result { + lib_gateway::NewTopDownMessageFilter::decode_log(raw) + .map_err(|e| anyhow!("failed to decode NewTopDownMessage: {e}")) +} + +/// Attempt to decode a `NewPowerChangeRequest` event. 
+pub fn decode_new_power_change_request( + raw: &RawLog, +) -> Result { + lib_power_change_log::NewPowerChangeRequestFilter::decode_log(raw) + .map_err(|e| anyhow!("failed to decode NewPowerChangeRequest: {e}")) +} diff --git a/fendermint/vm/genesis/golden/genesis/cbor/genesis.cbor b/fendermint/vm/genesis/golden/genesis/cbor/genesis.cbor index 3a434547aa..9def94a75d 100644 --- a/fendermint/vm/genesis/golden/genesis/cbor/genesis.cbor +++ b/fendermint/vm/genesis/golden/genesis/cbor/genesis.cbor @@ -1 +1 @@ -ab63697063a16767617465776179a4697375626e65745f6964821b7eca9e45193a5edf834b008ff4db9f8db9fc98d0014b008c87f3a18bc3dad6a10156040a14b3436b1ce515269d0e01716511eb1e14a8f5b2736d616a6f726974795f70657263656e74616765184076626f74746f6d5f75705f636865636b5f706572696f641bedb1ca6774bbca0b776163746976655f76616c696461746f72735f6c696d69740f686163636f756e747382a2646d657461a1684d756c7469736967a4677369676e657273855501300fa088e63bc7284c7886986f0c0b32a4b8a43155017129d666aefc41888fa7848bc44d87e28e2627e45501394ae215967cb88cdff3028d154bb52de3dec828550109d2f2a1068fec62e2e1c03901b9d09e0fb6a1fb55011ffff67c4ddbe407cade15c5455c016c169a806d697468726573686f6c64026d76657374696e675f73746172741b7e691439f5a3af537076657374696e675f6475726174696f6e1b24ca3cfd0d53d7566762616c616e636540a2646d657461a1684d756c7469736967a4677369676e6572738555016591322f2dd8242769cbb1f25c7ea721a9a7af1655013434f6a3adaa76a3cc8eeb51e6ecf817422549d955017ccedef0ff205eb9de81a5195c611224864fe2af5501738bfd8efcf20518c5d1896a2c4dbf5a4532f23355018edcf2b088860ace2d2451fcda9a8cccf5155e6f697468726573686f6c64036d76657374696e675f73746172741b77bbd6777d7f09ff7076657374696e675f6475726174696f6e1b3cd419a08d6f82af6762616c616e636551009aceabcd73ffacc17e7a2589b0b410cb68626173655f6665654900664a46027308d50568636861696e5f696418656974696d657374616d701bb7e5d1db6ca339f66a636861696e5f6e616d65606a76616c696461746f727389a265706f7765725100f2e3e900debb4b13c01ef1ffbe73af006a7075626c69635f6b657958410430949f5cd9edf3887a6d162477f4adb9162c0aa02126099e4af4a6f18621449bbe65e7ea6383eb76f570ff3c9c7a4a4d939dc140efd778e2d5029c4a52f70ef7a265706f776572406a7075626c69635f6b65795841046f9029b17c045cd92f136b781ec69080903bf1963cc8dd61b464d0b523dce15d453bf839c66ccc7778a6a69e7dfab0d66ec05f28ae4a0f80d6abf9dae473ac0aa265706f7765725100571c2a44f074149d342219ca1c8b31566a7075626c69635f6b6579584104e2b47a1897aed91d422c992607879595875e6134dacab1a34c8037553f9c92e24eb009cfd2fb9f17fca7bf94e83df742c21ac47315d30a15e9c8a79eb2d39738a265706f7765725100ffffffffffffffff95e98069516d1c4d6a7075626c69635f6b65795841041baac9a539f651ce7653b132b3ce6f6d37252a271e67273c6c9622589f024506875c9b9c8ad30d47e87361a6564a9d24dffa4973c85b15299b9e91633c00c926a265706f7765725100e2ec5feec408b63f8f765b56f36f2a986a7075626c69635f6b65795841041691865a9fc99f8fd3bbc782337ffbddfd698fdf90347e8b7e17369baac381a0bf79510bea98c313af91778477d3c26d1fb21a9388d46f9c422b013b5a145a02a265706f7765725100ad363ad866db295a0e261113392461176a7075626c69635f6b657958410443ea92679151790e0864ec6349ad5d02e1b0540c04807de8c987da4d678089a456882ddfc884122288413a519b991954700d78432f8e830ea85747d6b313b785a265706f7765725100bf5c2b3032d672331f48c06fa88ea52d6a7075626c69635f6b6579584104837bc4a25a552f28beb82287674433a33354bb6c75f1b39ddbd3b553ce6fdcf4e270f933dc532d6b2c0ff34797cf9bb0d1f0a75b44017521132caf3bde7f2ed1a265706f776572510090288e0b0b66da00393724c1552d9acf6a7075626c69635f6b65795841047c4ab7ccc6f0b545a9be47a001de9e4f6bc3ef106d52ad97b1e242805009d89299e0534cd38040f0b38650219012aa96e0ba6686deaa1df7a71b07a2b38f6879a265706f776572510074551b9664b0fc66f17b34f9b90c6e926a7075626c69635f6b6579584104f05352e3
e6ac2ab0c1298706964b05e0f1f81c81ed23d0ac6da795c45d67e28f322bbc85ad3cb5bc283b6b05a86d8b0898764f2d81a1a88210f99692334004e96b706f7765725f7363616c65006f6e6574776f726b5f76657273696f6e157365616d5f7065726d697373696f6e5f6d6f6465a1646d6f64656c756e72657374726963746564736970635f636f6e7472616374735f6f776e6572782a307830303030303030303030303030303030303030303030303030303030303030303030303030303030 \ No newline at end of file +aa686163636f756e747388a2646d657461a1674163636f756e74a1656f776e657256040a0e6978da3166dd56f2da0824fb228bce677f3a7c6762616c616e636551008af1227fe0519826b837a5190374deb5a2646d657461a1684d756c7469736967a4677369676e65727382550167b55508faeace686ecc4c96838f37acf4bdc77955017a1c4fc41b6827aad1cc91bd15781d4237761ac3697468726573686f6c64026d76657374696e675f73746172741b5418dfd312b06f877076657374696e675f6475726174696f6e1bde381ae7edd4430c6762616c616e636540a2646d657461a1684d756c7469736967a4677369676e657273825501d36198c7bb51b798cd40af94988ee41d046764fc550113979603901cf8caa4667ea8f6d7ee41e9a6b1c5697468726573686f6c64026d76657374696e675f73746172741babd49fa4d825e9217076657374696e675f6475726174696f6e1b7a573f830369567b6762616c616e63655100ffffffffffffffffa83394a88ffa831fa2646d657461a1674163636f756e74a1656f776e657256040a0767389b6b1ebeb419c1640415aeb0c26f228b4a6762616c616e636551003c865617640201cae70d1c4fe67955d6a2646d657461a1674163636f756e74a1656f776e657255012eb05a918cfd1e3e5b5af0ba2888f337b0f58af76762616c616e63655100620ca24f6dca248b1972ca7bad454683a2646d657461a1684d756c7469736967a4677369676e657273855501f2b7e9c7c823a56e010382f49b8c273b52f24147550181b55ed1dc2498c99940751911c9fb2736eff9dd5501a127931a462ad8eebf7d387e1c834cec14d6bf8b55018db93265f2a57d394d58dc410a52569c10ea7b1555016d7baa840ffbe153e2ee8b6d664c2f0e54e0d839697468726573686f6c64026d76657374696e675f73746172741b21d6a435a95ce2837076657374696e675f6475726174696f6e1b47a4650d7ef6b58d6762616c616e63655100ffffffffffffffff4130053836ad90f1a2646d657461a1674163636f756e74a1656f776e657256040a409ad7f930145cbbdb46ba5ade1261814d7830b86762616c616e63655100b3253e0eb8ca1e1e766912f54414447fa2646d657461a1674163636f756e74a1656f776e65725501951d541f21edd560528530ea396f55449e8646226762616c616e636549001eca77746d617d8668626173655f6665655100dae8be0aabd41a1a1fe1fe08ad0299d568636861696e5f696418656974696d657374616d701b8e0ad784e2cc848c6a636861696e5f6e616d656a0f133c1be8808229c2866a76616c696461746f727386a265706f776572510040d27107a74fac0b4af2831557748abc6a7075626c69635f6b6579584104b8d9acb9016eb222fdfda47c45437464a1d7a9f0b5a82d75ee41de8a9d09f022348d045766ddba934e54dfc61611b278d6d3f0789e8abe8e2cb4386f3759bd39a265706f7765725100c944a8332ea61b784316f03089a237aa6a7075626c69635f6b65795841042d686a4da8179eeeaec8b13b944c48f332438771e76730bc6029af8347e236bcc97b00f59b945bfe0db048df06c12f7157fa414555a6e36d17e471ca36e39c77a265706f77657251008dff12e00cf64548a93c977738433f016a7075626c69635f6b6579584104eac459f6b71d647bf145beb6092bb78518890359ac92dc4047b2f9e5ab5a0406ebd2db53b0d4a136740cd8e96985d3f7f1cbc387be8ad5c08705a62d341aaa0aa265706f776572406a7075626c69635f6b6579584104af05f458e83191b0baaf56721fcb7f3d1f80c88bf0a3bc318900faa3bed2aa9123de3e4ecc856254a20f29bfff16fd6c0c8d4a5d53b053e6373a815178f91061a265706f7765725100ae5d49b11c918c6256a83cf0d61ce38b6a7075626c69635f6b6579584104210d10d1b8c80752fa1e33546d050017f094f2c4628f3b367f387761a278fd04066826e589ed57ed9751793c7e33e5beb341043c60d6a83de29bae2137d5c764a265706f7765725100ffffffffffffffff00000000000000006a7075626c69635f6b6579584104bc4241fbd1dbcc8bf21762c7ec723ab5b7084c880f824aa2faa36c708bc5450020242ace9a1bb8341cfe7d51c1b51f8f9e694e207e74102aebc64036ae348c1c6b706f776
5725f7363616c65036f6e6574776f726b5f76657273696f6e157365616d5f7065726d697373696f6e5f6d6f6465a1646d6f64656c756e72657374726963746564736970635f636f6e7472616374735f6f776e6572782a307861646164616461646164616461646164616461646164616461646164616461646164616461646164 \ No newline at end of file diff --git a/fendermint/vm/genesis/golden/genesis/cbor/genesis.txt b/fendermint/vm/genesis/golden/genesis/cbor/genesis.txt index 61f2dbfb46..b1e6df7698 100644 --- a/fendermint/vm/genesis/golden/genesis/cbor/genesis.txt +++ b/fendermint/vm/genesis/golden/genesis/cbor/genesis.txt @@ -1 +1 @@ -Genesis { chain_name: "", chain_id: 101, timestamp: Timestamp(13251228218958232054), network_version: NetworkVersion(21), base_fee: TokenAmount(7.370780716479075589), power_scale: 0, validators: [Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [35734683, 19512417, 10085551, 41976984, 18230282, 33368942, 47276615, 63840745, 56416749, 795943], magnitude: 1, normalized: true }, y: Field { n: [49745655, 10949268, 59649360, 17022813, 26451393, 52335251, 51377097, 61725653, 65692547, 3119481], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(322856552237206793804.031172885867638528)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [64807261, 20196680, 30808902, 39383843, 9452529, 61973536, 20363137, 24339644, 28408836, 1827850], magnitude: 1, normalized: true }, y: Field { n: [7580682, 50230969, 16256362, 10664232, 40812639, 58633269, 40528359, 53599714, 3786348, 1134334], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(0.0)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [60592866, 906575, 52049096, 13855530, 25648737, 31581541, 46764640, 56915208, 35166126, 3714334], magnitude: 1, normalized: true }, y: Field { n: [47421240, 36300716, 10575516, 30168908, 46275268, 34569680, 41679182, 41705458, 30397179, 1289218], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(115789077268461795555.786014125701411158)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [50480390, 25728551, 41141961, 10254748, 20391210, 15965147, 20648747, 21445081, 27605494, 453298], magnitude: 1, normalized: true }, y: Field { n: [51494, 61102287, 22190521, 30351724, 14678601, 26388297, 53877349, 3481505, 60590803, 2217766], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(340282366920938463455.730169729109531725)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [46367136, 30254826, 65583073, 58605777, 33384847, 14679799, 62683171, 41828174, 39493577, 369761], magnitude: 1, normalized: true }, y: Field { n: [34888194, 46157526, 49923106, 38675281, 18854426, 32829595, 18315335, 51138238, 17558168, 3137108], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(301632854851889825874.9190748556565654)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [58755492, 32936793, 64916632, 3150337, 48345172, 40589120, 38716980, 31733793, 40341841, 1112740], magnitude: 1, normalized: true }, y: Field { n: [51623813, 30537132, 3205765, 17612346, 7343480, 48645717, 1287449, 4753953, 31443076, 1417739], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(230238020826023862616.805863604753817879)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [40885492, 15553779, 54123965, 28432326, 53695675, 30477544, 
58861686, 12362490, 10639957, 2154225], magnitude: 1, normalized: true }, y: Field { n: [41889489, 53202679, 55710002, 23924741, 13758631, 32761580, 16725113, 11906224, 20175955, 3710014], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(254361114468202393482.250430575417402669)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [645266, 9478164, 47807262, 4306250, 57394159, 7841683, 65305088, 47519398, 63751920, 2036397], magnitude: 1, normalized: true }, y: Field { n: [59730041, 46262444, 31423089, 35355304, 48282214, 305829, 6619673, 17023694, 55366528, 2521108], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(191619404244571460019.048908483311016655)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [23585423, 31813911, 17483482, 34059407, 15857692, 26395000, 43544681, 11191044, 48490156, 3937492], magnitude: 1, normalized: true }, y: Field { n: [54527209, 40215692, 42475791, 11929222, 9991759, 35349186, 62304346, 47640736, 8760636, 821999], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(154632352284471841201.727616896507604626)) }], accounts: [Actor { meta: Multisig(Multisig { signers: [SignerAddr(Address("f1gah2bchghpdsqtdyq2mg6dalgkslrjbrtgaesia")), SignerAddr(Address("f1oeu5mzvo7rayrd5hqsf4itmh4khcmj7eji5xhya")), SignerAddr(Address("f1hffoefmwps4izx7takgrks5vfxr55sbif4ba23a")), SignerAddr(Address("f1bhjpfiigr7wgfyxbya4qdooqtyh3nip3elwgtmq")), SignerAddr(Address("f1d777m7cn3psapsw6cxcukxabnqljvadnlox6opq"))], threshold: 2, vesting_duration: 2650998388208949078, vesting_start: 9108833960500375379 }), balance: TokenAmount(0.0) }, Actor { meta: Multisig(Multisig { signers: [SignerAddr(Address("f1mwitelzn3asco2olwhzfy7vhegu2plyw5jmez7a")), SignerAddr(Address("f1gq2pni5nvj3khteo5ni6n3hyc5bcksoziofrtza")), SignerAddr(Address("f1pthn54h7ebpltxubuumvyyisesde7yvpvgg7qwi")), SignerAddr(Address("f1oof73dx46icrrrorrfvcytn7ljctf4rtv3lemxa")), SignerAddr(Address("f1r3opfmeiqyfm4ljekh6nvgumzt2rkxtpsugo5by"))], threshold: 3, vesting_duration: 4383156514696692399, vesting_start: 8627725319853246975 }), balance: TokenAmount(205774209073450626808.321982400535204043) }], eam_permission_mode: Unrestricted, ipc: Some(IpcParams { gateway: GatewayParams { subnet_id: SubnetID { root: 9136288813687660255, children: [Address("f015002037678599764495"), Address("f011650084465412981644"), Address("f410fcszug2y44uksnhioafywkepldykkr5nsgp42gai")] }, bottom_up_check_period: 17127693403555613195, majority_percentage: 64, active_validators_limit: 15 } }), ipc_contracts_owner: 0x0000000000000000000000000000000000000000, f3: None } \ No newline at end of file +Genesis { chain_name: "\u{f}\u{13}<\u{1b}耂)\u{86}", chain_id: 101, timestamp: Timestamp(10235230068893582476), network_version: NetworkVersion(21), base_fee: TokenAmount(290980170455576411479.522545465028680149), power_scale: 3, validators: [Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [17428514, 7840423, 47668964, 63100576, 10606505, 22076697, 64636868, 46697463, 12124526, 3028587], magnitude: 1, normalized: true }, y: Field { n: [56212793, 17701837, 65594059, 31619626, 14078960, 25455774, 21888097, 48909625, 5727965, 860993], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(86163266589189594141.7072688837587463)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [65156796, 40624337, 51103234, 29859228, 53625735, 18027068, 
9114553, 41663163, 38643735, 743962], magnitude: 1, normalized: true }, y: Field { n: [48471159, 18641549, 37147006, 18175643, 22542913, 28330972, 50630128, 24115254, 16096148, 3301056], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(267531314839035282300.694276546888873898)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [56230918, 12482922, 29623419, 23507531, 18385155, 38464993, 6024032, 26341317, 32945949, 3846422], magnitude: 1, normalized: true }, y: Field { n: [1747466, 23694157, 22808688, 35584555, 66177987, 39941373, 13471382, 42260944, 55816404, 3863734], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(188745565939092331103.120343411443580673)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [47360657, 4106479, 63117456, 36684430, 18841800, 66248655, 49637153, 38191850, 5826609, 2867581], magnitude: 1, normalized: true }, y: Field { n: [16322657, 44061790, 20865907, 24465089, 822602, 63291227, 15899647, 25776776, 38718597, 587663], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(0.0)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [41483524, 35510376, 62089203, 51481148, 66098418, 54607877, 31667526, 1919976, 13744328, 541508], magnitude: 1, normalized: true }, y: Field { n: [64341860, 48990285, 42196521, 15827802, 45302020, 59570543, 18322375, 23049821, 48597485, 104969], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(231770049522546469915.619078870943982475)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [63259904, 14359586, 11153322, 35667465, 28772428, 52203181, 24521854, 53620680, 33280475, 3084432], magnitude: 1, normalized: true }, y: Field { n: [36998172, 26217899, 16953020, 8518096, 60713294, 7161827, 65525020, 48287859, 47094299, 526602], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(340282366920938463444.92786335805865984)) }], accounts: [Actor { meta: Account(Account { owner: SignerAddr(Address("f410fbzuxrwrrm3ovn4w2baspwiulzztx6ot4nex3kta")) }), balance: TokenAmount(184685506694551731092.892523818741194421) }, Actor { meta: Multisig(Multisig { signers: [SignerAddr(Address("f1m62vkch25lhgq3wmjslihdzxvt2l3r3z2rzuziy")), SignerAddr(Address("f1pioe7ra3nat2vuomsg6rk6a5ii3xmgwddvamzya"))], threshold: 2, vesting_duration: 16012578058545677068, vesting_start: 6059839396271648647 }), balance: TokenAmount(0.0) }, Actor { meta: Multisig(Multisig { signers: [SignerAddr(Address("f12nqzrr53kg3zrtkav6kjrdxeducgozh4xatbrwa")), SignerAddr(Address("f1colzma4qdt4mvjdgp2upnv7oihu2nmofdjczgsa"))], threshold: 2, vesting_duration: 8815584627531535995, vesting_start: 12381696805898152225 }), balance: TokenAmount(340282366920938463457.048057831933772575) }, Actor { meta: Account(Account { owner: SignerAddr(Address("f410fa5ttrg3ld27ligobmqcbllvqyjxsfc2kdn2eyvq")) }), balance: TokenAmount(80451193666563198447.219250656813077974) }, Actor { meta: Account(Account { owner: SignerAddr(Address("f1f2yfvemm7upd4w226c5crchtg6yplcxxg227vky")) }), balance: TokenAmount(130329943192583045330.820134976081970819) }, Actor { meta: Multisig(Multisig { signers: [SignerAddr(Address("f16k36tr6ieosw4aidql2jxdbhhnjpeqkh63snsja")), SignerAddr(Address("f1qg2v5uo4esmmtgkaoumrdsp3e43o76o5mhfdily")), SignerAddr(Address("f1uetzggsgflmo5p35hb7bza2m5qknnp4listyipq")), SignerAddr(Address("f1rw4tezpsuv6tstky3raquuswtqiou6yvigu34ma")), 
SignerAddr(Address("f1nv52vbap7pqvhyxornwwmtbpbzkobwbztzlz4ga"))], threshold: 2, vesting_duration: 5162362181512508813, vesting_start: 2438316798649361027 }), balance: TokenAmount(340282366920938463449.625123508399739121) }, Actor { meta: Account(Account { owner: SignerAddr(Address("f410ficnnp6jqcrolxw2gxjnn4etbqfgxqmfy27nf2hy")) }), balance: TokenAmount(238125184905045018530.922931046499763327) }, Actor { meta: Account(Account { owner: SignerAddr(Address("f1suovihzb5xkwauufgdvds32vispimrrc4k7y4ya")) }), balance: TokenAmount(2.218717108368407942) }], eam_permission_mode: Unrestricted, ipc: None, ipc_contracts_owner: 0xadadadadadadadadadadadadadadadadadadadad, f3: None } \ No newline at end of file diff --git a/fendermint/vm/genesis/golden/genesis/json/genesis.json b/fendermint/vm/genesis/golden/genesis/json/genesis.json index 4d642a5102..d95bd9a65d 100644 --- a/fendermint/vm/genesis/golden/genesis/json/genesis.json +++ b/fendermint/vm/genesis/golden/genesis/json/genesis.json @@ -1,52 +1,82 @@ { - "chain_name": "U", + "chain_name": "7", "chain_id": 101, - "timestamp": 12982167733895342124, + "timestamp": 17133465601291489619, "network_version": 21, - "base_fee": "20131727983098262839895554089832955311", - "power_scale": 3, + "base_fee": "0", + "power_scale": 0, "validators": [ { - "public_key": "BH4dgpBqtkh6sKhMXelu7LKYN2nb1UJSX6ZCYGPmtmsCM9y80iAUMj5wu4Yiu952Dv3Oq2Rrbt9h45EgRmHkFSc=", - "power": "40223257309125237738285695934834340379" + "public_key": "BOaJm/wQCVEyouz1u6+43/t+RsrsT7YBK8x7fOBj5JYzkmZ0zjWvt4tNrUzFwKCykzkY5XUk8SULr9XhE2kCKgM=", + "power": "133436547121158909513477972881658559922" }, { - "public_key": "BBPbyO1PiFQ7AoLdOYNViHBJ+EF6FuhOYGAOTygqwuvdOt960J5TvCIjwX2UIm2vPm+9ILZarBWcTFXz8rQG9bQ=", - "power": "19706769749739782860581433033120308803" + "public_key": "BHYo2AfRilJS1GHpeE93pAb/gx/lOPjESuA9ntxQSB5iKAqKMOgpcl/RMTI3/EDOwSdx8T4yCwVWhhq4bl5mAXQ=", + "power": "240244227391930844362499398952384284081" }, { - "public_key": "BGB9f7R3VPpLiWszpGgbG+BLbtllYDg1YGq0RMqmv9xhBz7RG6WVOSCmUem+TAVQkIX2tPT7ZcZF+FLkCoNHhOc=", - "power": "260894447470586868017041531430614024483" + "public_key": "BH5D8ccOKZTLbslmKgVzhkzYJUH8HfUHJ+GpgDNQe7R5TIpjhlJOB3i8O76iebA05LzpDPmKHu/3fw+E9PH+bJM=", + "power": "91680598785060915932670932172996893971" }, { - "public_key": "BA0y6qtkjqTG5IdQDRNT3yHZx4WI+6Ua+LFYuXdfczM/ocBhjNJQmQVpCjDvuwHoBPD0hzK4KxtdJe+lPa7a3hU=", - "power": "178349749897518691979494799247727256537" + "public_key": "BA5IaVwESz+YmgJYU2ebuCqcRdCwTWQcboPi7damKfFkdkC3Wig8VaIWjiYwcE+H0R8XaE2Jg0rZ2TCOiRJ7cWk=", + "power": "203185844703013843453464423669543643840" }, { - "public_key": "BPhrBPJqcNmmxgGlHirO5Tt03wYXcUKlHJVYuqdWzNew6RQby1tcXpqwyFxSazNt6cOQ0UNjXBa2RAnw9CWJieY=", - "power": "135063245844052113159013998821298664415" + "public_key": "BPs6vRJsPlUB+3ipe/r+4701HXWSV3E01vd/MkdRikAlfN+ALllUEkGmvK/exyeC7JLMr5ahI621l8ZEviYPpQ0=", + "power": "226575883433545164735832720684510944066" + }, + { + "public_key": "BK/bT0XF7NcvLtxYLjuCNf9Jh4Lm7yzx9127OupNaS0aMClpWLXbMDAVokzCM62LF7/h359fgfRIDpZkqTuVC10=", + "power": "86547577664969384614076978215207358206" + }, + { + "public_key": "BKeJffIrsfHZAT1LRYivDtggz53c/HoT3QQbM7nh8jo0R1eZ6jQ3ReAzyhSKP7JYFgMgACcsdmvONGGH2Sw9uDs=", + "power": "45906758430646698643388752623232051481" } ], "accounts": [ + { + "meta": { + "Multisig": { + "signers": [ + "f1wfp3qxaniskb2wrmuznq4lh5f7yosusizebxj2a", + "f1hf25io4nmjhex2qdjqmdzyt4ckswceefcx2yqmi", + "f1zqyla4dld6ifsfaqp3syfycmkq25f5pidb7jyea" + ], + "threshold": 3, + "vesting_duration": 6751209550894226961, + "vesting_start": 
6976100918620151485 + } + }, + "balance": "97156951261594553286313547130067065459" + }, + { + "meta": { + "Multisig": { + "signers": [ + "f1pdcdgdja64kfotq3cnwpj7yjkqleqr2xbbt6mzi", + "f15v3rxxexa5gppkzx74nkjft5ndxrygmzl5rmifq", + "f1nwtqqcnslcn2annevfckkzz5tx2dbylupbshkla" + ], + "threshold": 1, + "vesting_duration": 2114408940148013, + "vesting_start": 371510136937269349 + } + }, + "balance": "251556828864509738395289928249703715510" + }, { "meta": { "Account": { - "owner": "f410fkutwqjirqgadmxm43p2myoi5m3gurnulc4uvrti" + "owner": "f1cgubsj6dus4eod774qyoczn45am6i7g3hcffnhi" } }, - "balance": "68216112094381133411177903949584186607" + "balance": "283041057149337595479796882931880733859" } ], "eam_permission_mode": { "mode": "unrestricted" }, - "ipc": { - "gateway": { - "subnet_id": "/r16315642738389104412", - "bottom_up_check_period": 11339269183869879227, - "majority_percentage": 73, - "active_validators_limit": 63 - } - }, - "ipc_contracts_owner": "0x8989898989898989898989898989898989898989" + "ipc_contracts_owner": "0x2424242424242424242424242424242424242424" } \ No newline at end of file diff --git a/fendermint/vm/genesis/golden/genesis/json/genesis.txt b/fendermint/vm/genesis/golden/genesis/json/genesis.txt index f37522ab4d..ffbf737369 100644 --- a/fendermint/vm/genesis/golden/genesis/json/genesis.txt +++ b/fendermint/vm/genesis/golden/genesis/json/genesis.txt @@ -1 +1 @@ -Genesis { chain_name: "U", chain_id: 101, timestamp: Timestamp(12982167733895342124), network_version: NetworkVersion(21), base_fee: TokenAmount(20131727983098262839.895554089832955311), power_scale: 3, validators: [Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [45509378, 9967865, 19266148, 57627913, 43530089, 39566124, 42255838, 19000002, 43018934, 2066272], magnitude: 1, normalized: true }, y: Field { n: [31724839, 4723096, 32906809, 26324411, 50187947, 49782147, 62415403, 13171138, 13770772, 849711], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(40223257309125237738.285695934834340379)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [46328797, 60017162, 15074816, 32005025, 4847681, 13984284, 3003288, 22080522, 15552392, 325362], magnitude: 1, normalized: true }, y: Field { n: [456116, 24968365, 22660293, 47803056, 40877344, 10185679, 1562946, 49318031, 47226451, 964574], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(19706769749739782860.581433033120308803)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [46128225, 17904297, 55969451, 26575072, 4943577, 33998584, 45300294, 65613349, 62158676, 1580895], magnitude: 1, normalized: true }, y: Field { n: [55018727, 12124832, 6578053, 64220567, 8779444, 50418724, 18783204, 14975641, 18589077, 118708], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(260894447470586868017.041531430614024483)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [57881407, 36593111, 28281621, 35909268, 31049605, 13957064, 7667921, 43195282, 44786830, 216250], magnitude: 1, normalized: true }, y: Field { n: [47898133, 65621867, 28693086, 13295788, 15791239, 46168577, 10686203, 40113572, 26006096, 2650136], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(178349749897518691979.494799247727256537)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [46978992, 36612565, 38914389, 6145290, 57990918, 45332814, 1724898, 57056024, 
15886960, 4070081], magnitude: 1, normalized: true }, y: Field { n: [25790950, 41696521, 23815232, 17665392, 29593809, 46979962, 8766758, 24799939, 63658844, 3818758], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(135063245844052113159.013998821298664415)) }], accounts: [Actor { meta: Account(Account { owner: SignerAddr(Address("f410fkutwqjirqgadmxm43p2myoi5m3gurnulc4uvrti")) }), balance: TokenAmount(68216112094381133411.177903949584186607) }], eam_permission_mode: Unrestricted, ipc: Some(IpcParams { gateway: GatewayParams { subnet_id: SubnetID { root: 16315642738389104412, children: [] }, bottom_up_check_period: 11339269183869879227, majority_percentage: 73, active_validators_limit: 63 } }), ipc_contracts_owner: 0x8989898989898989898989898989898989898989, f3: None } \ No newline at end of file +Genesis { chain_name: "7", chain_id: 101, timestamp: Timestamp(17133465601291489619), network_version: NetworkVersion(21), base_fee: TokenAmount(0.0), power_scale: 0, validators: [Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [65312307, 48183320, 1227975, 61947608, 58607306, 65943550, 47143866, 21285515, 66850825, 3777126], magnitude: 1, normalized: true }, y: Field { n: [16919043, 24659162, 38845181, 30708676, 54073573, 2632868, 47500380, 48114998, 13514159, 2398621], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(133436547121158909513.477972881658559922)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [4726370, 57128724, 4500995, 60089315, 50299679, 64874753, 35559300, 21580625, 512394, 1935926], magnitude: 1, normalized: true }, y: Field { n: [40239476, 44964759, 5597281, 16304172, 19362289, 51393456, 51585919, 29982532, 36759593, 656034], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(240244227391930844362.499398952384284081)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [8107129, 39849172, 7503386, 66090964, 14165313, 22864275, 9855648, 39005627, 29822505, 2068732], magnitude: 1, normalized: true }, y: Field { n: [33451155, 65092924, 50296816, 65415291, 12380428, 40635705, 62646823, 1958640, 59134542, 1254040], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(91680598785060915932.670932172996893971)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [36303204, 12285353, 29812798, 46216592, 43795920, 31911434, 2458934, 16671336, 22807627, 234010], magnitude: 1, normalized: true }, y: Field { n: [41644393, 2335300, 11378067, 20325901, 18814824, 1303028, 14836487, 22448218, 56240188, 1937453], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(203185844703013843453.46442366954364384)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [25837605, 63738324, 55406455, 38362564, 20258165, 46119151, 59414463, 22284269, 17984574, 4116143], magnitude: 1, normalized: true }, y: Field { n: [34579725, 26292105, 47929724, 39486606, 9620655, 30007483, 63634924, 4785818, 3037524, 2045920], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(226575883433545164735.832720684510944066)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [23670042, 47102611, 52393435, 60538035, 55150466, 48270719, 29721315, 56409275, 54904300, 2881235], magnitude: 1, normalized: true }, y: Field { n: [60099421, 26815054, 54821097, 41778695, 62906847, 15426245, 35965987, 
12632150, 22590939, 789082], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(86547577664969384614.076978215207358206)) }, Validator { public_key: ValidatorKey(PublicKey(Affine { x: Field { n: [32651828, 46984824, 20828225, 57930216, 2150301, 36422582, 64271448, 63398916, 32648113, 2744927], magnitude: 1, normalized: true }, y: Field { n: [4044859, 6420043, 45933382, 10269145, 33759232, 65836549, 10569891, 18317519, 32126007, 1168870], magnitude: 1, normalized: true }, infinity: false })), power: Collateral(TokenAmount(45906758430646698643.388752623232051481)) }], accounts: [Actor { meta: Multisig(Multisig { signers: [SignerAddr(Address("f1wfp3qxaniskb2wrmuznq4lh5f7yosusizebxj2a")), SignerAddr(Address("f1hf25io4nmjhex2qdjqmdzyt4ckswceefcx2yqmi")), SignerAddr(Address("f1zqyla4dld6ifsfaqp3syfycmkq25f5pidb7jyea"))], threshold: 3, vesting_duration: 6751209550894226961, vesting_start: 6976100918620151485 }), balance: TokenAmount(97156951261594553286.313547130067065459) }, Actor { meta: Multisig(Multisig { signers: [SignerAddr(Address("f1pdcdgdja64kfotq3cnwpj7yjkqleqr2xbbt6mzi")), SignerAddr(Address("f15v3rxxexa5gppkzx74nkjft5ndxrygmzl5rmifq")), SignerAddr(Address("f1nwtqqcnslcn2annevfckkzz5tx2dbylupbshkla"))], threshold: 1, vesting_duration: 2114408940148013, vesting_start: 371510136937269349 }), balance: TokenAmount(251556828864509738395.28992824970371551) }, Actor { meta: Account(Account { owner: SignerAddr(Address("f1cgubsj6dus4eod774qyoczn45am6i7g3hcffnhi")) }), balance: TokenAmount(283041057149337595479.796882931880733859) }], eam_permission_mode: Unrestricted, ipc: None, ipc_contracts_owner: 0x2424242424242424242424242424242424242424, f3: None } \ No newline at end of file diff --git a/fendermint/vm/genesis/src/lib.rs b/fendermint/vm/genesis/src/lib.rs index 7de96303d7..984ad110f9 100644 --- a/fendermint/vm/genesis/src/lib.rs +++ b/fendermint/vm/genesis/src/lib.rs @@ -285,6 +285,18 @@ pub mod ipc { pub struct F3Params { /// F3 instance ID from parent chain pub instance_id: u64, + /// Last finalized epoch committed at genesis for `instance_id`. + /// + /// The proof-service fetches certificates from `instance_id + 1`, so this value must + /// represent the overlap point for the *next* certificate: the last epoch in the + /// ECChain of `instance_id`. + /// + /// The first epoch to prove/execute is `base_epoch + 1` (allowing for null rounds). + pub base_epoch: fvm_shared::clock::ChainEpoch, + /// Ethereum JSON-RPC block hash (bytes32) for the `base_epoch` tipset. + /// + /// Derived deterministically from the F3 certificate's ECChain base tipset key bytes. 
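+    ///
+    /// Illustrative example (reusing the mainnet vector exercised in
+    /// `test_eth_hash_from_tipset_key_bytes_matches_mainnet_vector` in `f3_topdown.rs`): if
+    /// `base_epoch` were mainnet epoch 5707380, this hash would be
+    /// `0xb1b336d1164ed4a696920245d94d3a0c32d25b7d6d4758b51d7218e4f932b785`.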
+ pub base_epoch_eth_block_hash: [u8; 32], /// Power table for F3 consensus from parent chain pub power_table: Vec, } diff --git a/fendermint/vm/interpreter/Cargo.toml b/fendermint/vm/interpreter/Cargo.toml index b364e3c5f0..ab7ee5d865 100644 --- a/fendermint/vm/interpreter/Cargo.toml +++ b/fendermint/vm/interpreter/Cargo.toml @@ -18,6 +18,8 @@ fendermint_vm_genesis = { path = "../genesis" } fendermint_vm_message = { path = "../message" } fendermint_vm_resolver = { path = "../resolver" } fendermint_vm_topdown = { path = "../topdown" } +fendermint_vm_topdown_proof_service = { path = "../topdown/proof-service" } +fendermint_vm_evm_event_utils = { path = "../evm-event-utils" } fendermint_crypto = { path = "../../crypto" } fendermint_eth_hardhat = { path = "../../eth/hardhat" } fendermint_eth_deployer = { path = "../../eth/deployer" } @@ -39,9 +41,11 @@ ipc-observability = { path = "../../../ipc/observability" } async-trait = { workspace = true } async-stm = { workspace = true } anyhow = { workspace = true } +arc-swap = "1.6" base64 = { workspace = true } ethers = { workspace = true } hex = { workspace = true } +num-bigint = { workspace = true } num-traits = { workspace = true } serde = { workspace = true } serde_with = { workspace = true } @@ -75,11 +79,13 @@ quickcheck = { workspace = true, optional = true } rand = { workspace = true, optional = true } merkle-tree-rs = { path = "../../../ext/merkle-tree-rs" } +proofs = { git = "https://github.com/consensus-shipyard/ipc-filecoin-proofs", branch = "proofs" } [dev-dependencies] quickcheck = { workspace = true } quickcheck_macros = { workspace = true } tempfile = { workspace = true } +actors-builtin-car = { path = "../../actors-builtin-car" } fendermint_vm_interpreter = { path = ".", features = ["arb"] } fendermint_vm_message = { path = "../message", features = ["arb"] } diff --git a/fendermint/vm/interpreter/src/fvm/event_extraction.rs b/fendermint/vm/interpreter/src/fvm/event_extraction.rs new file mode 100644 index 0000000000..c6dd3329cc --- /dev/null +++ b/fendermint/vm/interpreter/src/fvm/event_extraction.rs @@ -0,0 +1,235 @@ +// Copyright 2022-2026 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Event extraction from F3 proof bundles +//! +//! This module provides functionality to extract and decode events from proof bundles, +//! including topdown messages and validator change events. + +use anyhow::{Context, Result}; +use fendermint_vm_evm_event_utils::{ + decode_new_power_change_request, decode_new_topdown_message, raw_log_from_event_proof, +}; +use ipc_api::cross::IpcEnvelope; +use ipc_api::staking::PowerChangeRequest; +use proofs::proofs::common::bundle::UnifiedProofBundle; +use tracing::{debug, trace}; + +/// Extract topdown messages from a proof bundle +/// +/// This function iterates through event proofs in the bundle and extracts +/// NewTopDownMessage events by: +/// 1. Finding events matching the signature +/// 2. Decoding the IpcEnvelope from the event data using contract bindings +/// 3. Returning all extracted messages +pub fn extract_topdown_messages(proof_bundle: &UnifiedProofBundle) -> Result> { + let mut messages = Vec::new(); + + for event_proof in &proof_bundle.event_proofs { + let raw = raw_log_from_event_proof(event_proof)?; + + // Try to decode as NewTopDownMessage event. 
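+        // Logs whose topic0 does not match simply fail to decode and are skipped; only
+        // matching NewTopDownMessage logs contribute to the returned list.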
+ if let Ok(event) = decode_new_topdown_message(&raw) { + trace!( + emitter = event_proof.event_data.emitter, + subnet = ?event.subnet, + "Found NewTopDownMessage event" + ); + + // Convert from contract binding type to IPC type + let envelope = IpcEnvelope::try_from(event.message) + .context("Failed to convert gateway IpcEnvelope to IPC IpcEnvelope")?; + messages.push(envelope); + } + } + + debug!( + message_count = messages.len(), + "Extracted topdown messages from proof bundle" + ); + + Ok(messages) +} + +/// Extract validator changes from a proof bundle +/// +/// This function iterates through event proofs and extracts +/// NewPowerChangeRequest events by: +/// 1. Finding events matching the signature +/// 2. Decoding the PowerChangeRequest from the event data using contract bindings +/// 3. Returning all extracted changes +pub fn extract_validator_changes( + proof_bundle: &UnifiedProofBundle, +) -> Result> { + let mut changes = Vec::new(); + + for event_proof in &proof_bundle.event_proofs { + let raw = raw_log_from_event_proof(event_proof)?; + + // Try to decode as NewPowerChangeRequest event. + if let Ok(event) = decode_new_power_change_request(&raw) { + trace!( + emitter = event_proof.event_data.emitter, + validator = ?event.validator, + op = event.op, + "Found NewPowerChangeRequest event" + ); + + // Convert to PowerChangeRequest + let change_request = PowerChangeRequest::try_from(event) + .context("Failed to convert power change event to PowerChangeRequest")?; + changes.push(change_request); + } + } + + debug!( + change_count = changes.len(), + "Extracted validator changes from proof bundle" + ); + + Ok(changes) +} + +// (Decoding helpers moved to `fendermint_vm_evm_event_utils` so proof-service and interpreter share logic.) + +#[cfg(test)] +mod tests { + use super::*; + use ethers::abi::Tokenizable; + use ethers::abi::{encode, Token}; + use ethers::contract::EthEvent; + use ethers::types::{Address as EthAddress, H256, U256}; + use fvm_shared::address::Address as FilAddress; + use fvm_shared::econ::TokenAmount; + use fvm_shared::ActorID; + use ipc_actors_abis::lib_gateway; + use ipc_actors_abis::lib_power_change_log; + use ipc_api::address::IPCAddress; + use ipc_api::cross::{IpcEnvelope as ApiIpcEnvelope, IpcMsgKind}; + use ipc_api::ethers_address_to_fil_address; + use ipc_api::subnet_id::SubnetID; + use proofs::proofs::common::bundle::UnifiedProofBundle; + use proofs::proofs::events::bundle::{EventData, EventProof}; + + fn h256_to_0x_string(h: H256) -> String { + format!("0x{}", hex::encode(h.as_bytes())) + } + + fn bytes_to_0x_string(b: &[u8]) -> String { + format!("0x{}", hex::encode(b)) + } + + fn mk_event_proof(topics: Vec, data: String) -> EventProof { + EventProof { + parent_epoch: 100, + child_epoch: 101, + parent_tipset_cids: vec!["bafy...parent".to_string()], + child_block_cid: "bafy...child".to_string(), + message_cid: "bafy...msg".to_string(), + exec_index: 0, + event_index: 0, + event_data: EventData { + emitter: 1000, + topics, + data, + }, + } + } + + #[test] + fn extracts_and_decodes_new_topdown_message_event() -> Result<()> { + // Build a valid IPC envelope, then convert into the EVM binding struct. + // This avoids guessing the `FvmAddress` encoding. 
+ let child_route = FilAddress::new_delegated(10 as ActorID, &[0x11; 20]) + .context("failed to create delegated route address")?; + let subnet_id = SubnetID::new(314159, vec![child_route]); + + let raw_from = FilAddress::new_delegated(10 as ActorID, &[0x22; 20]) + .context("failed to create delegated sender address")?; + let raw_to = FilAddress::new_delegated(10 as ActorID, &[0x33; 20]) + .context("failed to create delegated receiver address")?; + + let from = IPCAddress::new(&subnet_id, &raw_from)?; + let to = IPCAddress::new(&subnet_id, &raw_to)?; + + let api_env = ApiIpcEnvelope { + kind: IpcMsgKind::Transfer, + to, + value: TokenAmount::from_atto(0), + from, + message: vec![0xAA, 0xBB], + local_nonce: 1, + original_nonce: 2, + }; + + let evm_env = lib_gateway::IpcEnvelope::try_from(api_env.clone()) + .context("failed to convert api IpcEnvelope to evm IpcEnvelope")?; + + // Build event topics and data matching the proofs generator format. + let subnet_eth = EthAddress::from_slice(&[0x11; 20]); + let topic_subnet_bytes = encode(&[Token::Address(subnet_eth)]); + let topic_subnet = H256::from_slice(&topic_subnet_bytes); + + let id = [0x42u8; 32]; + let topic_id_bytes = encode(&[Token::FixedBytes(id.to_vec())]); + let topic_id = H256::from_slice(&topic_id_bytes); + + let topic0 = lib_gateway::NewTopDownMessageFilter::signature(); + let data_bytes = encode(&[evm_env.clone().into_token()]); + + let proof = mk_event_proof( + vec![ + h256_to_0x_string(topic0), + h256_to_0x_string(topic_subnet), + h256_to_0x_string(topic_id), + ], + bytes_to_0x_string(&data_bytes), + ); + + let bundle = UnifiedProofBundle { + storage_proofs: vec![], + event_proofs: vec![proof], + blocks: vec![], + }; + + let out = extract_topdown_messages(&bundle)?; + assert_eq!(out.len(), 1); + assert_eq!(out[0], api_env); + Ok(()) + } + + #[test] + fn extracts_and_decodes_new_power_change_request_event() -> Result<()> { + let validator_eth = EthAddress::from_slice(&[0x77; 20]); + let payload = vec![0xDE, 0xAD, 0xBE, 0xEF]; + let configuration_number = 42u64; + + let topic0 = lib_power_change_log::NewPowerChangeRequestFilter::signature(); + let data_bytes = encode(&[ + Token::Uint(U256::from(1u8)), // PowerOperation::SetPower + Token::Address(validator_eth), + Token::Bytes(payload.clone()), + Token::Uint(U256::from(configuration_number)), + ]); + + let proof = mk_event_proof( + vec![h256_to_0x_string(topic0)], + bytes_to_0x_string(&data_bytes), + ); + + let bundle = UnifiedProofBundle { + storage_proofs: vec![], + event_proofs: vec![proof], + blocks: vec![], + }; + + let out = extract_validator_changes(&bundle)?; + assert_eq!(out.len(), 1); + assert_eq!(out[0].configuration_number, configuration_number); + + let expected_validator = ethers_address_to_fil_address(&validator_eth)?; + assert_eq!(out[0].change.validator, expected_validator); + assert_eq!(out[0].change.payload, payload); + + Ok(()) + } +} diff --git a/fendermint/vm/interpreter/src/fvm/f3_topdown.rs b/fendermint/vm/interpreter/src/fvm/f3_topdown.rs new file mode 100644 index 0000000000..a8ad53a8a4 --- /dev/null +++ b/fendermint/vm/interpreter/src/fvm/f3_topdown.rs @@ -0,0 +1,898 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use anyhow::{bail, Context}; +use fendermint_vm_message::chain::ChainMessage; +use fendermint_vm_message::ipc::ParentFinalityWithCert; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::clock::ChainEpoch; +use ipc_api::cross::IpcEnvelope; +use ipc_api::staking::PowerChangeRequest; +use 
std::sync::Arc; +use thiserror::Error; + +use fendermint_vm_message::ipc::IpcMessage; +use fendermint_vm_topdown_proof_service::types::SerializableF3Certificate; +use fendermint_vm_topdown_proof_service::PowerEntries; + +use crate::fvm::event_extraction::{extract_topdown_messages, extract_validator_changes}; +use crate::fvm::state::ipc::F3LightClientCaller; +use crate::fvm::state::FvmExecState; + +#[derive(Debug, Error)] +pub enum F3TopDownError { + #[error("proof bundle not found in local cache for height {height}")] + CacheMiss { height: ChainEpoch }, +} + +#[derive(Debug)] +pub struct ExtractedTopDownEffects { + pub topdown_msgs: Vec, + pub validator_changes: Vec, + pub instance_id: u64, + pub parent_eth_block_hash: [u8; 32], +} + +fn eth_hash_from_tipset_key_bytes( + tipset: &fendermint_vm_topdown_proof_service::types::FinalizedTipset, +) -> anyhow::Result<[u8; 32]> { + fendermint_vm_topdown_proof_service::types::eth_hash_from_tipset_key_bytes(&tipset.block_cids) +} + +/// F3 finality handler - handles all F3 proof-based finality logic +/// This module encapsulates all F3-specific concerns, keeping TopDownManager clean +#[derive(Clone)] +pub struct F3TopDownHandler { + /// Proof cache for F3-based parent finality (off-chain, local). + proof_cache: Arc, + /// F3 Light Client **actor** caller (on-chain state in the FVM). + f3_light_client_actor_caller: F3LightClientCaller, +} + +impl F3TopDownHandler { + pub fn new(proof_cache: Arc) -> Self { + Self { + proof_cache, + f3_light_client_actor_caller: F3LightClientCaller::new(), + } + } + + /// Get reference to the proof cache + pub fn proof_cache(&self) -> &Arc { + &self.proof_cache + } + + /// Query proof cache for next uncommitted proof and create a chain message with proof bundle. + /// + /// This is the v2 proof-based approach that replaces voting with cryptographic verification. + /// + /// Returns `None` if: + /// - No proof available for next height + /// - Cache is temporarily empty (graceful degradation) + pub fn chain_message_from_proof_cache(&self) -> Option { + // Get next uncommitted proof (epoch after last_committed) + let epoch_with_cert = self.proof_cache.get_next_uncommitted_epoch_with_cert()?; + + tracing::debug!( + instance_id = epoch_with_cert.certificate.gpbft_instance, + epoch = epoch_with_cert.epoch, + "found next uncommitted epoch with certificate in cache" + ); + + // Convert FinalityCertificate to SerializableF3Certificate for message + let serializable_cert = SerializableF3Certificate::from(&epoch_with_cert.certificate); + + Some(ChainMessage::Ipc(IpcMessage::ParentFinalityWithCert( + ParentFinalityWithCert { + height: epoch_with_cert.epoch, + certificate: fendermint_vm_message::ipc::Certificate::FilecoinF3(serializable_cert), + }, + ))) + } + + /// Attest a generalised top-down message during the attestation phase. + /// + /// Cache-first attestation. + /// + /// We require that: + /// - there is an epoch proof in the local cache for `msg.height` + /// - the certificate attached to the message matches the certificate referenced by the cache entry + /// + /// Proof bundle validity is verified at proof generation time (before insertion into the cache). 
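+    ///
+    /// Worked example (illustrative numbers): with `last_committed_epoch = 49` and cached
+    /// proofs for epochs 50 and 52 (51 being a null round), only a proposal with
+    /// `height == 50` passes the epoch-ordering check, and its certificate's
+    /// `gpbft_instance` must not be lower than the actor's `processed_instance_id`.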
+ pub async fn attest( + &self, + state: &mut FvmExecState, + msg: &ParentFinalityWithCert, + ) -> anyhow::Result<()> + where + DB: Blockstore + Clone + 'static + Send + Sync, + { + let msg_cert = match &msg.certificate { + fendermint_vm_message::ipc::Certificate::FilecoinF3(cert) => cert, + }; + + let cached = self + .proof_cache + .get_epoch_proof_with_certificate(msg.height) + .ok_or_else(|| anyhow::Error::new(F3TopDownError::CacheMiss { height: msg.height }))?; + + let cached_cert = SerializableF3Certificate::from(&cached.certificate); + if &cached_cert != msg_cert { + bail!( + "certificate mismatch for epoch {} (message instance {}, cache instance {})", + msg.height, + msg_cert.gpbft_instance, + cached_cert.gpbft_instance + ); + } + + // Check on-chain continuity (this needs actor state access, hence in attestation). + let f3_state = self.get_f3_light_client_actor_state(state)?; + let instance_id = cached.certificate.gpbft_instance; + + // Certificate instance must not go backwards; it may stay the same (multiple epochs can be + // proven under the same certificate) or advance by more than 1. + // + // We allow forward jumps because intermediate instances may be "base-only" (empty suffix), + // which have no epoch execution point at which the on-chain light client actor could be + // updated. The actor itself allows forward jumps in a single atomic state transition. + if instance_id < f3_state.processed_instance_id { + bail!( + "certificate instance went backwards: {} < {}", + instance_id, + f3_state.processed_instance_id + ); + } + + // Epoch ordering: + // + // Filecoin can have null rounds (epochs with no tipsets), so tipset heights may skip. + // We must not require strict +1 sequencing. Instead, require that the proposal targets + // the next *available* cached epoch after the last committed one. + let expected_epoch = self + .proof_cache + .get_next_uncommitted_epoch() + .ok_or_else(|| { + anyhow::anyhow!( + "no cached proof available after last committed epoch {}", + self.proof_cache.last_committed_epoch() + ) + })?; + if msg.height != expected_epoch { + bail!( + "unexpected epoch: message height {} != expected {}", + msg.height, + expected_epoch + ); + } + + Ok(()) + } + + /// Execute F3-specific logic for a generalised top-down message. + /// Returns the topdown messages and validator changes to be processed by TopDownManager. + pub fn extract_top_down_effects( + &self, + msg: &ParentFinalityWithCert, + ) -> anyhow::Result { + // Cache is the source of truth: get the proof + certificate for this epoch. + let cached = self + .proof_cache + .get_epoch_proof_with_certificate(msg.height) + .ok_or_else(|| anyhow::Error::new(F3TopDownError::CacheMiss { height: msg.height }))?; + + // We don't validate the message certificate here; that happens during attestation. + let instance_id = cached.certificate.gpbft_instance; + + // Deterministically derive the FEVM/Ethereum-view block hash from the cached tipset key. + // + // In Lotus, the eth "block hash" for a tipset is `EthHashFromCid(TipSetKey.Cid())`, + // where `TipSetKey.Cid()` is the CID of the DAG-CBOR bytestring-wrapped key bytes. + // + // We already have the former tipset key bytes in the cache as `FinalizedTipset.block_cids`. 
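+        // Note: `FinalizedTipset.block_cids` holds the tipset's block CID bytes concatenated
+        // in their original (RPC) order; ordering is significant (see the mainnet vector test
+        // in this module).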
+ let tipset = cached + .finalized_tipsets + .iter() + .find(|t| t.epoch == msg.height) + .ok_or_else(|| { + anyhow::anyhow!( + "tipset key bytes not found in certificate for epoch {}", + msg.height + ) + })?; + let parent_eth_block_hash = eth_hash_from_tipset_key_bytes(tipset) + .context("failed to derive parent eth block hash from cached tipset key")?; + + tracing::debug!( + instance = instance_id, + height = msg.height, + "executing F3 generalised top-down" + ); + + let msgs = extract_topdown_messages(&cached.proof_bundle)?; + let validator_changes = extract_validator_changes(&cached.proof_bundle)?; + + tracing::debug!( + message_count = msgs.len(), + validator_changes_count = validator_changes.len(), + "extracted topdown effects from proof bundle" + ); + + Ok(ExtractedTopDownEffects { + topdown_msgs: msgs, + validator_changes, + instance_id, + parent_eth_block_hash, + }) + } + + /// Finalize F3 execution after all top-down effects have been applied successfully. + /// + /// This updates the on-chain F3 light client state and marks the epoch as committed in the proof cache. + pub fn finalize_after_execution( + &self, + state: &mut FvmExecState, + epoch: fvm_shared::clock::ChainEpoch, + instance_id: u64, + ) -> anyhow::Result<()> + where + DB: Blockstore + Clone + 'static + Send + Sync, + { + // Update F3LightClientActor with new certificate state (on-chain), but ONLY once we've + // executed the *last provable epoch* of this certificate. + // + // A certificate's ECChain contains tipsets [T0, T1, ... TN]. Proofs are generated for + // parent epochs in windows(2), i.e. for T0..T(N-1). The final tipset TN has no child in + // this certificate, so it is proven by the next certificate (as its base). + // + // Therefore we only advance the on-chain instance/power-table once we execute epoch T(N-1), + // i.e. the second-to-last tipset epoch in the certificate. + if self.is_last_provable_epoch_for_instance(instance_id, epoch)? { + let power_table = ActorPowerTable::try_from(&self.get_power_table(instance_id)?)?.0; + self.update_f3_light_client_actor_state(state, instance_id, power_table)?; + tracing::debug!( + instance = instance_id, + "updated F3LightClientActor state (end of cert)" + ); + } + + // Mark epoch as committed in cache. + if let Err(e) = self.mark_committed(epoch, instance_id) { + tracing::warn!( + error = %e, + epoch, + instance = instance_id, + "failed to mark epoch as committed in cache" + ); + } else { + tracing::debug!( + epoch, + instance = instance_id, + "marked epoch as committed in cache" + ); + } + + Ok(()) + } + + /// Get power table for a certificate instance from the **cache** (off-chain). + fn get_power_table(&self, instance_id: u64) -> anyhow::Result { + let cert_entry = self + .proof_cache + .get_certificate(instance_id) + .ok_or_else(|| { + anyhow::anyhow!( + "certificate not found in cache for instance {}", + instance_id + ) + })?; + + Ok(cert_entry.power_table) + } + + /// Mark epoch as committed in the **cache** (off-chain). + fn mark_committed( + &self, + epoch: fvm_shared::clock::ChainEpoch, + instance_id: u64, + ) -> anyhow::Result<()> { + self.proof_cache + .mark_committed(epoch, instance_id) + .map_err(|e| { + anyhow::anyhow!( + "failed to mark epoch {} as committed in cache: {}", + epoch, + e + ) + }) + } + + /// Get F3 Light Client **actor** state (on-chain). 
+ fn get_f3_light_client_actor_state( + &self, + state: &mut FvmExecState, + ) -> anyhow::Result + where + DB: Blockstore + Clone + 'static + Send + Sync, + { + self.f3_light_client_actor_caller + .get_state(state) + .context("failed to get F3LightClientActor state") + } + + /// Update F3 Light Client **actor** state (on-chain). + fn update_f3_light_client_actor_state( + &self, + state: &mut FvmExecState, + processed_instance_id: u64, + power_table: Vec, + ) -> anyhow::Result<()> + where + DB: Blockstore + Clone + 'static + Send + Sync, + { + self.f3_light_client_actor_caller + .update_state(state, processed_instance_id, power_table) + .context("failed to update F3LightClientActor state") + } + + fn is_last_provable_epoch_for_instance( + &self, + instance_id: u64, + epoch: fvm_shared::clock::ChainEpoch, + ) -> anyhow::Result { + let cert_entry = self + .proof_cache + .get_certificate(instance_id) + .ok_or_else(|| { + anyhow::anyhow!("certificate not found in cache for instance {instance_id}") + })?; + let Some(last_provable) = fendermint_vm_topdown_proof_service::types::last_provable_tipset( + &cert_entry.certificate.ec_chain, + ) else { + // Base-only certificate (len < 2): valid but has no provable `(parent, child)` pair. + return Ok(false); + }; + Ok(epoch == last_provable.epoch) + } +} + +/// Local helper newtype so we can provide a clean `From` impl at the conversion boundary. +struct ActorPowerTable(Vec); + +impl TryFrom<&PowerEntries> for ActorPowerTable { + type Error = anyhow::Error; + + fn try_from(entries: &PowerEntries) -> Result { + use num_bigint::Sign; + + let out = entries + .iter() + .map(|pe| { + let (sign, power_be) = pe.power.to_bytes_be(); + if sign == Sign::Minus { + anyhow::bail!("negative power for participant id {}", pe.id); + } + + Ok(fendermint_vm_actor_interface::f3_light_client::PowerEntry { + id: pe.id, + public_key: pe.pub_key.0.clone(), + power_be, + }) + }) + .collect::>>()?; + + Ok(Self(out)) + } +} + +#[cfg(test)] +mod tests { + use super::F3TopDownHandler; + use crate::fvm::state::FvmGenesisState; + use crate::fvm::store::memory::MemoryBlockstore; + use anyhow::Context; + use cid::multihash::Multihash; + use fendermint_vm_actor_interface::{f3_light_client, gas_market, init, system}; + use fendermint_vm_core::Timestamp; + use fendermint_vm_genesis::PowerScale; + use fendermint_vm_message::chain::ChainMessage; + use fendermint_vm_message::ipc::{Certificate, IpcMessage}; + use fendermint_vm_topdown_proof_service::config::CacheConfig; + use fendermint_vm_topdown_proof_service::types::{ + CertificateEntry, EpochProofEntry, SerializableCertificateEntry, SerializableECChainEntry, + SerializableF3Certificate, SerializablePowerEntries, SerializablePowerEntry, + SerializableSupplementalData, + }; + use fendermint_vm_topdown_proof_service::ProofCache; + use fvm::engine::MultiEngine; + use fvm_shared::clock::ChainEpoch; + use fvm_shared::econ::TokenAmount; + use fvm_shared::version::NetworkVersion; + use num_traits::Zero; + use proofs::proofs::common::bundle::UnifiedProofBundle; + use std::collections::BTreeSet; + use std::sync::Arc; + + #[test] + fn test_eth_hash_from_tipset_key_bytes_matches_mainnet_vector() -> anyhow::Result<()> { + // Mainnet test vector for epoch 5707380. + // Generated from: + // - Filecoin.ChainGetTipSetByHeight(5707380).Cids (in RPC order) + // - Filecoin.EthGetBlockByNumber(5707380).hash + // + // IMPORTANT: CID order is significant; do NOT sort these. 
+ let cids = [ + "bafy2bzacedqyixpeoqskjviifl6s2jmsabnexnw5ho77wakh3s3bevsdyqyle", + "bafy2bzaceacu7rvgnnmeq2ibhtzil6ygw6govnaqdia3yr3otetfk7whiohkq", + "bafy2bzaceccqjvlsxeesvb4mksxa62n47o7ahuvkpfue32qkwpjcbuw3lhdja", + "bafy2bzaced46vktulg7par7y3b5uwemwsamqucdn3mpbtezcxfv36gcfwumpy", + "bafy2bzaceb26bjmzcqkvnrwlfr7o3kxrdliqf6d5metxta5genc2fg7qz7x2y", + ]; + let expected_hex = "b1b336d1164ed4a696920245d94d3a0c32d25b7d6d4758b51d7218e4f932b785"; + let expected = hex::decode(expected_hex)?; + let expected: [u8; 32] = expected + .try_into() + .map_err(|_| anyhow::anyhow!("expected hash must be 32 bytes"))?; + + let mut block_cids = Vec::new(); + for s in cids { + let cid: cid::Cid = s.parse()?; + block_cids.extend_from_slice(cid.to_bytes().as_slice()); + } + + let tipset = fendermint_vm_topdown_proof_service::types::FinalizedTipset { + epoch: 5707380, + block_cids, + }; + + let got = super::eth_hash_from_tipset_key_bytes(&tipset)?; + assert_eq!(got, expected); + Ok(()) + } + + fn mk_test_certificate_entry(instance_id: u64, epochs: Vec) -> CertificateEntry { + mk_test_certificate_entry_with_powers( + instance_id, + epochs, + vec![ + (1u64, "1000".to_string(), vec![1u8; 48]), + (2u64, "2000".to_string(), vec![2u8; 48]), + ], + ) + } + + fn mk_test_certificate_entry_with_powers( + instance_id: u64, + epochs: Vec, + powers: Vec<(u64, String, Vec)>, + ) -> CertificateEntry { + let mh = Multihash::<64>::wrap(0x12, &[0u8; 32]).expect("valid multihash"); + let power_table_cid = cid::Cid::new_v1(0x55, mh).to_string(); + + let ec_chain = epochs + .into_iter() + .map(|epoch| SerializableECChainEntry { + epoch, + key: vec!["0".to_string()], + power_table: power_table_cid.clone(), + commitments: vec![0u8; 32], + }) + .collect(); + + let serializable = SerializableCertificateEntry { + certificate: SerializableF3Certificate { + gpbft_instance: instance_id, + ec_chain, + supplemental_data: SerializableSupplementalData { + power_table: power_table_cid.clone(), + commitments: vec![0u8; 32], + }, + signers: vec![0], + signature: vec![], + power_table_delta: vec![], + }, + power_table: SerializablePowerEntries( + powers + .into_iter() + .map(|(id, power, pub_key)| SerializablePowerEntry { id, power, pub_key }) + .collect(), + ), + source_rpc: "test".to_string(), + fetched_at: std::time::SystemTime::now(), + }; + + CertificateEntry::try_from(serializable).expect("valid certificate entry") + } + + #[tokio::test] + async fn f3_topdown_handler_end_to_end_cache_to_finalize() -> anyhow::Result<()> { + // Minimal FVM genesis state with F3LightClientActor so attestation can query actor state. + let store = MemoryBlockstore::new(); + let multi_engine = Arc::new(MultiEngine::new(1)); + let mut genesis_state = FvmGenesisState::new( + store, + multi_engine, + actors_builtin_car::CAR, + actors_custom_car::CAR, + ) + .await + .context("failed to create FVM genesis state")?; + + // System actor (required so the FVM can load the builtin actor manifest). + genesis_state + .create_builtin_actor( + system::SYSTEM_ACTOR_CODE_ID, + system::SYSTEM_ACTOR_ID, + &system::State { + builtin_actors: genesis_state.manifest_data_cid, + }, + TokenAmount::zero(), + None, + ) + .context("failed to create system actor")?; + + // Init actor (safe default for message execution environment). 
+ let (init_state, _addr_to_id) = init::State::new( + genesis_state.store(), + "test".to_string(), + &[], + &BTreeSet::new(), + 0, + ) + .context("failed to create init state")?; + genesis_state + .create_builtin_actor( + init::INIT_ACTOR_CODE_ID, + init::INIT_ACTOR_ID, + &init_state, + TokenAmount::zero(), + None, + ) + .context("failed to create init actor")?; + + // Gas market custom actor: required by BlockGasTracker initialization. + let gas_market_state = fendermint_actor_gas_market_eip1559::State { + base_fee: TokenAmount::from_atto(100), + constants: fendermint_actor_gas_market_eip1559::Constants::default(), + }; + genesis_state + .create_custom_actor( + fendermint_actor_gas_market_eip1559::ACTOR_NAME, + gas_market::GAS_MARKET_ACTOR_ID, + &gas_market_state, + TokenAmount::zero(), + None, + ) + .context("failed to create gas market actor")?; + + let instance_id = 7u64; + let base_epoch: ChainEpoch = 50; + let genesis_power_table = vec![f3_light_client::PowerEntry { + id: 10, + public_key: vec![9u8; 48], + power_be: vec![9], + }]; + + let f3_state = fendermint_actor_f3_light_client::state::State::new( + genesis_state.store(), + instance_id, + genesis_power_table, + ) + .context("failed to create F3 light client actor state")?; + genesis_state + .create_custom_actor( + fendermint_actor_f3_light_client::F3_LIGHT_CLIENT_ACTOR_NAME, + f3_light_client::F3_LIGHT_CLIENT_ACTOR_ID, + &f3_state, + TokenAmount::zero(), + None, + ) + .context("failed to create F3 light client actor")?; + + // Initialize execution params (required for executing implicit/read-only messages). + genesis_state + .init_exec_state( + Timestamp(1), + NetworkVersion::V21, + TokenAmount::from_atto(100), + TokenAmount::zero(), + 1, + 0 as PowerScale, + ) + .context("failed to init exec state")?; + let mut exec_state = genesis_state + .into_exec_state() + .map_err(|_| anyhow::anyhow!("genesis exec state missing"))?; + + // Prepare a cache with exactly one next epoch proof. + // + // Proofs are generated for parent epochs in `windows(2)`. With a certified chain + // `[base_epoch, base_epoch + 1]`, the only provable (parent) epoch is `base_epoch`. + let cache = ProofCache::new( + base_epoch - 1, + instance_id, + CacheConfig { + lookahead_instances: 10, + retention_epochs: 10, + }, + ); + cache + .insert_certificate(mk_test_certificate_entry( + instance_id, + vec![base_epoch, base_epoch + 1], + )) + .context("failed to insert certificate")?; + cache + .insert_epoch_proofs(vec![EpochProofEntry::new( + base_epoch, + UnifiedProofBundle { + storage_proofs: vec![], + event_proofs: vec![], + blocks: vec![], + }, + instance_id, + )]) + .context("failed to insert epoch proof")?; + + let handler = F3TopDownHandler::new(Arc::new(cache.clone())); + + // Propose from cache. + let chain_msg = handler + .chain_message_from_proof_cache() + .expect("next uncommitted epoch proof exists"); + let msg = match chain_msg { + ChainMessage::Ipc(IpcMessage::ParentFinalityWithCert(m)) => m, + other => anyhow::bail!("unexpected chain message: {other:?}"), + }; + assert_eq!(msg.height, base_epoch); + + // Attest: cache match + on-chain continuity. + handler + .attest(&mut exec_state, &msg) + .await + .context("attestation failed")?; + + // Extract effects (should be empty in this fabricated proof bundle). 
+ let extracted = handler.extract_top_down_effects(&msg)?; + assert!(extracted.topdown_msgs.is_empty()); + assert!(extracted.validator_changes.is_empty()); + assert_eq!(extracted.instance_id, instance_id); + + // Finalize: updates actor state + marks cache committed. + handler + .finalize_after_execution(&mut exec_state, msg.height, extracted.instance_id) + .context("finalize failed")?; + + // Actor state updated (we update at the cert's last provable epoch). + let caller = crate::fvm::state::ipc::F3LightClientCaller::new(); + let actor_state = caller.get_state(&mut exec_state)?; + assert_eq!(actor_state.processed_instance_id, instance_id); + // Actor no longer tracks finalized height; the epoch cursor is stored in the gateway. + assert_eq!(actor_state.power_table.len(), 2); + assert_eq!(actor_state.power_table[0].id, 1); + assert_eq!(actor_state.power_table[0].power_be, vec![0x03, 0xE8]); + + // Cache committed cursor updated. + assert_eq!( + handler.proof_cache().last_committed(), + (base_epoch, instance_id) + ); + + // Sanity: message certificate is FilecoinF3 (we don't decode internals here). + match msg.certificate { + Certificate::FilecoinF3(_) => {} + } + + Ok(()) + } + + #[tokio::test] + async fn f3_topdown_allows_forward_instance_jumps_without_catchup() -> anyhow::Result<()> { + // This test covers the case where the message certificate instance is ahead of the actor's + // processed instance due to intermediate base-only instances. We allow forward jumps and + // update the actor in a single atomic state transition (no grinding through intermediates). + + // Minimal FVM genesis state with F3LightClientActor so attestation can query actor state. + let store = MemoryBlockstore::new(); + let multi_engine = Arc::new(MultiEngine::new(1)); + let mut genesis_state = FvmGenesisState::new( + store, + multi_engine, + actors_builtin_car::CAR, + actors_custom_car::CAR, + ) + .await + .context("failed to create FVM genesis state")?; + + // System actor (required so the FVM can load the builtin actor manifest). + genesis_state + .create_builtin_actor( + system::SYSTEM_ACTOR_CODE_ID, + system::SYSTEM_ACTOR_ID, + &system::State { + builtin_actors: genesis_state.manifest_data_cid, + }, + TokenAmount::zero(), + None, + ) + .context("failed to create system actor")?; + + // Init actor (safe default for message execution environment). + let (init_state, _addr_to_id) = init::State::new( + genesis_state.store(), + "test".to_string(), + &[], + &BTreeSet::new(), + 0, + ) + .context("failed to create init state")?; + genesis_state + .create_builtin_actor( + init::INIT_ACTOR_CODE_ID, + init::INIT_ACTOR_ID, + &init_state, + TokenAmount::zero(), + None, + ) + .context("failed to create init actor")?; + + // Gas market custom actor: required by BlockGasTracker initialization. + let gas_market_state = fendermint_actor_gas_market_eip1559::State { + base_fee: TokenAmount::from_atto(100), + constants: fendermint_actor_gas_market_eip1559::Constants::default(), + }; + genesis_state + .create_custom_actor( + fendermint_actor_gas_market_eip1559::ACTOR_NAME, + gas_market::GAS_MARKET_ACTOR_ID, + &gas_market_state, + TokenAmount::zero(), + None, + ) + .context("failed to create gas market actor")?; + + // Actor starts at instance 1. 
+ let initial_processed_instance = 1u64; + let initial_power_table = vec![f3_light_client::PowerEntry { + id: 10, + public_key: vec![9u8; 48], + power_be: vec![9], + }]; + let f3_state = fendermint_actor_f3_light_client::state::State::new( + genesis_state.store(), + initial_processed_instance, + initial_power_table, + ) + .context("failed to create F3 light client actor state")?; + genesis_state + .create_custom_actor( + fendermint_actor_f3_light_client::F3_LIGHT_CLIENT_ACTOR_NAME, + f3_light_client::F3_LIGHT_CLIENT_ACTOR_ID, + &f3_state, + TokenAmount::zero(), + None, + ) + .context("failed to create F3 light client actor")?; + + // Initialize execution params (required for executing implicit/read-only messages). + genesis_state + .init_exec_state( + Timestamp(1), + NetworkVersion::V21, + TokenAmount::from_atto(100), + TokenAmount::zero(), + 1, + 0 as PowerScale, + ) + .context("failed to init exec state")?; + let mut exec_state = genesis_state + .into_exec_state() + .map_err(|_| anyhow::anyhow!("genesis exec state missing"))?; + + // Build a cache with: + // - Instance 2: base-only cert (no epoch proofs). + // - Instance 3: cert with a provable window `[E, E+1]`, so we can propose/execute epoch `E`. + let target_instance = 3u64; + let epoch_e: ChainEpoch = 50; + let cache = ProofCache::new( + epoch_e - 1, + initial_processed_instance, + CacheConfig { + lookahead_instances: 10, + retention_epochs: 10, + }, + ); + + cache + .insert_certificate(mk_test_certificate_entry_with_powers( + 2, + vec![epoch_e - 10], // base-only: len=1 + vec![ + (1u64, "1111".to_string(), vec![3u8; 48]), + (2u64, "2222".to_string(), vec![4u8; 48]), + ], + )) + .context("failed to insert base-only certificate (instance 2)")?; + + cache + .insert_certificate(mk_test_certificate_entry_with_powers( + target_instance, + vec![epoch_e, epoch_e + 1], // provable parent epoch is `epoch_e` + vec![ + (1u64, "3333".to_string(), vec![5u8; 48]), + (2u64, "4444".to_string(), vec![6u8; 48]), + ], + )) + .context("failed to insert certificate (instance 3)")?; + + cache + .insert_epoch_proofs(vec![EpochProofEntry::new( + epoch_e, + UnifiedProofBundle { + storage_proofs: vec![], + event_proofs: vec![], + blocks: vec![], + }, + target_instance, + )]) + .context("failed to insert epoch proof (instance 3)")?; + + let handler = F3TopDownHandler::new(Arc::new(cache.clone())); + + // Propose from cache: should propose epoch `epoch_e` under instance 3. + let chain_msg = handler + .chain_message_from_proof_cache() + .expect("next uncommitted epoch proof exists"); + let msg = match chain_msg { + ChainMessage::Ipc(IpcMessage::ParentFinalityWithCert(m)) => m, + other => anyhow::bail!("unexpected chain message: {other:?}"), + }; + assert_eq!(msg.height, epoch_e); + + // Attest: must allow the instance jump 1 -> 3 (actor continuity is monotonic-only). + handler + .attest(&mut exec_state, &msg) + .await + .context("attestation failed")?; + + // Execute and finalize: should update actor directly to instance 3. + let extracted = handler.extract_top_down_effects(&msg)?; + assert_eq!(extracted.instance_id, target_instance); + handler + .finalize_after_execution(&mut exec_state, msg.height, extracted.instance_id) + .context("finalize failed")?; + + let caller = crate::fvm::state::ipc::F3LightClientCaller::new(); + let actor_state = caller.get_state(&mut exec_state)?; + assert_eq!(actor_state.processed_instance_id, target_instance); + + // Cache committed cursor updated. 
+ assert_eq!( + handler.proof_cache().last_committed(), + (epoch_e, target_instance) + ); + + Ok(()) + } + + #[test] + fn base_only_certificate_is_not_last_provable_epoch() -> anyhow::Result<()> { + // A base-only certificate (ECChain len=1) has no provable `(parent, child)` pair, + // therefore there is no "last provable epoch" for that instance. + use fendermint_vm_topdown_proof_service::cache::ProofCache; + use fendermint_vm_topdown_proof_service::config::CacheConfig; + + let instance_id = 7u64; + let base_epoch: ChainEpoch = 50; + + let cache = ProofCache::new(base_epoch, instance_id, CacheConfig::default()); + cache + .insert_certificate(mk_test_certificate_entry(instance_id, vec![base_epoch])) + .context("failed to insert base-only certificate")?; + + let handler = F3TopDownHandler::new(Arc::new(cache)); + + assert!(!handler.is_last_provable_epoch_for_instance(instance_id, base_epoch)?); + Ok(()) + } +} diff --git a/fendermint/vm/interpreter/src/fvm/interpreter.rs b/fendermint/vm/interpreter/src/fvm/interpreter.rs index 5a3cb5bc52..5188b6228f 100644 --- a/fendermint/vm/interpreter/src/fvm/interpreter.rs +++ b/fendermint/vm/interpreter/src/fvm/interpreter.rs @@ -275,9 +275,10 @@ where .into_iter() .map(Into::into); + // Get parent finality message - TopDownManager decides internally whether to use F3 or legacy let top_down_iter = self .top_down_manager - .chain_message_from_finality_or_quorum() + .chain_message_for_proposal() .await .into_iter(); @@ -317,7 +318,7 @@ where async fn attest_block_messages( &self, - state: FvmExecState>>, + mut state: FvmExecState>>, msgs: Vec>, ) -> Result { if msgs.len() > self.max_msgs_per_block { @@ -329,27 +330,65 @@ where } let mut block_gas_usage = 0; - let base_fee = state.block_gas_tracker().base_fee(); + // Clone to avoid holding an immutable borrow of `state` while we also need mutable access + // during top-down attestation. + let base_fee = state.block_gas_tracker().base_fee().clone(); for msg in msgs { match fvm_ipld_encoding::from_slice::(&msg) { - Ok(chain_msg) => match chain_msg { - ChainMessage::Ipc(IpcMessage::TopDownExec(finality)) => { - if !self.top_down_manager.is_finality_valid(finality).await { - return Ok(AttestMessagesResponse::Reject); + Ok(chain_msg) => { + match chain_msg { + ChainMessage::Ipc(IpcMessage::ParentFinalityWithCert(ref msg)) => { + // Attest parent-finality-with-cert message (checks local cache + on-chain continuity). + match self + .top_down_manager + .attest_parent_finality_with_cert(&mut state, msg) + .await + { + Ok(()) => { + tracing::debug!( + height = msg.height, + "parent finality with cert attested successfully" + ); + } + Err(e) => { + tracing::warn!( + error = %e, + height = msg.height, + reason = if e.chain().any(|cause| { + matches!( + cause.downcast_ref::(), + Some(crate::fvm::f3_topdown::F3TopDownError::CacheMiss { .. 
}) + ) + }) { + "cache_miss" + } else { + "invalid" + }, + "parent finality with cert attestation failed - rejecting block" + ); + return Ok(AttestMessagesResponse::Reject); + } + } } - } - ChainMessage::Signed(signed) => { - if signed.message.gas_fee_cap < *base_fee { - tracing::warn!( - fee_cap = signed.message.gas_fee_cap.to_string(), - base_fee = base_fee.to_string(), - "msg fee cap less than base fee" - ); - return Ok(AttestMessagesResponse::Reject); + ChainMessage::Ipc(IpcMessage::TopDownExec(finality)) => { + // v1 voting-based finality (kept for backward compatibility) + if !self.top_down_manager.attest_legacy(finality).await { + return Ok(AttestMessagesResponse::Reject); + } + } + ChainMessage::Signed(signed) => { + if signed.message.gas_fee_cap < base_fee { + tracing::warn!( + fee_cap = signed.message.gas_fee_cap.to_string(), + base_fee = base_fee.to_string(), + "msg fee cap less than base fee" + ); + return Ok(AttestMessagesResponse::Reject); + } + block_gas_usage += signed.message.gas_limit; } - block_gas_usage += signed.message.gas_limit; } - }, + } Err(e) => { tracing::warn!(error = %e, "failed to decode message in proposal as ChainMessage"); return Ok(AttestMessagesResponse::Reject); @@ -459,9 +498,19 @@ where }) } ChainMessage::Ipc(ipc_msg) => match ipc_msg { + IpcMessage::ParentFinalityWithCert(msg) => { + let applied_message = self + .top_down_manager + .execute_parent_finality_with_cert(state, msg) + .await?; + Ok(ApplyMessageResponse { + applied_message, + domain_hash: None, + }) + } IpcMessage::TopDownExec(p) => { - let applied_message = - self.top_down_manager.execute_topdown_msg(state, p).await?; + // OLD: v1 voting-based execution (kept for backward compatibility) + let applied_message = self.top_down_manager.execute_legacy(state, p).await?; Ok(ApplyMessageResponse { applied_message, domain_hash: None, diff --git a/fendermint/vm/interpreter/src/fvm/legacy_topdown.rs b/fendermint/vm/interpreter/src/fvm/legacy_topdown.rs new file mode 100644 index 0000000000..eb4adb9aed --- /dev/null +++ b/fendermint/vm/interpreter/src/fvm/legacy_topdown.rs @@ -0,0 +1,144 @@ +// Copyright 2022-2026 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use async_stm::{atomically, atomically_or_err}; +use fendermint_tracing::emit; +use fendermint_vm_event::ParentFinalityMissingQuorum; +use fendermint_vm_message::chain::ChainMessage; +use fendermint_vm_message::ipc::{IpcMessage, ParentFinality}; +use fendermint_vm_topdown::proxy::IPCProviderProxyWithLatency; +use fendermint_vm_topdown::voting::ValidatorKey; +use fendermint_vm_topdown::voting::VoteTally; +use fendermint_vm_topdown::{ + BlockHeight, CachedFinalityProvider, IPCParentFinality, ParentFinalityProvider, + ParentViewProvider, Toggle, +}; +use fvm_shared::clock::ChainEpoch; +use std::sync::Arc; + +use crate::fvm::end_block_hook::PowerUpdates; + +type TopDownFinalityProvider = Arc>>; + +/// Legacy (vote-based) parent finality handler. +/// +/// Encapsulates all vote/quorum/provider logic; `TopDownManager` should orchestrate execution. 
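+///
+/// A proposal is only produced once a gossip vote quorum exists; the proposed height is the
+/// lower of the provider's next proposal and the quorum height.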
+#[derive(Clone)] +pub struct LegacyTopDownHandler { + provider: TopDownFinalityProvider, + votes: VoteTally, +} + +impl LegacyTopDownHandler { + pub fn new(provider: TopDownFinalityProvider, votes: VoteTally) -> Self { + Self { provider, votes } + } + + pub fn is_enabled(&self) -> bool { + self.provider.is_enabled() + } + + pub fn genesis_epoch(&self) -> anyhow::Result { + self.provider.genesis_epoch() + } + + pub async fn attest(&self, finality: ParentFinality) -> bool { + let prop = IPCParentFinality { + height: finality.height as u64, + block_hash: finality.block_hash, + }; + atomically(|| self.provider.check_proposal(&prop)).await + } + + pub async fn update_voting_power_table(&self, power_updates: &PowerUpdates) { + let power_updates_mapped: Vec<_> = power_updates + .0 + .iter() + .map(|v| (ValidatorKey::from(v.public_key.0), v.power.0)) + .collect(); + + atomically(|| self.votes.update_power_table(power_updates_mapped.clone())).await + } + + pub async fn chain_message_for_proposal(&self) -> Option { + tracing::debug!("using legacy voting-based finality"); + self.chain_message_from_finality_or_quorum().await + } + + pub async fn validator_changes_from( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> anyhow::Result> { + self.provider.validator_changes_from(from, to).await + } + + pub async fn top_down_msgs_from( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> anyhow::Result> { + self.provider.top_down_msgs_from(from, to).await + } + + pub async fn on_finality_executed( + &self, + finality: IPCParentFinality, + proposer: Option<&str>, + local_block_height: u64, + ) -> anyhow::Result<()> { + atomically_or_err(|| { + self.provider.set_new_finality(finality.clone())?; + self.votes.set_finalized( + finality.height, + finality.block_hash.clone(), + proposer, + Some(local_block_height), + )?; + Ok(()) + }) + .await + } + + async fn chain_message_from_finality_or_quorum(&self) -> Option { + atomically(|| self.votes.pause_votes_until_find_quorum()).await; + + let (parent, quorum) = atomically(|| { + let parent = self.provider.next_proposal()?; + + let quorum = self + .votes + .find_quorum()? 
+ .map(|(height, block_hash)| IPCParentFinality { height, block_hash }); + + Ok((parent, quorum)) + }) + .await; + + let parent = parent?; + + let quorum = if let Some(quorum) = quorum { + quorum + } else { + emit!( + DEBUG, + ParentFinalityMissingQuorum { + block_height: parent.height, + block_hash: &hex::encode(&parent.block_hash), + } + ); + return None; + }; + + let finality = if parent.height <= quorum.height { + parent + } else { + quorum + }; + + Some(ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality { + height: finality.height as ChainEpoch, + block_hash: finality.block_hash, + }))) + } +} diff --git a/fendermint/vm/interpreter/src/fvm/mod.rs b/fendermint/vm/interpreter/src/fvm/mod.rs index 762c8b696a..70adf2a704 100644 --- a/fendermint/vm/interpreter/src/fvm/mod.rs +++ b/fendermint/vm/interpreter/src/fvm/mod.rs @@ -4,11 +4,15 @@ pub mod constants; mod executions; mod externs; +pub mod f3_topdown; pub mod interpreter; +pub mod legacy_topdown; pub mod observe; pub mod state; pub mod store; pub mod topdown; +pub use f3_topdown::F3TopDownHandler; +pub use legacy_topdown::LegacyTopDownHandler; pub mod upgrades; pub use interpreter::FvmMessagesInterpreter; @@ -17,6 +21,7 @@ pub mod bundle; pub mod activity; pub mod end_block_hook; +pub mod event_extraction; pub(crate) mod gas; pub(crate) mod gas_estimation; diff --git a/fendermint/vm/interpreter/src/fvm/observe.rs b/fendermint/vm/interpreter/src/fvm/observe.rs index e714981ca4..dd190ea436 100644 --- a/fendermint/vm/interpreter/src/fvm/observe.rs +++ b/fendermint/vm/interpreter/src/fvm/observe.rs @@ -8,8 +8,9 @@ use ipc_observability::{ }; use prometheus::{ - register_histogram, register_int_counter, register_int_gauge, register_int_gauge_vec, - Histogram, IntCounter, IntGauge, IntGaugeVec, Registry, + register_histogram, register_histogram_vec, register_int_counter, register_int_counter_vec, + register_int_gauge, register_int_gauge_vec, Histogram, HistogramVec, IntCounter, IntCounterVec, + IntGauge, IntGaugeVec, Registry, }; use fvm_shared::message::Message; @@ -38,6 +39,17 @@ register_metrics! { ); BOTTOMUP_CHECKPOINT_FINALIZED_HEIGHT: IntGauge = register_int_gauge!("bottomup_checkpoint_finalized_height", "Height of the checkpoint finalized"); + + F3_TOPDOWN_CACHE_WAIT_TOTAL: IntCounterVec = register_int_counter_vec!( + "f3_topdown_cache_wait_total", + "Number of times the node waited for the local F3 proof cache during top-down execution", + &["status"] + ); + F3_TOPDOWN_CACHE_WAIT_SECS: HistogramVec = register_histogram_vec!( + "f3_topdown_cache_wait_secs", + "Seconds spent waiting for the local F3 proof cache during top-down execution", + &["status"] + ); } impl_traceables!(TraceLevel::Info, "Execution", MsgExec); @@ -82,6 +94,13 @@ impl_traceables!( CheckpointFinalized ); +impl_traceables!( + TraceLevel::Error, + "Topdown", + F3CacheWaitStuck +); +impl_traceables!(TraceLevel::Info, "Topdown", F3CacheWaitRecovered); + #[derive(Debug)] pub struct CheckpointCreated { pub height: u64, @@ -133,6 +152,42 @@ impl Recordable for CheckpointFinalized { } } +#[derive(Debug)] +pub struct F3CacheWaitStuck { + pub epoch: u64, + pub waited_secs: f64, +} + +impl Recordable for F3CacheWaitStuck { + fn record_metrics(&self) { + F3_TOPDOWN_CACHE_WAIT_TOTAL + .with_label_values(&["stuck"]) + .inc(); + F3_TOPDOWN_CACHE_WAIT_SECS + .with_label_values(&["stuck"]) + .observe(self.waited_secs); + } +} + +// NOTE: We intentionally do not have a one-shot "timeout" event. Execution waits indefinitely. 
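+// While execution remains blocked, `F3CacheWaitStuck` is re-emitted periodically;
+// `F3CacheWaitRecovered` is emitted once when the missing cache entry becomes available.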
+ +#[derive(Debug)] +pub struct F3CacheWaitRecovered { + pub epoch: u64, + pub waited_secs: f64, +} + +impl Recordable for F3CacheWaitRecovered { + fn record_metrics(&self) { + F3_TOPDOWN_CACHE_WAIT_TOTAL + .with_label_values(&["recovered"]) + .inc(); + F3_TOPDOWN_CACHE_WAIT_SECS + .with_label_values(&["recovered"]) + .observe(self.waited_secs); + } +} + #[cfg(test)] mod tests { use super::*; @@ -185,5 +240,14 @@ mod tests { hash: HexEncodableBlockHash(hash.clone()), validator: Address::new_id(1), }); + + emit(F3CacheWaitStuck { + epoch: 1, + waited_secs: 120.0, + }); + emit(F3CacheWaitRecovered { + epoch: 1, + waited_secs: 2.0, + }); } } diff --git a/fendermint/vm/interpreter/src/fvm/state/ipc.rs b/fendermint/vm/interpreter/src/fvm/state/ipc.rs index 52f55dde81..9665d33747 100644 --- a/fendermint/vm/interpreter/src/fvm/state/ipc.rs +++ b/fendermint/vm/interpreter/src/fvm/state/ipc.rs @@ -1,16 +1,17 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::Context; +use anyhow::{bail, Context}; use fvm_ipld_blockstore::Blockstore; use fvm_shared::econ::TokenAmount; use fvm_shared::ActorID; +use num_traits::Zero; use fendermint_crypto::PublicKey; use fendermint_vm_actor_interface::ipc; use fendermint_vm_actor_interface::{ - eam::EthAddress, init::builtin_actor_eth_addr, ipc::GATEWAY_ACTOR_ID, + eam::EthAddress, f3_light_client, init::builtin_actor_eth_addr, ipc::GATEWAY_ACTOR_ID, system, }; use fendermint_vm_genesis::{Collateral, Power, PowerScale, Validator, ValidatorKey}; use fendermint_vm_message::conv::from_eth; @@ -149,6 +150,24 @@ impl GatewayCaller { self.getter.call(state, |c| c.get_current_membership()) } + /// Return the next top-down message nonce expected to be applied by this gateway. + /// + /// This is the L2 execution cursor enforced when applying incoming top-down cross messages. + pub fn applied_top_down_nonce(&self, state: &mut FvmExecState) -> anyhow::Result { + self.getter.call(state, |c| c.applied_top_down_nonce()) + } + + /// Return the validator tracker configuration numbers (next, start). + /// + /// `nextConfigurationNumber` advances as power-change requests are stored on the gateway. + pub fn tracker_configuration_numbers( + &self, + state: &mut FvmExecState, + ) -> anyhow::Result<(u64, u64)> { + self.topdown + .call(state, |c| c.get_tracker_configuration_numbers()) + } + /// Get the current power table, which is the same as the membership but parsed into domain types. pub fn current_power_table( &self, @@ -297,3 +316,267 @@ fn membership_to_power_table( pt } + +/// Caller for the F3 Light Client actor +/// +/// This actor is responsible for: +/// - Storing finalized F3 instance state (instance ID, validator power table) +/// - Validator power table +#[derive(Clone)] +pub struct F3LightClientCaller { + actor_id: ActorID, +} + +impl F3LightClientCaller { + pub fn new() -> Self { + Self { + actor_id: f3_light_client::F3_LIGHT_CLIENT_ACTOR_ID, + } + } + + /// Update the F3 light client state after verifying a proof bundle. + /// + /// This should be called after successfully executing a proof-based topdown finality message. 
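+ ///
+ /// The update is applied via an implicit message sent from the system actor (see
+ /// `execute_implicit` below).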
+ pub fn update_state( + &self, + state: &mut FvmExecState, + processed_instance_id: u64, + power_table: Vec, + ) -> anyhow::Result<()> { + let method_num = f3_light_client::Method::UpdateState as u64; + + let params = f3_light_client::UpdateStateParams { + processed_instance_id, + power_table, + }; + + let params_bytes = + fvm_ipld_encoding::to_vec(¶ms).context("failed to serialize update params")?; + + let msg = fvm_shared::message::Message { + version: Default::default(), + from: fvm_shared::address::Address::new_id(system::SYSTEM_ACTOR_ID), + to: fvm_shared::address::Address::new_id(self.actor_id), + sequence: 0, + value: TokenAmount::zero(), + method_num, + params: fvm_ipld_encoding::RawBytes::new(params_bytes), + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + let (ret, _) = state + .execute_implicit(msg) + .context("failed to execute F3 light client update")?; + + if let Some(err) = &ret.failure_info { + bail!( + "F3 light client update failed (exit code {}): {}", + ret.msg_receipt.exit_code.value(), + err + ); + } + + Ok(()) + } + + /// Get the current F3 instance state from the light client actor. + pub fn get_state( + &self, + state: &mut FvmExecState, + ) -> anyhow::Result { + let method_num = f3_light_client::Method::GetState as u64; + + let msg = fvm_shared::message::Message { + version: Default::default(), + from: fvm_shared::address::Address::new_id(system::SYSTEM_ACTOR_ID), + to: fvm_shared::address::Address::new_id(self.actor_id), + sequence: 0, + value: TokenAmount::zero(), + method_num, + params: fvm_ipld_encoding::RawBytes::default(), + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + // Read-only execution still requires `&mut FvmExecState` (it uses interior caches), but + // any effects are reverted by the executor. + let (ret, _) = state + .execute_read_only(msg) + .context("failed to execute F3 light client get_state")?; + + if let Some(err) = &ret.failure_info { + bail!( + "F3 light client get_state failed (exit code {}): {}", + ret.msg_receipt.exit_code.value(), + err + ); + } + + let state_response: f3_light_client::GetStateResponse = + fvm_ipld_encoding::from_slice(&ret.msg_receipt.return_data.bytes()) + .context("failed to deserialize F3 light client state")?; + + Ok(state_response) + } +} + +#[cfg(test)] +mod tests { + use super::F3LightClientCaller; + use crate::fvm::state::genesis::FvmGenesisState; + use crate::fvm::store::memory::MemoryBlockstore; + use anyhow::Context; + use fendermint_vm_actor_interface::{f3_light_client, gas_market, init, system}; + use fendermint_vm_core::Timestamp; + use fendermint_vm_genesis::PowerScale; + use fvm::engine::MultiEngine; + use fvm_shared::clock::ChainEpoch; + use fvm_shared::econ::TokenAmount; + use fvm_shared::version::NetworkVersion; + use num_traits::Zero; + use std::collections::BTreeSet; + use std::sync::Arc; + + #[tokio::test] + async fn f3_light_client_caller_roundtrip_update_and_get_state() -> anyhow::Result<()> { + // Build a minimal genesis state with the built-in bundle, plus the custom F3 actor. + let store = MemoryBlockstore::new(); + let multi_engine = Arc::new(MultiEngine::new(1)); + let mut genesis_state = FvmGenesisState::new( + store, + multi_engine, + actors_builtin_car::CAR, + actors_custom_car::CAR, + ) + .await + .context("failed to create FVM genesis state")?; + + // System actor (required so the FVM can load the builtin actor manifest). 
+ genesis_state + .create_builtin_actor( + system::SYSTEM_ACTOR_CODE_ID, + system::SYSTEM_ACTOR_ID, + &system::State { + builtin_actors: genesis_state.manifest_data_cid, + }, + TokenAmount::zero(), + None, + ) + .context("failed to create system actor")?; + + // Init actor (safe default for message execution environment). + let (init_state, _addr_to_id) = init::State::new( + genesis_state.store(), + "test".to_string(), + &[], + &BTreeSet::new(), + 0, + ) + .context("failed to create init state")?; + genesis_state + .create_builtin_actor( + init::INIT_ACTOR_CODE_ID, + init::INIT_ACTOR_ID, + &init_state, + TokenAmount::zero(), + None, + ) + .context("failed to create init actor")?; + + // Gas market custom actor: required by BlockGasTracker initialization. + let gas_market_state = fendermint_actor_gas_market_eip1559::State { + base_fee: TokenAmount::from_atto(100), + constants: fendermint_actor_gas_market_eip1559::Constants::default(), + }; + genesis_state + .create_custom_actor( + fendermint_actor_gas_market_eip1559::ACTOR_NAME, + gas_market::GAS_MARKET_ACTOR_ID, + &gas_market_state, + TokenAmount::zero(), + None, + ) + .context("failed to create gas market actor")?; + + // Create the F3 light client custom actor. + let instance_id = 10u64; + let _base_epoch: ChainEpoch = 1234; + let power_table = vec![ + fendermint_actor_f3_light_client::types::PowerEntry { + id: 1, + public_key: vec![1, 2, 3], + power_be: vec![100], + }, + fendermint_actor_f3_light_client::types::PowerEntry { + id: 2, + public_key: vec![4, 5, 6], + power_be: vec![200], + }, + ]; + let f3_state = fendermint_actor_f3_light_client::state::State::new( + genesis_state.store(), + instance_id, + power_table.clone(), + ) + .context("failed to create F3 light client actor state")?; + genesis_state + .create_custom_actor( + fendermint_actor_f3_light_client::F3_LIGHT_CLIENT_ACTOR_NAME, + f3_light_client::F3_LIGHT_CLIENT_ACTOR_ID, + &f3_state, + TokenAmount::zero(), + None, + ) + .context("failed to create F3 light client actor")?; + + // Initialize execution params (required for executing implicit/read-only messages). + genesis_state + .init_exec_state( + Timestamp(1), + NetworkVersion::V21, + TokenAmount::from_atto(100), + TokenAmount::zero(), + 1, + 0 as PowerScale, + ) + .context("failed to init exec state")?; + + let mut exec_state = genesis_state + .into_exec_state() + .map_err(|_| anyhow::anyhow!("genesis exec state missing"))?; + + let caller = F3LightClientCaller::new(); + + // Round-trip: read initial actor state. + let state0 = caller.get_state(&mut exec_state)?; + assert_eq!(state0.processed_instance_id, instance_id); + // Epoch cursor is stored in the gateway; the actor no longer tracks finalized height. + assert_eq!(state0.power_table.len(), power_table.len()); + assert_eq!(state0.power_table[0].id, 1); + + // Update state and read again. + let new_instance = instance_id + 1; + let new_power_table = vec![f3_light_client::PowerEntry { + id: 99, + public_key: vec![9u8; 48], + power_be: vec![0x03, 0xE7], + }]; + caller + .update_state(&mut exec_state, new_instance, new_power_table.clone()) + .context("failed to update F3LightClientActor state")?; + + let state1 = caller.get_state(&mut exec_state)?; + assert_eq!(state1.processed_instance_id, new_instance); + // Epoch cursor is stored in the gateway; the actor no longer tracks finalized height. + assert_eq!(state1.power_table, new_power_table); + + // Also sanity-check that read-only exec doesn't mutate the actor (it reverts effects). 
+ let state2 = caller.get_state(&mut exec_state)?; + assert_eq!(state2, state1); + + Ok(()) + } +} diff --git a/fendermint/vm/interpreter/src/fvm/state/query.rs b/fendermint/vm/interpreter/src/fvm/state/query.rs index e555bcdd91..547dd5f2ce 100644 --- a/fendermint/vm/interpreter/src/fvm/state/query.rs +++ b/fendermint/vm/interpreter/src/fvm/state/query.rs @@ -108,7 +108,7 @@ where } /// If we know the query is over the state, cache the state tree. - async fn with_exec_state(self, f: F) -> anyhow::Result<(Self, T)> + pub async fn with_exec_state(self, f: F) -> anyhow::Result<(Self, T)> where F: FnOnce(&mut FvmExecState>) -> anyhow::Result, { diff --git a/fendermint/vm/interpreter/src/fvm/topdown.rs b/fendermint/vm/interpreter/src/fvm/topdown.rs index 4fb6c9a6c9..75d5a4dc7b 100644 --- a/fendermint/vm/interpreter/src/fvm/topdown.rs +++ b/fendermint/vm/interpreter/src/fvm/topdown.rs @@ -1,21 +1,9 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use async_stm::atomically; -use fendermint_tracing::emit; -use fendermint_vm_event::ParentFinalityMissingQuorum; use fendermint_vm_message::chain::ChainMessage; -use fendermint_vm_message::ipc::IpcMessage; use fendermint_vm_message::ipc::ParentFinality; -use fendermint_vm_topdown::proxy::IPCProviderProxyWithLatency; -use fendermint_vm_topdown::voting::ValidatorKey; -use fendermint_vm_topdown::voting::VoteTally; -use fendermint_vm_topdown::{ - BlockHeight, CachedFinalityProvider, IPCParentFinality, ParentFinalityProvider, - ParentViewProvider, Toggle, -}; -use fvm_shared::clock::ChainEpoch; -use std::sync::Arc; +use fendermint_vm_topdown::{BlockHeight, IPCParentFinality}; use crate::fvm::state::ipc::GatewayCaller; use crate::fvm::state::FvmExecState; @@ -23,116 +11,307 @@ use anyhow::{bail, Context}; use fvm_ipld_blockstore::Blockstore; use crate::fvm::end_block_hook::PowerUpdates; +use crate::fvm::f3_topdown::{F3TopDownError, F3TopDownHandler}; +use crate::fvm::legacy_topdown::LegacyTopDownHandler; +use crate::fvm::observe::{F3CacheWaitRecovered, F3CacheWaitStuck}; use crate::fvm::state::ipc::tokens_to_mint; use crate::types::AppliedMessage; use ipc_api::cross::IpcEnvelope; +use ipc_observability::emit; + +#[derive(Clone, Debug)] +pub struct F3ExecutionCacheRetryConfig { + pub backoff_initial: std::time::Duration, + pub backoff_max: std::time::Duration, + /// After this much waiting, emit an error-severity event/log to surface that block execution + /// is blocked on a missing local proof-cache entry. Execution will still keep retrying. 
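+ /// Once this threshold is crossed, the stuck signal repeats every `error_after`.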
+ pub critical_after: std::time::Duration, + pub error_after: std::time::Duration, +} -type TopDownFinalityProvider = Arc>>; +impl Default for F3ExecutionCacheRetryConfig { + fn default() -> Self { + Self { + backoff_initial: std::time::Duration::from_millis(200), + backoff_max: std::time::Duration::from_secs(5), + critical_after: std::time::Duration::from_secs(10 * 60), + error_after: std::time::Duration::from_secs(2 * 60), + } + } +} + +#[derive(Clone)] +pub enum TopDownFinalityHandler { + Disabled, + Legacy(LegacyTopDownHandler), + F3(F3TopDownHandler), +} #[derive(Clone)] pub struct TopDownManager where DB: Blockstore + Clone + 'static + Send + Sync, { - provider: TopDownFinalityProvider, - votes: VoteTally, + finality: TopDownFinalityHandler, // Gateway caller for IPC gateway interactions gateway_caller: GatewayCaller, + f3_execution_cache_retry: F3ExecutionCacheRetryConfig, } impl TopDownManager where DB: Blockstore + Clone + 'static + Send + Sync, { - pub fn new(provider: TopDownFinalityProvider, votes: VoteTally) -> Self { + fn is_cache_miss(err: &anyhow::Error) -> bool { + err.chain().any(|cause| { + matches!( + cause.downcast_ref::(), + Some(F3TopDownError::CacheMiss { .. }) + ) + }) + } + + /// Extract top-down effects, retrying on local proof-cache misses up to a bounded timeout. + /// + /// This is used during block execution (catch-up): a node might not have had the local cache + /// entry during attestation (so it didn't vote), but it still needs to be able to apply the + /// committed block once the proof-service catches up. + async fn extract_top_down_effects_retry_cache_miss( + retry: &F3ExecutionCacheRetryConfig, + f3: &F3TopDownHandler, + msg: &fendermint_vm_message::ipc::ParentFinalityWithCert, + ) -> anyhow::Result { + use std::time::Instant; + use tokio::time::sleep; + + // Tuning: + // - critical_after controls when we start emitting an error-severity signal. + // - error_after controls how often we repeat that signal once we're in the critical state. + let mut backoff = retry.backoff_initial; + let max_backoff = retry.backoff_max; + let critical_after = retry.critical_after; + let error_after = retry.error_after; + let mut next_error_log_at = critical_after; + let start = Instant::now(); + let mut saw_cache_miss = false; + let mut entered_critical = false; + + loop { + match f3.extract_top_down_effects(msg) { + Ok(v) => { + if saw_cache_miss { + emit(F3CacheWaitRecovered { + epoch: msg.height as u64, + waited_secs: start.elapsed().as_secs_f64(), + }); + } + return Ok(v); + } + Err(e) if Self::is_cache_miss(&e) => { + saw_cache_miss = true; + let waited = start.elapsed(); + // Don't abort execution on cache wait: keep retrying forever. + // Once we cross `critical_after`, switch into a "critical" state where we emit + // an error-severity signal periodically. 
+ if waited >= critical_after && !entered_critical { + entered_critical = true; + tracing::error!( + height = msg.height, + waited = ?waited, + critical_after = ?critical_after, + "still missing local proof cache entry after critical_after; continuing to wait" + ); + } else if waited >= next_error_log_at { + if entered_critical { + tracing::error!( + height = msg.height, + waited = ?waited, + "still missing local proof cache entry; node cannot execute parent-finality-with-cert yet" + ); + emit(F3CacheWaitStuck { + epoch: msg.height as u64, + waited_secs: waited.as_secs_f64(), + }); + next_error_log_at += error_after; + } + } else { + tracing::warn!( + height = msg.height, + waited = ?waited, + retry_in = ?backoff, + "missing local proof cache entry; waiting for proof-service to fill cache" + ); + } + sleep(backoff).await; + backoff = std::cmp::min(backoff * 2, max_backoff); + } + Err(e) => return Err(e), + } + } + } + + pub fn new(finality: TopDownFinalityHandler) -> Self { Self { - provider, - votes, + finality, gateway_caller: GatewayCaller::default(), + f3_execution_cache_retry: Default::default(), } } - pub async fn is_finality_valid(&self, finality: ParentFinality) -> bool { - let prop = IPCParentFinality { - height: finality.height as u64, - block_hash: finality.block_hash, - }; - atomically(|| self.provider.check_proposal(&prop)).await + pub fn disabled() -> Self { + Self::new(TopDownFinalityHandler::Disabled) } - /// Prepares a top-down execution message based on the current parent's finality proposal and quorum. - /// - /// This function first pauses incoming votes to prevent interference during processing. It then atomically retrieves - /// both the next parent's proposal and the quorum of votes. If either the parent's proposal or the quorum is missing, - /// the function returns `None`. When both are available, it selects the finality with the lower block height and wraps - /// it into a `ChainMessage` for top-down execution. - pub async fn chain_message_from_finality_or_quorum(&self) -> Option { - // Prepare top down proposals. - // Before we try to find a quorum, pause incoming votes. This is optional but if there are lots of votes coming in it might hold up proposals. - atomically(|| self.votes.pause_votes_until_find_quorum()).await; - - // The pre-requisite for proposal is that there is a quorum of gossiped votes at that height. - // The final proposal can be at most as high as the quorum, but can be less if we have already, - // hit some limits such as how many blocks we can propose in a single step. - let (parent, quorum) = atomically(|| { - let parent = self.provider.next_proposal()?; - - let quorum = self - .votes - .find_quorum()? - .map(|(height, block_hash)| IPCParentFinality { height, block_hash }); - - Ok((parent, quorum)) - }) - .await; + pub fn legacy(handler: LegacyTopDownHandler) -> Self { + Self::new(TopDownFinalityHandler::Legacy(handler)) + } - // If there is no parent proposal, exit early. - let parent = parent?; + pub fn f3(handler: F3TopDownHandler) -> Self { + Self::new(TopDownFinalityHandler::F3(handler)) + } - // Require a quorum; if it's missing, log and exit. 
- let quorum = if let Some(quorum) = quorum { - quorum - } else { - emit!( - DEBUG, - ParentFinalityMissingQuorum { - block_height: parent.height, - block_hash: &hex::encode(&parent.block_hash), - } - ); - return None; - }; + pub fn f3_with_retry_config( + handler: F3TopDownHandler, + retry: F3ExecutionCacheRetryConfig, + ) -> Self { + let mut m = Self::new(TopDownFinalityHandler::F3(handler)); + m.f3_execution_cache_retry = retry; + m + } - // Choose the lower height between the parent's proposal and the quorum. - let finality = if parent.height <= quorum.height { - parent - } else { - quorum - }; + pub async fn attest_legacy(&self, finality: ParentFinality) -> bool { + match &self.finality { + TopDownFinalityHandler::Legacy(h) => h.attest(finality).await, + TopDownFinalityHandler::F3(_) | TopDownFinalityHandler::Disabled => false, + } + } - Some(ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality { - height: finality.height as ChainEpoch, - block_hash: finality.block_hash, - }))) + /// Get the chain message for parent finality proposal. + /// + /// This method encapsulates the decision of which finality mechanism to use: + /// - If configured for legacy: use legacy voting-based finality + /// - If configured for F3: use F3 proof-based finality (no fallback) + /// + /// The caller doesn't need to know which mechanism is being used. + pub async fn chain_message_for_proposal(&self) -> Option { + match &self.finality { + TopDownFinalityHandler::Disabled => None, + TopDownFinalityHandler::Legacy(h) => h.chain_message_for_proposal().await, + TopDownFinalityHandler::F3(f3) => { + let proof_msg = f3.chain_message_from_proof_cache()?; + tracing::debug!("using F3 proof-based parent finality in proposal"); + Some(proof_msg) + } + } + } + + /// Attest a parent-finality-with-cert message during the attestation phase. + /// + /// Delegates to F3 handler if F3 is configured, otherwise returns error. + pub async fn attest_parent_finality_with_cert( + &self, + state: &mut FvmExecState, + msg: &fendermint_vm_message::ipc::ParentFinalityWithCert, + ) -> anyhow::Result<()> + where + BS: Blockstore + Clone + 'static + Send + Sync, + { + match &self.finality { + TopDownFinalityHandler::F3(f3) => f3.attest(state, msg).await, + TopDownFinalityHandler::Legacy(_) | TopDownFinalityHandler::Disabled => { + Err(anyhow::anyhow!( + "F3 not configured - cannot attest parent-finality-with-cert message" + )) + } + } } pub async fn update_voting_power_table(&self, power_updates: &PowerUpdates) { - let power_updates_mapped: Vec<_> = power_updates - .0 - .iter() - .map(|v| (ValidatorKey::from(v.public_key.0), v.power.0)) - .collect(); + if let TopDownFinalityHandler::Legacy(h) = &self.finality { + h.update_voting_power_table(power_updates).await + } + } + + /// Execute parent-finality-with-cert message. + /// Delegates F3-specific logic to F3 module, handles common top-down execution. + pub async fn execute_parent_finality_with_cert( + &self, + state: &mut FvmExecState, + msg: fendermint_vm_message::ipc::ParentFinalityWithCert, + ) -> anyhow::Result { + let f3 = match &self.finality { + TopDownFinalityHandler::F3(f3) => f3, + TopDownFinalityHandler::Legacy(_) | TopDownFinalityHandler::Disabled => { + bail!("F3 not configured - cannot execute without F3 handler") + } + }; + + // Execute F3-specific logic (certificate validation, proof extraction, state updates). + // + // This path may be hit during catch-up for a node that did not have the local proof cache + // entry during attestation. 
In that case, wait for the cache to be filled by the proof-service. + let extracted = Self::extract_top_down_effects_retry_cache_miss( + &self.f3_execution_cache_retry, + f3, + &msg, + ) + .await?; + + // Commit parent finality to gateway. + // + // The gateway expects a fixed `bytes32 blockHash`, so for Filecoin we commit the FEVM + // (Ethereum-view) block hash corresponding to this epoch, derived deterministically from + // the cached tipset key bytes for this epoch (see `F3TopDownHandler`). + let finality = + IPCParentFinality::new(msg.height as i64, extracted.parent_eth_block_hash.to_vec()); + let (prev_height, _prev_finality) = self + .commit_finality(state, finality.clone(), 0) + .await + .context("failed to commit finality")?; + + tracing::debug!( + previous_height = prev_height, + current_height = finality.height, + "committed parent finality" + ); + + // Store validator changes in gateway + self.gateway_caller + .store_validator_changes(state, extracted.validator_changes) + .context("failed to store validator changes")?; + + // Execute topdown messages + let ret = self + .execute_topdown_msgs(state, extracted.topdown_msgs) + .await + .context("failed to execute top down messages")?; + + // Finalize F3 execution only after all effects were applied successfully. + f3.finalize_after_execution(state, msg.height, extracted.instance_id) + .context("failed to finalize F3 execution")?; - atomically(|| self.votes.update_power_table(power_updates_mapped.clone())).await + tracing::info!( + height = msg.height, + "parent finality with cert executed successfully" + ); + + Ok(ret) } // TODO Karel - separate this huge function and clean up - pub async fn execute_topdown_msg( + pub async fn execute_legacy( &self, state: &mut FvmExecState, finality: ParentFinality, ) -> anyhow::Result { - if !self.provider.is_enabled() { + let legacy = match &self.finality { + TopDownFinalityHandler::Legacy(h) => h, + TopDownFinalityHandler::F3(_) => bail!("cannot execute legacy top-down: F3 enabled"), + TopDownFinalityHandler::Disabled => { + bail!("cannot execute IPC top-down message: parent provider disabled") + } + }; + if !legacy.is_enabled() { bail!("cannot execute IPC top-down message: parent provider disabled"); } @@ -144,7 +323,7 @@ where ); let (prev_height, prev_finality) = self - .commit_finality(state, finality.clone()) + .commit_finality(state, finality.clone(), legacy.genesis_epoch()?) 
.await .context("failed to commit finality")?; @@ -170,8 +349,7 @@ where let (execution_fr, execution_to) = (prev_height + 1, finality.height); // error happens if we cannot get the validator set from ipc agent after retries - let validator_changes = self - .provider + let validator_changes = legacy .validator_changes_from(execution_fr, execution_to) .await .context("failed to fetch validator changes")?; @@ -188,8 +366,7 @@ where .context("failed to store validator changes")?; // error happens if we cannot get the cross messages from ipc agent after retries - let msgs = self - .provider + let msgs = legacy .top_down_msgs_from(execution_fr, execution_to) .await .context("failed to fetch top down messages")?; @@ -214,19 +391,10 @@ where .map(|id| hex::encode(id.serialize_compressed())); let proposer_ref = proposer.as_deref(); - atomically(|| { - self.provider.set_new_finality(finality.clone())?; - - self.votes.set_finalized( - finality.height, - finality.block_hash.clone(), - proposer_ref, - Some(local_block_height), - )?; - - Ok(()) - }) - .await; + legacy + .on_finality_executed(finality.clone(), proposer_ref, local_block_height) + .await + .context("failed to record new finality")?; tracing::debug!( finality = finality.to_string(), @@ -242,6 +410,7 @@ where &self, state: &mut FvmExecState, finality: IPCParentFinality, + genesis_epoch: BlockHeight, ) -> anyhow::Result<(BlockHeight, Option)> { let (prev_height, prev_finality) = if let Some(prev_finality) = self .gateway_caller @@ -249,7 +418,7 @@ where { (prev_finality.height, Some(prev_finality)) } else { - (self.provider.genesis_epoch()?, None) + (genesis_epoch, None) }; tracing::debug!( diff --git a/fendermint/vm/interpreter/src/genesis.rs b/fendermint/vm/interpreter/src/genesis.rs index 581c75d492..1f343ecdba 100644 --- a/fendermint/vm/interpreter/src/genesis.rs +++ b/fendermint/vm/interpreter/src/genesis.rs @@ -444,17 +444,12 @@ impl<'a> GenesisBuilder<'a> { // F3 Light Client actor - manages F3 light client state for proof-based parent finality if let Some(f3_params) = &genesis.f3 { - // For subnets with F3 parameters, initialize with the provided F3 data - // Note: finalized_epochs always starts empty at genesis - let constructor_params = fendermint_actor_f3_light_client::types::ConstructorParams { - instance_id: f3_params.instance_id, - power_table: f3_params.power_table.clone(), - finalized_epochs: Vec::new(), - }; + // We treat the configured F3 `instance_id` as already committed at genesis. + // The proof-service starts fetching/validating from `instance_id + 1`. let f3_state = fendermint_actor_f3_light_client::state::State::new( - constructor_params.instance_id, - constructor_params.power_table, - constructor_params.finalized_epochs, + state.store(), + f3_params.instance_id, + f3_params.power_table.clone(), )?; state @@ -522,6 +517,27 @@ impl<'a> GenesisBuilder<'a> { config, )?; + // If we have F3 enabled, seed the gateway's parent finality cursor at genesis. + // This anchors the epoch cursor in on-chain contract state for late joiners. 
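+ // The seeded value is the F3 params' `base_epoch` and `base_epoch_eth_block_hash`,
+ // readable later via `get_latest_parent_finality` (exercised by the test below).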
+ if let Some(f3) = genesis.f3.as_ref() { + let exec_state = state + .exec_state() + .ok_or_else(|| anyhow::anyhow!("exec state not initialized in genesis builder"))?; + + let gateway = crate::fvm::state::ipc::GatewayCaller::< + crate::fvm::store::memory::MemoryBlockstore, + >::default(); + + let base_finality = fendermint_vm_topdown::IPCParentFinality::new( + f3.base_epoch, + f3.base_epoch_eth_block_hash.to_vec(), + ); + + gateway + .commit_parent_finality(exec_state, base_finality) + .context("failed to seed gateway parent finality at genesis")?; + } + Ok(out) } } @@ -792,6 +808,14 @@ pub async fn create_test_genesis_state( #[cfg(test)] mod tests { use crate::genesis::GenesisAppState; + use fendermint_crypto::SecretKey; + use fendermint_vm_genesis::ipc::{F3Params, GatewayParams, IpcParams}; + use fendermint_vm_genesis::{Collateral, Genesis, PermissionMode, Validator, ValidatorKey}; + use fendermint_vm_topdown::IPCParentFinality; + use fvm_shared::econ::TokenAmount; + use fvm_shared::version::NetworkVersion; + use ipc_api::subnet_id::SubnetID; + use rand::thread_rng; #[test] fn test_compression() { @@ -806,4 +830,65 @@ mod tests { assert_eq!(recovered, bytes); } + + #[tokio::test] + async fn test_genesis_seeds_gateway_parent_finality_when_f3_enabled() -> anyhow::Result<()> { + use crate::fvm::bundle::contracts_path; + use crate::fvm::state::ipc::GatewayCaller; + + // Minimal genesis with IPC + F3 enabled. + let mut rng = thread_rng(); + let pk = SecretKey::random(&mut rng).public_key(); + + let base_epoch = 123_i64; + let base_epoch_eth_block_hash = [0xABu8; 32]; + + let genesis = Genesis { + chain_name: "test".to_string(), + chain_id: 1234, + timestamp: fendermint_vm_core::Timestamp(0), + network_version: NetworkVersion::V21, + base_fee: TokenAmount::from_atto(0), + power_scale: 0, + validators: vec![Validator { + public_key: ValidatorKey::new(pk), + power: Collateral(TokenAmount::from_atto(1)), + }], + accounts: vec![], + eam_permission_mode: PermissionMode::Unrestricted, + ipc: Some(IpcParams { + gateway: GatewayParams::new(SubnetID::new(1234u64, vec![])), + }), + ipc_contracts_owner: ethers::types::Address::repeat_byte(0x11), + f3: Some(F3Params { + instance_id: 10, + base_epoch, + base_epoch_eth_block_hash, + power_table: vec![], + }), + }; + + let (state, _out) = crate::genesis::create_test_genesis_state( + actors_builtin_car::CAR, + actors_custom_car::CAR, + contracts_path(), + genesis, + ) + .await?; + + let mut exec_state = state + .into_exec_state() + .map_err(|_| anyhow::anyhow!("failed to convert genesis state into exec state"))?; + + let gateway = GatewayCaller::default(); + let got = gateway.get_latest_parent_finality(&mut exec_state)?; + + let want = IPCParentFinality::new(base_epoch, base_epoch_eth_block_hash.to_vec()); + assert_eq!( + got, want, + "gateway latest parent finality should be seeded from genesis.f3" + ); + + Ok(()) + } } diff --git a/fendermint/vm/message/Cargo.toml b/fendermint/vm/message/Cargo.toml index 34459becbb..93dd32e033 100644 --- a/fendermint/vm/message/Cargo.toml +++ b/fendermint/vm/message/Cargo.toml @@ -30,7 +30,9 @@ ipc-api = { path = "../../../ipc/api" } fendermint_crypto = { path = "../../crypto" } fendermint_vm_encoding = { path = "../encoding" } fendermint_vm_actor_interface = { path = "../actor_interface" } +fendermint_vm_topdown_proof_service = { path = "../topdown/proof-service" } fendermint_testing = { path = "../../testing", optional = true } +proofs = { git = "https://github.com/consensus-shipyard/ipc-filecoin-proofs.git", branch = 
"proofs" } [dev-dependencies] ethers = { workspace = true } diff --git a/fendermint/vm/message/src/ipc.rs b/fendermint/vm/message/src/ipc.rs index 8f275a1c24..29c7919816 100644 --- a/fendermint/vm/message/src/ipc.rs +++ b/fendermint/vm/message/src/ipc.rs @@ -8,9 +8,13 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] #[allow(clippy::large_enum_variant)] pub enum IpcMessage { - /// A top-down checkpoint parent finality proposal. This proposal should contain the latest parent - /// state that to be checked and voted by validators. + /// A top-down checkpoint parent finality proposal (legacy voting-based) TopDownExec(ParentFinality), + /// Parent finality proposal carrying a certificate. + /// + /// This is intentionally "WithCert" to allow future extensions where certificates and + /// integrity proof bundles are delivered as separate messages. + ParentFinalityWithCert(ParentFinalityWithCert), } /// A proposal of the parent view that validators will be voting on. @@ -22,6 +26,23 @@ pub struct ParentFinality { pub block_hash: Vec, } +/// Parent finality proposal with a certificate (v2). +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ParentFinalityWithCert { + /// The chain epoch this finality is for (height) + pub height: ChainEpoch, + /// The certificate that certifies finality (type-specific, proof is fetched from local cache) + pub certificate: Certificate, +} + +/// Certificate types (extensible for future certificate types) +/// Each variant contains the certificate data. Proofs are fetched from local cache when needed. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub enum Certificate { + /// Filecoin F3 certificate (proof bundle is fetched from local cache using instance ID) + FilecoinF3(fendermint_vm_topdown_proof_service::types::SerializableF3Certificate), +} + #[cfg(feature = "arb")] mod arb { diff --git a/fendermint/vm/topdown/proof-service/Cargo.toml b/fendermint/vm/topdown/proof-service/Cargo.toml index b5ba35a98d..597d1f0435 100644 --- a/fendermint/vm/topdown/proof-service/Cargo.toml +++ b/fendermint/vm/topdown/proof-service/Cargo.toml @@ -19,19 +19,23 @@ url = { workspace = true } base64 = { workspace = true } humantime-serde = { workspace = true } cid = { workspace = true } +hex = { workspace = true } multihash = { workspace = true } +multihash-codetable = { version = "0.1.4", features = ["blake2b"] } rocksdb = { version = "0.21", features = ["multi-threaded-cf"] } futures = { workspace = true } fvm_ipld_bitfield = "0.7.2" keccak-hash = "0.11" num-bigint = { workspace = true } +ethers = { workspace = true } +ipc_actors_abis = { path = "../../../../contract-bindings" } +fendermint_vm_evm_event_utils = { path = "../../evm-event-utils" } # Fendermint fendermint_actor_f3_light_client = { path = "../../../actors/f3-light-client" } fendermint_vm_genesis = { path = "../../genesis" } # IPC -ipc-provider = { path = "../../../../ipc/provider" } ipc-api = { path = "../../../../ipc/api" } ipc-observability = { path = "../../../../ipc/observability" } @@ -67,5 +71,5 @@ required-features = ["dev-tools"] [dev-dependencies] tokio = { workspace = true, features = ["test-util", "rt-multi-thread"] } tracing-subscriber = { workspace = true } -multihash-codetable = { version = "0.1.4", features = ["blake2b"] } tempfile = "3.8" +contracts_artifacts = { package = "contracts-artifacts", path = "../../../../contracts-artifacts" } diff --git a/fendermint/vm/topdown/proof-service/README.md 
b/fendermint/vm/topdown/proof-service/README.md index 79750d1d39..bc19c9450f 100644 --- a/fendermint/vm/topdown/proof-service/README.md +++ b/fendermint/vm/topdown/proof-service/README.md @@ -196,7 +196,8 @@ let config = ProofServiceConfig { retention_epochs: 100, }, polling_interval: Duration::from_secs(30), - ..Default::default() + max_cache_size_bytes: 100 * 1024 * 1024, // 100 MB + fallback_rpc_urls: vec![], }; // Launch service with optional persistence @@ -207,6 +208,8 @@ let (cache, handle) = launch_service( initial_epoch, initial_instance, power_table, + applied_top_down_nonce, + next_power_change_config_number, db_path, ).await?.unwrap(); @@ -411,9 +414,9 @@ Older issue with reqwest library on macOS (now fixed in upstream). ### Unit Tests -```bash -cargo test --package fendermint_vm_topdown_proof_service --lib -``` +````bash +# Unit tests +cargo test --package fendermint_vm_topdown_proof_service **Test Coverage:** @@ -424,10 +427,41 @@ cargo test --package fendermint_vm_topdown_proof_service --lib - Metrics registration ### Integration Tests - ```bash # Requires live Calibration network cargo test --package fendermint_vm_topdown_proof_service --test integration -- --ignored +```` + +### End-to-End Testing + +1. **Deploy Test Contract** (optional - for testing with TopdownMessenger): + +```bash +cd /path/to/proofs/topdown-messenger +forge create --rpc-url http://api.calibration.node.glif.io/rpc/v1 \ + --private-key $PRIVATE_KEY \ + src/TopdownMessenger.sol:TopdownMessenger +``` + +2. **Run Proof Service**: + +```bash +./target/debug/proof-cache-test run \ + --rpc-url "http://api.calibration.node.glif.io/rpc/v1" \ + --initial-instance \ + --gateway-actor-id \ + --subnet-id "your-subnet-id" \ + --poll-interval 10 \ + --lookahead 3 \ + --db-path /tmp/proof-cache-test.db +``` + +3. **Inspect Results**: + +```bash +# After stopping the service +./target/debug/proof-cache-test inspect --db-path /tmp/proof-cache-test.db +./target/debug/proof-cache-test get --db-path /tmp/proof-cache-test.db --instance-id ``` ### End-to-End Testing diff --git a/fendermint/vm/topdown/proof-service/src/assembler.rs b/fendermint/vm/topdown/proof-service/src/assembler.rs index 666128599b..a55a9c57b3 100644 --- a/fendermint/vm/topdown/proof-service/src/assembler.rs +++ b/fendermint/vm/topdown/proof-service/src/assembler.rs @@ -7,53 +7,57 @@ //! proof generation - it has no knowledge of cache entries or storage. 
use crate::observe::{OperationStatus, ProofBundleGenerated}; +use crate::storage_layout::{ + NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT, SUBNETS_MAPPING_SLOT, SUBNET_TOPDOWN_NONCE_OFFSET, +}; use crate::types::FinalizedTipset; use anyhow::{Context, Result}; +use ethers::contract::EthEvent; +use ethers::types::H256; use fvm_ipld_encoding; +use ipc_actors_abis::{lib_gateway, lib_power_change_log}; use ipc_observability::emit; +use proofs::proofs::storage::utils::compute_mapping_slot; use proofs::{ client::LotusClient, proofs::{ - calculate_storage_slot, common::bundle::UnifiedProofBundle, generate_proof_bundle, - EventProofSpec, StorageProofSpec, + common::bundle::UnifiedProofBundle, generate_proof_bundle, EventProofSpec, StorageProofSpec, }, }; use std::time::Instant; use url::Url; -// Event signatures for proof generation -// These use Solidity's canonical format (type names, not ABI encoding) -// For contract bindings, see: contract_bindings::lib_gateway::NewTopDownMessageFilter -// and contract_bindings::lib_power_change_log::NewPowerChangeRequestFilter - -/// Event signature for NewTopDownMessage from LibGateway.sol -/// Event: NewTopDownMessage(address indexed subnet, IpcEnvelope message, bytes32 indexed id) -/// Bindings: contract_bindings::lib_gateway::NewTopDownMessageFilter -pub const NEW_TOPDOWN_MESSAGE_SIGNATURE: &str = "NewTopDownMessage(address,IpcEnvelope,bytes32)"; - -/// Event signature for NewPowerChangeRequest from LibPowerChangeLog.sol -/// Event: NewPowerChangeRequest(PowerOperation op, address validator, bytes payload, uint64 configurationNumber) -/// Bindings: contract_bindings::lib_power_change_log::NewPowerChangeRequestFilter -/// This captures validator power changes that need to be reflected in the subnet -pub const NEW_POWER_CHANGE_REQUEST_SIGNATURE: &str = - "NewPowerChangeRequest(PowerOperation,address,bytes,uint64)"; - -/// Storage slot offset for topDownNonce in the Subnet struct. -/// In the Gateway actor's subnets mapping: mapping(SubnetID => Subnet) -/// The Subnet struct field layout (see contracts/contracts/structs/Subnet.sol): -/// - id (SubnetID): slot 0-1 (SubnetID has 2 fields) -/// - stake (uint256): slot 2 -/// - topDownNonce (uint64): slot 3 -/// - appliedBottomUpNonce (uint64): slot 3 (packed with topDownNonce) -/// - genesisEpoch (uint256): slot 4 -/// -/// We need the nonce to verify top-down message ordering. -const TOPDOWN_NONCE_STORAGE_OFFSET: u64 = 3; +// Event signatures for proof generation. +// +// The proofs library expects the Solidity *canonical ABI signature* string. +// Instead of hard-coding it, derive it from the contract bindings. +fn new_topdown_message_signature() -> String { + lib_gateway::NewTopDownMessageFilter::abi_signature().into_owned() +} + +fn new_power_change_request_signature() -> String { + lib_power_change_log::NewPowerChangeRequestFilter::abi_signature().into_owned() +} -/// Storage slot for nextConfigurationNumber in GatewayActorStorage -/// This is used to track configuration changes for power updates -/// Based on the storage layout, nextConfigurationNumber is at slot 20 -const NEXT_CONFIG_NUMBER_STORAGE_SLOT: u64 = 20; +// Storage slots are defined in `storage_layout.rs` (derived from Foundry `storageLayout`). 
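+//
+// For entries in the `subnets` mapping, the concrete slot is derived at runtime via
+// `compute_mapping_slot` (keccak-based mapping slot derivation over the bytes32 key and the
+// mapping's base slot), plus the struct member offset; see `build_storage_specs` below.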
+ +#[cfg(test)] +mod signature_tests { + use super::*; + use ethers::contract::EthEvent; + use proofs::proofs::common::evm::hash_event_signature; + + #[test] + fn abi_signature_strings_match_contract_bindings_topic0() { + let expected_topdown: H256 = lib_gateway::NewTopDownMessageFilter::signature(); + let got_topdown: H256 = H256(hash_event_signature(&new_topdown_message_signature())); + assert_eq!(got_topdown, expected_topdown); + + let expected_power: H256 = lib_power_change_log::NewPowerChangeRequestFilter::signature(); + let got_power: H256 = H256(hash_event_signature(&new_power_change_request_signature())); + assert_eq!(got_power, expected_power); + } +} /// Assembles proof bundles from F3 certificates and parent chain data /// @@ -79,14 +83,27 @@ impl ProofAssembler { } fn build_storage_specs(&self) -> Vec { + // Mapping key is bytes32; proofs utils only provide `compute_mapping_slot(key, slot)`. + // We use the proof library's ASCII bytes32 helper to match the existing on-chain encoding + // used by the proofs stack (topic/storage filters operate on bytes32 strings). + let key = proofs::proofs::common::evm::ascii_to_bytes32(&self.subnet_id); + let base = compute_mapping_slot(key, SUBNETS_MAPPING_SLOT); + // Struct member is at relative slot 3. + let mut slot_bytes = base; + let base_u256 = ethers::types::U256::from_big_endian(&base); + let slot_u256 = base_u256 + ethers::types::U256::from(SUBNET_TOPDOWN_NONCE_OFFSET); + slot_u256.to_big_endian(&mut slot_bytes); + vec![ StorageProofSpec { actor_id: self.gateway_actor_id, - slot: calculate_storage_slot(&self.subnet_id, TOPDOWN_NONCE_STORAGE_OFFSET), + // `subnets[].topDownNonce` + slot: H256::from(slot_bytes), }, StorageProofSpec { actor_id: self.gateway_actor_id, - slot: calculate_storage_slot("", NEXT_CONFIG_NUMBER_STORAGE_SLOT), + // Fixed storage slot (not a mapping): `validatorsTracker.changes.nextConfigurationNumber`. + slot: H256::from_low_u64_be(NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT), }, ] } @@ -94,12 +111,12 @@ impl ProofAssembler { fn build_event_specs(&self) -> Vec { vec![ EventProofSpec { - event_signature: NEW_TOPDOWN_MESSAGE_SIGNATURE.to_string(), + event_signature: new_topdown_message_signature(), topic_1: self.subnet_id.clone(), actor_id_filter: Some(self.gateway_actor_id), }, EventProofSpec { - event_signature: NEW_POWER_CHANGE_REQUEST_SIGNATURE.to_string(), + event_signature: new_power_change_request_signature(), topic_1: String::new(), actor_id_filter: Some(self.gateway_actor_id), }, @@ -111,7 +128,7 @@ impl ProofAssembler { /// LotusClient is not Send, so we create it on-demand in each async function /// rather than storing it as a field. fn create_client(&self) -> LotusClient { - LotusClient::new(self.rpc_url.clone(), None) + LotusClient::new(self.rpc_url.clone(), None::<&str>) } /// Fetch a tipset by epoch from Lotus RPC @@ -245,6 +262,23 @@ impl ProofAssembler { } } +/// Resolve an Ethereum address to a Filecoin actor ID on the parent chain. +/// +/// Used at proof-service startup when `gateway_id` is configured as an Ethereum address. 
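+///
+/// The resolved ID is the value used as `actor_id` / `actor_id_filter` in the storage and
+/// event proof specs.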
+pub async fn resolve_eth_address_to_actor_id(parent_rpc_url: &str, eth_addr: &str) -> Result { + let url = Url::parse(parent_rpc_url).context("Failed to parse parent RPC URL")?; + let client = LotusClient::new(url, None::<&str>); + let actor_id = proofs::proofs::resolve_eth_address_to_actor_id(&client, eth_addr) + .await + .with_context(|| { + format!( + "Failed to resolve gateway Ethereum address to actor id: {}", + eth_addr + ) + })?; + Ok(actor_id) +} + #[cfg(test)] mod tests { use super::*; diff --git a/fendermint/vm/topdown/proof-service/src/bin/proof-cache-test.rs b/fendermint/vm/topdown/proof-service/src/bin/proof-cache-test.rs index ee39172cc0..6e72958996 100644 --- a/fendermint/vm/topdown/proof-service/src/bin/proof-cache-test.rs +++ b/fendermint/vm/topdown/proof-service/src/bin/proof-cache-test.rs @@ -168,6 +168,8 @@ async fn run_service( initial_committed_epoch, initial_instance, power_table, + 0, + 0, db_path, ) .await? diff --git a/fendermint/vm/topdown/proof-service/src/bootstrap.rs b/fendermint/vm/topdown/proof-service/src/bootstrap.rs new file mode 100644 index 0000000000..24be0c5a4c --- /dev/null +++ b/fendermint/vm/topdown/proof-service/src/bootstrap.rs @@ -0,0 +1,45 @@ +// Copyright 2022-2025 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Bootstrap helpers for integrating the proof service with on-chain state. + +use crate::config; +use crate::PowerEntries; +use anyhow::{Context, Result}; +use ipc_api::subnet_id::SubnetID; +use num_bigint::Sign; + +/// Fetch an F3 certificate for a specific instance from the parent chain. +/// +/// Uses the F3 light client RPC (not the Lotus JSON-RPC wrapper), which supports fetching +/// certificates by instance ID. +pub async fn fetch_certificate( + parent_rpc_url: &str, + subnet_id: &SubnetID, + instance_id: u64, +) -> Result { + let network = config::f3_network_name(subnet_id); + let light_client = filecoin_f3_lightclient::LightClient::new(parent_rpc_url, &network) + .context("failed to create F3 light client")?; + light_client + .get_certificate(instance_id) + .await + .context("failed to fetch F3 certificate by instance") +} + +/// Convert the on-chain F3LightClientActor power table into GPBFT `PowerEntries`. +/// +/// This preserves participant IDs, which are required for certificate verification. +pub fn power_entries_from_actor( + entries: &[fendermint_actor_f3_light_client::types::PowerEntry], +) -> PowerEntries { + PowerEntries( + entries + .iter() + .map(|e| filecoin_f3_gpbft::PowerEntry { + id: e.id, + power: num_bigint::BigInt::from_bytes_be(Sign::Plus, &e.power_be), + pub_key: filecoin_f3_gpbft::PubKey(e.public_key.clone()), + }) + .collect(), + ) +} diff --git a/fendermint/vm/topdown/proof-service/src/cache.rs b/fendermint/vm/topdown/proof-service/src/cache.rs index f088baf294..687c279fb3 100644 --- a/fendermint/vm/topdown/proof-service/src/cache.rs +++ b/fendermint/vm/topdown/proof-service/src/cache.rs @@ -12,7 +12,9 @@ //! 
reference the same certificate use crate::config::CacheConfig; -use crate::observe::{ProofCached, CACHE_HIT_TOTAL, CACHE_SIZE}; +use crate::observe::{ + OperationStatus, ProofCacheAtomicWrite, ProofCached, CACHE_HIT_TOTAL, CACHE_SIZE, +}; use crate::persistence::ProofCachePersistence; use crate::types::{CertificateEntry, EpochProofEntry, EpochProofWithCertificate}; use anyhow::{Context, Result}; @@ -115,8 +117,9 @@ impl ProofCache { /// Insert a certificate into the store pub fn insert_certificate(&self, entry: CertificateEntry) -> Result<()> { let instance_id = entry.instance_id(); - self.certificates.write().insert(instance_id, entry.clone()); + // Persist first so an error doesn't leave in-memory state ahead of disk. self.with_persistence(|p| p.save_certificate(&entry))?; + self.certificates.write().insert(instance_id, entry.clone()); tracing::debug!(instance_id, "Inserted certificate into cache"); Ok(()) } @@ -147,13 +150,7 @@ impl ProofCache { let epochs: Vec = entries.iter().map(|e| e.epoch).collect(); - { - let mut proofs = self.epoch_proofs.write(); - for entry in entries.iter() { - proofs.insert(entry.epoch, entry.clone()); - } - } - + // Persist first so an error doesn't leave in-memory state ahead of disk. self.with_persistence(|p| { for entry in &entries { p.save_epoch_proof(entry)?; @@ -161,11 +158,70 @@ impl ProofCache { Ok(()) })?; + { + let mut proofs = self.epoch_proofs.write(); + for entry in entries.iter() { + proofs.insert(entry.epoch, entry.clone()); + } + } + self.emit_cache_metrics(&epochs); tracing::debug!(?epochs, "Inserted epoch proofs into cache"); Ok(()) } + /// Insert a certificate and all of its epoch proofs into the cache, atomically on disk. + /// + /// This is the preferred API for proof generation: it avoids partial persistence (e.g. cert + /// written but only some epoch proofs) if RocksDB writes fail or the process crashes mid-write. + pub fn insert_certificate_with_epoch_proofs( + &self, + cert: CertificateEntry, + epoch_proofs: Vec, + ) -> Result<()> { + use std::time::Instant; + + let instance_id = cert.instance_id(); + let epochs: Vec = epoch_proofs.iter().map(|e| e.epoch).collect(); + + // Persist atomically first (if enabled). + let persist_start = Instant::now(); + if let Err(e) = + self.with_persistence(|p| p.save_certificate_with_epoch_proofs(&cert, &epoch_proofs)) + { + emit(ProofCacheAtomicWrite { + instance: instance_id, + epoch_count: epochs.len(), + status: OperationStatus::Failure, + latency: persist_start.elapsed().as_secs_f64(), + }); + return Err(e); + } + emit(ProofCacheAtomicWrite { + instance: instance_id, + epoch_count: epochs.len(), + status: OperationStatus::Success, + latency: persist_start.elapsed().as_secs_f64(), + }); + + // Then update in-memory structures (infallible). + self.certificates.write().insert(instance_id, cert.clone()); + if !epoch_proofs.is_empty() { + let mut proofs = self.epoch_proofs.write(); + for entry in &epoch_proofs { + proofs.insert(entry.epoch, entry.clone()); + } + self.emit_cache_metrics(&epochs); + } + + tracing::debug!( + instance_id, + epoch_count = epochs.len(), + "Inserted certificate and epoch proofs into cache" + ); + Ok(()) + } + fn emit_cache_metrics(&self, epochs: &[ChainEpoch]) { let cache_size = self.epoch_proofs.read().len(); if let Some(highest) = self.highest_cached_epoch() { @@ -260,6 +316,27 @@ impl ProofCache { self.last_committed_instance.load(Ordering::Acquire) } + /// Get the next uncommitted epoch. 
+ /// + /// Filecoin can have null rounds (epochs with no tipsets), so cached proofs may not exist for + /// `last_committed_epoch + 1`. We therefore return the smallest cached epoch strictly greater + /// than `last_committed_epoch`. + pub fn get_next_uncommitted_epoch(&self) -> Option { + let after = self.last_committed_epoch() + 1; + self.epoch_proofs + .read() + .range(after..) + .next() + .map(|(epoch, _)| *epoch) + } + + /// Get the next uncommitted proof entry (epoch + certificate) + /// Returns None if no proof is available for next epoch + pub fn get_next_uncommitted_epoch_with_cert(&self) -> Option { + let next_epoch = self.get_next_uncommitted_epoch()?; + self.get_epoch_proof_with_certificate(next_epoch) + } + /// Get the number of cached epoch proofs pub fn epoch_proof_count(&self) -> usize { self.epoch_proofs.read().len() diff --git a/fendermint/vm/topdown/proof-service/src/config.rs b/fendermint/vm/topdown/proof-service/src/config.rs index c2820d2975..6b0e2dd305 100644 --- a/fendermint/vm/topdown/proof-service/src/config.rs +++ b/fendermint/vm/topdown/proof-service/src/config.rs @@ -9,6 +9,25 @@ use std::time::Duration; const FILECOIN_MAINNET_CHAIN_ID: u64 = 314; const FILECOIN_CALIBRATION_CHAIN_ID: u64 = 314159; +/// Derive the F3 network name from the subnet root chain ID. +/// +/// This is used for interacting with the Filecoin F3 RPC. +pub fn f3_network_name(subnet_id: &SubnetID) -> String { + let root_id = subnet_id.root_id(); + + match root_id { + FILECOIN_MAINNET_CHAIN_ID => "mainnet".to_string(), + FILECOIN_CALIBRATION_CHAIN_ID => "calibrationnet".to_string(), + _ => { + tracing::warn!( + root_id, + "Unknown root chain ID for F3, defaulting to calibrationnet" + ); + "calibrationnet".to_string() + } + } +} + /// Represents a value that can be either a numeric Actor ID or an Ethereum address string. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] @@ -65,19 +84,7 @@ impl ProofServiceConfig { } pub fn f3_network_name(&self, subnet_id: &SubnetID) -> String { - let root_id = subnet_id.root_id(); - - match root_id { - FILECOIN_MAINNET_CHAIN_ID => "mainnet".to_string(), - FILECOIN_CALIBRATION_CHAIN_ID => "calibrationnet".to_string(), - _ => { - tracing::warn!( - root_id, - "Unknown root chain ID for F3, defaulting to calibrationnet" - ); - "calibrationnet".to_string() - } - } + f3_network_name(subnet_id) } } diff --git a/fendermint/vm/topdown/proof-service/src/f3_client.rs b/fendermint/vm/topdown/proof-service/src/f3_client.rs index 40cc9da2f2..c47a33e371 100644 --- a/fendermint/vm/topdown/proof-service/src/f3_client.rs +++ b/fendermint/vm/topdown/proof-service/src/f3_client.rs @@ -154,6 +154,20 @@ impl F3Client { Ok((certificate, power_table)) } + /// Snapshot the current validated state. + /// + /// Used by the proof-service to provide *all-or-nothing* semantics: + /// if proof generation fails after fetching/validating a certificate, we can roll back + /// and retry the same certificate on the next tick. + pub fn checkpoint_state(&self) -> LightClientState { + self.state.clone() + } + + /// Restore a previously checkpointed validated state. 
+ pub fn restore_state(&mut self, state: LightClientState) { + self.state = state; + } + async fn fetch_certificate(&self, instance: u64) -> Result { let fetch_start = Instant::now(); diff --git a/fendermint/vm/topdown/proof-service/src/lib.rs b/fendermint/vm/topdown/proof-service/src/lib.rs index cc8c51bb87..4b9c0ba43c 100644 --- a/fendermint/vm/topdown/proof-service/src/lib.rs +++ b/fendermint/vm/topdown/proof-service/src/lib.rs @@ -18,18 +18,22 @@ //! the same certificate. pub mod assembler; +pub mod bootstrap; pub mod cache; pub mod config; pub mod f3_client; pub mod observe; pub mod persistence; pub mod service; +pub mod storage_layout; pub mod types; pub mod verifier; // Re-export main types for convenience +pub use bootstrap::{fetch_certificate, power_entries_from_actor}; pub use cache::ProofCache; pub use config::{CacheConfig, ProofServiceConfig}; +pub use filecoin_f3_gpbft::PowerEntries; pub use service::ProofGeneratorService; pub use types::{ CertificateEntry, EpochProofEntry, EpochProofWithCertificate, SerializableF3Certificate, @@ -63,6 +67,8 @@ pub async fn launch_service( initial_committed_epoch: ChainEpoch, initial_instance: u64, initial_power_table: filecoin_f3_gpbft::PowerEntries, + initial_applied_top_down_nonce: u64, + initial_next_power_change_config_number: u64, db_path: Option, ) -> Result, tokio::task::JoinHandle<()>)>> { // Check if disabled first @@ -116,6 +122,8 @@ pub async fn launch_service( &subnet_id, initial_instance, power_table_clone, + initial_applied_top_down_nonce, + initial_next_power_change_config_number, ) .await { @@ -143,7 +151,7 @@ mod tests { let power_table = PowerEntries(vec![]); let subnet_id = SubnetID::default(); - let result = launch_service(config, subnet_id, 0, 0, power_table, None).await; + let result = launch_service(config, subnet_id, 0, 0, power_table, 0, 0, None).await; assert!(result.is_ok()); assert!(result.unwrap().is_none()); } @@ -164,7 +172,7 @@ mod tests { let power_table = PowerEntries(vec![]); let subnet_id = SubnetID::default(); - let result = launch_service(config, subnet_id, 100, 5, power_table, None).await; + let result = launch_service(config, subnet_id, 100, 5, power_table, 0, 0, None).await; assert!(result.is_ok()); let (_cache, handle) = result.unwrap().unwrap(); diff --git a/fendermint/vm/topdown/proof-service/src/observe.rs b/fendermint/vm/topdown/proof-service/src/observe.rs index 101ff8b3cd..3e25f812f7 100644 --- a/fendermint/vm/topdown/proof-service/src/observe.rs +++ b/fendermint/vm/topdown/proof-service/src/observe.rs @@ -59,6 +59,12 @@ register_metrics! 
{ = register_int_counter_vec!("proof_cache_hit_total", "Cache hits/misses", &["result"]); CACHE_INSERT_TOTAL: IntCounterVec = register_int_counter_vec!("proof_cache_insert_total", "Cache insertions", &["status"]); + CACHE_ATOMIC_WRITE_LATENCY_SECS: HistogramVec + = register_histogram_vec!( + "proof_cache_atomic_write_latency_secs", + "Latency of atomic (certificate+epoch proofs) cache writes", + &["status"] + ); } impl_traceables!( @@ -67,7 +73,8 @@ impl_traceables!( F3CertificateFetched, F3CertificateValidated, ProofBundleGenerated, - ProofCached + ProofCached, + ProofCacheAtomicWrite ); #[derive(Debug)] @@ -154,6 +161,25 @@ impl Recordable for ProofCached { } } +#[derive(Debug)] +pub struct ProofCacheAtomicWrite { + pub instance: u64, + pub epoch_count: usize, + pub status: OperationStatus, + pub latency: f64, +} + +impl Recordable for ProofCacheAtomicWrite { + fn record_metrics(&self) { + CACHE_INSERT_TOTAL + .with_label_values(&[self.status.as_str()]) + .inc(); + CACHE_ATOMIC_WRITE_LATENCY_SECS + .with_label_values(&[self.status.as_str()]) + .observe(self.latency); + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/fendermint/vm/topdown/proof-service/src/persistence.rs b/fendermint/vm/topdown/proof-service/src/persistence.rs index b61c086f4f..d6cc653ba6 100644 --- a/fendermint/vm/topdown/proof-service/src/persistence.rs +++ b/fendermint/vm/topdown/proof-service/src/persistence.rs @@ -23,7 +23,7 @@ use crate::types::{ use anyhow::{Context, Result}; use fvm_shared::clock::ChainEpoch; use proofs::proofs::common::bundle::UnifiedProofBundle; -use rocksdb::{BoundColumnFamily, Options, DB}; +use rocksdb::{BoundColumnFamily, Options, WriteBatch, WriteOptions, DB}; use std::collections::HashMap; use std::path::Path; use std::sync::Arc; @@ -72,8 +72,16 @@ impl ProofCachePersistence { .with_context(|| format!("Failed to get {} column family", name)) } + fn sync_write_options() -> WriteOptions { + let mut opts = WriteOptions::default(); + // Make successful writes durable: the WAL is synced to disk before returning. + opts.set_sync(true); + opts + } + fn init_schema(&self) -> Result<()> { let cf = self.get_cf(CF_METADATA)?; + let wopts = Self::sync_write_options(); match self.db.get_cf(&cf, KEY_SCHEMA_VERSION)? { Some(data) => { @@ -89,10 +97,11 @@ impl ProofCachePersistence { info!(version = SCHEMA_VERSION, "Verified schema version"); } None => { - self.db.put_cf( + self.db.put_cf_opt( &cf, KEY_SCHEMA_VERSION, serde_json::to_vec(&SCHEMA_VERSION)?, + &wopts, )?; info!(version = SCHEMA_VERSION, "Initialized new schema"); } @@ -103,11 +112,12 @@ impl ProofCachePersistence { pub fn save_certificate(&self, entry: &CertificateEntry) -> Result<()> { let cf = self.get_cf(CF_CERTIFICATES)?; + let wopts = Self::sync_write_options(); let key = entry.instance_id().to_be_bytes(); let value = serde_json::to_vec(&SerializableCertificateEntry::from(entry)) .context("Failed to serialize certificate entry")?; - self.db.put_cf(&cf, key, value)?; + self.db.put_cf_opt(&cf, key, value, &wopts)?; debug!( instance_id = entry.instance_id(), "Saved certificate to disk" @@ -115,6 +125,42 @@ impl ProofCachePersistence { Ok(()) } + /// Atomically save a certificate together with all its epoch proofs. + /// + /// This is used to guarantee "all-or-nothing" persistence: if any write fails, none of the + /// entries are committed to disk, avoiding partial cache state after restart. 
+ pub fn save_certificate_with_epoch_proofs( + &self, + cert: &CertificateEntry, + epoch_proofs: &[EpochProofEntry], + ) -> Result<()> { + let cf_certs = self.get_cf(CF_CERTIFICATES)?; + let cf_proofs = self.get_cf(CF_EPOCH_PROOFS)?; + let wopts = Self::sync_write_options(); + + let cert_key = cert.instance_id().to_be_bytes(); + let cert_value = serde_json::to_vec(&SerializableCertificateEntry::from(cert)) + .context("Failed to serialize certificate entry")?; + + let mut batch = WriteBatch::default(); + batch.put_cf(&cf_certs, cert_key, cert_value); + + for entry in epoch_proofs { + let key = entry.epoch.to_be_bytes(); + let value = + serde_json::to_vec(entry).context("Failed to serialize epoch proof entry")?; + batch.put_cf(&cf_proofs, key, value); + } + + self.db.write_opt(batch, &wopts)?; + debug!( + instance_id = cert.instance_id(), + epoch_count = epoch_proofs.len(), + "Saved certificate and epoch proofs to disk (atomic batch)" + ); + Ok(()) + } + pub fn load_all_certificates(&self) -> Result> { let cf = self.get_cf(CF_CERTIFICATES)?; let mut entries = Vec::new(); @@ -132,17 +178,20 @@ impl ProofCachePersistence { pub fn delete_certificate(&self, instance_id: u64) -> Result<()> { let cf = self.get_cf(CF_CERTIFICATES)?; - self.db.delete_cf(&cf, instance_id.to_be_bytes())?; + let wopts = Self::sync_write_options(); + self.db + .delete_cf_opt(&cf, instance_id.to_be_bytes(), &wopts)?; debug!(instance_id, "Deleted certificate from disk"); Ok(()) } pub fn save_epoch_proof(&self, entry: &EpochProofEntry) -> Result<()> { let cf = self.get_cf(CF_EPOCH_PROOFS)?; + let wopts = Self::sync_write_options(); let key = entry.epoch.to_be_bytes(); let value = serde_json::to_vec(entry).context("Failed to serialize epoch proof entry")?; - self.db.put_cf(&cf, key, value)?; + self.db.put_cf_opt(&cf, key, value, &wopts)?; debug!(epoch = entry.epoch, "Saved epoch proof to disk"); Ok(()) } @@ -164,7 +213,8 @@ impl ProofCachePersistence { pub fn delete_epoch_proof(&self, epoch: ChainEpoch) -> Result<()> { let cf = self.get_cf(CF_EPOCH_PROOFS)?; - self.db.delete_cf(&cf, epoch.to_be_bytes())?; + let wopts = Self::sync_write_options(); + self.db.delete_cf_opt(&cf, epoch.to_be_bytes(), &wopts)?; debug!(epoch, "Deleted epoch proof from disk"); Ok(()) } @@ -181,14 +231,6 @@ impl ProofCachePersistence { self.clear_all() } - /// Load the last committed instance ID - /// - /// Note: This information is not persisted to disk, so this always returns None. - /// The last committed state is only stored in memory in the ProofCache. - pub fn load_last_committed(&self) -> Result> { - Ok(None) - } - /// Load all entries as combined cache entries /// /// This combines certificates with their associated epoch proofs for inspection. 
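The all-or-nothing guarantee of `save_certificate_with_epoch_proofs` comes from RocksDB's `WriteBatch`: every put in the batch becomes visible together or not at all, and with `set_sync(true)` the WAL is fsynced before the call returns. A minimal standalone sketch of the same pattern (default column family and invented keys; the real code writes to the certificate and epoch-proof column families):

use rocksdb::{WriteBatch, WriteOptions, DB};

fn atomic_two_puts(db: &DB) -> Result<(), rocksdb::Error> {
    let mut batch = WriteBatch::default();
    // Both entries commit together or not at all.
    batch.put(b"certificate/42", b"...serialized certificate...");
    batch.put(b"epoch-proof/1000", b"...serialized epoch proof...");

    let mut opts = WriteOptions::default();
    // Sync the WAL so a crash right after this call cannot lose the write.
    opts.set_sync(true);
    db.write_opt(batch, &opts)
}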
@@ -225,13 +267,18 @@ impl ProofCachePersistence { fn clear_cf(&self, cf_name: &str) -> Result<()> { if let Some(cf) = self.db.cf_handle(cf_name) { + let wopts = Self::sync_write_options(); let keys: Vec> = self .db .iterator_cf(&cf, rocksdb::IteratorMode::Start) .filter_map(|r| r.ok().map(|(k, _)| k)) .collect(); - for key in keys { - self.db.delete_cf(&cf, &key)?; + if !keys.is_empty() { + let mut batch = WriteBatch::default(); + for key in keys { + batch.delete_cf(&cf, &key); + } + self.db.write_opt(batch, &wopts)?; } } Ok(()) diff --git a/fendermint/vm/topdown/proof-service/src/service.rs b/fendermint/vm/topdown/proof-service/src/service.rs index 88271f31fa..481fa40ca3 100644 --- a/fendermint/vm/topdown/proof-service/src/service.rs +++ b/fendermint/vm/topdown/proof-service/src/service.rs @@ -15,11 +15,12 @@ //! Each proof requires both parent (epoch E) and child (typically epoch E+1) because //! Filecoin stores `parentReceipts` in the child block, not the parent. -use crate::assembler::ProofAssembler; +use crate::assembler::{resolve_eth_address_to_actor_id, ProofAssembler}; use crate::cache::ProofCache; use crate::config::{GatewayId, ProofServiceConfig}; use crate::f3_client::F3Client; -use crate::types::{CertificateEntry, EpochProofEntry, FinalizedTipset}; +use crate::types::{CertificateEntry, EpochProofEntry, FinalizedTipset, FinalizedTipsets}; +use crate::verifier::ProofVerifier; use anyhow::{Context, Result}; use filecoin_f3_certs::FinalityCertificate; use filecoin_f3_gpbft::PowerEntries; @@ -33,6 +34,12 @@ pub struct ProofGeneratorService { cache: Arc, f3_client: F3Client, assembler: ProofAssembler, + verifier: ProofVerifier, + /// Cursor for continuity checks, seeded from L2 gateway state on startup. + /// + /// This allows fresh nodes to perform "storage delta vs observed event count" checks without + /// relying on local cache history. + event_number_cursor: crate::verifier::EventNumberCursor, } impl ProofGeneratorService { @@ -53,6 +60,8 @@ impl ProofGeneratorService { subnet_id: &SubnetID, initial_instance: u64, initial_power_table: PowerEntries, + initial_applied_top_down_nonce: u64, + initial_next_power_change_config_number: u64, ) -> Result { let gateway_actor_id = extract_gateway_actor_id_from_config(&config).await?; @@ -100,6 +109,11 @@ impl ProofGeneratorService { cache, f3_client, assembler, + verifier: ProofVerifier::new(subnet_id.to_string()), + event_number_cursor: crate::verifier::EventNumberCursor { + next_parent_topdown_nonce: initial_applied_top_down_nonce, + next_parent_power_change_config_number: initial_next_power_change_config_number, + }, }) } @@ -142,12 +156,30 @@ impl ProofGeneratorService { return Ok(()); } + // Provide *all-or-nothing* semantics per certificate. + // + // `fetch_next_certificate()` advances the internal F3 light-client state to the newly + // validated instance. If we fail later while generating/verifying/caching proofs, we MUST + // roll back that state; otherwise the next tick would fetch the next instance and we'd + // permanently skip this certificate, leaving a cache hole that can stall catch-up. + let checkpoint = self.f3_client.checkpoint_state(); + let Some((certificate, power_table)) = self.fetch_next_certificate().await? 
else { return Ok(()); // No certificate available, caught up with F3 }; - self.generate_proofs_for_certificate(&certificate, &power_table) - .await?; + if let Err(e) = self + .generate_proofs_for_certificate(&certificate, &power_table) + .await + { + tracing::error!( + error = %e, + instance = certificate.gpbft_instance, + "failed to generate/verify proofs for certificate; rolling back and retrying later" + ); + self.f3_client.restore_state(checkpoint); + return Err(e); + } Ok(()) } @@ -212,10 +244,16 @@ impl ProofGeneratorService { /// - E2 (using E3 as child) /// - E3 has no child in this certificate, will be proven with next certificate async fn generate_proofs_for_certificate( - &self, + &mut self, cert: &FinalityCertificate, power_table: &PowerEntries, ) -> Result<()> { + // Always cache the validated certificate entry, even if it has no provable (parent, child) + // windows. A certificate may be "base-only" (empty suffix), yet it still advances the F3 + // instance and can carry a new power table needed to validate subsequent certificates. + let rpc_endpoint = self.f3_client.rpc_endpoint().to_string(); + let cert_entry = CertificateEntry::new(cert.clone(), power_table.clone(), rpc_endpoint); + // Build (parent, child) pairs using windows - this makes the requirement explicit let tipset_pairs: Vec<_> = cert .ec_chain @@ -231,6 +269,9 @@ impl ProofGeneratorService { instance = cert.gpbft_instance, "Certificate has fewer than 2 tipsets, no (parent, child) pairs to prove" ); + self.cache + .insert_certificate_with_epoch_proofs(cert_entry, Vec::new()) + .context("failed to cache base-only certificate")?; return Ok(()); } @@ -242,9 +283,20 @@ impl ProofGeneratorService { "Generating proofs for certificate epochs" ); + // Verification needs to accept witness blocks from *both* the parent and the child tipset + // of each (parent, child) pair (receipts/state for the parent live in the child). + // Therefore, pass the whole certified chain from the certificate. + // + // Note: we still only *generate* proofs for the parent epochs via `windows(2)`, so the + // last tipset in the chain (which has no child in this certificate) is not proven yet. + let finalized_tipsets = FinalizedTipsets::from(&cert.ec_chain); + let mut epoch_proofs = Vec::with_capacity(tipset_pairs.len()); + // Seed the cursor from L2 gateway state for fresh-node restarts, then keep it updated + // across epochs and certificates. + let mut cursor: crate::verifier::EventNumberCursor = self.event_number_cursor; - // Generate proofs for each (parent, child) pair + // Generate proofs for each (parent, child) pair. // The child tipset contains `parentReceipts` which commits to the parent's execution. for (parent_tipset, child_tipset) in tipset_pairs { let parent_epoch = parent_tipset.epoch; @@ -261,6 +313,26 @@ impl ProofGeneratorService { .await .with_context(|| format!("Failed to generate proof for epoch {}", parent_epoch))?; + self.verifier + .verify_proof_bundle_with_tipsets(&proof_bundle, &finalized_tipsets) + .with_context(|| format!("Failed to verify proof for epoch {}", parent_epoch))?; + + // Additional semantic checks: + // - top-down message nonce continuity (from decoded events) + // - power change configurationNumber continuity, and consistency with proved storage slot + // Maintain a cursor across epochs within this certificate so we can detect omitted + // events at the beginning of an epoch (anchored to proved end-of-epoch storage). 
+ // + // Note: the first epoch proven in a certificate does not have a previous cursor. + self.verifier + .verify_event_number_continuity(parent_epoch, &proof_bundle, &mut cursor) + .with_context(|| { + format!( + "Nonce/config continuity check failed for epoch {}", + parent_epoch + ) + })?; + epoch_proofs.push(EpochProofEntry::new( parent_epoch, proof_bundle, @@ -269,10 +341,11 @@ impl ProofGeneratorService { } // Cache the certificate and proofs - let rpc_endpoint = self.f3_client.rpc_endpoint().to_string(); - let cert_entry = CertificateEntry::new(cert.clone(), power_table.clone(), rpc_endpoint); - self.cache.insert_certificate(cert_entry)?; - self.cache.insert_epoch_proofs(epoch_proofs)?; + self.cache + .insert_certificate_with_epoch_proofs(cert_entry, epoch_proofs)?; + + // Persist updated cursor only after successful generation + caching. + self.event_number_cursor = cursor; tracing::info!( epoch_count = epochs_to_prove.len(), @@ -298,19 +371,11 @@ async fn extract_gateway_actor_id_from_config(config: &ProofServiceConfig) -> Re match &config.gateway_id { GatewayId::ActorId(id) => Ok(*id), GatewayId::EthAddress(eth_addr) => { - resolve_eth_address_to_actor_id(eth_addr, &config.parent_rpc_url).await + resolve_eth_address_to_actor_id(&config.parent_rpc_url, eth_addr).await } } } -async fn resolve_eth_address_to_actor_id(eth_addr: &str, parent_rpc_url: &str) -> Result { - let client = proofs::client::LotusClient::new(url::Url::parse(parent_rpc_url)?, None); - let actor_id = proofs::proofs::resolve_eth_address_to_actor_id(&client, eth_addr) - .await - .with_context(|| format!("Failed to resolve gateway Ethereum address: {}", eth_addr))?; - Ok(actor_id) -} - #[cfg(test)] mod tests { use super::*; @@ -334,7 +399,8 @@ mod tests { // Note: Service creation succeeds with F3Client::new() even with a fake RPC endpoint // The actual RPC calls will fail later when the service tries to fetch certificates - let result = ProofGeneratorService::new(config, cache, &subnet_id, 0, power_table).await; + let result = + ProofGeneratorService::new(config, cache, &subnet_id, 0, power_table, 0, 0).await; assert!(result.is_ok()); } } diff --git a/fendermint/vm/topdown/proof-service/src/storage_layout.rs b/fendermint/vm/topdown/proof-service/src/storage_layout.rs new file mode 100644 index 0000000000..e8c23b9ce2 --- /dev/null +++ b/fendermint/vm/topdown/proof-service/src/storage_layout.rs @@ -0,0 +1,177 @@ +//! Verified storage layout constants for the Gateway contract. +//! +//! These are derived from the Solidity compiler `storageLayout` for `GatewayDiamond`. +//! In this repo the reliable source of `storageLayout` is the Hardhat build-info artifacts +//! under `contracts/artifacts/build-info/*.json` (not `contracts/out/*`). +//! +//! Keeping them in one place avoids "magic numbers" duplicated across assembler and checks. + +/// `GatewayActorStorage.subnets` mapping slot. +/// +/// Derived from `GatewayActorStorage` layout: `subnets` is at slot 22. +pub const SUBNETS_MAPPING_SLOT: u64 = 22; + +/// `Subnet.topDownNonce` relative slot inside the `Subnet` struct. +pub const SUBNET_TOPDOWN_NONCE_OFFSET: u64 = 3; + +/// Absolute storage slot for `GatewayActorStorage.validatorsTracker.changes.nextConfigurationNumber`. 
+/// +/// Derived from the compiled storage layout: +/// - `GatewayActorStorage.validatorsTracker` starts at slot 11 +/// - `ParentValidatorsTracker.changes` is at slot 9 +/// - `PowerChangeLog.nextConfigurationNumber` is at slot 0 +/// => 11 + 9 + 0 = 20 +pub const NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT: u64 = 20; + +#[cfg(test)] +mod tests { + use super::*; + use anyhow::{Context, Result}; + use serde_json::Value; + + fn load_gateway_diamond_storage_layout() -> Result { + // Prefer Hardhat build-info artifacts, which include Solidity `storageLayout`: + // `contracts/artifacts/build-info/*.json` + // + // We resolve paths relative to this crate so tests work regardless of cwd. + let build_info_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR")) + .join("../../../../contracts/artifacts/build-info"); + + let entries = std::fs::read_dir(&build_info_dir).with_context(|| { + format!( + "failed to read Hardhat build-info directory: {:?}", + build_info_dir + ) + })?; + + for entry in entries { + let entry = entry?; + let path = entry.path(); + if path.extension().and_then(|e| e.to_str()) != Some("json") { + continue; + } + + let bytes = + std::fs::read(&path).with_context(|| format!("failed to read {:?}", path))?; + let json: Value = match serde_json::from_slice(bytes.as_slice()) { + Ok(v) => v, + Err(_) => continue, + }; + + let layout = &json["output"]["contracts"]["contracts/GatewayDiamond.sol"] + ["GatewayDiamond"]["storageLayout"]; + if layout["storage"].as_array().is_some() && layout["types"].as_object().is_some() { + return Ok(layout.clone()); + } + } + + anyhow::bail!( + "no Hardhat build-info artifact contained GatewayDiamond storageLayout (expected under contracts/artifacts/build-info)" + ) + } + + fn parse_slot_u64(v: &Value) -> Result { + // Foundry encodes slot as a decimal string (e.g. "22"). + let s = v.as_str().context("expected storage slot to be a string")?; + Ok(s.parse::() + .with_context(|| format!("invalid slot string {s:?}"))?) + } + + #[test] + fn storage_layout_constants_match_gateway_diamond_artifact() -> Result<()> { + let layout = load_gateway_diamond_storage_layout()?; + + let storage = layout["storage"] + .as_array() + .context("storageLayout.storage must be an array")?; + let types = layout["types"] + .as_object() + .context("storageLayout.types must be an object")?; + + // Helper: find a member by label in a struct type. + fn struct_member<'a>( + types: &'a serde_json::Map, + struct_type: &str, + member_label: &str, + ) -> Result<(&'a Value, u64, &'a str)> { + let def = types + .get(struct_type) + .with_context(|| format!("missing type {struct_type} in storageLayout.types"))?; + let members = def["members"] + .as_array() + .context("type missing members array")?; + let m = members + .iter() + .find(|m| m["label"].as_str() == Some(member_label)) + .with_context(|| format!("missing member {member_label} in type {struct_type}"))?; + let slot = parse_slot_u64(&m["slot"])?; + let ty = m["type"].as_str().context("member.type must be a string")?; + Ok((def, slot, ty)) + } + + // Foundry `GatewayDiamond` artifacts may store the whole `GatewayActorStorage` under a single + // top-level storage variable (often `s`/`store`), so `subnets`/`validatorsTracker` might not + // appear as top-level labels. We locate the top-level struct which contains `subnets`. 
+ let (gateway_base_slot, gateway_struct_type) = storage + .iter() + .filter_map(|e| { + let slot = parse_slot_u64(&e["slot"]).ok()?; + let ty = e["type"].as_str()?; + let def = types.get(ty)?; + def.get("members")?.as_array()?; + Some((slot, ty.to_string())) + }) + .find(|(_, ty)| { + types + .get(ty) + .and_then(|def| def.get("members")) + .and_then(|m| m.as_array()) + .map(|members| { + members + .iter() + .any(|m| m["label"].as_str() == Some("subnets")) + }) + .unwrap_or(false) + }) + .map(|(slot, ty)| (slot, ty)) + .context("could not find GatewayActorStorage-like struct containing member=subnets")?; + + // 1) `GatewayActorStorage.subnets` mapping absolute slot. + let (_gateway_def, subnets_rel_slot, subnets_mapping_type) = + struct_member(types, &gateway_struct_type, "subnets")?; + let subnets_abs_slot = gateway_base_slot + subnets_rel_slot; + assert_eq!(subnets_abs_slot, SUBNETS_MAPPING_SLOT); + + // 2) `Subnet.topDownNonce` relative slot inside the `Subnet` struct (mapping value type). + let mapping_def = types + .get(subnets_mapping_type) + .context("missing subnets mapping type")?; + let subnet_value_type = mapping_def["value"] + .as_str() + .context("subnets mapping type missing .value")?; + let subnet_def = types + .get(subnet_value_type) + .context("missing Subnet struct type")?; + let subnet_members = subnet_def["members"] + .as_array() + .context("Subnet type missing members array")?; + let topdown_nonce_member = subnet_members + .iter() + .find(|m| m["label"].as_str() == Some("topDownNonce")) + .context("Subnet members missing topDownNonce")?; + let topdown_nonce_slot = parse_slot_u64(&topdown_nonce_member["slot"])?; + assert_eq!(topdown_nonce_slot, SUBNET_TOPDOWN_NONCE_OFFSET); + + // 3) Absolute slot for `validatorsTracker.changes.nextConfigurationNumber`. + let (_gateway_def, vt_rel_slot, vt_type) = + struct_member(types, &gateway_struct_type, "validatorsTracker")?; + let (_vt_def, changes_rel_slot, changes_type) = struct_member(types, vt_type, "changes")?; + let (_changes_def, next_cfg_rel_slot, _next_cfg_type) = + struct_member(types, changes_type, "nextConfigurationNumber")?; + + let derived_abs = gateway_base_slot + vt_rel_slot + changes_rel_slot + next_cfg_rel_slot; + assert_eq!(derived_abs, NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT); + + Ok(()) + } +} diff --git a/fendermint/vm/topdown/proof-service/src/types.rs b/fendermint/vm/topdown/proof-service/src/types.rs index de43e9d4c8..c566bb9183 100644 --- a/fendermint/vm/topdown/proof-service/src/types.rs +++ b/fendermint/vm/topdown/proof-service/src/types.rs @@ -6,8 +6,11 @@ use anyhow::{bail, Context, Result}; use filecoin_f3_certs::{FinalityCertificate, PowerTableDelta, PowerTableDiff}; use filecoin_f3_gpbft::{self, Cid, ECChain, PowerEntries, PowerEntry, SupplementalData, Tipset}; use fvm_ipld_bitfield::BitField; +use fvm_ipld_encoding::BytesSer; use fvm_shared::clock::ChainEpoch; use keccak_hash::H256; +use multihash_codetable::Code; +use multihash_codetable::MultihashDigest; use num_bigint::BigInt; use proofs::proofs::common::bundle::UnifiedProofBundle; use serde::{Deserialize, Serialize}; @@ -73,6 +76,13 @@ impl From<&ECChain> for FinalizedTipsets { } } +impl From<&[FinalizedTipset]> for FinalizedTipsets { + /// Convert from slice of already-converted `FinalizedTipset`s. 
+ fn from(tipsets: &[FinalizedTipset]) -> Self { + Self(tipsets.to_vec()) + } +} + impl TryFrom<&[proofs::client::types::ApiTipset]> for FinalizedTipsets { type Error = anyhow::Error; @@ -94,6 +104,99 @@ pub struct FinalizedTipset { pub block_cids: Vec, } +/// Derive the Ethereum JSON-RPC "blockHash" for a Filecoin tipset. +/// +/// This matches Lotus' `EthBlockHash` derivation for a tipset key: +/// \( \text{blake2b-256}(\text{DAG-CBOR bytestring}(\text{tipsetKeyBytes})) \). +/// +/// The input must be the canonically ordered concatenated block-header CIDs bytes +/// (i.e. `Tipset.key` / `FinalizedTipset.block_cids`). +pub fn eth_hash_from_tipset_key_bytes(block_cids: &[u8]) -> Result<[u8; 32]> { + let wrapped = fvm_ipld_encoding::to_vec(&BytesSer(block_cids)) + .context("failed to CBOR-encode tipset key bytes as bytestring")?; + let digest = Code::Blake2b256.digest(&wrapped); + + let mut out = [0u8; 32]; + out.copy_from_slice(digest.digest()); + Ok(out) +} + +/// Return the last provable tipset (the parent tipset of the last `(parent, child)` pair). +/// +/// Proofs are generated for parent epochs via `windows(2)` over the certified ECChain tipsets. +/// For a chain `[T0, T1, .., TN]`, the last provable parent epoch is `T(N-1)` (second-to-last tipset). +/// +/// Returns: +/// - `None` if the chain has fewer than 2 tipsets (no provable `(parent, child)` pair). +/// - `Some(&Tipset)` otherwise. +pub fn last_provable_tipset(ec_chain: &ECChain) -> Option<&Tipset> { + let mut rev = ec_chain.iter().rev(); + let _last = rev.next()?; // empty chain => None + rev.next() // len < 2 => None +} + +/// Return the last provable parent tipset, or the base tipset if the chain has no provable window. +/// +/// - If the ECChain has at least 2 tipsets, returns the last provable parent tipset (second-to-last), +/// matching `windows(2)` proof generation. +/// - If the ECChain has exactly 1 tipset (base only), returns that base tipset. +/// +/// This is useful for genesis/bootstrap cursor seeding where we treat the configured instance as +/// already committed, but the certificate may have an empty suffix. +pub fn last_provable_or_base_tipset(ec_chain: &ECChain) -> Result<&Tipset> { + let mut rev = ec_chain.iter().rev(); + let last = rev.next().context("ECChain is empty (no tipsets)")?; + Ok(rev.next().unwrap_or(last)) +} + +#[cfg(test)] +mod last_provable_tipset_tests { + use super::*; + use keccak_hash::H256; + + #[test] + fn last_provable_tipset_requires_two_tipsets() { + let empty = ECChain::new_unvalidated(vec![]); + assert!(last_provable_tipset(&empty).is_none()); + + let one = ECChain::new_unvalidated(vec![Tipset { + epoch: 1, + key: vec![0], + power_table: Cid::default(), + commitments: H256::zero(), + }]); + assert!(last_provable_tipset(&one).is_none()); + } + + #[test] + fn last_provable_or_base_tipset_allows_base_only() { + let one = ECChain::new_unvalidated(vec![Tipset { + epoch: 7, + key: vec![7], + power_table: Cid::default(), + commitments: H256::zero(), + }]); + assert_eq!(last_provable_or_base_tipset(&one).unwrap().epoch, 7); + + let two = ECChain::new_unvalidated(vec![ + Tipset { + epoch: 10, + key: vec![10], + power_table: Cid::default(), + commitments: H256::zero(), + }, + Tipset { + epoch: 11, + key: vec![11], + power_table: Cid::default(), + commitments: H256::zero(), + }, + ]); + // For [10, 11], last provable parent is 10 (second-to-last). 
+ assert_eq!(last_provable_or_base_tipset(&two).unwrap().epoch, 10); + } +} + impl FinalizedTipset { /// Verify this tipset matches another (e.g., fetched from RPC) /// diff --git a/fendermint/vm/topdown/proof-service/src/verifier.rs b/fendermint/vm/topdown/proof-service/src/verifier.rs index 1cae49132f..cc6238b644 100644 --- a/fendermint/vm/topdown/proof-service/src/verifier.rs +++ b/fendermint/vm/topdown/proof-service/src/verifier.rs @@ -11,32 +11,60 @@ //! by the F3 certificates. With the two-level cache design, proofs are verified //! against pre-merged tipsets from both the parent and child certificates. -use crate::assembler::{NEW_POWER_CHANGE_REQUEST_SIGNATURE, NEW_TOPDOWN_MESSAGE_SIGNATURE}; +use crate::storage_layout::{ + NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT, SUBNETS_MAPPING_SLOT, SUBNET_TOPDOWN_NONCE_OFFSET, +}; use crate::types::{EpochProofWithCertificate, FinalizedTipsets}; -use anyhow::Result; +use anyhow::{Context, Result}; use cid::Cid; +use ethers::abi::RawLog; +use ethers::contract::EthEvent; +use ethers::types::H256; +use fendermint_vm_evm_event_utils::{ + decode_new_power_change_request, decode_new_topdown_message, parse_u64_from_0x_word_low64, + raw_log_from_event_proof, +}; +use ipc_actors_abis::{lib_gateway, lib_power_change_log}; use proofs::proofs::common::bundle::{UnifiedProofBundle, UnifiedVerificationResult}; use proofs::proofs::events::bundle::EventProofBundle; use proofs::proofs::events::verifier::verify_event_proof; use proofs::proofs::storage::verifier::verify_storage_proof; use proofs::proofs::common::evm::{ascii_to_bytes32, extract_evm_log, hash_event_signature}; +use proofs::proofs::storage::utils::compute_mapping_slot; pub struct ProofVerifier { events: Vec>, + subnet_id: String, +} + +/// Cursor derived from *proved* end-of-epoch storage values. +/// +/// If provided across epochs, this lets us detect omitted events at the beginning of an epoch by +/// checking that the storage delta matches the number of events observed in the bundle. +#[derive(Debug, Clone, Copy)] +pub(crate) struct EventNumberCursor { + /// Next top-down message nonce on the **parent gateway** (`subnets[...].topDownNonce`) + /// after applying the epoch. + pub next_parent_topdown_nonce: u64, + /// Next power-change configuration number on the **parent gateway** + /// (`validatorsTracker.changes.nextConfigurationNumber`) after applying the epoch. + pub next_parent_power_change_config_number: u64, } impl ProofVerifier { pub fn new(subnet_id: String) -> Self { let events = vec![ vec![ - hash_event_signature(NEW_TOPDOWN_MESSAGE_SIGNATURE), + hash_event_signature(&lib_gateway::NewTopDownMessageFilter::abi_signature()), ascii_to_bytes32(&subnet_id), ], - vec![hash_event_signature(NEW_POWER_CHANGE_REQUEST_SIGNATURE)], + vec![hash_event_signature( + &lib_power_change_log::NewPowerChangeRequestFilter::abi_signature(), + )], ]; - Self { events } + Self { events, subnet_id } } /// Verify a inclusion proof in the proof bundle using pre-merged tipsets from certificates @@ -132,6 +160,61 @@ impl ProofVerifier { } } } + + /// Verify semantic properties of the EVM events included in a proof bundle. + /// + /// This is **not** inclusion verification (that is handled by [`ProofVerifier::verify_proof_bundle_with_tipsets`]). 
+ /// Instead, this checks properties like: + /// - contiguity of top-down event nonces + /// - contiguity of power-change configuration numbers + /// + /// All checks are anchored to proved end-of-epoch storage values: + /// - `subnets[...].topDownNonce` + /// - `validatorsTracker.changes.nextConfigurationNumber` + /// + /// If `cursor` is provided (derived from the previous epoch's proved end values), we also + /// verify that `end - prev_end == observed_count`, which detects omitted events at the + /// beginning of an epoch. + pub(crate) fn verify_event_number_continuity( + &self, + parent_epoch: i64, + bundle: &UnifiedProofBundle, + cursor: &mut EventNumberCursor, + ) -> Result<()> { + // 1) Extract values. + let mut nums = extract_epoch_event_numbers(parent_epoch, bundle) + .with_context(|| format!("failed to extract event numbers for epoch {parent_epoch}"))?; + + // 2) Verify local contiguity within the epoch. + verify_contiguous_u64(&mut nums.topdown_nonces, "top-down message nonces")?; + verify_contiguous_u64( + &mut nums.config_numbers, + "power-change configuration numbers", + )?; + + // 3) Anchor both sequences to proved "next" storage values. + // Storage holds the next nonce/config-number *after* applying the epoch. + let next_topdown = next_topdown_message_nonce_from_storage(bundle, &self.subnet_id)?; + let next_cfg = next_power_change_config_number_from_storage(bundle)?; + + verify_sequence_against_storage_next( + "top-down message nonces", + next_topdown, + Some(cursor.next_parent_topdown_nonce), + &nums.topdown_nonces, + )?; + verify_sequence_against_storage_next( + "power-change configuration numbers", + next_cfg, + Some(cursor.next_parent_power_change_config_number), + &nums.config_numbers, + )?; + + cursor.next_parent_topdown_nonce = next_topdown; + cursor.next_parent_power_change_config_number = next_cfg; + + Ok(()) + } } #[cfg(test)] @@ -144,3 +227,432 @@ mod tests { assert_eq!(verifier.events.len(), 2); } } + +// (Semantic continuity verification lives on `ProofVerifier` to access `subnet_id`.) 
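To make the anchoring rule concrete, the two checks the helpers below implement can be worked by hand (the numbers here are invented for illustration):

fn main() {
    // The previous epoch ended with a proved `next` nonce of 10; this epoch's bundle
    // carries top-down events with nonces [10, 11]; the proved end-of-epoch `next` is 12.
    let prev_next: u64 = 10;
    let nonces = [10u64, 11];
    let storage_next: u64 = 12;

    // Delta check: storage advanced by exactly the number of observed events.
    assert_eq!(storage_next - prev_next, nonces.len() as u64);
    // End anchor: the last observed nonce is `storage_next - 1`.
    assert_eq!(storage_next, nonces.last().unwrap() + 1);
    // Had the first event (nonce 10) been omitted from the bundle, the delta would
    // still be 2 while only one event was observed, and the check would fail.
}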
+fn h256_to_0x(h: H256) -> String {
+    format!("0x{}", hex::encode(h.as_bytes()))
+}
+
+fn expected_topdown_nonce_slot(subnet_id: &str) -> H256 {
+    let key = ascii_to_bytes32(subnet_id);
+    let base = compute_mapping_slot(key, SUBNETS_MAPPING_SLOT);
+    let mut slot_bytes = base;
+    let base_u256 = ethers::types::U256::from_big_endian(&base);
+    let slot_u256 = base_u256 + ethers::types::U256::from(SUBNET_TOPDOWN_NONCE_OFFSET);
+    slot_u256.to_big_endian(&mut slot_bytes);
+    H256::from(slot_bytes)
+}
+
+fn next_topdown_message_nonce_from_storage(
+    bundle: &UnifiedProofBundle,
+    subnet_id: &str,
+) -> Result<u64> {
+    let expected_slot = h256_to_0x(expected_topdown_nonce_slot(subnet_id));
+    let storage = bundle
+        .storage_proofs
+        .iter()
+        .find(|sp| sp.slot.eq_ignore_ascii_case(&expected_slot))
+        .context("missing storage proof for subnets[...].topDownNonce")?;
+    parse_u64_from_0x_word_low64(&storage.value)
+        .context("failed to parse topDownNonce from storage proof")
+}
+
+fn next_power_change_config_number_from_storage(bundle: &UnifiedProofBundle) -> Result<u64> {
+    let expected_slot = format!("0x{:064x}", NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT);
+    let storage = bundle
+        .storage_proofs
+        .iter()
+        .find(|sp| sp.slot.eq_ignore_ascii_case(&expected_slot))
+        .context("missing storage proof for nextConfigurationNumber (slot 20)")?;
+    parse_u64_from_0x_word_low64(&storage.value)
+        .context("failed to parse nextConfigurationNumber from storage proof")
+}
+
+fn verify_sequence_against_storage_next(
+    what: &str,
+    storage_next: u64,
+    prev_storage_next: Option<u64>,
+    values: &[u64],
+) -> Result<()> {
+    let count = values.len() as u64;
+
+    // If we have a previous cursor, enforce that the storage delta matches the number of
+    // observed events. This detects omitted initial events (or invented extras).
+ if let Some(prev) = prev_storage_next { + let delta = storage_next.checked_sub(prev).with_context(|| { + format!("{what} mismatch: storage_next {storage_next} < prev {prev}") + })?; + if delta != count { + anyhow::bail!( + "{what} event-count mismatch: storage_delta {delta} != observed_count {count}" + ); + } + } + + if values.is_empty() { + return Ok(()); + } + + let last = *values.last().unwrap(); + if storage_next != last + 1 { + anyhow::bail!( + "{what} mismatch: storage_next {storage_next} != last_event+1 {}", + last + 1 + ); + } + + Ok(()) +} + +fn verify_contiguous_u64(values: &mut Vec, what: &str) -> Result<()> { + if values.is_empty() { + return Ok(()); + } + values.sort_unstable(); + for w in values.windows(2) { + let a = w[0]; + let b = w[1]; + if b == a { + anyhow::bail!("{what} contains duplicate value: {a}"); + } + if b != a + 1 { + anyhow::bail!("{what} not contiguous: {a} -> {b}"); + } + } + Ok(()) +} + +#[derive(Debug, Default)] +struct EpochEventNumbers { + topdown_nonces: Vec, + config_numbers: Vec, +} + +fn extract_epoch_event_numbers( + parent_epoch: i64, + bundle: &UnifiedProofBundle, +) -> Result { + let mut out = EpochEventNumbers::default(); + + let topdown_sig: H256 = lib_gateway::NewTopDownMessageFilter::signature(); + let power_sig: H256 = lib_power_change_log::NewPowerChangeRequestFilter::signature(); + + for ep in &bundle.event_proofs { + if ep.parent_epoch != parent_epoch { + continue; + } + + let RawLog { topics, data } = raw_log_from_event_proof(ep)?; + if topics.is_empty() { + continue; + } + + if topics[0] == topdown_sig { + let decoded = decode_new_topdown_message(&RawLog { topics, data })?; + out.topdown_nonces.push(decoded.message.local_nonce); + } else if topics[0] == power_sig { + let decoded = decode_new_power_change_request(&RawLog { topics, data })?; + out.config_numbers.push(decoded.configuration_number); + } + } + + Ok(out) +} + +#[cfg(test)] +mod event_number_continuity_tests { + use super::*; + use ethers::abi::{encode, Token}; + use ethers::types::Address as EthAddress; + use ethers::types::U256; + use proofs::proofs::events::bundle::{EventData, EventProof}; + use proofs::proofs::storage::bundle::StorageProof; + + fn h256_to_0x(h: H256) -> String { + format!("0x{}", hex::encode(h.as_bytes())) + } + + fn bytes_to_0x(b: &[u8]) -> String { + format!("0x{}", hex::encode(b)) + } + + fn mk_event_proof(parent_epoch: i64, raw: RawLog) -> EventProof { + EventProof { + parent_epoch, + child_epoch: parent_epoch + 1, + parent_tipset_cids: vec!["bafy...parent".to_string()], + child_block_cid: "bafy...child".to_string(), + message_cid: "bafy...msg".to_string(), + exec_index: 0, + event_index: 0, + event_data: EventData { + emitter: 1000, + topics: raw.topics.into_iter().map(h256_to_0x).collect(), + data: bytes_to_0x(&raw.data), + }, + } + } + + fn mk_storage_proof(slot_u64: u64, value_u64: u64) -> StorageProof { + let slot = format!("0x{:064x}", slot_u64); + let mut word = [0u8; 32]; + word[24..].copy_from_slice(&value_u64.to_be_bytes()); + StorageProof { + child_epoch: 0, + child_block_cid: "bafy...child".to_string(), + parent_state_root: "bafy...state".to_string(), + actor_id: 1000, + actor_state_cid: "bafy...actor".to_string(), + storage_root: "bafy...storage".to_string(), + slot, + value: bytes_to_0x(&word), + } + } + + fn mk_storage_proof_h256(slot: H256, value_u64: u64) -> StorageProof { + let slot = h256_to_0x(slot); + let mut word = [0u8; 32]; + word[24..].copy_from_slice(&value_u64.to_be_bytes()); + StorageProof { + child_epoch: 0, + 
child_block_cid: "bafy...child".to_string(), + parent_state_root: "bafy...state".to_string(), + actor_id: 1000, + actor_state_cid: "bafy...actor".to_string(), + storage_root: "bafy...storage".to_string(), + slot, + value: bytes_to_0x(&word), + } + } + + fn topic_from_address(addr: EthAddress) -> H256 { + let mut b = [0u8; 32]; + b[12..].copy_from_slice(addr.as_bytes()); + H256(b) + } + + fn topic_from_bytes32(id: [u8; 32]) -> H256 { + H256(id) + } + + fn mk_topdown_rawlog(subnet: EthAddress, id: [u8; 32], local_nonce: u64) -> RawLog { + let sig: H256 = lib_gateway::NewTopDownMessageFilter::signature(); + let topics = vec![sig, topic_from_address(subnet), topic_from_bytes32(id)]; + + // Encode IpcEnvelope as the only non-indexed event arg. + // Tuple layout: + // (uint8 kind,uint64 localNonce,uint64 originalNonce,uint256 value, + // ((uint64,address[]),(uint8,bytes)) to, + // ((uint64,address[]),(uint8,bytes)) from, + // bytes message) + let subnet_id = Token::Tuple(vec![Token::Uint(U256::from(0u64)), Token::Array(vec![])]); + let fvm_addr = Token::Tuple(vec![Token::Uint(U256::from(0u8)), Token::Bytes(vec![])]); + let ipc_address = Token::Tuple(vec![subnet_id, fvm_addr]); + + let env = Token::Tuple(vec![ + Token::Uint(U256::from(0u8)), // kind + Token::Uint(U256::from(local_nonce)), // local_nonce + Token::Uint(U256::from(local_nonce)), // original_nonce + Token::Uint(U256::zero()), // value + ipc_address.clone(), // to + ipc_address, // from + Token::Bytes(vec![]), // message + ]); + + RawLog { + topics, + data: encode(&[env]), + } + } + + fn mk_power_change_rawlog(configuration_number: u64) -> RawLog { + let sig: H256 = lib_power_change_log::NewPowerChangeRequestFilter::signature(); + RawLog { + topics: vec![sig], + data: encode(&[ + Token::Uint(U256::from(0u8)), // op + Token::Address(EthAddress::zero()), + Token::Bytes(vec![]), + Token::Uint(U256::from(configuration_number)), + ]), + } + } + + #[test] + fn continuity_check_passes_for_contiguous_nonces_and_config_numbers() -> Result<()> { + let epoch = 100; + let verifier = ProofVerifier::new("test-subnet".to_string()); + let mut cursor = EventNumberCursor { + next_parent_topdown_nonce: 10, + next_parent_power_change_config_number: 7, + }; + + // Two topdown messages with contiguous nonces: 10, 11. + let td0 = mk_topdown_rawlog(EthAddress::random(), [7u8; 32], 10); + let td1 = mk_topdown_rawlog(EthAddress::random(), [8u8; 32], 11); + + // Power changes with contiguous configuration numbers: 7, 8. + let pc0 = mk_power_change_rawlog(7); + let pc1 = mk_power_change_rawlog(8); + + // nextConfigurationNumber after applying 2 changes should be 9. + let next_config_storage = mk_storage_proof(NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT, 9); + // topDownNonce after applying 2 messages with nonces 10,11 should be 12. 
+ let topdown_nonce_storage = + mk_storage_proof_h256(expected_topdown_nonce_slot("test-subnet"), 12); + + let bundle = UnifiedProofBundle { + storage_proofs: vec![next_config_storage, topdown_nonce_storage], + event_proofs: vec![ + mk_event_proof(epoch, td0), + mk_event_proof(epoch, td1), + mk_event_proof(epoch, pc0), + mk_event_proof(epoch, pc1), + ], + blocks: vec![], + }; + + verifier.verify_event_number_continuity(epoch, &bundle, &mut cursor)?; + Ok(()) + } + + #[test] + fn continuity_check_fails_on_config_storage_mismatch() -> Result<()> { + let epoch = 100; + let verifier = ProofVerifier::new("test-subnet".to_string()); + let mut cursor = EventNumberCursor { + next_parent_topdown_nonce: 0, + next_parent_power_change_config_number: 7, + }; + + let pc0 = mk_power_change_rawlog(7); + let pc1 = mk_power_change_rawlog(8); + + // WRONG: should be 9, but we claim 10. + let next_config_storage = mk_storage_proof(NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT, 10); + let topdown_nonce_storage = + mk_storage_proof_h256(expected_topdown_nonce_slot("test-subnet"), 0); + + let bundle = UnifiedProofBundle { + storage_proofs: vec![next_config_storage, topdown_nonce_storage], + event_proofs: vec![mk_event_proof(epoch, pc0), mk_event_proof(epoch, pc1)], + blocks: vec![], + }; + + let err = verifier + .verify_event_number_continuity(epoch, &bundle, &mut cursor) + .expect_err("expected mismatch to be rejected"); + let msg = err.to_string(); + assert!( + msg.contains("power-change configuration numbers mismatch") + || msg.contains("power-change configuration numbers event-count mismatch"), + "unexpected error message: {msg}" + ); + Ok(()) + } + + #[test] + fn continuity_check_fails_on_nonce_gap() -> Result<()> { + let epoch = 100; + let verifier = ProofVerifier::new("test-subnet".to_string()); + let mut cursor = EventNumberCursor { + next_parent_topdown_nonce: 10, + next_parent_power_change_config_number: 0, + }; + + let td0 = mk_topdown_rawlog(EthAddress::random(), [7u8; 32], 10); + let td1 = mk_topdown_rawlog(EthAddress::random(), [8u8; 32], 12); // gap! + let next_config_storage = mk_storage_proof(NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT, 0); + let topdown_nonce_storage = + mk_storage_proof_h256(expected_topdown_nonce_slot("test-subnet"), 13); + + let bundle = UnifiedProofBundle { + storage_proofs: vec![next_config_storage, topdown_nonce_storage], + event_proofs: vec![mk_event_proof(epoch, td0), mk_event_proof(epoch, td1)], + blocks: vec![], + }; + + let err = verifier + .verify_event_number_continuity(epoch, &bundle, &mut cursor) + .expect_err("expected nonce gap to be rejected"); + assert!(err + .to_string() + .contains("top-down message nonces not contiguous")); + Ok(()) + } + + #[test] + fn continuity_check_fails_on_duplicate_nonce() -> Result<()> { + let epoch = 100; + let verifier = ProofVerifier::new("test-subnet".to_string()); + let mut cursor = EventNumberCursor { + next_parent_topdown_nonce: 10, + next_parent_power_change_config_number: 0, + }; + + // Duplicate nonce 10 twice. + let td0 = mk_topdown_rawlog(EthAddress::random(), [7u8; 32], 10); + let td1 = mk_topdown_rawlog(EthAddress::random(), [8u8; 32], 10); + + // Storage indicates two messages were applied (delta=2) ending at nonce 12. 
+ let next_config_storage = mk_storage_proof(NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT, 0); + let topdown_nonce_storage = + mk_storage_proof_h256(expected_topdown_nonce_slot("test-subnet"), 12); + + let bundle = UnifiedProofBundle { + storage_proofs: vec![next_config_storage, topdown_nonce_storage], + event_proofs: vec![mk_event_proof(epoch, td0), mk_event_proof(epoch, td1)], + blocks: vec![], + }; + + let err = verifier + .verify_event_number_continuity(epoch, &bundle, &mut cursor) + .expect_err("expected duplicate nonce to be rejected"); + assert!(err.to_string().contains("contains duplicate")); + Ok(()) + } + + #[test] + fn continuity_check_detects_omitted_initial_events_via_storage_delta() -> Result<()> { + let verifier = ProofVerifier::new("test-subnet".to_string()); + // Epoch 100 starts at nonce 10 (two events) and config-number 0 (no events). + let mut cursor = EventNumberCursor { + next_parent_topdown_nonce: 10, + next_parent_power_change_config_number: 0, + }; + + // Epoch 100: two topdown messages (10,11) -> end nonce 12. + let epoch0 = 100; + let td0 = mk_topdown_rawlog(EthAddress::random(), [7u8; 32], 10); + let td1 = mk_topdown_rawlog(EthAddress::random(), [8u8; 32], 11); + let bundle0 = UnifiedProofBundle { + storage_proofs: vec![ + mk_storage_proof(NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT, 0), + mk_storage_proof_h256(expected_topdown_nonce_slot("test-subnet"), 12), + ], + event_proofs: vec![mk_event_proof(epoch0, td0), mk_event_proof(epoch0, td1)], + blocks: vec![], + }; + verifier.verify_event_number_continuity(epoch0, &bundle0, &mut cursor)?; + + // Epoch 101: actual storage end indicates 3 messages (delta=3), but we only include 2 events. + // This simulates omitting the first event in the epoch while keeping contiguity. + let epoch1 = 101; + let td2 = mk_topdown_rawlog(EthAddress::random(), [9u8; 32], 13); + let td3 = mk_topdown_rawlog(EthAddress::random(), [10u8; 32], 14); + let bundle1 = UnifiedProofBundle { + storage_proofs: vec![ + mk_storage_proof(NEXT_CONFIG_NUMBER_ABSOLUTE_SLOT, 0), + mk_storage_proof_h256(expected_topdown_nonce_slot("test-subnet"), 15), + ], + event_proofs: vec![mk_event_proof(epoch1, td2), mk_event_proof(epoch1, td3)], + blocks: vec![], + }; + let err = verifier + .verify_event_number_continuity(epoch1, &bundle1, &mut cursor) + .expect_err("expected omitted-initial-event to be detected"); + assert!(err.to_string().contains("event-count mismatch")); + Ok(()) + } +} diff --git a/ipc/provider/Cargo.toml b/ipc/provider/Cargo.toml index f2c19911bd..50eef07548 100644 --- a/ipc/provider/Cargo.toml +++ b/ipc/provider/Cargo.toml @@ -61,7 +61,6 @@ fendermint_rpc = { path = "../../fendermint/rpc" } fendermint_actor_f3_light_client = { path = "../../fendermint/actors/f3-light-client" } fendermint_vm_genesis = { path = "../../fendermint/vm/genesis" } - [dev-dependencies] tempfile = { workspace = true } hex = { workspace = true }
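For completeness, a small usage sketch of the `eth_hash_from_tipset_key_bytes` helper added in `types.rs` above, which reproduces Lotus' Ethereum block-hash derivation (blake2b-256 over the DAG-CBOR byte-string wrapping of the tipset key bytes). The key bytes here are invented, and the crate/module path assumes the package name `fendermint_vm_topdown_proof_service` with the public `types` module:

use fendermint_vm_topdown_proof_service::types::eth_hash_from_tipset_key_bytes;

fn main() -> anyhow::Result<()> {
    // Hypothetical tipset key bytes (in practice: the concatenated block-header CID bytes
    // of a finalized tipset, i.e. `FinalizedTipset.block_cids`).
    let tipset_key_bytes = vec![0x01, 0x71, 0xa0, 0xe4, 0x02, 0x20];
    let eth_block_hash = eth_hash_from_tipset_key_bytes(&tipset_key_bytes)?;
    // This is the value an Ethereum JSON-RPC client would see as `blockHash`
    // for the corresponding tipset.
    println!("0x{}", hex::encode(eth_block_hash));
    Ok(())
}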