diff --git a/.gitignore b/.gitignore index efd7916b050..e4aa942de2a 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ genesis.ssz # VSCode /.vscode + +md-docs/ diff --git a/.vscode/settings.json b/.vscode/settings.json index 65447c4390a..ec50547fb9d 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,8 @@ { - "rust-analyzer.cargo.cfgs": [ - "!debug_assertions" - ] +"rust-analyzer.checkOnSave.enable": true, +"rust-analyzer.check.command": "clippy", +"rust-analyzer.check.allTargets": false, + +"rust-analyzer.check.extraEnv": { "CARGO_BUILD_JOBS": "4" }, +"rust-analyzer.cargo.features": ["test-utils"] } diff --git a/Cargo.lock b/Cargo.lock index 27f775b2d0d..e53873dfc1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3323,7 +3323,9 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-eth", + "anyhow", "arc-swap", + "async-trait", "bls", "builder_client", "bytes", @@ -6230,15 +6232,26 @@ version = "0.2.0" dependencies = [ "beacon_node", "beacon_node_fallback", + "bls", "environment", "eth2", "execution_layer", + "hex", + "mockito", + "parking_lot", + "reqwest", "sensitive_url", + "serde_json", + "ssz_types", + "task_executor", "tempfile", "tokio", + "tracing", + "tree_hash", "types", "validator_client", "validator_dir", + "validator_store", ] [[package]] @@ -7006,6 +7019,17 @@ dependencies = [ "syn 2.0.110", ] +[[package]] +name = "proof_engine_test" +version = "8.0.1" +dependencies = [ + "anyhow", + "network", + "simulator", + "tokio", + "tracing", +] + [[package]] name = "proptest" version = "1.9.0" @@ -8260,22 +8284,29 @@ dependencies = [ name = "simulator" version = "0.2.0" dependencies = [ + "anyhow", + "beacon_chain", "clap", "environment", + "eth2", "execution_layer", "futures", "kzg", + "lighthouse_network", "logging", "node_test_rig", "parking_lot", "rayon", "sensitive_url", "serde_json", + "task_executor", + "tempfile", "tokio", "tracing", "tracing-subscriber", "typenum", "types", + "validator_http_api", ] 
[[package]] @@ -9599,6 +9630,7 @@ dependencies = [ "doppelganger_service", "environment", "eth2", + "execution_layer", "fdlimit", "graffiti_file", "hyper 1.8.1", @@ -9755,11 +9787,14 @@ dependencies = [ "bls", "either", "eth2", + "execution_layer", "futures", "graffiti_file", + "lighthouse_validator_store", "logging", "parking_lot", "safe_arith", + "serde_json", "slot_clock", "task_executor", "tokio", @@ -9768,6 +9803,7 @@ dependencies = [ "types", "validator_metrics", "validator_store", + "warp_utils", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 441490ee1b9..81c2bb847a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,6 +69,7 @@ members = [ "testing/ef_tests", "testing/execution_engine_integration", "testing/node_test_rig", + "testing/proof_engine", "testing/simulator", "testing/state_transition_vectors", "testing/validator_test_rig", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e3de8d73245..98f19cc4a3d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -27,6 +27,7 @@ use crate::data_availability_checker::{ }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; +use crate::eip8025::{ExecutionProofError, verify_signed_execution_proof_signature}; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload}; @@ -76,11 +77,12 @@ use crate::{ use bls::{PublicKey, PublicKeyBytes, Signature}; use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{ - EventKind, SseBlobSidecar, SseBlock, SseDataColumnSidecar, SseExtendedPayloadAttributes, + EventKind, SseBlobSidecar, SseBlock, SseBlockFull, SseDataColumnSidecar, + SseExtendedPayloadAttributes, }; use execution_layer::{ 
BlockProposalContents, BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, - FailedCondition, PayloadAttributes, PayloadStatus, + FailedCondition, MissingProofInfo, PayloadAttributes, PayloadStatus, eip8025::ProofEngine, }; use fixed_bytes::FixedBytesExtended; use fork_choice::{ @@ -134,6 +136,7 @@ use tracing::{Span, debug, debug_span, error, info, info_span, instrument, trace use tree_hash::TreeHash; use types::data::{ColumnIndex, FixedBlobSidecarList}; use types::execution::BlockProductionVersion; +use types::execution::eip8025::ProofStatus; use types::*; pub type ForkChoiceError = fork_choice::Error; @@ -4019,7 +4022,7 @@ impl BeaconChain { .block_processed(block_root); self.import_block_update_metrics_and_events( - block, + signed_block, block_root, block_time_imported, payload_verification_status, @@ -4307,12 +4310,17 @@ impl BeaconChain { fn import_block_update_metrics_and_events( &self, - block: BeaconBlockRef, + signed_block: Arc>, block_root: Hash256, block_time_imported: Duration, payload_verification_status: PayloadVerificationStatus, current_slot: Slot, ) { + // TODO: Optimise this so we don't have to clone. + let beacon_block = Arc::unwrap_or_clone(signed_block.clone()); + let (beacon_block, _) = beacon_block.deconstruct(); + let block = signed_block.message(); + // Only present some metrics for blocks from the previous epoch or later. // // This helps avoid noise in the metrics during sync. 
@@ -4344,14 +4352,30 @@ impl BeaconChain { ); } - if let Some(event_handler) = self.event_handler.as_ref() - && event_handler.has_block_subscribers() - { - event_handler.register(EventKind::Block(SseBlock { - slot: block.slot(), - block: block_root, - execution_optimistic: payload_verification_status.is_optimistic(), - })); + if let Some(event_handler) = self.event_handler.as_ref() { + // Emit Block event if there are block subscribers + if event_handler.has_block_subscribers() { + event_handler.register(EventKind::Block(SseBlock { + slot: block.slot(), + block: block_root, + execution_optimistic: payload_verification_status.is_optimistic(), + })); + } + + // Emit BlockFull event if there are block_full subscribers + if event_handler.has_block_full_subscribers() { + let slot = block.slot(); + // Convert BeaconBlockRef to owned BeaconBlock for the event + event_handler.register(EventKind::BlockFull(Box::new(ForkVersionedResponse { + data: SseBlockFull { + slot, + block: beacon_block, + execution_optimistic: payload_verification_status.is_optimistic(), + }, + metadata: Default::default(), + version: self.spec.fork_name_at_slot::(slot), + }))); + } } // Do not trigger light_client server update producer for old blocks, to extra work @@ -7421,11 +7445,173 @@ impl BeaconChain { .custody_context() .custody_columns_for_epoch(epoch_opt, &self.spec) } + + /// Return all proof-engine buffer entries that are still missing sufficient proofs, + /// with `MissingProofInfo.root` replaced by the corresponding beacon block root. + /// + /// Entries whose `request_root → block_root` mapping is not yet in the store LRU cache + /// are filtered out (the block may not have been imported yet). 
+ pub fn missing_execution_proofs(&self) -> Vec { + let Some(el) = self.execution_layer.as_ref() else { + return vec![]; + }; + let Some(pe) = el.proof_engine() else { + return vec![]; + }; + pe.missing_proofs() + .into_iter() + .filter_map(|mut info| { + info.root = self.store.get_block_root_by_request_root(&info.root)?; + Some(info) + }) + .collect() + } + + /// Get execution proofs associated with the given beacon block root (EIP-8025). + /// + /// Translates beacon block root → new_payload request root → proofs via the proof engine + /// state. Returns an empty vec if the node has no proof engine or if there are no proofs for + /// the requested block. + pub fn get_execution_proofs_by_block_root( + &self, + block_root: Hash256, + ) -> Vec { + let Some(proof_engine) = self + .execution_layer + .as_ref() + .and_then(|el| el.proof_engine()) + else { + return vec![]; + }; + let Some(request_root) = self.store.get_request_root_by_block_root(&block_root) else { + return vec![]; + }; + proof_engine.get_proofs_by_root(&request_root) + } + + /// Verify a signed execution proof (EIP-8025). + /// + /// This method: + /// 1. Verifies the BLS signature over the proof message + /// 2. Verifies the proof via the ProofEngine (execution engine RPC) + /// 3. If the proof is valid, updates fork choice to mark the corresponding block as valid. + /// + /// # Returns + /// + /// `Ok(ProofStatus)` if the proof has been verified by the proof engine, otherwise an `ExecutionProofError`. + pub async fn verify_execution_proof( + self: &Arc, + signed_proof: types::SignedExecutionProof, + ) -> Result { + // TODO: This function clones the proof multiple times. Optimise it. + + // Clone for moving into closures + let chain = self.clone(); + let signed_proof_for_bls = signed_proof.clone(); + + // Use spawn_blocking_handle because BLS verification is cpu-bound. 
+ self.spawn_blocking_handle( + move || { + let head = chain.canonical_head.cached_head(); + let fork_name = chain.spec.fork_name_at_slot::(head.head_slot()); + + let validator_index = signed_proof_for_bls.validator_index as usize; + let head_state = &head.snapshot.beacon_state; + + let validator_pubkey = head_state + .validators() + .get(validator_index) + .map(|v| v.pubkey) + .ok_or(ExecutionProofError::InvalidValidatorIndex)?; + + verify_signed_execution_proof_signature::( + &signed_proof_for_bls, + &validator_pubkey, + fork_name, + chain.genesis_validators_root, + &chain.spec, + ) + }, + "verify_execution_proof_bls", + ) + .await??; + + // Step 2: ProofEngine verification + // The proof engine must be configured if we are receiving execution proofs, so if it's not available then that's an error. + let proof_engine = self + .execution_layer + .as_ref() + .ok_or(ExecutionProofError::NoExecutionLayer)? + .proof_engine() + .ok_or(ExecutionProofError::NoExecutionLayer)?; + + // The proof engine verification is primiarly async work, waiting for the proof verifier result so we spawn it on the async executor. + let signed_proof_for_engine = signed_proof.clone(); + let handle = self + .task_executor + .spawn_handle( + async move { + proof_engine + .verify_execution_proof(&signed_proof_for_engine) + .await + }, + "verify_execution_proof_engine", + ) + .ok_or(Error::RuntimeShutdown)?; + + let verification_result = handle + .await + .map_err(Error::TokioJoin)? + .ok_or(Error::RuntimeShutdown)??; + + // Step 3: Update the fork choice if the proof engine returns valid. + // The proof engine returns valid if the proof is valid and the criteria for the associated block root to be considered valid are met. + // The proof engine returns ACCEPTED if the proof is valid but block validity criteria are not met. 
+ if verification_result.is_valid() { + let request_root = signed_proof.request_root(); + + // Look up the beacon block root from request root + let block_root = self + .store + .get_block_root_by_request_root(&request_root) + .ok_or_else(|| ExecutionProofError::UnknownRequestRoot(request_root))?; + + debug!( + ?request_root, + ?block_root, + validator_index = signed_proof.validator_index, + proof_type = signed_proof.message.proof_type, + "Processing verified execution proof" + ); + + // Update fork choice using spawn_blocking_handle to avoid lock contention. + let chain = self.clone(); + self.spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_valid_execution_payload(block_root) + }, + "verify_execution_proof_fork_choice_update", + ) + .await??; + + info!( + ?block_root, + ?request_root, + "Updated fork choice for verified proof" + ); + } + + Ok(verification_result) + } } impl Drop for BeaconChain { fn drop(&mut self) { let drop = || -> Result<(), Error> { + // TODO: Persist the proof engine state if the BeaconChain is dropped. self.persist_fork_choice()?; self.persist_op_pool()?; self.persist_custody_context() diff --git a/beacon_node/beacon_chain/src/bellatrix_readiness.rs b/beacon_node/beacon_chain/src/bellatrix_readiness.rs index 412870354b9..33bf9367ebd 100644 --- a/beacon_node/beacon_chain/src/bellatrix_readiness.rs +++ b/beacon_node/beacon_chain/src/bellatrix_readiness.rs @@ -2,7 +2,8 @@ //! transition. 
use crate::{BeaconChain, BeaconChainError as Error, BeaconChainTypes}; -use execution_layer::BlockByNumberQuery; +use execution_layer::eip8025::ProofEngine; +use execution_layer::{BlockByNumberQuery, ForkchoiceState}; use serde::{Deserialize, Serialize, Serializer}; use std::fmt; use std::fmt::Write; @@ -205,19 +206,31 @@ impl BeaconChain { .ok_or(Error::ExecutionLayerMissing)?; let exec_block_hash = latest_execution_payload_header.block_hash(); + if let Some(proof_engine) = execution_layer.proof_engine() { + proof_engine + .forkchoice_updated(ForkchoiceState { + head_block_hash: exec_block_hash, + safe_block_hash: exec_block_hash, + finalized_block_hash: exec_block_hash, + }) + .await?; + } + // Use getBlockByNumber(0) to check that the block hash matches. // At present, Geth does not respond to engine_getPayloadBodiesByRange before genesis. - let execution_block = execution_layer - .get_block_by_number(BlockByNumberQuery::Tag("0x0")) - .await - .map_err(|e| Error::ExecutionLayerGetBlockByNumberFailed(Box::new(e)))? - .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; - - if execution_block.block_hash != exec_block_hash { - return Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { - got: execution_block.block_hash, - expected: exec_block_hash, - }); + if execution_layer.engine().is_some() { + let execution_block = execution_layer + .get_block_by_number(BlockByNumberQuery::Tag("0x0")) + .await + .map_err(|e| Error::ExecutionLayerGetBlockByNumberFailed(Box::new(e)))? 
+ .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; + + if execution_block.block_hash != exec_block_hash { + return Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { + got: execution_block.block_hash, + expected: exec_block_hash, + }); + } } Ok(GenesisExecutionPayloadStatus::Correct(exec_block_hash)) diff --git a/beacon_node/beacon_chain/src/eip8025/mod.rs b/beacon_node/beacon_chain/src/eip8025/mod.rs new file mode 100644 index 00000000000..2b74f3e1048 --- /dev/null +++ b/beacon_node/beacon_chain/src/eip8025/mod.rs @@ -0,0 +1,13 @@ +//! EIP-8025: Optional Execution Proofs +//! +//! This module provides beacon chain integration for EIP-8025 optional execution proofs. +//! It includes: +//! - Proof verification logic using validator signatures +//! - TODO: integrate into proof engine + +pub mod proof_verification; + +pub use proof_verification::{ + ExecutionProofError, compute_execution_proof_domain, compute_signing_root, + verify_signed_execution_proof_signature, +}; diff --git a/beacon_node/beacon_chain/src/eip8025/proof_verification.rs b/beacon_node/beacon_chain/src/eip8025/proof_verification.rs new file mode 100644 index 00000000000..ba00f69307c --- /dev/null +++ b/beacon_node/beacon_chain/src/eip8025/proof_verification.rs @@ -0,0 +1,494 @@ +//! EIP-8025 Proof Verification +//! +//! This module implements the proof verification logic for EIP-8025 optional execution proofs. +//! It provides: +//! - BLS signature verification for validator signatures +//! - Validator index validation against the BeaconState +//! - TODO: integration into proof engine for end-to-end verification + +use crate::BeaconChainError; +use execution_layer::eip8025::ProofEngineError; +use std::fmt; +use tree_hash::TreeHash; +use types::{ChainSpec, Domain, EthSpec, ForkName, Hash256, SignedExecutionProof, SigningData}; + +/// Errors that can occur during execution proof verification. +#[derive(Debug)] +pub enum ExecutionProofError { + /// The BLS signature is invalid. 
+ InvalidSignature, + /// The proof data is empty. + EmptyProofData, + /// The validator index is out of range. + InvalidValidatorIndex, + /// Failed to decompress the validator's public key. + InvalidValidatorPubkey, + /// Failed to decompress the signature. + InvalidSignatureFormat, + /// The fork does not support EIP-8025. + UnsupportedFork, + /// Failed to retrieve beacon state. + StateError(String), + /// No execution layer configured. + NoExecutionLayer, + /// The request root referenced by the proof is not known. + UnknownRequestRoot(Hash256), + /// The was an error in the proof engine during verification. + ProofEngineError(ProofEngineError), +} + +impl fmt::Display for ExecutionProofError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ExecutionProofError::InvalidSignature => { + write!(f, "Invalid BLS signature") + } + ExecutionProofError::EmptyProofData => { + write!(f, "Proof data is empty") + } + ExecutionProofError::InvalidValidatorIndex => { + write!(f, "Validator index out of range") + } + ExecutionProofError::InvalidValidatorPubkey => { + write!(f, "Invalid validator public key format") + } + ExecutionProofError::InvalidSignatureFormat => { + write!(f, "Invalid signature format") + } + ExecutionProofError::UnsupportedFork => { + write!(f, "Fork does not support EIP-8025") + } + ExecutionProofError::StateError(msg) => { + write!(f, "Beacon state error: {}", msg) + } + ExecutionProofError::NoExecutionLayer => { + write!(f, "No execution layer configured") + } + ExecutionProofError::UnknownRequestRoot(root) => { + write!( + f, + "Unknown request root {:?}. Block may not be imported yet or was already finalized.", + root + ) + } + ExecutionProofError::ProofEngineError(engine_error) => { + write!(f, "Proof engine error: {:?}", engine_error) + } + } + } +} + +impl std::error::Error for ExecutionProofError {} + +/// Compute the signing root for an execution proof message. 
+/// +/// This function is public for use by the validator client when signing proofs. +pub fn compute_signing_root(message: &types::ExecutionProof, domain: Hash256) -> Hash256 { + SigningData { + object_root: message.tree_hash_root(), + domain, + } + .tree_hash_root() +} + +/// Compute the domain for execution proof signing. +/// +/// This function is public for use by the validator client when signing proofs. +pub fn compute_execution_proof_domain( + fork_name: ForkName, + genesis_validators_root: Hash256, + spec: &ChainSpec, +) -> Hash256 { + let fork_version = spec.fork_version_for_name(fork_name); + spec.compute_domain( + Domain::ExecutionProof, + fork_version, + genesis_validators_root, + ) +} + +// TODO: migrate into an impl on BeaconChain +/// Verify a validator's BLS signature over an execution proof. +/// +/// This function: +/// 1. Checks that the fork supports EIP-8025 +/// 2. Checks that proof data is not empty (max proof size should be enforced by ssz deserialization) +/// 3. Verifies the BLS signature over the proof message using the validator's pubkey +/// +/// # Arguments +/// +/// * `signed_proof` - The signed execution proof to verify +/// * `validator_pubkey` - The public key of the validator at the specified index +/// * `fork_name` - The current fork name +/// * `genesis_validators_root` - The genesis validators root for domain computation +/// * `spec` - The chain specification +/// +/// # Returns +/// +/// `Ok(())` if the proof is valid, otherwise an `ExecutionProofError`. 
+pub fn verify_signed_execution_proof_signature( + signed_proof: &SignedExecutionProof, + validator_pubkey: &bls::PublicKeyBytes, + fork_name: ForkName, + genesis_validators_root: Hash256, + spec: &ChainSpec, +) -> Result<(), BeaconChainError> { + // Check proof data is not empty + if signed_proof.message.proof_data.is_empty() { + Err(ExecutionProofError::EmptyProofData)?; + } + + // Decompress the validator's public key + let pubkey = validator_pubkey + .decompress() + .map_err(|_| ExecutionProofError::InvalidValidatorPubkey)?; + + // Decompress the signature using bls::SignatureBytes::decompress() + let signature = signed_proof + .signature + .decompress() + .map_err(|_| ExecutionProofError::InvalidSignatureFormat)?; + + // Get the domain for execution proof signing + let domain = compute_execution_proof_domain(fork_name, genesis_validators_root, spec); + + // Compute the signing root + let signing_root = compute_signing_root(&signed_proof.message, domain); + + // Verify the signature + if !signature.verify(&pubkey, signing_root) { + Err(ExecutionProofError::InvalidSignature)?; + } + + Ok(()) +} + +impl From for BeaconChainError { + fn from(engine_error: ProofEngineError) -> Self { + BeaconChainError::ExecutionProofError(ExecutionProofError::ProofEngineError(engine_error)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::BeaconChainError; + use bls::{Keypair, SignatureBytes}; + use ssz_types::VariableList; + use types::{ExecutionProof, MainnetEthSpec, PublicInput}; + + fn get_fulu_spec() -> ChainSpec { + ForkName::Fulu.make_genesis_spec(MainnetEthSpec::default_spec()) + } + + fn create_test_proof(proof_data: Vec) -> ExecutionProof { + ExecutionProof { + proof_data: VariableList::new(proof_data).unwrap(), + proof_type: 1, + public_input: PublicInput { + new_payload_request_root: Hash256::repeat_byte(0xab), + }, + } + } + + fn sign_proof( + proof: &ExecutionProof, + keypair: &Keypair, + fork_name: ForkName, + genesis_validators_root: Hash256, + 
spec: &ChainSpec, + ) -> SignedExecutionProof { + let domain = compute_execution_proof_domain(fork_name, genesis_validators_root, spec); + let signing_root = compute_signing_root(proof, domain); + let signature = keypair.sk.sign(signing_root); + + // Convert signature to bls::SignatureBytes + let sig_bytes = signature.serialize(); + let signature_vec: SignatureBytes = SignatureBytes::deserialize(&sig_bytes).unwrap(); + + SignedExecutionProof { + message: proof.clone(), + validator_index: 0, + signature: signature_vec, + } + } + + #[test] + fn test_verify_valid_signature() { + let keypair = Keypair::random(); + let spec = get_fulu_spec(); + let genesis_validators_root = Hash256::repeat_byte(0xcd); + let proof = create_test_proof(vec![1, 2, 3, 4]); + + let signed = sign_proof( + &proof, + &keypair, + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + let result = verify_signed_execution_proof_signature::( + &signed, + &keypair.pk.compress(), + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + assert!(result.is_ok()); + } + + #[test] + fn test_verify_invalid_signature() { + let keypair = Keypair::random(); + let wrong_keypair = Keypair::random(); + let spec = get_fulu_spec(); + let genesis_validators_root = Hash256::repeat_byte(0xcd); + let proof = create_test_proof(vec![1, 2, 3, 4]); + + // Sign with one keypair, verify with another + let signed = sign_proof( + &proof, + &keypair, + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + let result = verify_signed_execution_proof_signature::( + &signed, + &wrong_keypair.pk.compress(), + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + assert!(matches!( + result, + Err(BeaconChainError::ExecutionProofError( + ExecutionProofError::InvalidSignature + )) + )); + } + + #[test] + fn test_verify_empty_proof_data() { + let keypair = Keypair::random(); + let spec = get_fulu_spec(); + let genesis_validators_root = Hash256::repeat_byte(0xcd); + let proof = create_test_proof(vec![]); // 
Empty proof data + + let signed = sign_proof( + &proof, + &keypair, + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + let result = verify_signed_execution_proof_signature::( + &signed, + &keypair.pk.compress(), + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + assert!(matches!( + result, + Err(BeaconChainError::ExecutionProofError( + ExecutionProofError::EmptyProofData + )) + )); + } + + #[test] + fn test_verify_unsupported_fork() { + let keypair = Keypair::random(); + let spec = MainnetEthSpec::default_spec(); + let genesis_validators_root = Hash256::repeat_byte(0xcd); + let proof = create_test_proof(vec![1, 2, 3, 4]); + + // Use Electra spec (pre-Fulu, EIP-8025 not enabled) + let electra_spec = ForkName::Electra.make_genesis_spec(spec.clone()); + let signed = sign_proof( + &proof, + &keypair, + ForkName::Electra, + genesis_validators_root, + &electra_spec, + ); + + let result = verify_signed_execution_proof_signature::( + &signed, + &keypair.pk.compress(), + ForkName::Electra, // Pre-Fulu fork + genesis_validators_root, + &electra_spec, + ); + + assert!(matches!( + result, + Err(BeaconChainError::ExecutionProofError( + ExecutionProofError::UnsupportedFork + )) + )); + } + + #[test] + fn test_verify_invalid_pubkey_format() { + let keypair = Keypair::random(); + let spec = get_fulu_spec(); + let genesis_validators_root = Hash256::repeat_byte(0xcd); + let proof = create_test_proof(vec![1, 2, 3, 4]); + + let signed = sign_proof( + &proof, + &keypair, + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + // Create invalid pubkey bytes (all zeros is not a valid point on the curve) + let invalid_pubkey = bls::PublicKeyBytes::empty(); + + let result = verify_signed_execution_proof_signature::( + &signed, + &invalid_pubkey, + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + assert!(matches!( + result, + Err(BeaconChainError::ExecutionProofError( + ExecutionProofError::InvalidValidatorPubkey + )) + )); + } + + #[test] + fn 
test_verify_invalid_signature_format() { + let keypair = Keypair::random(); + let spec = get_fulu_spec(); + let genesis_validators_root = Hash256::repeat_byte(0xcd); + let proof = create_test_proof(vec![1, 2, 3, 4]); + + // Create a signed proof with invalid signature bytes. + // BLS signatures are G2 points. Bytes 0xff repeated are not a valid + // compressed G2 point representation because they fail deserialization. + let invalid_signature = SignatureBytes::deserialize(&[0xff; 96]).unwrap(); + let signed = SignedExecutionProof { + message: proof, + validator_index: 0, + signature: invalid_signature, + }; + + let result = verify_signed_execution_proof_signature::( + &signed, + &keypair.pk.compress(), + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + assert!(matches!( + result, + Err(BeaconChainError::ExecutionProofError( + ExecutionProofError::InvalidSignatureFormat + )) + )); + } + + #[test] + fn test_compute_signing_root_deterministic() { + let proof = create_test_proof(vec![1, 2, 3, 4]); + let domain = Hash256::repeat_byte(0xaa); + + let root1 = compute_signing_root(&proof, domain); + let root2 = compute_signing_root(&proof, domain); + + assert_eq!(root1, root2); + } + + #[test] + fn test_compute_signing_root_different_inputs() { + let proof1 = create_test_proof(vec![1, 2, 3, 4]); + let proof2 = create_test_proof(vec![5, 6, 7, 8]); + let domain = Hash256::repeat_byte(0xaa); + + let root1 = compute_signing_root(&proof1, domain); + let root2 = compute_signing_root(&proof2, domain); + + assert_ne!(root1, root2); + } + + #[test] + fn test_compute_signing_root_different_domains() { + let proof = create_test_proof(vec![1, 2, 3, 4]); + let domain1 = Hash256::repeat_byte(0xaa); + let domain2 = Hash256::repeat_byte(0xbb); + + let root1 = compute_signing_root(&proof, domain1); + let root2 = compute_signing_root(&proof, domain2); + + assert_ne!(root1, root2); + } + + #[test] + fn test_compute_execution_proof_domain() { + let spec = get_fulu_spec(); + let 
genesis_validators_root = Hash256::repeat_byte(0xcd); + + let domain1 = + compute_execution_proof_domain(ForkName::Fulu, genesis_validators_root, &spec); + + // Domain should be deterministic + let domain2 = + compute_execution_proof_domain(ForkName::Fulu, genesis_validators_root, &spec); + assert_eq!(domain1, domain2); + + // Different genesis_validators_root should produce different domain + let different_root = Hash256::repeat_byte(0xef); + let domain3 = compute_execution_proof_domain(ForkName::Fulu, different_root, &spec); + assert_ne!(domain1, domain3); + } + + #[test] + fn test_verify_with_different_genesis_validators_root() { + let keypair = Keypair::random(); + let spec = get_fulu_spec(); + let genesis_validators_root = Hash256::repeat_byte(0xcd); + let different_root = Hash256::repeat_byte(0xef); + let proof = create_test_proof(vec![1, 2, 3, 4]); + + // Sign with one genesis_validators_root + let signed = sign_proof( + &proof, + &keypair, + ForkName::Fulu, + genesis_validators_root, + &spec, + ); + + // Verify with different genesis_validators_root + let result = verify_signed_execution_proof_signature::( + &signed, + &keypair.pk.compress(), + ForkName::Fulu, + different_root, + &spec, + ); + + // Should fail because domain computation uses genesis_validators_root + assert!(matches!( + result, + Err(BeaconChainError::ExecutionProofError( + ExecutionProofError::InvalidSignature + )) + )); + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 816e75fd242..9315db37c2d 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,4 +1,3 @@ -use crate::beacon_block_streamer::Error as BlockStreamerError; use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; use crate::data_availability_checker::AvailabilityCheckError; @@ -8,6 +7,7 @@ use crate::observed_aggregates::Error as ObservedAttestationsError; use 
crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use crate::observed_data_sidecars::Error as ObservedDataSidecarsError; +use crate::{beacon_block_streamer::Error as BlockStreamerError, eip8025::ExecutionProofError}; use bls::PublicKeyBytes; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; @@ -248,6 +248,7 @@ pub enum BeaconChainError { }, SkipProposerPreparation, FailedColumnCustodyInfoUpdate, + ExecutionProofError(ExecutionProofError), } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -277,6 +278,7 @@ easy_from_to!(EpochCacheError, BeaconChainError); easy_from_to!(LightClientError, BeaconChainError); easy_from_to!(MilhouseError, BeaconChainError); easy_from_to!(AttestationError, BeaconChainError); +easy_from_to!(ExecutionProofError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 63be944eea2..5adce3f8dd2 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -10,6 +10,7 @@ pub struct ServerSentEventHandler { attestation_tx: Sender>, single_attestation_tx: Sender>, block_tx: Sender>, + block_full_tx: Sender>, blob_sidecar_tx: Sender>, data_column_sidecar_tx: Sender>, finalized_tx: Sender>, @@ -37,6 +38,7 @@ impl ServerSentEventHandler { let (attestation_tx, _) = broadcast::channel(capacity); let (single_attestation_tx, _) = broadcast::channel(capacity); let (block_tx, _) = broadcast::channel(capacity); + let (block_full_tx, _) = broadcast::channel(capacity); let (blob_sidecar_tx, _) = broadcast::channel(capacity); let (data_column_sidecar_tx, _) = broadcast::channel(capacity); let (finalized_tx, _) = broadcast::channel(capacity); @@ -58,6 +60,7 @@ impl ServerSentEventHandler { attestation_tx, single_attestation_tx, block_tx, + block_full_tx, blob_sidecar_tx, data_column_sidecar_tx, 
finalized_tx, @@ -98,6 +101,10 @@ impl ServerSentEventHandler { .block_tx .send(kind) .map(|count| log_count("block", count)), + EventKind::BlockFull(_) => self + .block_full_tx + .send(kind) + .map(|count| log_count("block_full", count)), EventKind::BlobSidecar(_) => self .blob_sidecar_tx .send(kind) @@ -180,6 +187,10 @@ impl ServerSentEventHandler { self.block_tx.subscribe() } + pub fn subscribe_block_full(&self) -> Receiver> { + self.block_full_tx.subscribe() + } + pub fn subscribe_blob_sidecar(&self) -> Receiver> { self.blob_sidecar_tx.subscribe() } @@ -256,6 +267,10 @@ impl ServerSentEventHandler { self.block_tx.receiver_count() > 0 } + pub fn has_block_full_subscribers(&self) -> bool { + self.block_full_tx.receiver_count() > 0 + } + pub fn has_blob_sidecar_subscribers(&self) -> bool { self.blob_sidecar_tx.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 9459b1acd7d..61ad7714d05 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -136,7 +136,26 @@ async fn notify_new_payload( .ok_or(ExecutionPayloadError::NoExecutionConnection)?; let execution_block_hash = block.execution_payload()?.block_hash(); - let new_payload_response = execution_layer.notify_new_payload(block.try_into()?).await; + let new_payload_request: NewPayloadRequest<'_, ::EthSpec> = + block.try_into()?; + let new_payload_request_root = new_payload_request.tree_hash_root(); + let block_root = block.tree_hash_root(); + let new_payload_response = execution_layer + .notify_new_payload(new_payload_request) + .await; + + // Store bidirectional mapping for EIP-8025 execution proofs + // This enables proofs to be mapped to beacon blocks for fork choice updates + // TODO: If we store proofs in Store then we can remove the need for this mapping and just store the block root in the proof. 
+ // TODO: We should consider if this is the optimal mapping. We could consider using the execution block hash. + debug!( + ?block_root, + ?new_payload_request_root, + "Stored request_root mapping in cache" + ); + chain + .store + .put_request_root_mapping(new_payload_request_root, block_root); match new_payload_response { Ok(status) => match status { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index f92030a6714..baed68b7331 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -20,6 +20,7 @@ pub mod custody_context; pub mod data_availability_checker; pub mod data_column_verification; mod early_attester_cache; +pub mod eip8025; mod errors; pub mod events; pub mod execution_payload; diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index d9ae0e23451..4efd5bdec18 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -332,7 +332,7 @@ impl BeaconProcessorSend { } } -pub type AsyncFn = Pin + Send + Sync>>; +pub type AsyncFn = Pin + Send>>; pub type BlockingFn = Box; pub type BlockingFnWithManualSendOnIdle = Box; pub enum BlockingOrAsync { @@ -409,6 +409,12 @@ pub enum Work { DataColumnsByRootsRequest(BlockingFn), DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), + /// EIP-8025: A signed execution proof has been received over gossip. + GossipExecutionProof(AsyncFn), + /// EIP-8025: Serve an ExecutionProofsByRange RPC request. + ExecutionProofsByRangeRequest(BlockingFn), + /// EIP-8025: Serve an ExecutionProofsByRoot RPC request. 
+ ExecutionProofsByRootRequest(BlockingFn), LightClientBootstrapRequest(BlockingFn), LightClientOptimisticUpdateRequest(BlockingFn), LightClientFinalityUpdateRequest(BlockingFn), @@ -461,6 +467,9 @@ pub enum WorkType { DataColumnsByRootsRequest, DataColumnsByRangeRequest, GossipBlsToExecutionChange, + GossipExecutionProof, + ExecutionProofsByRangeRequest, + ExecutionProofsByRootRequest, LightClientBootstrapRequest, LightClientOptimisticUpdateRequest, LightClientFinalityUpdateRequest, @@ -496,6 +505,7 @@ impl Work { WorkType::GossipLightClientOptimisticUpdate } Work::GossipBlsToExecutionChange(_) => WorkType::GossipBlsToExecutionChange, + Work::GossipExecutionProof(_) => WorkType::GossipExecutionProof, Work::RpcBlock { .. } => WorkType::RpcBlock, Work::RpcBlobs { .. } => WorkType::RpcBlobs, Work::RpcCustodyColumn { .. } => WorkType::RpcCustodyColumn, @@ -510,6 +520,8 @@ impl Work { Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, Work::DataColumnsByRangeRequest(_) => WorkType::DataColumnsByRangeRequest, + Work::ExecutionProofsByRangeRequest(_) => WorkType::ExecutionProofsByRangeRequest, + Work::ExecutionProofsByRootRequest(_) => WorkType::ExecutionProofsByRootRequest, Work::LightClientBootstrapRequest(_) => WorkType::LightClientBootstrapRequest, Work::LightClientOptimisticUpdateRequest(_) => { WorkType::LightClientOptimisticUpdateRequest @@ -933,6 +945,10 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = work_queues.dcbrange_queue.pop() { Some(item) + } else if let Some(item) = work_queues.epbroots_queue.pop() { + Some(item) + } else if let Some(item) = work_queues.epbrange_queue.pop() { + Some(item) // Check slashings after all other consensus messages so we prioritize // following head. 
// @@ -952,6 +968,9 @@ impl BeaconProcessor { work_queues.gossip_bls_to_execution_change_queue.pop() { Some(item) + // EIP-8025: Process execution proofs + } else if let Some(item) = work_queues.gossip_execution_proof_queue.pop() { + Some(item) // Check the priority 1 API requests after we've // processed all the interesting things from the network // and things required for us to stay in good repute @@ -1143,6 +1162,9 @@ impl BeaconProcessor { Work::GossipBlsToExecutionChange { .. } => work_queues .gossip_bls_to_execution_change_queue .push(work, work_id), + Work::GossipExecutionProof { .. } => { + work_queues.gossip_execution_proof_queue.push(work, work_id) + } Work::BlobsByRootsRequest { .. } => { work_queues.blob_broots_queue.push(work, work_id) } @@ -1152,6 +1174,13 @@ impl BeaconProcessor { Work::DataColumnsByRangeRequest { .. } => { work_queues.dcbrange_queue.push(work, work_id) } + // EIP-8025: Dedicated queues for serving execution proof RPC requests. + Work::ExecutionProofsByRangeRequest { .. } => { + work_queues.epbrange_queue.push(work, work_id) + } + Work::ExecutionProofsByRootRequest { .. } => { + work_queues.epbroots_queue.push(work, work_id) + } Work::UnknownLightClientOptimisticUpdate { .. 
} => work_queues .unknown_light_client_update_queue .push(work, work_id), @@ -1226,9 +1255,18 @@ impl BeaconProcessor { WorkType::BlobsByRootsRequest => work_queues.blob_broots_queue.len(), WorkType::DataColumnsByRootsRequest => work_queues.dcbroots_queue.len(), WorkType::DataColumnsByRangeRequest => work_queues.dcbrange_queue.len(), + WorkType::ExecutionProofsByRangeRequest => { + work_queues.epbrange_queue.len() + } + WorkType::ExecutionProofsByRootRequest => { + work_queues.epbroots_queue.len() + } WorkType::GossipBlsToExecutionChange => { work_queues.gossip_bls_to_execution_change_queue.len() } + WorkType::GossipExecutionProof => { + work_queues.gossip_execution_proof_queue.len() + } WorkType::LightClientBootstrapRequest => { work_queues.lc_bootstrap_queue.len() } @@ -1379,7 +1417,8 @@ impl BeaconProcessor { Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } | Work::RpcCustodyColumn(process_fn) - | Work::ColumnReconstruction(process_fn) => task_spawner.spawn_async(process_fn), + | Work::ColumnReconstruction(process_fn) + | Work::GossipExecutionProof(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) @@ -1389,7 +1428,9 @@ impl BeaconProcessor { Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) | Work::DataColumnsByRootsRequest(process_fn) - | Work::DataColumnsByRangeRequest(process_fn) => { + | Work::DataColumnsByRangeRequest(process_fn) + | Work::ExecutionProofsByRangeRequest(process_fn) + | Work::ExecutionProofsByRootRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { diff --git a/beacon_node/beacon_processor/src/scheduler/work_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_queue.rs index c6f74961d17..246133ce1f4 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_queue.rs +++ 
b/beacon_node/beacon_processor/src/scheduler/work_queue.rs @@ -135,6 +135,9 @@ pub struct BeaconProcessorQueueLengths { dcbroots_queue: usize, dcbrange_queue: usize, gossip_bls_to_execution_change_queue: usize, + gossip_execution_proof_queue: usize, + epbroots_queue: usize, + epbrange_queue: usize, lc_bootstrap_queue: usize, lc_rpc_optimistic_update_queue: usize, lc_rpc_finality_update_queue: usize, @@ -201,6 +204,10 @@ impl BeaconProcessorQueueLengths { dcbroots_queue: 1024, dcbrange_queue: 1024, gossip_bls_to_execution_change_queue: 16384, + // EIP-8025: Queue for execution proofs + gossip_execution_proof_queue: 64, + epbroots_queue: 1024, + epbrange_queue: 1024, lc_gossip_finality_update_queue: 1024, lc_gossip_optimistic_update_queue: 1024, lc_bootstrap_queue: 1024, @@ -245,6 +252,12 @@ pub struct WorkQueues { pub dcbroots_queue: FifoQueue>, pub dcbrange_queue: FifoQueue>, pub gossip_bls_to_execution_change_queue: FifoQueue>, + /// EIP-8025: Queue for execution proofs from gossip. + pub gossip_execution_proof_queue: FifoQueue>, + /// EIP-8025: Queue for serving ExecutionProofsByRoot RPC requests. + pub epbroots_queue: FifoQueue>, + /// EIP-8025: Queue for serving ExecutionProofsByRange RPC requests. 
+ pub epbrange_queue: FifoQueue>, pub lc_gossip_finality_update_queue: FifoQueue>, pub lc_gossip_optimistic_update_queue: FifoQueue>, pub lc_bootstrap_queue: FifoQueue>, @@ -310,6 +323,12 @@ impl WorkQueues { let gossip_bls_to_execution_change_queue = FifoQueue::new(queue_lengths.gossip_bls_to_execution_change_queue); + // EIP-8025: Execution proof queue + let gossip_execution_proof_queue = + FifoQueue::new(queue_lengths.gossip_execution_proof_queue); + let epbroots_queue = FifoQueue::new(queue_lengths.epbroots_queue); + let epbrange_queue = FifoQueue::new(queue_lengths.epbrange_queue); + let lc_gossip_optimistic_update_queue = FifoQueue::new(queue_lengths.lc_gossip_optimistic_update_queue); let lc_gossip_finality_update_queue = @@ -357,6 +376,9 @@ impl WorkQueues { dcbroots_queue, dcbrange_queue, gossip_bls_to_execution_change_queue, + gossip_execution_proof_queue, + epbroots_queue, + epbrange_queue, lc_gossip_optimistic_update_queue, lc_gossip_finality_update_queue, lc_bootstrap_queue, diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index c443e945743..7696fa9cb20 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -10,6 +10,7 @@ alloy-primitives = { workspace = true } alloy-rlp = { workspace = true } alloy-rpc-types-eth = { workspace = true } arc-swap = "1.6.0" +async-trait = "0.1" bls = { workspace = true } builder_client = { path = "../builder_client" } bytes = { workspace = true } @@ -53,3 +54,6 @@ typenum = { workspace = true } types = { workspace = true } warp = { workspace = true } zeroize = { workspace = true } + +[dev-dependencies] +anyhow = { workspace = true } diff --git a/beacon_node/execution_layer/src/eip8025/errors.rs b/beacon_node/execution_layer/src/eip8025/errors.rs new file mode 100644 index 00000000000..72da1b651f0 --- /dev/null +++ b/beacon_node/execution_layer/src/eip8025/errors.rs @@ -0,0 +1,189 @@ +//! 
Error types for EIP-8025 proof engine operations. + +use pretty_reqwest_error::PrettyReqwestError; +use std::fmt; +use types::{ExecutionBlockHash, Hash256}; + +/// Errors that can occur during proof engine operations. +#[derive(Debug)] +pub enum ProofEngineError { + /// The proof format is invalid. + InvalidProofFormat(String), + /// The proof type is invalid. + InvalidProofType(String), + /// The header format is invalid. + InvalidHeaderFormat(String), + /// The payload is invalid. + InvalidPayload(String), + /// Proof generation is unavailable. + ProofGenerationUnavailable(String), + /// HTTP request failed. + HttpClientError(PrettyReqwestError), + /// JSON-RPC error from the execution engine. + JsonRpcError { code: i64, message: String }, + /// Failed to serialize/deserialize. + SerdeError(serde_json::Error), + /// SSZ error. + SszError(ssz_types::Error), + /// The specified fork is not supported. + ForkNotSupported(String), + /// The execution engine does not support the requested proof type. + ProofTypeNotSupported(u8), + /// Timeout waiting for proof engine response. + Timeout, + /// Engine is not available. + EngineUnavailable, + /// State-related errors. + StateError(ProofEngineStateError), +} + +/// Errors related to the proof engine state. +#[derive(Debug)] +pub enum ProofEngineStateError { + /// The block hash for the given request root was not found in the tree state. + BlockHashNotFoundForRequestRoot { + request_root: Hash256, + block_hash: ExecutionBlockHash, + }, + /// The request root associated with the proof has not been observed in a beacon block. + ProofRequestRootNotSeen(Hash256), + /// The request root was not found in the buffer when promotion was attempted. + BufferedRequestNotFound(Hash256), + /// The block number for the given block hash was not found. 
+ BlockNumberNotFound(ExecutionBlockHash), +} + +impl std::error::Error for ProofEngineError {} +impl std::error::Error for ProofEngineStateError {} + +impl ProofEngineError { + /// Returns the JSON-RPC error code if this is a JSON-RPC error. + pub fn rpc_error_code(&self) -> Option { + match self { + ProofEngineError::JsonRpcError { code, .. } => Some(*code), + _ => None, + } + } + + /// Returns true if this error indicates the proof type is not supported. + pub fn is_not_supported(&self) -> bool { + matches!(self, ProofEngineError::ProofTypeNotSupported(_)) + } +} + +// JSON-RPC error codes for EIP-8025 +pub mod error_codes { + /// Invalid proof format - The execution proof structure is malformed + pub const INVALID_PROOF_FORMAT: i64 = -39001; + /// Invalid header format - The new payload request header structure is malformed + pub const INVALID_HEADER_FORMAT: i64 = -39002; + /// Invalid payload - The execution payload is invalid + pub const INVALID_PAYLOAD: i64 = -39003; + /// Proof generation unavailable - The client cannot generate proofs + pub const PROOF_GENERATION_UNAVAILABLE: i64 = -39004; +} + +impl fmt::Display for ProofEngineError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ProofEngineError::InvalidProofFormat(msg) => { + write!(f, "Invalid proof format: {}", msg) + } + ProofEngineError::InvalidProofType(msg) => { + write!(f, "Invalid proof type: {}", msg) + } + ProofEngineError::InvalidHeaderFormat(msg) => { + write!(f, "Invalid header format: {}", msg) + } + ProofEngineError::InvalidPayload(msg) => { + write!(f, "Invalid payload: {}", msg) + } + ProofEngineError::ProofGenerationUnavailable(msg) => { + write!(f, "Proof generation unavailable: {}", msg) + } + ProofEngineError::HttpClientError(err) => { + write!(f, "HTTP request failed: {}", err) + } + ProofEngineError::JsonRpcError { code, message } => { + write!(f, "JSON-RPC error (code {}): {}", code, message) + } + ProofEngineError::SerdeError(msg) => { + write!(f, 
"Serialization error: {}", msg) + } + ProofEngineError::SszError(err) => { + write!(f, "SSZ error: {}", err) + } + ProofEngineError::ForkNotSupported(fork) => { + write!(f, "Fork not supported: {}", fork) + } + ProofEngineError::ProofTypeNotSupported(proof_type) => { + write!(f, "Proof type {} not supported", proof_type) + } + ProofEngineError::Timeout => { + write!(f, "Proof engine request timed out") + } + ProofEngineError::EngineUnavailable => { + write!(f, "Proof engine is unavailable") + } + ProofEngineError::StateError(state_error) => { + write!(f, "State error: {:?}", state_error) + } + } + } +} + +impl fmt::Display for ProofEngineStateError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ProofEngineStateError::BlockHashNotFoundForRequestRoot { + request_root, + block_hash, + } => write!( + f, + "Block hash {:?} not found for request root {:?}", + block_hash, request_root + ), + ProofEngineStateError::ProofRequestRootNotSeen(request_root) => write!( + f, + "Proof request root {:?} has not been seen in a beacon block", + request_root + ), + ProofEngineStateError::BufferedRequestNotFound(request_root) => { + write!(f, "Buffered request with root {:?} not found", request_root) + } + ProofEngineStateError::BlockNumberNotFound(block_hash) => { + write!(f, "Block number not found for block hash {:?}", block_hash) + } + } + } +} + +impl From for ProofEngineError { + fn from(e: serde_json::Error) -> Self { + ProofEngineError::SerdeError(e) + } +} + +impl From for ProofEngineError { + fn from(e: ssz_types::Error) -> Self { + ProofEngineError::SszError(e) + } +} + +impl From for ProofEngineError { + fn from(e: reqwest::Error) -> Self { + ProofEngineError::HttpClientError(e.into()) + } +} + +impl From for ProofEngineError { + fn from(e: PrettyReqwestError) -> Self { + ProofEngineError::HttpClientError(e) + } +} + +impl From for ProofEngineError { + fn from(e: ProofEngineStateError) -> Self { + ProofEngineError::StateError(e) + } +} diff 
--git a/beacon_node/execution_layer/src/eip8025/json_structures.rs b/beacon_node/execution_layer/src/eip8025/json_structures.rs new file mode 100644 index 00000000000..ce638e09b91 --- /dev/null +++ b/beacon_node/execution_layer/src/eip8025/json_structures.rs @@ -0,0 +1,134 @@ +//! JSON structures for EIP-8025 Engine API communication. +//! +//! These types are used for JSON-RPC serialization/deserialization with the execution engine. + +use crate::eip8025::ProofEngineError; +use serde::{Deserialize, Serialize}; +use strum::EnumString; +use types::execution::eip8025::{ProofData, ProofStatus}; +use types::{Hash256, ProofGenId}; + +// TODO: Consider if this type is necessary or if we can use existing ProofInput type. +/// JSON representation of PublicInput for Engine API. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonPublicInputV1 { + /// The tree hash root of the NewPayloadRequest + pub new_payload_request_root: Hash256, +} + +/// JSON representation of ExecutionProof for Engine API. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonExecutionProofV1 { + /// The proof data (hex encoded) + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub proof_data: ProofData, + /// The type of proof + #[serde(with = "serde_utils::quoted_u64")] + pub proof_type: u64, + /// Public input linking the proof to a specific payload request + pub public_input: JsonPublicInputV1, +} + +/// JSON representation of ProofStatus for Engine API responses. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonProofStatusV1 { + /// The status: "VALID", "INVALID", "ACCEPTED", or "NOT_SUPPORTED" + pub status: JsonProofStatusV1Status, + /// Optional error message + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, EnumString)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[strum(serialize_all = "SCREAMING_SNAKE_CASE")] +pub enum JsonProofStatusV1Status { + Valid, + Invalid, + Accepted, + NotSupported, +} + +/// JSON representation of ProofAttributes for proof requests. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonProofAttributesV1 { + /// List of proof types to generate + pub proof_types: Vec, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(transparent)] +pub struct TransparentJsonProofGenId(#[serde(with = "serde_utils::bytes_8_hex")] pub ProofGenId); + +impl From for ProofGenId { + fn from(json: TransparentJsonProofGenId) -> Self { + json.0 + } +} + +impl From for JsonPublicInputV1 { + fn from(input: types::execution::eip8025::PublicInput) -> Self { + JsonPublicInputV1 { + new_payload_request_root: input.new_payload_request_root, + } + } +} + +impl From for JsonExecutionProofV1 { + fn from(proof: types::execution::eip8025::ExecutionProof) -> Self { + JsonExecutionProofV1 { + proof_data: proof.proof_data, + proof_type: proof.proof_type as u64, + public_input: proof.public_input.into(), + } + } +} + +impl From for ProofStatus { + fn from(j: JsonProofStatusV1) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let JsonProofStatusV1 { status, .. 
} = j; + + status.into() + } +} + +impl From for ProofStatus { + fn from(status: JsonProofStatusV1Status) -> Self { + match status { + JsonProofStatusV1Status::Valid => ProofStatus::Valid, + JsonProofStatusV1Status::Invalid => ProofStatus::Invalid, + JsonProofStatusV1Status::Accepted => ProofStatus::Accepted, + JsonProofStatusV1Status::NotSupported => ProofStatus::NotSupported, + } + } +} + +impl From for JsonProofAttributesV1 { + fn from(attrs: types::execution::eip8025::ProofAttributes) -> Self { + JsonProofAttributesV1 { + proof_types: attrs.proof_types.into_iter().map(|t| t as u64).collect(), + } + } +} + +impl TryFrom for types::execution::eip8025::ProofAttributes { + type Error = ProofEngineError; + + fn try_from(json: JsonProofAttributesV1) -> Result { + Ok(types::execution::eip8025::ProofAttributes { + proof_types: json + .proof_types + .into_iter() + .map(|t| { + t.try_into() + .map_err(|_| ProofEngineError::InvalidProofType(t.to_string())) + }) + .collect::, _>>()?, + }) + } +} diff --git a/beacon_node/execution_layer/src/eip8025/mod.rs b/beacon_node/execution_layer/src/eip8025/mod.rs new file mode 100644 index 00000000000..98b3f05bf93 --- /dev/null +++ b/beacon_node/execution_layer/src/eip8025/mod.rs @@ -0,0 +1,20 @@ +//! EIP-8025: Optional Execution Proofs +//! +//! This module provides the execution layer integration for EIP-8025 optional proofs. +//! It includes: +//! - Engine API methods for proof verification and generation +//! - ProofEngine trait for abstracting proof engine communication +//! 
- JSON structures for Engine API serialization + +pub mod errors; +pub mod json_structures; +pub mod proof_engine; +mod state; + +pub use errors::ProofEngineError; +pub use json_structures::*; +pub use proof_engine::{ + ENGINE_REQUEST_PROOFS_V1, ENGINE_VERIFY_EXECUTION_PROOF_V1, + ENGINE_VERIFY_NEW_PAYLOAD_REQUEST_HEADER_V1, HttpProofEngine, PROOF_ENGINE_TIMEOUT, + ProofEngine, +}; diff --git a/beacon_node/execution_layer/src/eip8025/proof_engine.rs b/beacon_node/execution_layer/src/eip8025/proof_engine.rs new file mode 100644 index 00000000000..27cd355bd40 --- /dev/null +++ b/beacon_node/execution_layer/src/eip8025/proof_engine.rs @@ -0,0 +1,315 @@ +//! ProofEngine trait and HTTP implementation for EIP-8025. +//! +//! This module defines the interface for interacting with proof engines +//! and provides an HTTP JSON-RPC implementation with an internal proof cache. + +use super::{errors::ProofEngineError, json_structures::*}; +use crate::{ + ForkchoiceState, ForkchoiceUpdatedResponse, MissingProofInfo, NewPayloadRequest, + NewPayloadRequestFulu, PayloadStatusV1, PayloadStatusV1Status, + eip8025::state::{RequestMetadata, State}, + json_structures::{JsonExecutionPayload, JsonRequestBody, JsonResponseBody}, +}; +use parking_lot::RwLock; +use reqwest::Client; +use reqwest::header::CONTENT_TYPE; +use sensitive_url::SensitiveUrl; +use serde::de::DeserializeOwned; +use serde_json::json; +use std::collections::HashMap; +use std::time::Duration; + +use types::execution::eip8025::{ProofAttributes, ProofGenId, ProofStatus, SignedExecutionProof}; +use types::{EthSpec, Hash256}; + +/// Static ID for JSON-RPC requests. +const STATIC_ID: u32 = 1; + +/// JSON-RPC version string. +pub const JSONRPC_VERSION: &str = "2.0"; + +/// This error is returned during a `chainId` call by Geth. +pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; + +/// Engine API method for verifying execution proofs. 
+pub const ENGINE_VERIFY_EXECUTION_PROOF_V1: &str = "engine_verifyExecutionProofV1"; + +/// Engine API method for verifying new payload request headers. +/// +/// This is currently unused but defined for completeness. We may use it in the future. +pub const ENGINE_VERIFY_NEW_PAYLOAD_REQUEST_HEADER_V1: &str = + "engine_verifyNewPayloadRequestHeaderV1"; + +/// Engine API method for requesting proof generation. +pub const ENGINE_REQUEST_PROOFS_V1: &str = "engine_requestProofsV1"; + +/// Default timeout for proof engine requests (1 second per spec). +pub const PROOF_ENGINE_TIMEOUT: Duration = Duration::from_secs(1); + +/// Trait defining the interface for a proof engine. +#[async_trait::async_trait] +pub trait ProofEngine: Send + Sync { + /// Get all proofs for a given new_payload_request_root. + fn get_proofs_by_root(&self, root: &Hash256) -> Vec; + + /// Return all buffer entries that do not yet have sufficient proofs for promotion. + /// + /// `MissingProofInfo.root` is populated with the new-payload request root. + /// The beacon chain layer replaces it with the beacon block root before the + /// sync manager issues `ExecutionProofsByRoot` RPC requests. + fn missing_proofs(&self) -> Vec; + + /// Verify an individual execution proof via RPC. + /// + /// Maps to `engine_verifyExecutionProofV1`. + async fn verify_execution_proof( + &self, + proof: &SignedExecutionProof, + ) -> Result; + + /// Verify that sufficient proofs exist for a new payload request via RPC. + /// + /// Maps to `engine_verifyNewPayloadRequestHeaderV*`. + async fn new_payload( + &self, + header: &NewPayloadRequest<'_, E>, + ) -> Result; + + /// Notify the proof engine of a forkchoice update. + async fn forkchoice_updated( + &self, + forkchoice_state: ForkchoiceState, + ) -> Result; + + /// Request asynchronous proof generation via RPC. + /// + /// Maps to `engine_requestProofsV1`. + /// Returns a ProofGenId to track the generation request.
+ /// Generated proofs are delivered asynchronously via the beacon API endpoint + /// POST /eth/v1/prover/execution_proofs. + async fn request_proofs( + &self, + new_payload_request: NewPayloadRequest<'_, E>, + attributes: ProofAttributes, + ) -> Result; +} + +/// HTTP JSON-RPC implementation of the ProofEngine trait with internal proof storage. +/// +/// This implementation: +/// - Stores ALL unfinalized proofs indexed by new_payload_request_root (unbounded) +/// - Calls out to the execution engine RPC for proof verification +/// - Prunes proofs when finalization events occur +pub struct HttpProofEngine { + /// HTTP client for making requests. + client: Client, + /// URL of the proof engine endpoint. + url: SensitiveUrl, + /// The internal state storing execution proofs in a tree structure and buffer. + state: RwLock, + /// Buffered proofs for request roots not yet seen. + buffered_proofs: RwLock>>, +} + +impl HttpProofEngine { + /// Create a new HTTP proof engine client with internal proof storage. + pub fn new(url: SensitiveUrl, timeout: Option) -> Self { + let client = Client::builder() + .timeout(timeout.unwrap_or(PROOF_ENGINE_TIMEOUT)) + .build() + .expect("Failed to build HTTP client"); + + Self { + client, + url, + state: RwLock::new(State::new()), + buffered_proofs: RwLock::new(HashMap::new()), + } + } + + /// Make a generic JSON-RPC request to the proof engine. + pub async fn rpc_request( + &self, + method: &str, + params: serde_json::Value, + timeout: Duration, + ) -> Result { + let body = JsonRequestBody { + jsonrpc: JSONRPC_VERSION, + method, + params, + id: json!(STATIC_ID), + }; + + let request = self + .client + .post(self.url.expose_full().clone()) + .timeout(timeout) + .header(CONTENT_TYPE, "application/json") + .json(&body); + + // TODO: do we want to support authentication? + // Generate and add a jwt token to the header if auth is defined. 
+ // if let Some(auth) = &self.auth { + // request = request.bearer_auth(auth.generate_token()?); + // }; + + let body: JsonResponseBody = request.send().await?.error_for_status()?.json().await?; + + match (body.result, body.error) { + (result, None) => Ok(serde_json::from_value(result)?), + (_, Some(error)) => Err(ProofEngineError::JsonRpcError { + code: error.code, + message: error.message, + }), + } + } +} + +#[async_trait::async_trait] +impl ProofEngine for HttpProofEngine { + fn get_proofs_by_root(&self, root: &Hash256) -> Vec { + self.state + .read() + .get_proofs(root) + .map(<[SignedExecutionProof]>::to_vec) + .unwrap_or_default() + } + + fn missing_proofs(&self) -> Vec { + self.state.read().missing_proofs() + } + + async fn verify_execution_proof( + &self, + proof: &SignedExecutionProof, + ) -> Result { + if !self + .state + .read() + .contains_request_root(&proof.request_root()) + { + tracing::info!(target: "execution_layer", "Received proof for unknown request root {}, buffering", proof.request_root()); + self.buffered_proofs + .write() + .entry(proof.request_root()) + .or_default() + .push(proof.clone()); + return Ok(ProofStatus::Syncing); + } + + let json_proof: JsonExecutionProofV1 = proof.message.clone().into(); + let params = json!([json_proof]); + + let result = self + .rpc_request( + ENGINE_VERIFY_EXECUTION_PROOF_V1, + params, + PROOF_ENGINE_TIMEOUT, + ) + .await?; + + let status: JsonProofStatusV1 = serde_json::from_value(result)?; + let status: ProofStatus = status.into(); + if status.is_valid() { + // Insert the valid proof into state. + return Ok(self.state.write().insert_proof(proof.clone())?); + } + + Ok(status) + } + + async fn new_payload( + &self, + request: &NewPayloadRequest<'_, E>, + ) -> Result { + // We buffer the request in state for future proof association and return Syncing. + // TODO: Currently we don't support proof verification before payload processing to prevent DoS so it's not possible that proofs are verified yet.
Is this reasonable? + let request: RequestMetadata = request.into(); + let buffered_proofs = self + .buffered_proofs + .write() + .remove(&request.request_root) + .unwrap_or_default(); + self.state.write().buffer_request(request); + + let mut status = PayloadStatusV1Status::Syncing; + for proof in buffered_proofs { + let proof_status = self.verify_execution_proof(&proof).await?; + if proof_status.is_valid() { + status = PayloadStatusV1Status::Valid; + } + } + + Ok(PayloadStatusV1 { + status, + latest_valid_hash: None, + validation_error: None, + }) + } + + async fn forkchoice_updated( + &self, + forkchoice_state: ForkchoiceState, + ) -> Result { + tracing::info!(target: "execution_layer", "Received forkchoice update: head {}, safe {}, finalized {}", forkchoice_state.head_block_hash, forkchoice_state.safe_block_hash, forkchoice_state.finalized_block_hash); + Ok(self.state.write().forkchoice_updated(forkchoice_state)?) + } + + async fn request_proofs( + &self, + new_payload_request: NewPayloadRequest<'_, E>, + proof_attributes: ProofAttributes, + ) -> Result { + match new_payload_request { + NewPayloadRequest::Bellatrix(_) => { + Err(ProofEngineError::ForkNotSupported("Bellatrix".to_string())) + } + NewPayloadRequest::Capella(_) => { + Err(ProofEngineError::ForkNotSupported("Capella".to_string())) + } + NewPayloadRequest::Deneb(_) => { + Err(ProofEngineError::ForkNotSupported("Deneb".to_string())) + } + NewPayloadRequest::Electra(_) => { + Err(ProofEngineError::ForkNotSupported("Electra".to_string())) + } + NewPayloadRequest::Fulu(new_payload_request_fulu) => { + self.request_proofs_v4_fulu(new_payload_request_fulu, proof_attributes) + .await + } + NewPayloadRequest::Gloas(_) => { + Err(ProofEngineError::ForkNotSupported("Gloas".to_string())) + } + } + } +} + +impl HttpProofEngine { + pub async fn request_proofs_v4_fulu( + &self, + new_payload_request_fulu: NewPayloadRequestFulu<'_, E>, + proof_attributes: ProofAttributes, + ) -> Result { + let params = json!([ + 
JsonExecutionPayload::Fulu( + new_payload_request_fulu + .execution_payload + .clone() + .try_into()? + ), + new_payload_request_fulu.versioned_hashes, + new_payload_request_fulu.parent_beacon_block_root, + new_payload_request_fulu + .execution_requests + .get_execution_requests_list(), + proof_attributes + ]); + + let response: TransparentJsonProofGenId = self + .rpc_request(ENGINE_REQUEST_PROOFS_V1, params, PROOF_ENGINE_TIMEOUT) + .await?; + + Ok(response.into()) + } +} diff --git a/beacon_node/execution_layer/src/eip8025/state.rs b/beacon_node/execution_layer/src/eip8025/state.rs new file mode 100644 index 00000000000..0f21a8a96a3 --- /dev/null +++ b/beacon_node/execution_layer/src/eip8025/state.rs @@ -0,0 +1,1685 @@ +use crate::{ + ForkchoiceState, ForkchoiceUpdatedResponse, MissingProofInfo, PayloadStatusV1, + PayloadStatusV1Status, +}; +use crate::{NewPayloadRequest, eip8025::errors::ProofEngineStateError}; +use std::collections::btree_map::Entry; +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::mem; +use tree_hash::TreeHash; +use types::{EthSpec, ExecutionBlockHash, Hash256, SignedExecutionProof}; +use types::{MIN_REQUIRED_EXECUTION_PROOFS, ProofStatus}; + +// TODO: Consider refactoring to use proto-array style state structure for better performance. +// TODO: Add metrics for latency, state size, buffer size, proof counts, etc. +// TODO: If we continue to use HashMaps then consider using ahash or foldhash for better performance (keys are cryptographic digests and as such random). + +#[derive(Debug, Clone)] +pub struct State { + /// The latest fork choice state received that has not yet been marked as valid. + pub latest_fcs: Option, + /// The last fork choice state that was marked as valid. + pub last_valid_fcs: ForkchoiceState, + /// State of the execution proofs tree. + pub tree: TreeState, + /// Buffer of unassociated execution proofs. 
+ pub buffer: RequestBuffer, + /// The minimum number of proofs required for a request to be promotable from buffer to tree. + pub min_required_proofs: usize, +} + +impl State { + /// Create a new State with the specified proof buffer size. + pub fn new() -> Self { + Self { + latest_fcs: None, + last_valid_fcs: ForkchoiceState { + head_block_hash: ExecutionBlockHash::zero(), + safe_block_hash: ExecutionBlockHash::zero(), + finalized_block_hash: ExecutionBlockHash::zero(), + }, + tree: TreeState::default(), + buffer: RequestBuffer::new(), + min_required_proofs: MIN_REQUIRED_EXECUTION_PROOFS, + } + } + + /// Return all buffer entries that do not yet have sufficient proofs for promotion. + /// + /// Only the `buffer` is scanned: by design, every entry in the buffer has not been + /// promoted to the tree, meaning it lacks sufficient proofs. Tree entries are already done. + pub fn missing_proofs(&self) -> Vec { + self.buffer + .proofs + .iter() + .map(|(request_root, payload_request)| MissingProofInfo { + root: *request_root, + existing_proof_types: payload_request + .proofs + .iter() + .map(|p| p.message.proof_type) + .collect(), + }) + .collect() + } + + /// Check if the state contains any proofs associated with the given new payload request root. + pub fn contains_request_root(&self, request_root: &Hash256) -> bool { + self.tree + .request_root_to_block_hash + .contains_key(request_root) + || self.buffer.proofs.contains_key(request_root) + } + + /// Buffer a new payload request for future proof association. 
+ pub fn buffer_request(&mut self, request: RequestMetadata) { + if self + .tree + .request_root_to_block_hash + .contains_key(&request.request_root) + { + tracing::warn!(target: "execution_layer", request_root = ?request.request_root, "Attempting to buffer a request that is already associated with a block hash in the tree - skipping buffer insertion"); + return; + } + + if self.buffer.proofs.contains_key(&request.request_root) { + tracing::debug!(target: "execution_layer", request_root = ?request.request_root, "Request is already buffered - skipping buffer insertion"); + return; + } + + self.buffer.insert(request); + } + + /// Validate and update the latest fork choice state. + pub fn forkchoice_updated( + &mut self, + forkchoice_state: ForkchoiceState, + ) -> Result { + let head = forkchoice_state.head_block_hash; + let safe = forkchoice_state.safe_block_hash; + let finalized = forkchoice_state.finalized_block_hash; + + // When tree is empty, always update last_valid_fcs to track finalized block + // This allows finalized to advance during sync before any blocks are promoted + // TODO: Reconsider this logic - maybe we just always update the finalized block in last_valid_fcs and allow syncing until we have observed the head block hash? 
+ if self.tree.is_empty() && finalized != ExecutionBlockHash::zero() { + // Create a baseline forkchoice state anchored at finalized block + let bootstrap_fcs = ForkchoiceState { + head_block_hash: finalized, + safe_block_hash: finalized, + finalized_block_hash: finalized, + }; + self.last_valid_fcs = bootstrap_fcs; + self.latest_fcs = Some(forkchoice_state); + self.tree.current_canonical_head = finalized; + + tracing::info!(target: "execution_layer", ?finalized, "Updated last_valid_fcs to finalized block (tree empty)"); + return Ok(self.forkchoice_response_syncing()); + } + + let new_safe_zero = safe.is_zero(); + let new_finalized_zero = finalized.is_zero(); + let safe = if !new_safe_zero { + safe + } else { + self.last_valid_fcs.safe_block_hash + }; + let finalized = if !new_finalized_zero { + finalized + } else { + self.last_valid_fcs.finalized_block_hash + }; + + // If we have not observed the head block hash yet, we cannot validate the forkchoice + if !self.tree.proofs_by_block_hash.contains_key(&head) { + tracing::debug!(target: "execution_layer", ?head, "Forkchoice update head not found in tree state, marking as syncing"); + self.latest_fcs = Some(forkchoice_state); + return Ok(self.forkchoice_response_syncing()); + } + + // Validate that the safe block is in the tree (this is a quick sanity check so we don't have to traverse the tree) + if !new_safe_zero && !self.tree.proofs_by_block_hash.contains_key(&safe) { + tracing::warn!(target: "execution_layer", ?safe, "Forkchoice update safe block hash not found in tree state - invalid forkchoice"); + return Ok(self.forkchoice_response_invalid()); + } + + // Validate that the finalized block is in the tree (this is a quick sanity check so we don't have to traverse the tree) + if !new_finalized_zero && !self.tree.proofs_by_block_hash.contains_key(&finalized) { + tracing::warn!(target: "execution_layer", ?finalized, "Forkchoice update finalized block hash not found in tree state - invalid forkchoice"); + return 
Ok(self.forkchoice_response_invalid()); + } + + // Validate the ancestry chain: head -> safe -> finalized + if !self.is_descendant(safe, head) { + tracing::error!(target: "execution_layer", ?head, ?safe, "Forkchoice update is invalid - safe block is not an ancestor of head"); + return Ok(self.forkchoice_response_invalid()); + } + + if !new_safe_zero && !self.is_descendant(finalized, safe) { + tracing::error!(target: "execution_layer", ?safe, ?finalized, "Forkchoice update is invalid - finalized block is not an ancestor of safe"); + return Ok(self.forkchoice_response_invalid()); + } + + if !self.is_descendant(self.last_valid_fcs.finalized_block_hash, finalized) { + tracing::error!(target: "execution_layer", ?head, ?safe, ?finalized, "Forkchoice update is invalid - new finalized block is not a descendant of last valid finalized block"); + return Ok(self.forkchoice_response_invalid()); + } + + // Determine if we need to update the canonical head + let update_canonical_head = if head == self.tree.current_canonical_head { + tracing::debug!(target: "execution_layer", ?head, "Forkchoice update head matches current canonical head"); + false + } else if self.is_descendant(head, self.tree.current_canonical_head) { + tracing::debug!(target: "execution_layer", ?head, "Forkchoice update head is a ancestor of current canonical head - skip head update"); + false + } else { + tracing::debug!(target: "execution_layer", ?head, "Forkchoice update head is on a fork, updating canonical head pending validation"); + true + }; + + if update_canonical_head { + tracing::info!(target: "execution_layer", ?head, "Updating canonical head to new forkchoice head"); + self.tree.current_canonical_head = head; + } + + let prune_finalized = + !new_finalized_zero && (self.last_valid_fcs.finalized_block_hash != finalized); + + if prune_finalized { + self.prune_finalized_sidechains(finalized)?; + } + + self.last_valid_fcs = ForkchoiceState { + head_block_hash: head, + safe_block_hash: safe, + 
finalized_block_hash: finalized, + }; + Ok(self.forkchoice_response_valid()) + } + + /// Get all execution proofs associated with the given new payload request root. + pub fn get_proofs(&self, root: &Hash256) -> Option<&[SignedExecutionProof]> { + self.tree + .request_root_to_block_hash + .get(root) + .and_then(|h| self.tree.proofs_by_block_hash.get(h)) + .map(|p| p.proofs.as_slice()) + .or_else(|| self.buffer.proofs.get(root).map(|b| b.proofs.as_slice())) + .filter(|slice| !slice.is_empty()) + } + + /// Insert a new execution proof into state. + pub fn insert_proof( + &mut self, + proof: SignedExecutionProof, + ) -> Result { + let request_root = proof.request_root(); + + // Insert into the tree if associated block hash is found. + if let Some(block_hash) = self.tree.request_root_to_block_hash.get(&request_root) { + // Insert into the tree associated with the block hash. + let proofs = self.tree.proofs_by_block_hash.get_mut(block_hash).ok_or( + ProofEngineStateError::BlockHashNotFoundForRequestRoot { + request_root, + block_hash: *block_hash, + }, + )?; + proofs.proofs.push(proof); + return Ok(ProofStatus::Accepted); + } + + // Insert into the buffer if associated request root is found. + if let Some(buffered_request) = self.buffer.proofs.get_mut(&request_root) { + buffered_request.proofs.push(proof); + } else { + // We only process proofs that are associated with a request root from an observed beacon block. + return Err(ProofEngineStateError::ProofRequestRootNotSeen(request_root)); + }; + + if self.can_promote(&request_root)? + && let Some(latest_canonical_head) = self.promote_buffered_requests(request_root)? + { + tracing::info!(target: "execution_layer", ?latest_canonical_head, "Updated canonical head after promoting buffered proofs"); + return Ok(ProofStatus::Valid); + } + + Ok(ProofStatus::Accepted) + } + + /// Promote buffered requests that can now be associated with the tree state. + /// + /// Returns the latest canonical head if it was updated. 
+    fn promote_buffered_requests(
+        &mut self,
+        request_root: Hash256,
+    ) -> Result<Option<ExecutionBlockHash>, ProofEngineStateError> {
+        let (block_hash, updated_head) = self.promote_buffered_request(request_root)?;
+        let mut latest_head = if updated_head {
+            Some(self.tree.current_canonical_head)
+        } else {
+            None
+        };
+
+        // Promote any child requests that can now be associated and that have sufficient proofs.
+        // NOTE(review): this check uses the global MIN_REQUIRED_EXECUTION_PROOFS while
+        // `can_promote` uses `self.min_required_proofs` — confirm this divergence is intended.
+        let mut queue = vec![block_hash];
+        while let Some(parent_hash) = queue.pop() {
+            let promotable_roots: Vec<Hash256> = self
+                .buffer
+                .proofs
+                .iter()
+                .filter(|(_, buffered)| {
+                    buffered.metadata.parent_hash == parent_hash
+                        && buffered.proofs.len() >= MIN_REQUIRED_EXECUTION_PROOFS
+                })
+                .map(|(root, _)| *root)
+                .collect();
+
+            for request_root in promotable_roots {
+                let (block_hash, updated_head) = self.promote_buffered_request(request_root)?;
+                if updated_head {
+                    latest_head = Some(self.tree.current_canonical_head);
+                }
+                queue.push(block_hash);
+            }
+        }
+
+        Ok(latest_head)
+    }
+
+    /// Promote a buffered request into the tree state.
+    ///
+    /// Returns the block hash and whether the canonical head was updated.
+    fn promote_buffered_request(
+        &mut self,
+        request_root: Hash256,
+    ) -> Result<(ExecutionBlockHash, bool), ProofEngineStateError> {
+        let buffered_request = self
+            .buffer
+            .proofs
+            .remove(&request_root)
+            .ok_or(ProofEngineStateError::BufferedRequestNotFound(request_root))?;
+        let RequestMetadata {
+            block_hash,
+            parent_hash,
+            ..
+        } = buffered_request.metadata;
+
+        self.tree
+            .block_number_to_block_hash
+            .entry(buffered_request.metadata.block_number)
+            .or_default()
+            .insert(block_hash);
+
+        self.tree
+            .parent_to_children
+            .entry(parent_hash)
+            .or_default()
+            .insert(block_hash);
+        self.tree
+            .proofs_by_block_hash
+            .insert(block_hash, buffered_request);
+        self.tree
+            .request_root_to_block_hash
+            .insert(request_root, block_hash);
+
+        // If the current canonical head is the parent of the promoted block, advance the canonical head to the promoted block.
+ if self.tree.current_canonical_head == parent_hash { + self.tree.current_canonical_head = block_hash; + return Ok((block_hash, true)); + } + + // If the promoted block is equal to the current canonical head, we return the block hash and return true to indicate the tree head has been updated. + if self.tree.current_canonical_head == block_hash { + return Ok((block_hash, true)); + } + + Ok((block_hash, false)) + } + + fn forkchoice_response_valid(&self) -> ForkchoiceUpdatedResponse { + ForkchoiceUpdatedResponse { + payload_status: PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: self.tree.current_canonical_head.into(), + validation_error: None, + }, + payload_id: None, + } + } + + fn forkchoice_response_syncing(&self) -> ForkchoiceUpdatedResponse { + ForkchoiceUpdatedResponse { + payload_status: PayloadStatusV1 { + status: PayloadStatusV1Status::Syncing, + latest_valid_hash: None, + validation_error: None, + }, + payload_id: None, + } + } + + fn forkchoice_response_invalid(&self) -> ForkchoiceUpdatedResponse { + ForkchoiceUpdatedResponse { + payload_status: PayloadStatusV1 { + status: PayloadStatusV1Status::Invalid, + latest_valid_hash: self.tree.current_canonical_head.into(), + validation_error: Some("invalid forkchoice state".to_string()), + }, + payload_id: None, + } + } + + /// Check if a block can be promoted from buffer to tree. + /// + /// A block can be promoted if: + /// 1. Its parent is already in the tree (normal case), OR + /// 2. 
It's a finalized block:
+    ///    - Block hash matches last_valid_fcs.finalized_block_hash
+    fn can_promote(&self, request: &Hash256) -> Result<bool, ProofEngineStateError> {
+        let request = self
+            .buffer
+            .proofs
+            .get(request)
+            .ok_or(ProofEngineStateError::BufferedRequestNotFound(*request))?;
+
+        if request.proofs.len() < self.min_required_proofs {
+            return Ok(false);
+        }
+
+        // Normal case: parent already in tree
+        if self
+            .tree
+            .proofs_by_block_hash
+            .contains_key(&request.metadata.parent_hash)
+        {
+            return Ok(true);
+        }
+
+        // Bootstrap case: allow finalized block when starting empty tree
+        if request.metadata.block_hash == self.tree.current_canonical_head
+            || request.metadata.parent_hash == self.tree.current_canonical_head
+        {
+            tracing::debug!(target: "execution_layer", block_hash = ?request.metadata.block_hash, "Allowing promotion of finalized block during bootstrap");
+            return Ok(true);
+        }
+
+        Ok(false)
+    }
+
+    /// Check if `target` is a descendant of `ancestor` in the tree.
+    fn is_descendant(&self, ancestor: ExecutionBlockHash, target: ExecutionBlockHash) -> bool {
+        let mut current = target;
+
+        loop {
+            if current == ancestor {
+                return true;
+            }
+
+            let Some(proofs) = self.tree.proofs_by_block_hash.get(&current) else {
+                return false;
+            };
+
+            current = proofs.metadata.parent_hash;
+        }
+    }
+
+    fn block_number_for_hash(&self, block_hash: ExecutionBlockHash) -> Option<u64> {
+        self.tree
+            .proofs_by_block_hash
+            .get(&block_hash)
+            .map(|p| p.metadata.block_number)
+    }
+
+    // TODO: We should also prune buffered requests that are associated with sidechains that have been removed using parent to children mapping.
+    fn prune_finalized_sidechains(
+        &mut self,
+        finalized_hash: ExecutionBlockHash,
+    ) -> Result<(), ProofEngineStateError> {
+        // Get the finalized block number.
+        // TODO: Maybe this should just return SYNCING instead.
+ let finalized_number = self + .block_number_for_hash(finalized_hash) + .ok_or(ProofEngineStateError::BlockNumberNotFound(finalized_hash))?; + + // Remove buffered proofs below or at the finalized block number. + self.buffer.proofs.retain(|_root, entry| { + (entry.metadata.block_number > finalized_number) + || (entry.metadata.block_hash == finalized_hash) + }); + + // Remove all blocks with a block number below the finalized number. + let mut block_hashes_to_remove = self + .tree + .block_number_to_block_hash + .split_off(&finalized_number); + mem::swap( + &mut block_hashes_to_remove, + &mut self.tree.block_number_to_block_hash, + ); + + for hashes in block_hashes_to_remove.into_values().flatten() { + // Remove all block hash from state. We ignore returned children as they will have been + // removed in this loop already. Any children on sidechains with a higher block number will be + // removed in the next step. + let _ = self.remove_request(hashes)?; + } + + // Remove all block hashes at the finalized block number except the finalized hash. + let mut to_remove: Vec<_> = if let Some(hashes) = self + .tree + .block_number_to_block_hash + .get_mut(&finalized_number) + { + let mut to_remove = mem::replace(hashes, HashSet::from([finalized_hash])); + to_remove.remove(&finalized_hash); + to_remove.into_iter().collect() + } else { + return Ok(()); + }; + + // Recursively remove children of the removed block hashes. + while let Some(block_hash) = to_remove.pop() { + if let Some(children) = self.remove_request(block_hash)? { + to_remove.extend(children); + } + } + + Ok(()) + } + + /// Remove a request and its associated proofs from the tree state. 
+ fn remove_request( + &mut self, + block_hash: ExecutionBlockHash, + ) -> Result>, ProofEngineStateError> { + // TODO: Update to proper error handling + let entry = self + .tree + .proofs_by_block_hash + .remove(&block_hash) + .ok_or(ProofEngineStateError::BlockNumberNotFound(block_hash))?; + self.tree + .request_root_to_block_hash + .remove(&entry.metadata.request_root); + let children = self.tree.parent_to_children.remove(&block_hash); + if let Entry::Occupied(mut occ) = self + .tree + .block_number_to_block_hash + .entry(entry.metadata.block_number) + { + occ.get_mut().remove(&block_hash); + if occ.get().is_empty() { + occ.remove(); + } + } + Ok(children) + } + + /// Create a new State with the specified minimum required proofs for promotion. + #[cfg(test)] + pub fn with_min_required_proofs(min_required_proofs: usize) -> Self { + Self { + latest_fcs: None, + last_valid_fcs: ForkchoiceState { + head_block_hash: ExecutionBlockHash::zero(), + safe_block_hash: ExecutionBlockHash::zero(), + finalized_block_hash: ExecutionBlockHash::zero(), + }, + tree: TreeState::default(), + buffer: RequestBuffer::new(), + min_required_proofs, + } + } +} + +/// Keeps track of execution proofs in a tree structure. +/// +/// - All proofs are associated with EL blocks connected to the current canonical chain. +#[derive(Debug, Default, Clone)] +pub struct TreeState { + /// Map of execution block hash to execution proofs. + pub proofs_by_block_hash: HashMap, + /// Map of new payload request root to execution block hash. + pub request_root_to_block_hash: HashMap, + /// Map of parent block hash to child block hashes. + pub parent_to_children: HashMap>, + /// Map of block number to block hashes at that height. + pub block_number_to_block_hash: BTreeMap>, + /// The current canonical head block hash. 
+    pub current_canonical_head: ExecutionBlockHash,
+}
+
+impl TreeState {
+    /// Check if the tree is empty (no blocks inserted yet)
+    pub fn is_empty(&self) -> bool {
+        self.proofs_by_block_hash.is_empty()
+    }
+}
+
+/// A buffer of new payload requests and their associated execution proofs.
+#[derive(Debug, Clone)]
+pub struct RequestBuffer {
+    /// Map of new payload request root to execution proofs.
+    pub proofs: HashMap<Hash256, PayloadRequest>,
+}
+
+impl RequestBuffer {
+    /// Insert a new payload request into the buffer.
+    ///
+    /// This will not overwrite existing requests.
+    pub fn insert(&mut self, request: RequestMetadata) {
+        self.proofs
+            .entry(request.request_root)
+            .or_insert_with(|| PayloadRequest::new(request));
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct PayloadRequest {
+    /// The new payload request root associated with these proofs.
+    pub metadata: RequestMetadata,
+    /// Collection of signed execution proofs.
+    pub proofs: Vec<SignedExecutionProof>,
+}
+
+impl PayloadRequest {
+    pub fn new(metadata: RequestMetadata) -> Self {
+        Self {
+            metadata,
+            proofs: Vec::new(),
+        }
+    }
+}
+
+#[derive(Debug, Default, Clone)]
+pub struct RequestMetadata {
+    /// The new payload request root associated with the request.
+    pub request_root: Hash256,
+    /// The execution block hash associated with the new payload request.
+    pub block_hash: ExecutionBlockHash,
+    /// The parent block hash of the new payload request.
+    pub parent_hash: ExecutionBlockHash,
+    /// The block number of the new payload request.
+    pub block_number: u64,
+}
+
+impl RequestBuffer {
+    /// Create a new, empty RequestBuffer.
+ pub fn new() -> Self { + Self { + proofs: Default::default(), + } + } +} + +impl From<&NewPayloadRequest<'_, E>> for RequestMetadata { + fn from(request: &NewPayloadRequest<'_, E>) -> Self { + Self { + request_root: request.clone().tree_hash_root(), + block_hash: request.block_hash(), + parent_hash: request.parent_hash(), + block_number: request.block_number(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bls::SignatureBytes; + use ssz_types::VariableList; + use types::{ExecutionProof, PublicInput}; + + fn test_hash(byte: u8) -> Hash256 { + Hash256::repeat_byte(byte) + } + + fn test_exec_hash(byte: u8) -> ExecutionBlockHash { + ExecutionBlockHash::repeat_byte(byte) + } + + fn create_request_metadata( + request_root: Hash256, + block_hash: ExecutionBlockHash, + parent_hash: ExecutionBlockHash, + block_number: u64, + ) -> RequestMetadata { + RequestMetadata { + request_root, + block_hash, + parent_hash, + block_number, + } + } + + fn create_signed_proof(request_root: Hash256, validator_index: u64) -> SignedExecutionProof { + SignedExecutionProof { + message: ExecutionProof { + proof_data: VariableList::new(vec![0xaa, 0xbb, 0xcc]).unwrap(), + proof_type: 1, + public_input: PublicInput { + new_payload_request_root: request_root, + }, + }, + validator_index, + signature: SignatureBytes::empty(), + } + } + + fn create_forkchoice_state( + head: ExecutionBlockHash, + safe: ExecutionBlockHash, + finalized: ExecutionBlockHash, + ) -> ForkchoiceState { + ForkchoiceState { + head_block_hash: head, + safe_block_hash: safe, + finalized_block_hash: finalized, + } + } + + /// Test data provider for state tests + /// + /// Generates payload requests, proofs, and hashes. + struct TestStateFixture { + /// Generated block data + /// blocks[0] = canonical chain + /// blocks[1] = fork 0 + /// blocks[2] = fork 1 + /// etc. 
+ blocks: Vec>, + } + + impl TestStateFixture { + /// Get the genesis fcs + /// + /// Defined as the first block in the canonical chain + fn genesis_fcs(&self) -> ForkchoiceState { + let finalized_block = &self.blocks[0][0]; + create_forkchoice_state( + finalized_block.metadata.block_hash, + finalized_block.metadata.block_hash, + finalized_block.metadata.block_hash, + ) + } + + /// Get canonical chain block data + fn canonical(&self, index: usize) -> &PayloadRequest { + &self.blocks[0][index] + } + + /// Get fork block data + fn fork(&self, fork_id: usize, index: usize) -> &PayloadRequest { + &self.blocks[fork_id + 1][index] + } + + /// Get canonical block hash + fn canonical_block_hash(&self, index: usize) -> ExecutionBlockHash { + self.canonical(index).metadata.block_hash + } + + /// Get fork block hash + fn fork_block_hash(&self, fork_id: usize, index: usize) -> ExecutionBlockHash { + self.fork(fork_id, index).metadata.block_hash + } + + /// Get canonical request root + fn canonical_request_root(&self, index: usize) -> Hash256 { + self.canonical(index).metadata.request_root + } + + /// Get canonical metadata + fn canonical_metadata(&self, index: usize) -> RequestMetadata { + self.canonical(index).metadata.clone() + } + + /// Get fork metadata + fn fork_metadata(&self, fork_id: usize, index: usize) -> RequestMetadata { + self.fork(fork_id, index).metadata.clone() + } + + /// Get canonical proofs + fn canonical_proofs(&self, index: usize) -> &[SignedExecutionProof] { + &self.canonical(index).proofs + } + + /// Get fork proofs + fn fork_proofs(&self, fork_id: usize, index: usize) -> &[SignedExecutionProof] { + &self.fork(fork_id, index).proofs + } + + fn bootstrap_canonical(&self, state: &mut State) -> anyhow::Result<()> { + state.forkchoice_updated(self.genesis_fcs())?; + self.insert_canonical(state, None)?; + Ok(()) + } + + /// Insert the canonical chain into state (buffer + add proofs) + fn insert_canonical( + &self, + state: &mut State, + block_index: Option, + 
) -> anyhow::Result<()> { + let range = match block_index { + Some(i) => i..=i, + None => 0..=self.blocks[0].len() - 1, + }; + for index in range { + state.buffer_request(self.canonical_metadata(index)); + for proof in self.canonical_proofs(index) { + let _ = state.insert_proof(proof.clone())?; + } + } + Ok(()) + } + + /// Insert a fork into state (buffer + add proofs) + fn insert_fork( + &self, + state: &mut State, + fork_id: usize, + block_index: Option, + ) -> anyhow::Result<()> { + let range = match block_index { + Some(i) => i..=i, + None => 0..=self.blocks[fork_id + 1].len() - 1, + }; + for index in range { + state.buffer_request(self.fork_metadata(fork_id, index)); + for proof in self.fork_proofs(fork_id, index) { + let _ = state.insert_proof(proof.clone())?; + } + } + + Ok(()) + } + } + + /// Builder for test state fixture + struct TestStateFixtureBuilder { + /// Number of blocks in canonical chain + canonical_chain_length: usize, + + /// Fork configurations (branch_point, fork_length, proofs_per_block) + forks: Vec<(usize, usize, Option)>, + + /// Default proofs per block + proofs_per_block: usize, + + /// Starting block number + starting_block_number: u64, + } + + impl TestStateFixtureBuilder { + /// Create new builder + fn new() -> Self { + Self { + canonical_chain_length: 0, + forks: Vec::new(), + proofs_per_block: MIN_REQUIRED_EXECUTION_PROOFS, + starting_block_number: 0, + } + } + + /// Create a simple chain with 3 blocks in the canonical chain + fn simple_chain() -> Self { + Self::new().with_canonical_chain(3) + } + + /// Set default proofs per block + fn with_proofs_per_block(mut self, proofs: usize) -> Self { + self.proofs_per_block = proofs; + self + } + + /// Set canonical chain length + fn with_canonical_chain(mut self, length: usize) -> Self { + self.canonical_chain_length = length; + self + } + + /// Add a fork (uses default proofs per block) + fn with_fork( + mut self, + branch_point: usize, + fork_length: usize, + proofs_per_block: Option, + 
) -> Self { + self.forks + .push((branch_point, fork_length, proofs_per_block)); + self + } + + /// Build the fixture + fn build(self) -> TestStateFixture { + let mut fixture = TestStateFixture { + blocks: vec![Vec::new()], // Start with empty canonical chain + }; + + // Generate canonical chain (chain_id = 0) + for i in 0..self.canonical_chain_length { + let parent_hash = if i == 0 { + test_exec_hash(0xff) // Genesis parent + } else { + fixture.blocks[0][i - 1].metadata.block_hash + }; + + let block_number = self.starting_block_number + i as u64; + let block_data = self.generate_block( + 0, // chain_id + i, // block index within chain + parent_hash, + block_number, + self.proofs_per_block, + ); + + fixture.blocks[0].push(block_data); + } + + // Generate forks + for (fork_idx, (branch_point, fork_length, custom_proofs)) in + self.forks.iter().enumerate() + { + let proof_count = custom_proofs.unwrap_or(self.proofs_per_block); + let mut fork_blocks: Vec = Vec::new(); + + for i in 0..*fork_length { + let parent_hash = if i == 0 { + // First fork block connects to canonical chain + fixture.blocks[0][*branch_point].metadata.block_hash + } else { + // Subsequent blocks connect to previous fork block + fork_blocks[i - 1].metadata.block_hash + }; + + let block_number = + self.starting_block_number + *branch_point as u64 + i as u64 + 1; + + let block_data = self.generate_block( + fork_idx + 1, // chain_id (fork 0 = chain 1, fork 1 = chain 2, etc.) 
+ i, + parent_hash, + block_number, + proof_count, + ); + + fork_blocks.push(block_data); + } + + fixture.blocks.push(fork_blocks); + } + + fixture + } + + /// Generate data for a single block + fn generate_block( + &self, + chain_id: usize, + block_index: usize, + parent_hash: ExecutionBlockHash, + block_number: u64, + proof_count: usize, + ) -> PayloadRequest { + // Create unique hashes based on chain_id and block_index + let hash_seed = (chain_id * 1000 + block_index) % 256; + let block_hash = test_exec_hash(hash_seed as u8); + let request_root = test_hash(((hash_seed + 0x10) % 256) as u8); + + let metadata = + create_request_metadata(request_root, block_hash, parent_hash, block_number); + + // Generate proofs + let mut proofs = Vec::new(); + for i in 0..proof_count { + proofs.push(create_signed_proof( + request_root, + request_root.0[0] as u64 + i as u64, + )); + } + + PayloadRequest { metadata, proofs } + } + } + + #[test] + fn test_buffer_request_new() { + let fixture = TestStateFixtureBuilder::new() + .with_canonical_chain(1) + .build(); + + let request = fixture.canonical(0); + + let mut state = State::new(); + state.buffer_request(request.metadata.clone()); + + assert_eq!( + state.buffer.proofs.len(), + 1, + "buffer should contain exactly one request" + ); + assert!( + state + .buffer + .proofs + .contains_key(&request.metadata.request_root), + "buffer should contain the request root" + ); + let buffered = state + .buffer + .proofs + .get(&request.metadata.request_root) + .expect("buffered request should exist"); + assert_eq!( + buffered.metadata.block_hash, request.metadata.block_hash, + "buffered request should have correct block hash" + ); + assert_eq!( + buffered.proofs.len(), + 0, + "newly buffered request should have no proofs" + ); + } + + #[test] + fn test_buffer_request_preserves_proofs_on_duplicate() -> anyhow::Result<()> { + let fixture = TestStateFixtureBuilder::new() + .with_proofs_per_block(4) + .with_canonical_chain(1) + .build(); + let mut 
state = State::with_min_required_proofs(3); + + // Buffer request + let request = fixture.canonical(0); + state.buffer_request(request.metadata.clone()); + + // Add multiple proofs + for i in 0..2 { + state.insert_proof(request.proofs[i].clone())?; + } + + // Verify proofs exist + let proofs_before = state + .buffer + .proofs + .get(&request.metadata.request_root) + .expect("request should be buffered") + .proofs + .len(); + assert_eq!( + proofs_before, 2, + "should have 2 proofs before re-buffer attempt" + ); + + // Attempt to buffer again + state.buffer_request(request.metadata.clone()); + + // Verify proofs preserved + assert_eq!( + state.buffer.proofs.len(), + 1, + "buffer should still contain exactly one request" + ); + let proofs_after = state + .buffer + .proofs + .get(&request.metadata.request_root) + .expect("request should still be buffered") + .proofs + .len(); + assert_eq!( + proofs_after, 2, + "all proofs should be preserved after duplicate buffer attempt" + ); + + Ok(()) + } + + #[test] + fn test_buffer_request_skips_if_promoted_exists() -> anyhow::Result<()> { + let fixture = TestStateFixtureBuilder::simple_chain().build(); + let mut state = State::new(); + fixture.bootstrap_canonical(&mut state)?; + + let request = fixture.canonical(2); + + // Assert promoted + assert!( + state + .tree + .proofs_by_block_hash + .contains_key(&request.metadata.block_hash), + "block should be promoted to tree" + ); + assert!( + !state + .buffer + .proofs + .contains_key(&request.metadata.request_root), + "block should be removed from buffer after promotion" + ); + + // Try buffer again + state.buffer_request(request.metadata.clone()); + + // Verify it stays in tree and is not re-added to buffer + assert!( + state + .tree + .proofs_by_block_hash + .contains_key(&request.metadata.block_hash), + "block should remain in tree" + ); + assert!( + !state + .buffer + .proofs + .contains_key(&request.metadata.request_root), + "block should not be added back to buffer" + ); + + 
Ok(()) + } + + #[test] + fn test_insert_proof_unknown_request_root() { + let fixture = TestStateFixtureBuilder::new() + .with_canonical_chain(1) + .build(); + let mut state = State::new(); + + let request = fixture.canonical(0); + let result = state.insert_proof(request.proofs[0].clone()); + + assert!( + result.is_err(), + "inserting proof for unknown request root should return error" + ); + match result { + Err(ProofEngineStateError::ProofRequestRootNotSeen(root)) => { + assert_eq!( + root, request.metadata.request_root, + "error should contain the unknown root" + ); + } + _ => panic!("expected ProofRequestRootNotSeen error"), + } + } + + #[test] + fn test_promotion() -> anyhow::Result<()> { + let fixture = TestStateFixtureBuilder::simple_chain() + .with_proofs_per_block(4) + .with_fork(1, 1, None) + .build(); + let mut state = State::with_min_required_proofs(4); + + let request = fixture.canonical(0); + state.forkchoice_updated(fixture.genesis_fcs())?; + state.buffer_request(request.metadata.clone()); + for i in 0..request.proofs.len() - 1 { + assert_eq!( + state + .insert_proof(request.proofs[i].clone()) + .expect("proof insertion should succeed"), + ProofStatus::Accepted, + "proof insertion should be accepted before reaching threshold" + ); + } + + // Verify no promotion yet + assert!( + state + .buffer + .proofs + .contains_key(&request.metadata.request_root), + "request should still be in buffer before reaching proof threshold" + ); + assert!( + !state + .tree + .proofs_by_block_hash + .contains_key(&request.metadata.block_hash), + "block should not be in tree before reaching proof threshold" + ); + + // Insert final proof to trigger promotion + assert_eq!( + state + .insert_proof(request.proofs[request.proofs.len() - 1].clone()) + .expect("proof insertion should succeed"), + ProofStatus::Valid + ); + + // Verify promotion occurred + assert!( + !state + .buffer + .proofs + .contains_key(&request.metadata.request_root), + "promoted request should be removed 
from buffer" + ); + assert!( + state + .tree + .proofs_by_block_hash + .contains_key(&request.metadata.block_hash), + "promoted request should be added to tree" + ); + assert!( + state + .tree + .request_root_to_block_hash + .contains_key(&request.metadata.request_root), + "request root mapping should be created" + ); + assert_eq!( + state.tree.current_canonical_head, request.metadata.block_hash, + "canonical head should be updated to child of previous head" + ); + + // Verify parent-child relationship + let children = state + .tree + .parent_to_children + .get(&request.metadata.parent_hash) + .expect("parent should have children"); + assert!( + children.contains(&request.metadata.block_hash), + "parent should reference child in parent_to_children map" + ); + + // Verify block number mapping + let blocks_at_height = state + .tree + .block_number_to_block_hash + .get(&0) + .expect("height 0 should exist"); + assert!( + blocks_at_height.contains(&request.metadata.block_hash), + "block should be in block_number_to_block_hash map" + ); + + // Now insert canonical block 2 with all proof - there should be no promotion yet as block 1 is not in the tree + fixture.insert_canonical(&mut state, Some(2))?; + + // Verify block 2 is still in buffer + let request2 = fixture.canonical(2); + assert!( + state + .buffer + .proofs + .contains_key(&request2.metadata.request_root), + "block 2 should remain in buffer as parent is not in tree" + ); + + // Now insert block 1 insert the buffer and this should cascade promote block 1 and block 2 and update the canonical head to block 2 + fixture.insert_canonical(&mut state, Some(1))?; + + // Verify block 1 promoted + let request1 = fixture.canonical(1); + assert!( + !state + .buffer + .proofs + .contains_key(&request1.metadata.request_root), + "block 1 should be promoted from buffer" + ); + assert!( + state + .tree + .proofs_by_block_hash + .contains_key(&request1.metadata.block_hash), + "block 1 should be in tree" + ); + + // Verify block 2 
promoted + assert!( + !state + .buffer + .proofs + .contains_key(&request2.metadata.request_root), + "block 2 should be promoted from buffer" + ); + assert!( + state + .tree + .proofs_by_block_hash + .contains_key(&request2.metadata.block_hash), + "block 2 should be in tree" + ); + + // Verify canonical head updated to block 2 + assert_eq!( + state.tree.current_canonical_head, request2.metadata.block_hash, + "canonical head should be updated to block 2" + ); + + // Now lets insert the fork into the tree and assert its promoted but does not affect the canonical head + fixture.insert_fork(&mut state, 0, None)?; + + // Verify fork block promoted + let fork_request = fixture.fork(0, 0); + assert!( + !state + .buffer + .proofs + .contains_key(&fork_request.metadata.request_root), + "fork block should be promoted from buffer" + ); + assert!( + state + .tree + .proofs_by_block_hash + .contains_key(&fork_request.metadata.block_hash), + "fork block should be in tree" + ); + assert_eq!( + state.tree.current_canonical_head, request2.metadata.block_hash, + "canonical head should remain at block 2 after fork promotion" + ); + + Ok(()) + } + + #[test] + fn test_forkchoice_updated_head_not_in_tree() -> anyhow::Result<()> { + let mut state = State::new(); + let fixture = TestStateFixtureBuilder::simple_chain().build(); + + // Bootstrap and insert canonical chain + fixture.bootstrap_canonical(&mut state)?; + + // Update forkchoice with unknown head + let finalized_hash = fixture.canonical_block_hash(0); + let safe_hash = fixture.canonical_block_hash(0); + let unknown_head_hash = test_exec_hash(0xee); + let fcs = create_forkchoice_state(unknown_head_hash, safe_hash, finalized_hash); + + // Perform forkchoice update + let response = state.forkchoice_updated(fcs)?; + + assert_eq!( + response.payload_status.status, + PayloadStatusV1Status::Syncing, + "forkchoice update with unknown head should return SYNCING" + ); + + Ok(()) + } + + #[test] + fn test_forkchoice_invalid_ancestry_chain() 
-> anyhow::Result<()> { + let mut state = State::new(); + let fixture = TestStateFixtureBuilder::simple_chain() + .with_fork(1, 1, None) + .build(); + + // Bootstrap and insert canonical chain + fixture.bootstrap_canonical(&mut state)?; + + // Create a forkchoice state where the safe is not an ancestor of head and is not in the tree + let head_hash = fixture.canonical_block_hash(2); + let finalized_hash = fixture.canonical_block_hash(0); + let unknown_safe_hash = test_exec_hash(0xee); + let fcs = create_forkchoice_state(head_hash, unknown_safe_hash, finalized_hash); + + // Perform forkchoice update + let response = state.forkchoice_updated(fcs)?; + + // Verify INVALID response + assert_eq!( + response.payload_status.status, + PayloadStatusV1Status::Invalid, + "forkchoice update with invalid ancestry should return INVALID" + ); + + // Create a forkchoice state where the finalized is not an ancestor of safe and is not in the tree + let safe_hash = fixture.canonical_block_hash(1); + let unknown_finalized_hash = test_exec_hash(0xee); + let fcs = create_forkchoice_state(head_hash, safe_hash, unknown_finalized_hash); + + // Perform forkchoice update + let response = state.forkchoice_updated(fcs)?; + + // Verify INVALID response + assert_eq!( + response.payload_status.status, + PayloadStatusV1Status::Invalid, + "forkchoice update with invalid ancestry should return INVALID" + ); + + // Create a forkchoice state where safe is not an ancestor of head but is in the tree + let unknown_safe_hash = fixture.fork_block_hash(0, 0); + let fcs = create_forkchoice_state(head_hash, unknown_safe_hash, finalized_hash); + + // Perform forkchoice update + let response = state.forkchoice_updated(fcs)?; + + // Verify INVALID response + assert_eq!( + response.payload_status.status, + PayloadStatusV1Status::Invalid, + "forkchoice update with invalid ancestry should return INVALID" + ); + + Ok(()) + } + + #[test] + fn test_valid_forkchoice_update_with_new_fork_head() -> anyhow::Result<()> { + 
let fixture = TestStateFixtureBuilder::simple_chain() + .with_fork(1, 1, None) + .build(); + let mut state = State::new(); + + // Bootstrap and insert canonical chain + fixture.bootstrap_canonical(&mut state)?; + + // Extract canonical block hashes + let block_0_hash = fixture.canonical_block_hash(0); + let block_1_hash = fixture.canonical_block_hash(1); + let block_2_hash = fixture.canonical_block_hash(2); + + // Assert that the tree canonical head is block 2 + assert_eq!( + state.tree.current_canonical_head, block_2_hash, + "canonical head should be block 2" + ); + + // Create and update forkchoice state pointing to block 1 as head and block 0 as safe/finalized + let fcs = create_forkchoice_state(block_1_hash, block_0_hash, block_0_hash); + let response = state.forkchoice_updated(fcs)?; + + // Assert that the response is VALID and the canonical head remains at block 2 + assert_eq!( + response.payload_status.status, + PayloadStatusV1Status::Valid, + "forkchoice update should return VALID" + ); + assert_eq!( + state.tree.current_canonical_head, block_2_hash, + "canonical head should not change when updating to ancestor" + ); + + // Create and update forkchoice state pointing to block 2 as head and block 1 as safe and block 0 as finalized + let fcs = create_forkchoice_state(block_2_hash, block_1_hash, block_0_hash); + + // Perform forkchoice update + let response = state.forkchoice_updated(fcs).unwrap(); + + assert_eq!( + response.payload_status.status, + PayloadStatusV1Status::Valid, + "forkchoice update should return VALID" + ); + assert_eq!( + state.tree.current_canonical_head, block_2_hash, + "canonical head should not revert to ancestor" + ); + + // Insert the fork chain and update forkchoice to point to the fork head + fixture.insert_fork(&mut state, 0, None)?; + let fork_head_hash = fixture.fork_block_hash(0, 0); + let fcs = create_forkchoice_state(fork_head_hash, block_1_hash, block_0_hash); + + // Perform forkchoice update + let response = 
state.forkchoice_updated(fcs)?; + + // Verify VALID response and head updated to fork + assert_eq!( + response.payload_status.status, + PayloadStatusV1Status::Valid, + "forkchoice update to fork head should return VALID" + ); + assert_eq!( + state.tree.current_canonical_head, fork_head_hash, + "canonical head should be updated to fork head" + ); + + Ok(()) + } + + // TODO: We need to update this test when we update the prune logic for fork -> buffer mapping + #[test] + fn test_prune() -> anyhow::Result<()> { + let fixture = TestStateFixtureBuilder::simple_chain() + .with_proofs_per_block(4) + .with_fork(0, 4, None) + .with_fork(0, 4, Some(1)) + .build(); + let mut state = State::with_min_required_proofs(4); + // Bootstrap with canonical chain + fixture.bootstrap_canonical(&mut state)?; + + // Insert fork chain which should also insert the fork block into the tree + fixture.insert_fork(&mut state, 0, None)?; + + // Insert another fork with only 1 proof to ensure it is not promoted to the tree + // TODO: When logic is added to prune buffer properly then add this. + + // Assert tree contains expected blocks + assert_eq!( + state.tree.proofs_by_block_hash.len(), + 7, + "tree should contain 7 blocks before pruning" + ); + + // Issue forkchoice update that will prune the sidechain from the tree. 
+ let finalized_hash = fixture.canonical_block_hash(1); + let safe_hash = finalized_hash; + let head_hash = fixture.canonical_block_hash(2); + let fcs = create_forkchoice_state(head_hash, safe_hash, finalized_hash); + + // Perform forkchoice update + let response = state.forkchoice_updated(fcs)?; + + // Assert the response is VALID + assert_eq!( + response.payload_status.status, + PayloadStatusV1Status::Valid, + "forkchoice update should return VALID" + ); + + // Assert that the fork chain has been pruned from the tree as has the canonical block 0 but the canonical blocks 1 and 2 remain + assert_eq!( + state.tree.proofs_by_block_hash.len(), + 2, + "tree should contain 2 blocks after pruning" + ); + + Ok(()) + } + + #[test] + fn test_get_proofs_from_tree() -> anyhow::Result<()> { + let fixture = TestStateFixtureBuilder::simple_chain().build(); + let mut state = State::new(); + + // Bootstrap and insert canonical chain + fixture.bootstrap_canonical(&mut state)?; + + // Retrieve proofs for genesis request root + let genesis_request = fixture.canonical(0); + let proofs = state.get_proofs(&genesis_request.metadata.request_root); + + assert!(proofs.is_some(), "should retrieve proofs from tree"); + assert_eq!( + proofs.unwrap().len(), + MIN_REQUIRED_EXECUTION_PROOFS, + "should retrieve all proofs from tree" + ); + + Ok(()) + } + + #[test] + fn test_get_proofs_from_buffer() -> anyhow::Result<()> { + let fixture = TestStateFixtureBuilder::simple_chain() + .with_fork(0, 1, Some(1)) + .build(); + let mut state = State::new(); + + // Bootstrap and insert canonical chain + fixture.bootstrap_canonical(&mut state)?; + + // Insert fork into state (this will be buffered only) + fixture.insert_fork(&mut state, 0, None)?; + + // Retrieve proofs for fork request root + let fork_request = fixture.fork(0, 0); + let proofs = state.get_proofs(&fork_request.metadata.request_root); + + assert!(proofs.is_some(), "should retrieve proofs from buffer"); + assert_eq!( + proofs.unwrap().len(), + 
1, + "should retrieve all proofs from buffer" + ); + + Ok(()) + } + + #[test] + fn test_get_proofs_empty_list() { + let fixture = TestStateFixtureBuilder::simple_chain().build(); + let mut state = State::new(); + + // Insert a request into the buffer with no proofs + let request = fixture.canonical(0); + state.buffer_request(request.metadata.clone()); + + // Retrieve proofs for the request root + let proofs = state.get_proofs(&request.metadata.request_root); + + // The request exists in the buffer but has no proofs, so it should return None + assert!( + proofs.is_none(), + "should return None for known request with no proofs" + ); + } + + #[test] + fn test_tree_state_consistency_after_promotion() -> anyhow::Result<()> { + let fixture = TestStateFixtureBuilder::simple_chain().build(); + let mut state = State::new(); + + // Bootstrap and insert canonical chain + fixture.bootstrap_canonical(&mut state).unwrap(); + + // Extract block hashes and request roots for all blocks in the canonical chain + let genesis_hash = fixture.canonical_block_hash(0); + let block1_hash = fixture.canonical_block_hash(1); + let block2_hash = fixture.canonical_block_hash(2); + + let genesis_root = fixture.canonical_request_root(0); + let block1_root = fixture.canonical_request_root(1); + let block2_root = fixture.canonical_request_root(2); + + // Verify all tree mappings are consistent + + // proofs_by_block_hash + assert_eq!( + state.tree.proofs_by_block_hash.len(), + 3, + "tree should contain exactly 3 blocks" + ); + + // request_root_to_block_hash + assert_eq!( + state.tree.request_root_to_block_hash.len(), + 3, + "request_root_to_block_hash should have 3 entries" + ); + assert_eq!( + state + .tree + .request_root_to_block_hash + .get(&genesis_root) + .copied(), + Some(genesis_hash), + "genesis root should map to genesis hash" + ); + assert_eq!( + state + .tree + .request_root_to_block_hash + .get(&block1_root) + .copied(), + Some(block1_hash), + "block1 root should map to block1 hash" + 
); + assert_eq!( + state + .tree + .request_root_to_block_hash + .get(&block2_root) + .copied(), + Some(block2_hash), + "block2 root should map to block2 hash" + ); + + // parent_to_children + let genesis_parent = test_exec_hash(0xff); + let genesis_parent_children = state + .tree + .parent_to_children + .get(&genesis_parent) + .expect("genesis parent should have children"); + assert!( + genesis_parent_children.contains(&genesis_hash), + "genesis parent should reference genesis" + ); + + let genesis_children = state + .tree + .parent_to_children + .get(&genesis_hash) + .expect("genesis should have children"); + assert!( + genesis_children.contains(&block1_hash), + "genesis should reference block1" + ); + + let block1_children = state + .tree + .parent_to_children + .get(&block1_hash) + .expect("block1 should have children"); + assert!( + block1_children.contains(&block2_hash), + "block1 should reference block2" + ); + + // block_number_to_block_hash + assert!( + state + .tree + .block_number_to_block_hash + .get(&0) + .unwrap() + .contains(&genesis_hash), + "genesis should be at height 0" + ); + assert!( + state + .tree + .block_number_to_block_hash + .get(&1) + .unwrap() + .contains(&block1_hash), + "block1 should be at height 1" + ); + assert!( + state + .tree + .block_number_to_block_hash + .get(&2) + .unwrap() + .contains(&block2_hash), + "block2 should be at height 2" + ); + + Ok(()) + } +} diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 32090bccfc9..0424530316f 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -61,7 +61,7 @@ pub enum Error { ExecutionHeadBlockNotFound, ParentHashEqualsBlockHash(ExecutionBlockHash), PayloadIdUnavailable, - SszError(ssz_types::Error), + Ssz(ssz_types::Error), DeserializeWithdrawals(ssz_types::Error), DeserializeDepositRequests(ssz_types::Error), DeserializeWithdrawalRequests(ssz_types::Error), @@ 
-106,7 +106,7 @@ impl From for Error { impl From for Error { fn from(e: ssz_types::Error) -> Self { - Error::SszError(e) + Error::Ssz(e) } } @@ -120,6 +120,12 @@ pub enum PayloadStatusV1Status { InvalidBlockHash, } +impl PayloadStatusV1Status { + pub fn is_syncing(&self) -> bool { + matches!(self, PayloadStatusV1Status::Syncing) + } +} + #[derive(Clone, Debug, PartialEq)] pub struct PayloadStatusV1 { pub status: PayloadStatusV1Status, @@ -250,6 +256,21 @@ impl From for SsePayloadAttributes { } } +/// Info about a buffered proof request that is missing sufficient proofs. +/// +/// The `root` field is dual-purpose: +/// - At the execution-layer level it holds the **new-payload request root**. +/// - After `BeaconChain::missing_execution_proofs()` performs the store LRU lookup it is +/// replaced with the corresponding **beacon block root** so the sync layer can issue +/// `ExecutionProofsByRoot` RPC requests directly. +#[derive(Clone, Debug, Default, PartialEq)] +pub struct MissingProofInfo { + /// New-payload request root (EL) or beacon block root (sync layer). + pub root: Hash256, + /// Proof types already received for this request root (to avoid redundant requests). 
+ pub existing_proof_types: Vec, +} + #[derive(Clone, Debug, PartialEq)] pub struct ForkchoiceUpdatedResponse { pub payload_status: PayloadStatusV1, diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index ba94296b859..4334a99ce8c 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -1,8 +1,10 @@ use crate::{Error, block_hash::calculate_execution_block_hash, metrics}; use crate::versioned_hashes::verify_versioned_hashes; +use ssz_types::VariableList; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use superstruct::superstruct; +use tree_hash_derive::TreeHash; use types::{ BeaconBlockRef, BeaconStateError, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadRef, Hash256, VersionedHash, @@ -14,7 +16,7 @@ use types::{ #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), - variant_attributes(derive(Clone, Debug, PartialEq),), + variant_attributes(derive(Clone, Debug, PartialEq, TreeHash),), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), cast_error( @@ -26,7 +28,8 @@ use types::{ expr = "BeaconStateError::IncorrectStateVariant" ) )] -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, TreeHash)] +#[tree_hash(enum_behaviour = "transparent")] pub struct NewPayloadRequest<'block, E: EthSpec> { #[superstruct( only(Bellatrix), @@ -44,7 +47,7 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { #[superstruct(only(Gloas), partial_getter(rename = "execution_payload_gloas"))] pub execution_payload: &'block ExecutionPayloadGloas, #[superstruct(only(Deneb, Electra, Fulu, Gloas))] - pub versioned_hashes: Vec, + pub versioned_hashes: VariableList, #[superstruct(only(Deneb, Electra, Fulu, Gloas))] pub parent_beacon_block_root: Hash256, #[superstruct(only(Electra, Fulu, Gloas))] @@ -196,7 
+199,8 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .blob_kzg_commitments .iter() .map(kzg_commitment_to_versioned_hash) - .collect(), + .collect::>() + .try_into()?, parent_beacon_block_root: block_ref.parent_root, })), BeaconBlockRef::Electra(block_ref) => Ok(Self::Electra(NewPayloadRequestElectra { @@ -206,7 +210,8 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .blob_kzg_commitments .iter() .map(kzg_commitment_to_versioned_hash) - .collect(), + .collect::>() + .try_into()?, parent_beacon_block_root: block_ref.parent_root, execution_requests: &block_ref.body.execution_requests, })), @@ -217,7 +222,8 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .blob_kzg_commitments .iter() .map(kzg_commitment_to_versioned_hash) - .collect(), + .collect::>() + .try_into()?, parent_beacon_block_root: block_ref.parent_root, execution_requests: &block_ref.body.execution_requests, })), diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 33b83aab09f..85f1f4b06ee 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,6 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
+use crate::eip8025::proof_engine::ProofEngine; use crate::json_structures::{BlobAndProofV1, BlobAndProofV2}; use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; @@ -60,6 +61,7 @@ use types::{ }; mod block_hash; +pub mod eip8025; mod engine_api; pub mod engines; mod keccak; @@ -142,6 +144,10 @@ impl TryFrom> for ProvenancedPayload for Error { } } +impl From for Error { + fn from(e: eip8025::errors::ProofEngineError) -> Self { + Error::ProofEngineError(e) + } +} + pub enum BlockProposalContentsType { Full(BlockProposalContents>), Blinded(BlockProposalContents>), @@ -426,7 +438,8 @@ pub enum SubmitBlindedBlockResponse { type PayloadContentsRefTuple<'a, E> = (ExecutionPayloadRef<'a, E>, Option<&'a BlobsBundle>); struct Inner { - engine: Arc, + /// Traditional execution engine (optional). + engine: Option>, builder: ArcSwapOption, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, @@ -440,12 +453,16 @@ struct Inner { /// This is used *only* in the informational sync status endpoint, so that a VC using this /// node can prefer another node with a healthier EL. last_new_payload_errored: RwLock, + /// EIP-8025: Optional execution proof engine. + proof_engine: Option>, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct Config { - /// Endpoint url for EL nodes that are running the engine api. + /// Endpoint url for EL nodes that are running the engine api (optional). pub execution_endpoint: Option, + /// Endpoint url for EIP-8025 proof engine (optional). + pub proof_engine_endpoint: Option, /// Endpoint urls for services providing the builder api. pub builder_url: Option, /// The timeout value used when making a request to fetch a block header @@ -480,7 +497,8 @@ impl ExecutionLayer { /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. pub fn from_config(config: Config, executor: TaskExecutor) -> Result { let Config { - execution_endpoint: url, + execution_endpoint, + proof_engine_endpoint, builder_url, builder_user_agent, builder_header_timeout, @@ -493,50 +511,66 @@ impl ExecutionLayer { execution_timeout_multiplier, } = config; - let execution_url = url.ok_or(Error::NoEngine)?; - - // Use the default jwt secret path if not provided via cli. - let secret_file = secret_file.unwrap_or_else(|| default_datadir.join(DEFAULT_JWT_FILE)); - - let jwt_key = if secret_file.exists() { - // Read secret from file if it already exists - std::fs::read_to_string(&secret_file) - .map_err(|e| format!("Failed to read JWT secret file. 
Error: {:?}", e)) - .and_then(|ref s| { - let secret = JwtKey::from_slice( - &hex::decode(strip_prefix(s.trim_end())) - .map_err(|e| format!("Invalid hex string: {:?}", e))?, - )?; - Ok(secret) - }) - .map_err(Error::InvalidJWTSecret) - } else { - // Create a new file and write a randomly generated secret to it if file does not exist - warn!(path = %secret_file.display(),"No JWT found on disk. Generating"); - std::fs::File::options() - .write(true) - .create_new(true) - .open(&secret_file) - .map_err(|e| format!("Failed to open JWT secret file. Error: {:?}", e)) - .and_then(|mut f| { - let secret = auth::JwtKey::random(); - f.write_all(secret.hex_string().as_bytes()) - .map_err(|e| format!("Failed to write to JWT secret file: {:?}", e))?; - Ok(secret) - }) - .map_err(Error::InvalidJWTSecret) - }?; + // Validation: at least one endpoint must be provided + if execution_endpoint.is_none() && proof_engine_endpoint.is_none() { + return Err(Error::NoExecutionEndpoint); + } + + // Create Engine if execution_endpoint is provided + let engine: Option> = if let Some(execution_url) = execution_endpoint { + // Use the default jwt secret path if not provided via cli. + let secret_file = secret_file.unwrap_or_else(|| default_datadir.join(DEFAULT_JWT_FILE)); + + let jwt_key = if secret_file.exists() { + // Read secret from file if it already exists + std::fs::read_to_string(&secret_file) + .map_err(|e| format!("Failed to read JWT secret file. Error: {:?}", e)) + .and_then(|ref s| { + let secret = JwtKey::from_slice( + &hex::decode(strip_prefix(s.trim_end())) + .map_err(|e| format!("Invalid hex string: {:?}", e))?, + )?; + Ok(secret) + }) + .map_err(Error::InvalidJWTSecret) + } else { + // Create a new file and write a randomly generated secret to it if file does not exist + warn!(path = %secret_file.display(),"No JWT found on disk. 
Generating"); + std::fs::File::options() + .write(true) + .create_new(true) + .open(&secret_file) + .map_err(|e| format!("Failed to open JWT secret file. Error: {:?}", e)) + .and_then(|mut f| { + let secret = auth::JwtKey::random(); + f.write_all(secret.hex_string().as_bytes()) + .map_err(|e| format!("Failed to write to JWT secret file: {:?}", e))?; + Ok(secret) + }) + .map_err(Error::InvalidJWTSecret) + }?; - let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(endpoint = %execution_url, jwt_path = ?secret_file.as_path(),"Loaded execution endpoint"); let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier) .map_err(Error::ApiError)?; - Engine::new(api, executor.clone()) + + Some(Arc::new(Engine::new(api, executor.clone()))) + } else { + None }; + // Create ProofEngine if proof_engine_endpoint is provided + let proof_engine: Option> = + if let Some(proof_url) = proof_engine_endpoint { + debug!(endpoint = %proof_url, "Loaded proof engine endpoint"); + Some(Arc::new(eip8025::HttpProofEngine::new(proof_url, None))) + } else { + None + }; + let inner = Inner { - engine: Arc::new(engine), + engine, builder: ArcSwapOption::empty(), execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, @@ -546,6 +580,7 @@ impl ExecutionLayer { executor, payload_cache: PayloadCache::default(), last_new_payload_errored: RwLock::new(false), + proof_engine, }; let el = Self { @@ -564,8 +599,12 @@ impl ExecutionLayer { Ok(el) } - fn engine(&self) -> &Arc { - &self.inner.engine + pub fn engine(&self) -> Option<&Arc> { + self.inner.engine.as_ref() + } + + pub fn proof_engine(&self) -> Option> { + self.inner.proof_engine.clone() } pub fn builder(&self) -> Option> { @@ -626,14 +665,17 @@ impl ExecutionLayer { /// Get the current difficulty of the PoW chain. 
pub async fn get_current_difficulty(&self) -> Result, ApiError> { - let block = self - .engine() - .api - .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) - .await? - .ok_or(ApiError::ExecutionHeadBlockNotFound)?; - Ok(block.total_difficulty) + if let Some(engine) = self.engine() { + engine + .api + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) + .await + .map(|opt| opt.and_then(|block| block.total_difficulty)) + } else { + Ok(None) + } } + /// Note: this function returns a mutex guard, be careful to avoid deadlocks. async fn execution_blocks( &self, @@ -644,8 +686,13 @@ impl ExecutionLayer { /// Gives access to a channel containing if the last engine state is online or not. /// /// This can be called several times. - pub async fn get_responsiveness_watch(&self) -> WatchStream { - self.engine().watch_state().await + /// Returns None if no engine is configured. + pub async fn get_responsiveness_watch(&self) -> Option> { + if let Some(engine) = self.engine() { + Some(engine.watch_state().await) + } else { + None + } } /// Note: this function returns a mutex guard, be careful to avoid deadlocks. @@ -674,6 +721,11 @@ impl ExecutionLayer { /// Spawns a routine which attempts to keep the execution engine online. pub fn spawn_watchdog_routine(&self, slot_clock: S) { + // If there is no engine, there is no need to spawn the watchdog routine. + if self.engine().is_none() { + return; + } + let watchdog = |el: ExecutionLayer| async move { // Run one task immediately. el.watchdog_task().await; @@ -693,7 +745,9 @@ impl ExecutionLayer { /// Performs a single execution of the watchdog routine. pub async fn watchdog_task(&self) { - self.engine().upcheck().await; + if let Some(engine) = self.engine() { + engine.upcheck().await; + } } /// Spawns a routine which cleans the cached proposer data periodically. @@ -736,7 +790,11 @@ impl ExecutionLayer { /// Returns `true` if the execution engine is synced and reachable. 
pub async fn is_synced(&self) -> bool { - self.engine().is_synced().await + if let Some(engine) = self.engine() { + engine.is_synced().await + } else { + true + } } /// Execution nodes return a "SYNCED" response when they do not have any peers. @@ -747,12 +805,16 @@ impl ExecutionLayer { /// Returns the `Self::is_synced` response if unable to get latest block. pub async fn is_synced_for_notifier(&self, current_slot: Slot) -> bool { let synced = self.is_synced().await; - if synced - && let Ok(Some(block)) = self - .engine() + let block = if let Some(engine) = self.engine() { + engine .api .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) .await + } else { + Ok(None) + }; + if synced + && let Ok(Some(block)) = block && block.block_number == 0 && current_slot > 0 { @@ -768,7 +830,12 @@ impl ExecutionLayer { /// be used to give an indication on the HTTP API that the node's execution layer is struggling, /// which can in turn be used by the VC. pub async fn is_offline_or_erroring(&self) -> bool { - self.engine().is_offline().await || *self.inner.last_new_payload_errored.read().await + let engine_offline = if let Some(engine) = self.engine() { + engine.is_offline().await + } else { + false + }; + engine_offline || *self.inner.last_new_payload_errored.read().await } /// Updates the proposer preparation data provided by validators @@ -1258,7 +1325,9 @@ impl ExecutionLayer { .. 
} = payload_parameters; - self.engine() + let engine = self.engine().ok_or(Error::NoEngine)?; + + engine .request(move |engine| async move { let payload_id = if let Some(id) = engine .get_payload_id(&parent_hash, payload_attributes) @@ -1376,10 +1445,25 @@ impl ExecutionLayer { let block_hash = new_payload_request.block_hash(); let parent_hash = new_payload_request.parent_hash(); - let result = self - .engine() - .request(|engine| engine.api.new_payload(new_payload_request)) - .await; + let engine_result = if let Some(engine) = self.engine() { + Some( + engine + .request(|engine| engine.api.new_payload(new_payload_request.clone())) + .await, + ) + } else { + None + }; + + let proof_engine_result = if let Some(proof_engine) = self.proof_engine() { + Some(Ok(proof_engine.new_payload(&new_payload_request).await?)) + } else { + None + }; + + let result = engine_result + .or(proof_engine_result) + .expect("at least one of engine or proof engine must be present"); if let Ok(status) = &result { let status_str = <&'static str>::from(status.status); @@ -1405,7 +1489,9 @@ impl ExecutionLayer { /// Update engine sync status. pub async fn upcheck(&self) { - self.engine().upcheck().await; + if let Some(engine) = self.engine() { + engine.upcheck().await; + } } /// Register that the given `validator_index` is going to produce a block at `slot`. 
@@ -1512,18 +1598,33 @@ impl ExecutionLayer { finalized_block_hash, }; - self.engine() - .set_latest_forkchoice_state(forkchoice_state) - .await; + let engine_result = if let Some(engine) = self.engine() { + engine.set_latest_forkchoice_state(forkchoice_state).await; - let result = self - .engine() - .request(|engine| async move { + Some( engine - .notify_forkchoice_updated(forkchoice_state, payload_attributes) - .await - }) - .await; + .request(|engine| async move { + engine + .notify_forkchoice_updated(forkchoice_state, payload_attributes) + .await + }) + .await, + ) + } else { + None + }; + + let proof_engine_result = if let Some(proof_engine) = self.proof_engine() { + Some(Ok(proof_engine + .forkchoice_updated(forkchoice_state) + .await?)) + } else { + None + }; + + let result = engine_result + .or(proof_engine_result) + .expect("at least one of engine or proof engine must be present"); if let Ok(status) = &result { metrics::inc_counter_vec( @@ -1553,10 +1654,31 @@ impl ExecutionLayer { &self, age_limit: Option, ) -> Result { - self.engine() - .request(|engine| engine.get_engine_capabilities(age_limit)) - .await - .map_err(Into::into) + if let Some(engine) = self.engine() { + Ok(engine + .request(|engine| engine.get_engine_capabilities(age_limit)) + .await?) 
+ } else { + Ok(EngineCapabilities { + new_payload_v1: true, + new_payload_v2: true, + new_payload_v3: true, + new_payload_v4: true, + forkchoice_updated_v1: true, + forkchoice_updated_v2: true, + forkchoice_updated_v3: true, + get_payload_bodies_by_hash_v1: false, + get_payload_bodies_by_range_v1: false, + get_payload_v1: true, + get_payload_v2: true, + get_payload_v3: true, + get_payload_v4: true, + get_payload_v5: true, + get_client_version_v1: false, + get_blobs_v1: false, + get_blobs_v2: false, + }) + } } /// Returns the execution engine version resulting from a call to @@ -1572,14 +1694,16 @@ impl ExecutionLayer { &self, age_limit: Option, ) -> Result, Error> { - let versions = self - .engine() - .request(|engine| engine.get_engine_version(age_limit)) - .await - .map_err(Into::::into)?; - metrics::expose_execution_layer_info(&versions); - - Ok(versions) + if let Some(engine) = self.engine() { + let versions = engine + .request(|engine| engine.get_engine_version(age_limit)) + .await + .map_err(Into::::into)?; + metrics::expose_execution_layer_info(&versions); + Ok(versions) + } else { + Ok(vec![]) + } } /// Used during block production to determine if the merge has been triggered. @@ -1599,39 +1723,42 @@ impl ExecutionLayer { &[metrics::GET_TERMINAL_POW_BLOCK_HASH], ); - let hash_opt = self - .engine() - .request(|engine| async move { - let terminal_block_hash = spec.terminal_block_hash; - if terminal_block_hash != ExecutionBlockHash::zero() { - if self - .get_pow_block(engine, terminal_block_hash) - .await? - .is_some() - { - return Ok(Some(terminal_block_hash)); - } else { - return Ok(None); + let hash_opt = if let Some(engine) = self.engine() { + engine + .request(|engine| async move { + let terminal_block_hash = spec.terminal_block_hash; + if terminal_block_hash != ExecutionBlockHash::zero() { + if self + .get_pow_block(engine, terminal_block_hash) + .await? 
+ .is_some() + { + return Ok(Some(terminal_block_hash)); + } else { + return Ok(None); + } } - } - let block = self.get_pow_block_at_total_difficulty(engine, spec).await?; - if let Some(pow_block) = block { - // If `terminal_block.timestamp == transition_block.timestamp`, - // we violate the invariant that a block's timestamp must be - // strictly greater than its parent's timestamp. - // The execution layer will reject a fcu call with such payload - // attributes leading to a missed block. - // Hence, we return `None` in such a case. - if pow_block.timestamp >= timestamp { - return Ok(None); + let block = self.get_pow_block_at_total_difficulty(engine, spec).await?; + if let Some(pow_block) = block { + // If `terminal_block.timestamp == transition_block.timestamp`, + // we violate the invariant that a block's timestamp must be + // strictly greater than its parent's timestamp. + // The execution layer will reject a fcu call with such payload + // attributes leading to a missed block. + // Hence, we return `None` in such a case. + if pow_block.timestamp >= timestamp { + return Ok(None); + } } - } - Ok(block.map(|b| b.block_hash)) - }) - .await - .map_err(Box::new) - .map_err(Error::EngineError)?; + Ok(block.map(|b| b.block_hash)) + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError)? + } else { + None + }; if let Some(hash) = &hash_opt { info!( @@ -1728,21 +1855,25 @@ impl ExecutionLayer { &[metrics::IS_VALID_TERMINAL_POW_BLOCK_HASH], ); - self.engine() - .request(|engine| async move { - if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? - && let Some(pow_parent) = - self.get_pow_block(engine, pow_block.parent_hash).await? 
- { - return Ok(Some( - self.is_valid_terminal_pow_block(pow_block, pow_parent, spec), - )); - } - Ok(None) - }) - .await - .map_err(Box::new) - .map_err(Error::EngineError) + if let Some(engine) = self.engine() { + engine + .request(|engine| async move { + if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? + && let Some(pow_parent) = + self.get_pow_block(engine, pow_block.parent_hash).await? + { + return Ok(Some( + self.is_valid_terminal_pow_block(pow_block, pow_parent, spec), + )); + } + Ok(None) + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } else { + Ok(None) + } } /// This function should remain internal. @@ -1788,13 +1919,17 @@ impl ExecutionLayer { &self, hashes: Vec, ) -> Result>>, Error> { - self.engine() - .request(|engine: &Engine| async move { - engine.api.get_payload_bodies_by_hash_v1(hashes).await - }) - .await - .map_err(Box::new) - .map_err(Error::EngineError) + if let Some(engine) = self.engine() { + engine + .request(|engine: &Engine| async move { + engine.api.get_payload_bodies_by_hash_v1(hashes).await + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } else { + Ok(vec![None; hashes.len()]) + } } pub async fn get_payload_bodies_by_range( @@ -1803,16 +1938,20 @@ impl ExecutionLayer { count: u64, ) -> Result>>, Error> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE); - self.engine() - .request(|engine: &Engine| async move { - engine - .api - .get_payload_bodies_by_range_v1(start, count) - .await - }) - .await - .map_err(Box::new) - .map_err(Error::EngineError) + if let Some(engine) = self.engine() { + engine + .request(|engine: &Engine| async move { + engine + .api + .get_payload_bodies_by_range_v1(start, count) + .await + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } else { + Ok(vec![None; count as usize]) + } } /// Fetch a full payload from the execution node. 
@@ -1872,6 +2011,7 @@ impl ExecutionLayer { if capabilities.get_blobs_v1 { self.engine() + .expect("capabilities only returns get_blobs_v1=true if engine is present") .request(|engine| async move { engine.api.get_blobs_v1(query).await }) .await .map_err(Box::new) @@ -1889,6 +2029,7 @@ impl ExecutionLayer { if capabilities.get_blobs_v2 { self.engine() + .expect("capabilities only returns get_blobs_v2=true if engine is present") .request(|engine| async move { engine.api.get_blobs_v2(query).await }) .await .map_err(Box::new) @@ -1902,11 +2043,15 @@ impl ExecutionLayer { &self, query: BlockByNumberQuery<'_>, ) -> Result, Error> { - self.engine() - .request(|engine| async move { engine.api.get_block_by_number(query).await }) - .await - .map_err(Box::new) - .map_err(Error::EngineError) + if let Some(engine) = self.engine() { + engine + .request(|engine| async move { engine.api.get_block_by_number(query).await }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } else { + Ok(None) + } } pub async fn propose_blinded_beacon_block( @@ -2351,7 +2496,10 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_block_prior_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - el.engine().upcheck().await; + el.engine() + .expect("engine is configured in default test mock execution layer parameters") + .upcheck() + .await; assert_eq!( el.get_terminal_pow_block_hash(&spec, timestamp_now()) .await @@ -2378,7 +2526,10 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_block_prior_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - el.engine().upcheck().await; + el.engine() + .expect("engine is configured in default test mock execution layer parameters") + .upcheck() + .await; assert_eq!( el.get_terminal_pow_block_hash(&spec, timestamp_now()) .await @@ -2406,7 +2557,10 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) 
.move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { - el.engine().upcheck().await; + el.engine() + .expect("engine is configured in default test mock execution layer parameters") + .upcheck() + .await; assert_eq!( el.is_valid_terminal_pow_block_hash(terminal_block.unwrap().block_hash, &spec) .await @@ -2423,7 +2577,10 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { - el.engine().upcheck().await; + el.engine() + .expect("engine is configured in default test mock execution layer parameters") + .upcheck() + .await; let invalid_terminal_block = terminal_block.unwrap().parent_hash; assert_eq!( @@ -2442,7 +2599,10 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - el.engine().upcheck().await; + el.engine() + .expect("engine is configured in default test mock execution layer parameters") + .upcheck() + .await; let missing_terminal_block = ExecutionBlockHash::repeat_byte(42); assert_eq!( diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 2465a41d8b6..b2a6d6f98e2 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -56,8 +56,8 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v4: true, get_payload_v5: true, get_client_version_v1: true, - get_blobs_v1: true, - get_blobs_v2: true, + get_blobs_v1: false, + get_blobs_v2: false, }; pub static DEFAULT_CLIENT_VERSION: LazyLock = diff --git a/beacon_node/http_api/src/eip8025.rs b/beacon_node/http_api/src/eip8025.rs new file mode 100644 index 00000000000..c8ae9248c47 --- /dev/null +++ b/beacon_node/http_api/src/eip8025.rs @@ -0,0 +1,172 @@ +//! 
EIP-8025: Optional Execution Proofs - HTTP API Endpoints +//! +//! This module provides HTTP API endpoints for: +//! - GET `/eth/v1/beacon/proofs/execution_proofs/{block_id}` - Retrieve execution proofs for a block +//! - POST `/eth/v1/beacon/execution_proofs` - Submit pre-signed execution proofs + +use crate::block_id::BlockId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use execution_layer::eip8025::ProofEngine; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tracing::{debug, warn}; +use types::SignedExecutionProof; +use warp::Reply; +use warp::http::Response; +use warp::hyper::Body; +use warp_utils::reject::{custom_bad_request, custom_server_error}; + +/// Response for GET /eth/v1/beacon/proofs/execution_proofs/{block_id} +#[derive(Debug, Serialize, Deserialize)] +pub struct ExecutionProofsResponse { + pub execution_optimistic: bool, + pub finalized: bool, + pub data: Vec, +} + +/// Request body for POST /eth/v1/beacon/execution_proofs +#[derive(Debug, Serialize, Deserialize)] +pub struct SubmitExecutionProofsRequest { + /// Pre-signed execution proofs from validators + pub proofs: Vec, +} + +/// Get execution proofs for a given block. +/// +/// Returns execution proofs from the ProofEngine for the block's execution payload. +/// This endpoint requires `--proof-engine-endpoint` to be configured. +pub fn get_execution_proofs( + block_id: BlockId, + chain: Arc>, +) -> Result { + // Get the execution layer's proof engine — presence is the only gate. + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or_else(|| custom_server_error("Execution layer not available".to_string()))?; + + let proof_engine = execution_layer + .proof_engine() + .ok_or_else(|| custom_bad_request( + "Proof engine not configured. 
Start with --proof-engine-endpoint to enable EIP-8025.".to_string(), + ))?; + + // Get the block to retrieve its execution payload root + let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; + + // Get proofs from the proof engine + let request_root = chain + .store + .get_request_root_by_block_root(&block_root) + .ok_or(custom_server_error("request block is unknown".to_string()))?; + let proofs = proof_engine.get_proofs_by_root(&request_root); + + debug!( + block_root = ?block_root, + num_proofs = proofs.len(), + "Retrieved execution proofs for block" + ); + + Ok(ExecutionProofsResponse { + execution_optimistic, + finalized, + data: proofs, + }) +} + +/// Submit signed execution proofs. +/// +/// This endpoint is used by validator clients to submit execution proofs that have been +/// signed by a validator. The proofs will be verified, stored in the ProofEngine, and +/// gossiped to the network. +pub async fn submit_execution_proofs( + request: SubmitExecutionProofsRequest, + chain: Arc>, + network_send: UnboundedSender>, +) -> Result, warp::Rejection> { + // TODO: should we add a verify: bool to verify_execution_proof to allow skipping verification checks from this endpoint if we trust the source? + + // Require proof engine to be configured — presence is the only gate. + if chain + .execution_layer + .as_ref() + .and_then(|el| el.proof_engine()) + .is_none() + { + return Err(custom_bad_request( + "Proof engine not configured. Start with --proof-engine-endpoint to enable EIP-8025." 
+ .to_string(), + )); + } + + if request.proofs.is_empty() { + return Err(custom_bad_request("No proofs provided".to_string())); + } + + // Process each signed proof + for signed_proof in request.proofs { + let request_root = signed_proof.request_root(); + let proof_type = signed_proof.proof_type(); + let validator_index = signed_proof.validator_index(); + + debug!( + ?request_root, + proof_type, validator_index, "Processing submitted signed execution proof" + ); + + // Verify proof (BLS signature + execution engine + fork choice update) + if let Err(e) = chain.verify_execution_proof(signed_proof.clone()).await { + warn!( + error = ?e, + ?request_root, + proof_type, + validator_index, + "Signed proof validation failed" + ); + return Err(custom_bad_request(format!( + "Proof validation failed: {e:?}" + ))); + } + + // Gossip publish the signed proof + if let Err(e) = network_send.send(NetworkMessage::Publish { + messages: vec![PubsubMessage::ExecutionProof(Box::new(signed_proof))], + }) { + warn!( + error = ?e, + ?request_root, + proof_type, + "Failed to gossip signed proof" + ); + } + + debug!( + ?request_root, + proof_type, validator_index, "Signed execution proof verified, stored, and gossiped" + ); + } + + Ok(warp::reply().into_response()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_execution_proofs_response_serialization() { + let response = ExecutionProofsResponse { + execution_optimistic: false, + finalized: true, + data: vec![], + }; + + let json = serde_json::to_string(&response).unwrap(); + assert!(json.contains("execution_optimistic")); + assert!(json.contains("finalized")); + assert!(json.contains("data")); + } +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 58cd2a3bdbc..453fc633ac6 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -16,6 +16,7 @@ mod build_block_contents; mod builder_states; mod custody; mod database; +mod eip8025; mod light_client; mod 
metrics; mod peer; @@ -542,6 +543,20 @@ pub fn serve( .map(move || TaskSpawner::new(beacon_processor_send.clone())) .boxed(); + // Create a `warp` filter that provides direct access to the `BeaconProcessorSend`. + let beacon_processor_send_direct = ctx.beacon_processor_send.clone(); + let _beacon_processor_send_filter = warp::any() + .map(move || beacon_processor_send_direct.clone()) + .and_then(|send| async move { + match send { + Some(send) => Ok(send), + None => Err(warp_utils::reject::custom_server_error( + "Beacon processor unavailable".to_string(), + )), + } + }) + .boxed(); + let duplicate_block_status_code = ctx.config.duplicate_block_status_code; /* @@ -1786,6 +1801,49 @@ pub fn serve( }, ); + /* + * EIP-8025: beacon/execution_proofs + */ + + let beacon_proofs_path = eth_v1 + .clone() + .and(warp::path("beacon")) + .and(warp::path("execution_proofs")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + // GET beacon/execution_proofs/{block_id} + let get_beacon_execution_proofs = beacon_proofs_path + .clone() + .and(block_id_or_err) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + block_id: BlockId| { + task_spawner.blocking_json_task(Priority::P1, move || { + eip8025::get_execution_proofs(block_id, chain) + }) + }, + ); + + // POST beacon/execution_proofs + let post_prover_execution_proofs = beacon_proofs_path + .clone() + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + proofs: eip8025::SubmitExecutionProofsRequest, + network_send: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P1, async move { + eip8025::submit_execution_proofs(proofs, chain, network_send).await + }) + }, + ); + /* * config */ @@ -3120,6 +3178,9 @@ pub fn serve( let receiver = match topic { api_types::EventTopic::Head => event_handler.subscribe_head(), api_types::EventTopic::Block => event_handler.subscribe_block(), 
+ api_types::EventTopic::BlockFull => { + event_handler.subscribe_block_full() + } api_types::EventTopic::BlobSidecar => { event_handler.subscribe_blob_sidecar() } @@ -3289,6 +3350,7 @@ pub fn serve( .uor(get_beacon_pool_voluntary_exits) .uor(get_beacon_pool_bls_to_execution_changes) .uor(get_beacon_rewards_blocks) + .uor(get_beacon_execution_proofs) .uor(get_config_fork_schedule) .uor(get_config_spec) .uor(get_config_deposit_contract) @@ -3380,6 +3442,7 @@ pub fn serve( .uor(post_lighthouse_add_peer) .uor(post_lighthouse_remove_peer) .uor(post_lighthouse_custody_backfill) + .uor(post_prover_execution_proofs) .recover(warp_utils::reject::handle_rejection), ), ) diff --git a/beacon_node/http_api/src/task_spawner.rs b/beacon_node/http_api/src/task_spawner.rs index 834cd29971f..fed466b8f4f 100644 --- a/beacon_node/http_api/src/task_spawner.rs +++ b/beacon_node/http_api/src/task_spawner.rs @@ -107,7 +107,7 @@ impl TaskSpawner { pub async fn spawn_async_with_rejection( self, priority: Priority, - func: impl Future> + Send + Sync + 'static, + func: impl Future> + Send + 'static, ) -> Response { let result = self .spawn_async_with_rejection_no_conversion(priority, func) @@ -122,7 +122,7 @@ impl TaskSpawner { pub async fn spawn_async_with_rejection_no_conversion( self, priority: Priority, - func: impl Future> + Send + Sync + 'static, + func: impl Future> + Send + 'static, ) -> Result { if let Some(beacon_processor_send) = &self.beacon_processor_send { // Create a wrapper future that will execute `func` and send the diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 416ca73e08e..89808e9f787 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -125,6 +125,10 @@ pub struct Config { /// Whether light client protocols should be enabled. pub enable_light_client_server: bool, + /// Whether to subscribe to the EIP-8025 execution proof gossip topic. 
+ /// Set to `true` only when `--proof-engine-endpoint` is configured. + pub enable_execution_proof: bool, + /// Configuration for the outbound rate limiter (requests made by this node). pub outbound_rate_limiter_config: Option, @@ -359,6 +363,7 @@ impl Default for Config { proposer_only: false, metrics_enabled: false, enable_light_client_server: true, + enable_execution_proof: false, outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 4c285ea86c8..ce4be57f6d0 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -29,6 +29,8 @@ pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; /// The ENR field specifying the peerdas custody group count. pub const PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY: &str = "cgc"; +/// The ENR field indicating execution proof engine support. +pub const EXECUTION_PROOF_ENR_KEY: &str = "ep"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { @@ -47,6 +49,9 @@ pub trait Eth2Enr { fn next_fork_digest(&self) -> Result<[u8; 4], &'static str>; fn eth2(&self) -> Result; + + /// Whether this node has an execution proof engine configured and can serve execution proofs. 
+ fn execution_proof_enabled(&self) -> bool; } impl Eth2Enr for Enr { @@ -99,6 +104,12 @@ impl Eth2Enr for Enr { EnrForkId::from_ssz_bytes(ð2_bytes).map_err(|_| "Could not decode EnrForkId") } + + fn execution_proof_enabled(&self) -> bool { + self.get_decodable::(EXECUTION_PROOF_ENR_KEY) + .and_then(|r| r.ok()) + .unwrap_or(false) + } } /// Either use the given ENR or load an ENR from file if it exists and matches the current NodeId @@ -284,6 +295,11 @@ pub fn build_enr( builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &next_fork_digest); } + // advertise execution proof engine support if configured + if config.enable_execution_proof { + builder.add_value(EXECUTION_PROOF_ENR_KEY, &true); + } + builder .build(enr_key) .map_err(|e| format!("Could not build Local ENR: {:?}", e)) @@ -308,11 +324,12 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and - // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will - // likely only be true for non-validating nodes. + // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY and EXECUTION_PROOF_ENR_KEY to match, otherwise we + // use a new ENR. This will likely only be true for non-validating nodes. 
&& local_enr.get_decodable::(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get_decodable::(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) && local_enr.get_decodable::(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) + && local_enr.get_decodable::(EXECUTION_PROOF_ENR_KEY) == disk_enr.get_decodable(EXECUTION_PROOF_ENR_KEY) } /// Loads enr from the given directory diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 939eca3b946..ebe8e76e8b7 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -560,6 +560,8 @@ impl Discovery { } // Data column subnets are computed from node ID. No subnet bitfield in the ENR. Subnet::DataColumn(_) => return Ok(()), + // ExecutionProof capability is set once at startup via build_enr(); not toggled. 
+ Subnet::ExecutionProof => return Ok(()), } // replace the global version @@ -904,6 +906,7 @@ impl Discovery { Subnet::Attestation(_) => "attestation", Subnet::SyncCommittee(_) => "sync_committee", Subnet::DataColumn(_) => "data_column", + Subnet::ExecutionProof => "execution_proof", }; if let Some(v) = metrics::get_int_counter( diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 757dbb58534..a403c478fbb 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -41,6 +41,7 @@ where false } } + Subnet::ExecutionProof => enr.execution_proof_enabled(), }); if !predicate { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 43a44c85fc8..ccce98dd196 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -22,6 +22,7 @@ pub use libp2p::identity::Keypair; pub mod peerdb; +use crate::discovery::enr::Eth2Enr; use crate::peer_manager::peerdb::client::ClientKind; use crate::types::GossipKind; use libp2p::multiaddr; @@ -59,6 +60,8 @@ pub const PEER_RECONNECTION_TIMEOUT: Duration = Duration::from_secs(600); pub const MIN_SYNC_COMMITTEE_PEERS: u64 = 2; /// Avoid pruning sampling peers if subnet peer count is below this number. pub const MIN_SAMPLING_COLUMN_SUBNET_PEERS: u64 = 2; +/// Minimum connected peers that advertise execution proof engine support. +pub const MIN_EXECUTION_PROOF_PEERS: u64 = 1; /// A fraction of `PeerManager::target_peers` that we allow to connect to us in excess of /// `PeerManager::target_peers`. For clarity, if `PeerManager::target_peers` is 50 and /// PEER_EXCESS_FACTOR = 0.1 we allow 10% more nodes, i.e 55. 
@@ -353,6 +356,13 @@ impl PeerManager { let results_count = results.len(); let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); for (enr, min_ttl) in results { + let peer_id = enr.peer_id(); + + self.network_globals + .peers + .write() + .update_peer_enr_if_missing(&peer_id, enr.clone()); + // There are two conditions in deciding whether to dial this peer. // 1. If we are less than our max connections. Discovery queries are executed to reach // our target peers, so its fine to dial up to our max peers (which will get pruned @@ -368,7 +378,6 @@ impl PeerManager { { // This should be updated with the peer dialing. In fact created once the peer is // dialed - let peer_id = enr.peer_id(); if let Some(min_ttl) = min_ttl { self.network_globals .peers @@ -599,6 +608,8 @@ impl PeerManager { Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -619,6 +630,8 @@ impl PeerManager { Protocol::BlobsByRoot => return, Protocol::DataColumnsByRoot => return, Protocol::DataColumnsByRange => return, + Protocol::ExecutionProofsByRange => return, + Protocol::ExecutionProofsByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -642,6 +655,8 @@ impl PeerManager { Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRoot => 
PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, @@ -1009,6 +1024,30 @@ impl PeerManager { } } + /// Trigger subnet-targeted discovery when we are below the minimum number of connected peers + /// that advertise execution proof engine support (`ep=true` ENR). + fn maintain_proof_capable_peers(&mut self) { + let count = self + .network_globals + .peers + .read() + .good_peers_on_subnet(Subnet::ExecutionProof) + .count() as u64; + if count < MIN_EXECUTION_PROOF_PEERS { + debug!( + count, + target = MIN_EXECUTION_PROOF_PEERS, + "Insufficient proof-capable peers; triggering discovery" + ); + self.events.push(PeerManagerEvent::DiscoverSubnetPeers(vec![ + SubnetDiscovery { + subnet: Subnet::ExecutionProof, + min_ttl: None, + }, + ])); + } + } + /// This function checks the status of our current peers and optionally requests a discovery /// query if we need to find more peers to maintain the current number of peers fn maintain_peer_count(&mut self, dialing_peers: usize) { @@ -1079,6 +1118,8 @@ impl PeerManager { Subnet::DataColumn(id) => { peer_info.custody_subnets.insert(id); } + // ExecutionProof is a capability flag, not a subnet tracked in peer_subnet_info. 
+ Subnet::ExecutionProof => {} } } @@ -1163,6 +1204,27 @@ impl PeerManager { return true; } + // Protect proof-capable peers when at or below the minimum threshold + if self.network_globals.execution_proof() { + let is_proof_capable = candidate_info + .info + .enr() + .is_some_and(|enr| enr.execution_proof_enabled()); + if is_proof_capable { + let proof_capable_count = peer_subnet_info + .values() + .filter(|p| { + p.info + .enr() + .is_some_and(|enr| enr.execution_proof_enabled()) + }) + .count(); + if proof_capable_count <= MIN_EXECUTION_PROOF_PEERS as usize { + return true; + } + } + } + // Check attestation subnet to avoid pruning from subnets with the lowest peer count let attestation_subnet_counts: HashMap = peer_subnet_info .values() @@ -1447,6 +1509,11 @@ impl PeerManager { // Maintain minimum count for sync committee peers. self.maintain_sync_committee_peers(); + // Maintain minimum count for execution-proof-capable peers. + if self.network_globals.execution_proof() { + self.maintain_proof_capable_peers(); + } + // Prune any excess peers back to our target in such a way that incentivises good scores and // a uniform distribution of subnets. self.prune_excess_peers(); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 11ce7853507..a46edac2a5c 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,5 +1,5 @@ use crate::discovery::CombinedKey; -use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; +use crate::discovery::enr::{EXECUTION_PROOF_ENR_KEY, PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY}; use crate::{Enr, Gossipsub, PeerId, SyncInfo, metrics, multiaddr::Multiaddr, types::Subnet}; use itertools::Itertools; use logging::crit; @@ -695,6 +695,15 @@ impl PeerDB { } } + /// Stores the ENR for a peer if they don't already have one recorded. 
+ pub(super) fn update_peer_enr_if_missing(&mut self, peer_id: &PeerId, enr: Enr) { + if let Some(info) = self.peer_info_mut(peer_id) + && info.enr().is_none() + { + info.set_enr(enr); + } + } + /// Update min ttl of a peer. // VISIBILITY: Only the peer manager can update the min_ttl pub(super) fn update_min_ttl(&mut self, peer_id: &PeerId, min_ttl: Instant) { @@ -853,6 +862,42 @@ impl PeerDB { peer_id } + /// Like `__add_connected_peer_testing_only`, but sets `ep=true` in the peer's ENR so that + /// `execution_proof_enabled()` returns true for this peer. MUST ONLY BE USED IN TESTS. + pub fn __add_connected_proof_capable_peer_testing_only( + &mut self, + enr_key: CombinedKey, + ) -> PeerId { + let mut enr = Enr::builder().build(&enr_key).unwrap(); + let peer_id = enr.peer_id(); + enr.insert(EXECUTION_PROOF_ENR_KEY, &true, &enr_key) + .expect("bool can be encoded"); + + self.update_connection_state( + &peer_id, + NewConnectionState::Connected { + enr: Some(enr), + seen_address: Multiaddr::empty(), + direction: ConnectionDirection::Outgoing, + }, + ); + + self.update_sync_status( + &peer_id, + SyncStatus::Synced { + info: SyncInfo { + head_slot: Slot::new(0), + head_root: Hash256::ZERO, + finalized_epoch: Epoch::new(0), + finalized_root: Hash256::ZERO, + earliest_available_slot: Some(Slot::new(0)), + }, + }, + ); + + peer_id + } + /// The connection state of the peer has been changed. Modify the peer in the db to ensure all /// variables are in sync with libp2p. 
/// Updating the state can lead to a `BanOperation` which needs to be processed via the peer diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index c289cb9a69c..a6e51e37692 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -92,6 +92,10 @@ impl PeerInfo { /// Returns if the peer is subscribed to a given `Subnet` from the metadata attnets/syncnets field. /// Also returns true if the peer is assigned to custody a given data column `Subnet` computed from the metadata `custody_group_count` field or ENR `cgc` field. pub fn on_subnet_metadata(&self, subnet: &Subnet) -> bool { + // ExecutionProof capability is advertised in the ENR, not a metadata bitfield. + if matches!(subnet, Subnet::ExecutionProof) { + return self.enr().is_some_and(|enr| enr.execution_proof_enabled()); + } if let Some(meta_data) = &self.meta_data { match subnet { Subnet::Attestation(id) => { @@ -105,6 +109,7 @@ impl PeerInfo { Subnet::DataColumn(subnet_id) => { return self.is_assigned_to_custody_subnet(subnet_id); } + Subnet::ExecutionProof => unreachable!("handled above"), } } false diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 3611f023917..4125887ddb7 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -21,7 +21,7 @@ use types::{ LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, - SignedBeaconBlockGloas, + SignedBeaconBlockGloas, execution::eip8025::SignedExecutionProof, }; use unsigned_varint::codec::Uvi; @@ -80,6 +80,8 @@ impl SSZSnappyInboundCodec { 
RpcSuccessResponse::BlobsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::ExecutionProofsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::ExecutionProofsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -363,6 +365,8 @@ impl Encoder> for SSZSnappyOutboundCodec { RequestType::Ping(req) => req.as_ssz_bytes(), RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), + RequestType::ExecutionProofsByRange(req) => req.as_ssz_bytes(), + RequestType::ExecutionProofsByRoot(req) => req.block_roots.as_ssz_bytes(), // no metadata to encode RequestType::MetaData(_) | RequestType::LightClientOptimisticUpdate @@ -587,6 +591,19 @@ fn handle_rpc_request( LightClientUpdatesByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))) } + SupportedProtocol::ExecutionProofsByRangeV1 => { + Ok(Some(RequestType::ExecutionProofsByRange( + ExecutionProofsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) + } + SupportedProtocol::ExecutionProofsByRootV1 => Ok(Some(RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest { + block_roots: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_blocks(current_fork), + )?, + }, + ))), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. 
SupportedProtocol::MetaDataV3 => { @@ -841,6 +858,16 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::ExecutionProofsByRangeV1 => { + Ok(Some(RpcSuccessResponse::ExecutionProofsByRange(Arc::new( + SignedExecutionProof::from_ssz_bytes(decoded_buffer)?, + )))) + } + SupportedProtocol::ExecutionProofsByRootV1 => { + Ok(Some(RpcSuccessResponse::ExecutionProofsByRoot(Arc::new( + SignedExecutionProof::from_ssz_bytes(decoded_buffer)?, + )))) + } SupportedProtocol::BlocksByRootV2 => match fork_name { Some(ForkName::Altair) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), @@ -1284,6 +1311,12 @@ mod tests { RequestType::LightClientUpdatesByRange(light_client_updates_by_range) ) } + RequestType::ExecutionProofsByRange(ep_range) => { + assert_eq!(decoded, RequestType::ExecutionProofsByRange(ep_range)) + } + RequestType::ExecutionProofsByRoot(ep_root) => { + assert_eq!(decoded, RequestType::ExecutionProofsByRoot(ep_root)) + } } } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index b0ee6fea64b..1eb71ab49aa 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -97,6 +97,8 @@ pub struct RateLimiterConfig { pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, pub(super) light_client_updates_by_range_quota: Quota, + pub(super) execution_proofs_by_range_quota: Quota, + pub(super) execution_proofs_by_root_quota: Quota, } impl RateLimiterConfig { @@ -126,6 +128,11 @@ impl RateLimiterConfig { pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA: Quota = Quota::one_every(10); + // Execution proofs are comparable to data columns in 
bandwidth. + pub const DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + pub const DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); } impl Default for RateLimiterConfig { @@ -146,6 +153,8 @@ impl Default for RateLimiterConfig { Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, light_client_finality_update_quota: Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA, light_client_updates_by_range_quota: Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA, + execution_proofs_by_range_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA, + execution_proofs_by_root_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA, } } } @@ -205,6 +214,8 @@ impl FromStr for RateLimiterConfig { let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; let mut light_client_updates_by_range_quota = None; + let mut execution_proofs_by_range_quota = None; + let mut execution_proofs_by_root_quota = None; for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; @@ -239,6 +250,12 @@ impl FromStr for RateLimiterConfig { light_client_updates_by_range_quota = light_client_updates_by_range_quota.or(quota) } + Protocol::ExecutionProofsByRange => { + execution_proofs_by_range_quota = execution_proofs_by_range_quota.or(quota) + } + Protocol::ExecutionProofsByRoot => { + execution_proofs_by_root_quota = execution_proofs_by_root_quota.or(quota) + } } } Ok(RateLimiterConfig { @@ -265,6 +282,10 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA), light_client_updates_by_range_quota: light_client_updates_by_range_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA), + execution_proofs_by_range_quota: execution_proofs_by_range_quota + .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA), + execution_proofs_by_root_quota: execution_proofs_by_root_quota + 
.unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA), }) } } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 0539877c722..8af7504599b 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -13,6 +13,7 @@ use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::data::BlobIdentifier; +use types::execution::eip8025::SignedExecutionProof; use types::light_client::consts::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, @@ -573,6 +574,72 @@ impl LightClientUpdatesByRangeRequest { } } +/// Request execution proofs for a slot range from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct ExecutionProofsByRangeRequest { + /// The starting slot to request execution proofs. + pub start_slot: u64, + /// The number of slots from the start slot. + pub count: u64, +} + +impl ExecutionProofsByRangeRequest { + pub fn max_requested(&self) -> u64 { + use types::execution::eip8025::MaxExecutionProofsPerPayload; + use typenum::Unsigned; + self.count + .saturating_mul(MaxExecutionProofsPerPayload::to_u64()) + } + + pub fn ssz_min_len() -> usize { + ExecutionProofsByRangeRequest { + start_slot: 0, + count: 0, + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len() -> usize { + Self::ssz_min_len() + } +} + +impl std::fmt::Display for ExecutionProofsByRangeRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: ExecutionProofsByRange: Start Slot: {}, Count: {}", + self.start_slot, self.count + ) + } +} + +/// Request execution proofs for specific blocks by root from a peer. +#[derive(Clone, Debug, PartialEq)] +pub struct ExecutionProofsByRootRequest { + /// The list of block roots whose execution proofs are being requested. 
+ pub block_roots: RuntimeVariableList, +} + +impl ExecutionProofsByRootRequest { + pub fn new(block_roots: Vec, max_request_blocks: usize) -> Result { + let block_roots = RuntimeVariableList::new(block_roots, max_request_blocks) + .map_err(|e| format!("ExecutionProofsByRootRequest too many roots: {e:?}"))?; + Ok(Self { block_roots }) + } +} + +impl std::fmt::Display for ExecutionProofsByRootRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: ExecutionProofsByRoot: Number of Requested Roots: {}", + self.block_roots.len() + ) + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -612,6 +679,12 @@ pub enum RpcSuccessResponse { /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. DataColumnsByRange(Arc>), + /// A response to a get EXECUTION_PROOFS_BY_RANGE request. + ExecutionProofsByRange(Arc), + + /// A response to a get EXECUTION_PROOFS_BY_ROOT request. + ExecutionProofsByRoot(Arc), + /// A PONG response to a PING request. Pong(Ping), @@ -642,6 +715,12 @@ pub enum ResponseTermination { /// Light client updates by range stream termination. LightClientUpdatesByRange, + + /// Execution proofs by range stream termination. + ExecutionProofsByRange, + + /// Execution proofs by root stream termination. 
+ ExecutionProofsByRoot, } impl ResponseTermination { @@ -654,6 +733,8 @@ impl ResponseTermination { ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, ResponseTermination::LightClientUpdatesByRange => Protocol::LightClientUpdatesByRange, + ResponseTermination::ExecutionProofsByRange => Protocol::ExecutionProofsByRange, + ResponseTermination::ExecutionProofsByRoot => Protocol::ExecutionProofsByRoot, } } } @@ -756,6 +837,8 @@ impl RpcSuccessResponse { } RpcSuccessResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, RpcSuccessResponse::LightClientUpdatesByRange(_) => Protocol::LightClientUpdatesByRange, + RpcSuccessResponse::ExecutionProofsByRange(_) => Protocol::ExecutionProofsByRange, + RpcSuccessResponse::ExecutionProofsByRoot(_) => Protocol::ExecutionProofsByRoot, } } @@ -772,7 +855,11 @@ impl RpcSuccessResponse { Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()), Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()), Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), - Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, + Self::MetaData(_) + | Self::Status(_) + | Self::Pong(_) + | Self::ExecutionProofsByRange(_) + | Self::ExecutionProofsByRoot(_) => None, } } } @@ -860,6 +947,20 @@ impl std::fmt::Display for RpcSuccessResponse { update.signature_slot(), ) } + RpcSuccessResponse::ExecutionProofsByRange(proof) => { + write!( + f, + "ExecutionProofsByRange: validator_index: {}", + proof.validator_index + ) + } + RpcSuccessResponse::ExecutionProofsByRoot(proof) => { + write!( + f, + "ExecutionProofsByRoot: validator_index: {}", + proof.validator_index + ) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7c43018af83..6c7b1e2d781 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -155,6 +155,7 @@ pub struct RPC { events: Vec>, fork_context: Arc, enable_light_client_server: bool, + enable_execution_proof: bool, /// A sequential counter indicating when data gets modified. seq_number: u64, } @@ -163,6 +164,7 @@ impl RPC { pub fn new( fork_context: Arc, enable_light_client_server: bool, + enable_execution_proof: bool, inbound_rate_limiter_config: Option, outbound_rate_limiter_config: Option, seq_number: u64, @@ -184,6 +186,7 @@ impl RPC { events: Vec::new(), fork_context, enable_light_client_server, + enable_execution_proof, seq_number, } } @@ -319,6 +322,7 @@ where fork_context: self.fork_context.clone(), max_rpc_size: self.fork_context.spec.max_payload_size as usize, enable_light_client_server: self.enable_light_client_server, + enable_execution_proof: self.enable_execution_proof, phantom: PhantomData, }, (), @@ -342,6 +346,7 @@ where fork_context: self.fork_context.clone(), max_rpc_size: self.fork_context.spec.max_payload_size as usize, enable_light_client_server: self.enable_light_client_server, + enable_execution_proof: self.enable_execution_proof, phantom: PhantomData, }, (), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 366515d42f6..f5e5bc271c3 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -118,6 +118,19 @@ pub static LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX: LazyLock = pub static LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX: LazyLock = LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Electra)); +/// Minimum SSZ size of a `SignedExecutionProof` (empty proof_data): +/// - message offset: 4 bytes +/// - validator_index: 8 bytes +/// - signature: 96 bytes +/// - ExecutionProof fixed header (proof_data offset + proof_type + public_input): 4 + 1 + 32 = 37 +pub const SIGNED_EXECUTION_PROOF_MIN_SIZE: usize = 4 + 8 + 96 + 37; + 
+/// Maximum SSZ size of a `SignedExecutionProof` (MaxProofSize = 307200 bytes): +/// - SignedExecutionProof fixed header: 4 + 8 + 96 = 108 bytes +/// - ExecutionProof fixed header: 4 + 1 + 32 = 37 bytes +/// - proof_data: MaxProofSize = 75 * 4096 = 307200 bytes +pub const SIGNED_EXECUTION_PROOF_MAX_SIZE: usize = 4 + 8 + 96 + 37 + 307200; + /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// The number of seconds to wait for the first bytes of a request once a protocol has been @@ -267,6 +280,12 @@ pub enum Protocol { /// The `LightClientUpdatesByRange` protocol name #[strum(serialize = "light_client_updates_by_range")] LightClientUpdatesByRange, + /// The `ExecutionProofsByRange` protocol name. + #[strum(serialize = "execution_proofs_by_range")] + ExecutionProofsByRange, + /// The `ExecutionProofsByRoot` protocol name. + #[strum(serialize = "execution_proofs_by_root")] + ExecutionProofsByRoot, } impl Protocol { @@ -286,6 +305,8 @@ impl Protocol { Protocol::LightClientOptimisticUpdate => None, Protocol::LightClientFinalityUpdate => None, Protocol::LightClientUpdatesByRange => None, + Protocol::ExecutionProofsByRange => Some(ResponseTermination::ExecutionProofsByRange), + Protocol::ExecutionProofsByRoot => Some(ResponseTermination::ExecutionProofsByRoot), } } } @@ -318,6 +339,8 @@ pub enum SupportedProtocol { LightClientOptimisticUpdateV1, LightClientFinalityUpdateV1, LightClientUpdatesByRangeV1, + ExecutionProofsByRangeV1, + ExecutionProofsByRootV1, } impl SupportedProtocol { @@ -342,6 +365,8 @@ impl SupportedProtocol { SupportedProtocol::LightClientOptimisticUpdateV1 => "1", SupportedProtocol::LightClientFinalityUpdateV1 => "1", SupportedProtocol::LightClientUpdatesByRangeV1 => "1", + SupportedProtocol::ExecutionProofsByRangeV1 => "1", + SupportedProtocol::ExecutionProofsByRootV1 => "1", } } @@ -368,6 +393,8 @@ impl SupportedProtocol { } SupportedProtocol::LightClientFinalityUpdateV1 => 
Protocol::LightClientFinalityUpdate, SupportedProtocol::LightClientUpdatesByRangeV1 => Protocol::LightClientUpdatesByRange, + SupportedProtocol::ExecutionProofsByRangeV1 => Protocol::ExecutionProofsByRange, + SupportedProtocol::ExecutionProofsByRootV1 => Protocol::ExecutionProofsByRoot, } } @@ -426,6 +453,7 @@ pub struct RPCProtocol { pub fork_context: Arc, pub max_rpc_size: usize, pub enable_light_client_server: bool, + pub enable_execution_proof: bool, pub phantom: PhantomData, } @@ -450,6 +478,16 @@ impl UpgradeInfo for RPCProtocol { Encoding::SSZSnappy, )); } + if self.enable_execution_proof { + supported_protocols.push(ProtocolId::new( + SupportedProtocol::ExecutionProofsByRangeV1, + Encoding::SSZSnappy, + )); + supported_protocols.push(ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + )); + } supported_protocols } } @@ -535,6 +573,14 @@ impl ProtocolId { LightClientUpdatesByRangeRequest::ssz_max_len(), ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty + Protocol::ExecutionProofsByRange => RpcLimits::new( + ExecutionProofsByRangeRequest::ssz_min_len(), + ExecutionProofsByRangeRequest::ssz_max_len(), + ), + // ExecutionProofsByRoot request is a list of block roots — same size limit as BlocksByRoot. 
+ Protocol::ExecutionProofsByRoot => { + RpcLimits::new(0, spec.max_blocks_by_root_request) + } } } @@ -576,6 +622,10 @@ impl ProtocolId { Protocol::LightClientUpdatesByRange => { rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork_name()) } + Protocol::ExecutionProofsByRange | Protocol::ExecutionProofsByRoot => RpcLimits::new( + SIGNED_EXECUTION_PROOF_MIN_SIZE, + SIGNED_EXECUTION_PROOF_MAX_SIZE, + ), } } @@ -601,7 +651,10 @@ impl ProtocolId { | SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 | SupportedProtocol::MetaDataV3 - | SupportedProtocol::GoodbyeV1 => false, + | SupportedProtocol::GoodbyeV1 + // Execution proof types are not fork-dependent, no context bytes needed. + | SupportedProtocol::ExecutionProofsByRangeV1 + | SupportedProtocol::ExecutionProofsByRootV1 => false, } } } @@ -729,6 +782,8 @@ pub enum RequestType { LightClientOptimisticUpdate, LightClientFinalityUpdate, LightClientUpdatesByRange(LightClientUpdatesByRangeRequest), + ExecutionProofsByRange(ExecutionProofsByRangeRequest), + ExecutionProofsByRoot(ExecutionProofsByRootRequest), Ping(Ping), MetaData(MetadataRequest), } @@ -754,6 +809,13 @@ impl RequestType { RequestType::LightClientOptimisticUpdate => 1, RequestType::LightClientFinalityUpdate => 1, RequestType::LightClientUpdatesByRange(req) => req.count, + RequestType::ExecutionProofsByRange(req) => req.max_requested(), + RequestType::ExecutionProofsByRoot(req) => { + use typenum::Unsigned; + use types::execution::eip8025::MaxExecutionProofsPerPayload; + (req.block_roots.len() as u64) + .saturating_mul(MaxExecutionProofsPerPayload::to_u64()) + } } } @@ -793,6 +855,8 @@ impl RequestType { RequestType::LightClientUpdatesByRange(_) => { SupportedProtocol::LightClientUpdatesByRangeV1 } + RequestType::ExecutionProofsByRange(_) => SupportedProtocol::ExecutionProofsByRangeV1, + RequestType::ExecutionProofsByRoot(_) => SupportedProtocol::ExecutionProofsByRootV1, } } @@ -808,6 +872,8 @@ impl RequestType { 
RequestType::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, + RequestType::ExecutionProofsByRange(_) => ResponseTermination::ExecutionProofsByRange, + RequestType::ExecutionProofsByRoot(_) => ResponseTermination::ExecutionProofsByRoot, RequestType::Status(_) => unreachable!(), RequestType::Goodbye(_) => unreachable!(), RequestType::Ping(_) => unreachable!(), @@ -879,6 +945,14 @@ impl RequestType { SupportedProtocol::LightClientUpdatesByRangeV1, Encoding::SSZSnappy, )], + RequestType::ExecutionProofsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::ExecutionProofsByRangeV1, + Encoding::SSZSnappy, + )], + RequestType::ExecutionProofsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + )], } } @@ -898,6 +972,8 @@ impl RequestType { RequestType::LightClientOptimisticUpdate => true, RequestType::LightClientFinalityUpdate => true, RequestType::LightClientUpdatesByRange(_) => true, + RequestType::ExecutionProofsByRange(_) => false, + RequestType::ExecutionProofsByRoot(_) => false, } } } @@ -1019,6 +1095,8 @@ impl std::fmt::Display for RequestType { RequestType::LightClientUpdatesByRange(_) => { write!(f, "Light client updates by range request") } + RequestType::ExecutionProofsByRange(req) => write!(f, "{}", req), + RequestType::ExecutionProofsByRoot(req) => write!(f, "{}", req), } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 8b364f506cc..b4740afd066 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -113,6 +113,10 @@ pub struct RPCRateLimiter { lc_finality_update_rl: Limiter, /// LightClientUpdatesByRange rate limiter. 
lc_updates_by_range_rl: Limiter, + /// ExecutionProofsByRange rate limiter. + ep_by_range_rl: Limiter, + /// ExecutionProofsByRoot rate limiter. + ep_by_root_rl: Limiter, fork_context: Arc, } @@ -156,6 +160,10 @@ pub struct RPCRateLimiterBuilder { lc_finality_update_quota: Option, /// Quota for the LightClientUpdatesByRange protocol. lc_updates_by_range_quota: Option, + /// Quota for the ExecutionProofsByRange protocol. + ep_by_range_quota: Option, + /// Quota for the ExecutionProofsByRoot protocol. + ep_by_root_quota: Option, } impl RPCRateLimiterBuilder { @@ -177,6 +185,8 @@ impl RPCRateLimiterBuilder { Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, Protocol::LightClientUpdatesByRange => self.lc_updates_by_range_quota = q, + Protocol::ExecutionProofsByRange => self.ep_by_range_quota = q, + Protocol::ExecutionProofsByRoot => self.ep_by_root_quota = q, } self } @@ -221,6 +231,14 @@ impl RPCRateLimiterBuilder { .dcbrange_quota .ok_or("DataColumnsByRange quota not specified")?; + let ep_by_range_quota = self + .ep_by_range_quota + .ok_or("ExecutionProofsByRange quota not specified")?; + + let ep_by_root_quota = self + .ep_by_root_quota + .ok_or("ExecutionProofsByRoot quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -236,6 +254,8 @@ impl RPCRateLimiterBuilder { let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; let lc_updates_by_range_rl = Limiter::from_quota(lc_updates_by_range_quota)?; + let ep_by_range_rl = Limiter::from_quota(ep_by_range_quota)?; + let ep_by_root_rl = Limiter::from_quota(ep_by_root_quota)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -259,6 +279,8 
@@ impl RPCRateLimiterBuilder { lc_optimistic_update_rl, lc_finality_update_rl, lc_updates_by_range_rl, + ep_by_range_rl, + ep_by_root_rl, init_time: Instant::now(), fork_context, }) @@ -312,6 +334,8 @@ impl RPCRateLimiter { light_client_optimistic_update_quota, light_client_finality_update_quota, light_client_updates_by_range_quota, + execution_proofs_by_range_quota, + execution_proofs_by_root_quota, } = config; Self::builder() @@ -338,6 +362,8 @@ impl RPCRateLimiter { Protocol::LightClientUpdatesByRange, light_client_updates_by_range_quota, ) + .set_quota(Protocol::ExecutionProofsByRange, execution_proofs_by_range_quota) + .set_quota(Protocol::ExecutionProofsByRoot, execution_proofs_by_root_quota) .build(fork_context) } @@ -376,6 +402,8 @@ impl RPCRateLimiter { Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, Protocol::LightClientUpdatesByRange => &mut self.lc_updates_by_range_rl, + Protocol::ExecutionProofsByRange => &mut self.ep_by_range_rl, + Protocol::ExecutionProofsByRoot => &mut self.ep_by_root_rl, }; check(limiter) } @@ -400,6 +428,8 @@ impl RPCRateLimiter { lc_optimistic_update_rl, lc_finality_update_rl, lc_updates_by_range_rl, + ep_by_range_rl, + ep_by_root_rl, fork_context: _, } = self; @@ -417,6 +447,8 @@ impl RPCRateLimiter { lc_optimistic_update_rl.prune(time_since_start); lc_finality_update_rl.prune(time_since_start); lc_updates_by_range_rl.prune(time_since_start); + ep_by_range_rl.prune(time_since_start); + ep_by_root_rl.prune(time_since_start); } } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index f1a4d87de76..9adf4551165 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use types::{ BlobSidecar, DataColumnSidecar, Epoch, EthSpec, LightClientBootstrap, 
LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, + execution::eip8025::SignedExecutionProof, }; pub type Id = u32; @@ -30,6 +31,10 @@ pub enum SyncRequestId { BlobsByRange(BlobsByRangeRequestId), /// Data columns by range request DataColumnsByRange(DataColumnsByRangeRequestId), + /// Execution proofs by range request + ExecutionProofsByRange(ExecutionProofsByRangeRequestId), + /// Execution proofs by root request + ExecutionProofsByRoot(ExecutionProofsByRootRequestId), } /// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. @@ -75,6 +80,18 @@ pub enum DataColumnsByRangeRequester { CustodyBackfillSync(CustodyBackFillBatchRequestId), } +/// Request ID for execution_proofs_by_range requests. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct ExecutionProofsByRangeRequestId { + pub id: Id, +} + +/// Request ID for execution_proofs_by_root requests. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct ExecutionProofsByRootRequestId { + pub id: Id, +} + /// Block components by range request for range sync. Includes an ID for downstream consumers to /// handle retries and tie all their sub requests together. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] @@ -172,6 +189,10 @@ pub enum Response { LightClientFinalityUpdate(Arc>), /// A response to a LightClientUpdatesByRange request. LightClientUpdatesByRange(Option>>), + /// A response to a get EXECUTION_PROOFS_BY_RANGE request. A None response signals end of batch. + ExecutionProofsByRange(Option>), + /// A response to a get EXECUTION_PROOFS_BY_ROOT request. A None response signals end of batch. 
+ ExecutionProofsByRoot(Option>), } impl std::convert::From> for RpcResponse { @@ -217,6 +238,18 @@ impl std::convert::From> for RpcResponse { RpcResponse::StreamTermination(ResponseTermination::LightClientUpdatesByRange) } }, + Response::ExecutionProofsByRange(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRange(p)), + None => { + RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRange) + } + }, + Response::ExecutionProofsByRoot(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRoot(p)), + None => { + RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRoot) + } + }, } } } @@ -234,6 +267,8 @@ macro_rules! impl_display { // Since each request Id is deeply nested with various types, if rendered with Debug on logs they // take too much visual space. This custom Display implementations make the overall Id short while // not losing information +impl_display!(ExecutionProofsByRangeRequestId, "ExecProofsByRange/{}", id); +impl_display!(ExecutionProofsByRootRequestId, "ExecProofsByRoot/{}", id); impl_display!(BlocksByRangeRequestId, "{}/{}", id, parent_request_id); impl_display!(BlobsByRangeRequestId, "{}/{}", id, parent_request_id); impl_display!(DataColumnsByRangeRequestId, "{}/{}", id, parent_request_id); diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 120b9e6c245..5b31823d2d2 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -44,6 +44,8 @@ pub struct GossipCache { light_client_finality_update: Option, /// Timeout for light client optimistic updates. light_client_optimistic_update: Option, + /// EIP-8025: Timeout for execution proofs. 
+ execution_proof: Option, } #[derive(Default)] @@ -75,6 +77,8 @@ pub struct GossipCacheBuilder { light_client_finality_update: Option, /// Timeout for light client optimistic updates. light_client_optimistic_update: Option, + /// EIP-8025: Timeout for execution proofs. + execution_proof: Option, } #[allow(dead_code)] @@ -167,6 +171,7 @@ impl GossipCacheBuilder { bls_to_execution_change, light_client_finality_update, light_client_optimistic_update, + execution_proof, } = self; GossipCache { expirations: DelayQueue::default(), @@ -184,6 +189,7 @@ impl GossipCacheBuilder { bls_to_execution_change: bls_to_execution_change.or(default_timeout), light_client_finality_update: light_client_finality_update.or(default_timeout), light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), + execution_proof: execution_proof.or(default_timeout), } } } @@ -211,6 +217,7 @@ impl GossipCache { GossipKind::BlsToExecutionChange => self.bls_to_execution_change, GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, + GossipKind::ExecutionProof => self.execution_proof, }; let Some(expire_timeout) = expire_timeout else { return; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 4eebda1decb..57d25f98123 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -297,13 +297,17 @@ impl Network { let all_topics_for_digests = current_and_future_digests .map(|(epoch, digest)| { let fork = ctx.chain_spec.fork_name_at_epoch(epoch); - all_topics_at_fork::(fork, &ctx.chain_spec) - .into_iter() - .map(|topic| { - Topic::new(GossipTopic::new(topic, GossipEncoding::default(), digest)) - .into() - }) - .collect::>() + all_topics_at_fork::( + fork, + &ctx.chain_spec, + network_globals.execution_proof(), + ) + .into_iter() + .map(|topic| { + 
Topic::new(GossipTopic::new(topic, GossipEncoding::default(), digest)) + .into() + }) + .collect::>() }) .collect::>(); @@ -370,6 +374,7 @@ impl Network { let eth2_rpc = RPC::new( ctx.fork_context.clone(), config.enable_light_client_server, + config.enable_execution_proof, config.inbound_rate_limiter_config.clone(), config.outbound_rate_limiter_config.clone(), seq_number, @@ -1607,6 +1612,28 @@ impl Network { request_type, }) } + RequestType::ExecutionProofsByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["execution_proofs_by_range"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } + RequestType::ExecutionProofsByRoot(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["execution_proofs_by_root"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } } } Ok(RPCReceived::Response(id, resp)) => { @@ -1667,6 +1694,12 @@ impl Network { peer_id, Response::LightClientUpdatesByRange(Some(update)), ), + RpcSuccessResponse::ExecutionProofsByRange(proof) => { + self.build_response(id, peer_id, Response::ExecutionProofsByRange(Some(proof))) + } + RpcSuccessResponse::ExecutionProofsByRoot(proof) => { + self.build_response(id, peer_id, Response::ExecutionProofsByRoot(Some(proof))) + } } } Ok(RPCReceived::EndOfStream(id, termination)) => { @@ -1680,6 +1713,12 @@ impl Network { ResponseTermination::LightClientUpdatesByRange => { Response::LightClientUpdatesByRange(None) } + ResponseTermination::ExecutionProofsByRange => { + Response::ExecutionProofsByRange(None) + } + ResponseTermination::ExecutionProofsByRoot => { + Response::ExecutionProofsByRoot(None) + } }; self.build_response(id, peer_id, response) } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 63f22be5e2c..c6487b05e7d 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ 
b/beacon_node/lighthouse_network/src/service/utils.rs @@ -275,6 +275,7 @@ pub(crate) fn create_whitelist_filter( add(BlsToExecutionChange); add(LightClientFinalityUpdate); add(LightClientOptimisticUpdate); + add(ExecutionProof); for id in 0..spec.attestation_subnet_count { add(Attestation(SubnetId::new(id))); } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index df8dbdc559e..9bea929aa0d 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -228,6 +228,7 @@ impl NetworkGlobals { enable_light_client_server: self.config.enable_light_client_server, subscribe_all_subnets: self.config.subscribe_all_subnets, sampling_subnets: self.sampling_subnets.read().clone(), + execution_proof: self.config.enable_execution_proof, } } @@ -262,6 +263,10 @@ impl NetworkGlobals { let enr = discv5::enr::Enr::builder().build(&enr_key).unwrap(); NetworkGlobals::new(enr, metadata, trusted_peers, false, config, spec) } + + pub fn execution_proof(&self) -> bool { + self.config.enable_execution_proof + } } #[cfg(test)] diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 72f2873def9..1abc1e9a38a 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -14,8 +14,8 @@ use types::{ SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + SignedContributionAndProof, SignedExecutionProof, SignedVoluntaryExit, SingleAttestation, + SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -46,6 
+46,8 @@ pub enum PubsubMessage { LightClientFinalityUpdate(Box>), /// Gossipsub message providing notification of a light client optimistic update. LightClientOptimisticUpdate(Box>), + /// EIP-8025: Gossipsub message providing notification of a signed execution proof. + ExecutionProof(Box), } // Implements the `DataTransform` trait of gossipsub to employ snappy compression @@ -149,6 +151,7 @@ impl PubsubMessage { PubsubMessage::LightClientOptimisticUpdate(_) => { GossipKind::LightClientOptimisticUpdate } + PubsubMessage::ExecutionProof(_) => GossipKind::ExecutionProof, } } @@ -387,6 +390,14 @@ impl PubsubMessage { light_client_optimistic_update, ))) } + GossipKind::ExecutionProof => { + // EIP-8025: Nodes only subscribe to this topic when a proof engine is + // configured (opt-in per node). No fork check needed — subscription + // itself is the gate. + let execution_proof = SignedExecutionProof::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::ExecutionProof(Box::new(execution_proof))) + } } } } @@ -413,6 +424,7 @@ impl PubsubMessage { PubsubMessage::BlsToExecutionChange(data) => data.as_ssz_bytes(), PubsubMessage::LightClientFinalityUpdate(data) => data.as_ssz_bytes(), PubsubMessage::LightClientOptimisticUpdate(data) => data.as_ssz_bytes(), + PubsubMessage::ExecutionProof(data) => data.as_ssz_bytes(), } } } @@ -472,6 +484,14 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::LightClientOptimisticUpdate(_data) => { write!(f, "Light CLient Optimistic Update") } + PubsubMessage::ExecutionProof(data) => { + write!( + f, + "Execution Proof: request_root: {:?}, proof_type: {}", + data.request_root(), + data.proof_type() + ) + } } } } diff --git a/beacon_node/lighthouse_network/src/types/subnet.rs b/beacon_node/lighthouse_network/src/types/subnet.rs index 1892dcc83af..c18f948654c 100644 --- a/beacon_node/lighthouse_network/src/types/subnet.rs +++ b/beacon_node/lighthouse_network/src/types/subnet.rs @@ -14,6 +14,8 @@ pub enum Subnet 
{ SyncCommittee(SyncSubnetId), /// Represents a gossipsub data column subnet. DataColumn(DataColumnSubnetId), + /// EIP-8025: Capability flag — peers with a proof engine (ENR `ep=true`). + ExecutionProof, } /// A subnet to discover peers on along with the instant after which it's no longer useful. diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 0c988f35c39..6d0ac31b3a8 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -25,12 +25,15 @@ pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change"; pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; +/// EIP-8025: Topic for publishing signed execution proofs. +pub const EXECUTION_PROOF_TOPIC: &str = "execution_proof"; #[derive(Debug)] pub struct TopicConfig { pub enable_light_client_server: bool, pub subscribe_all_subnets: bool, pub sampling_subnets: HashSet, + pub execution_proof: bool, } /// Returns all the topics the node should subscribe at `fork_name` @@ -85,6 +88,12 @@ pub fn core_topics_to_subscribe( } } + // EIP-8025: Subscribe to execution proof topic only when a proof engine is configured. + // This is an opt-in per-node feature, not tied to any fork. 
+ if opts.execution_proof { + topics.push(GossipKind::ExecutionProof); + } + topics } @@ -109,17 +118,23 @@ pub fn is_fork_non_core_topic(topic: &GossipTopic, _fork_name: ForkName) -> bool | GossipKind::SignedContributionAndProof | GossipKind::BlsToExecutionChange | GossipKind::LightClientFinalityUpdate - | GossipKind::LightClientOptimisticUpdate => false, + | GossipKind::LightClientOptimisticUpdate + | GossipKind::ExecutionProof => false, } } -pub fn all_topics_at_fork(fork: ForkName, spec: &ChainSpec) -> Vec { +pub fn all_topics_at_fork( + fork: ForkName, + spec: &ChainSpec, + execution_proof: bool, +) -> Vec { // Compute the worst case of all forks let sampling_subnets = HashSet::from_iter(spec.all_data_column_sidecar_subnets()); let opts = TopicConfig { enable_light_client_server: true, subscribe_all_subnets: true, sampling_subnets, + execution_proof, }; core_topics_to_subscribe::(fork, &opts, spec) } @@ -169,6 +184,8 @@ pub enum GossipKind { LightClientFinalityUpdate, /// Topic for publishing optimistic updates for light clients. LightClientOptimisticUpdate, + /// EIP-8025: Topic for publishing signed execution proofs. 
+ ExecutionProof, } impl std::fmt::Display for GossipKind { @@ -251,6 +268,7 @@ impl GossipTopic { BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, + EXECUTION_PROOF_TOPIC => GossipKind::ExecutionProof, topic => match subnet_topic_index(topic) { Some(kind) => kind, None => return Err(format!("Unknown topic: {}", topic)), @@ -272,6 +290,7 @@ impl GossipTopic { GossipKind::Attestation(subnet_id) => Some(Subnet::Attestation(*subnet_id)), GossipKind::SyncCommitteeMessage(subnet_id) => Some(Subnet::SyncCommittee(*subnet_id)), GossipKind::DataColumnSidecar(subnet_id) => Some(Subnet::DataColumn(*subnet_id)), + GossipKind::ExecutionProof => Some(Subnet::ExecutionProof), _ => None, } } @@ -316,6 +335,7 @@ impl std::fmt::Display for GossipTopic { GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), + GossipKind::ExecutionProof => EXECUTION_PROOF_TOPIC.into(), }; write!( f, @@ -334,6 +354,7 @@ impl From for GossipKind { Subnet::Attestation(s) => GossipKind::Attestation(s), Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), Subnet::DataColumn(s) => GossipKind::DataColumnSidecar(s), + Subnet::ExecutionProof => GossipKind::ExecutionProof, } } } @@ -514,6 +535,7 @@ mod tests { enable_light_client_server: false, subscribe_all_subnets: false, sampling_subnets: sampling_subnets.clone(), + execution_proof: false, } } @@ -558,6 +580,7 @@ mod tests { let s = HashSet::from_iter([1, 2].map(DataColumnSubnetId::new)); let mut topic_config = get_topic_config(&s); topic_config.enable_light_client_server = true; + topic_config.execution_proof = true; let latest_fork = *ForkName::list_all().last().unwrap(); let topics = 
core_topics_to_subscribe::(latest_fork, &topic_config, &spec); @@ -575,6 +598,10 @@ mod tests { for subnet in s { expected_topics.push(GossipKind::DataColumnSidecar(subnet)); } + // EIP-8025: ExecutionProof topic is added when execution_proof is enabled in TopicConfig + if topic_config.execution_proof { + expected_topics.push(GossipKind::ExecutionProof); + } // Need to check all the topics exist in an order independent manner for expected_topic in expected_topics { assert!( diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index ca259129348..597333b9002 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -35,12 +35,14 @@ use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tracing::{Instrument, Span, debug, error, info, instrument, trace, warn}; +use types::ProofStatus; use types::{ Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, block::BlockImportSource, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedExecutionProof, + SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + block::BlockImportSource, }; use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; @@ -1862,6 +1864,122 @@ impl NetworkBeaconProcessor { metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL); } + /// Process a signed execution proof received from 
the gossip network. + pub async fn process_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + execution_proof: SignedExecutionProof, + ) { + // Extract metadata for logging + let request_root = execution_proof.request_root(); + let proof_type = execution_proof.proof_type(); + let validator_index = execution_proof.validator_index(); + + // Verify the execution proof. + let verification_result = self.chain.verify_execution_proof(execution_proof).await; + + match verification_result { + // TODO: split our error types and penalize accordingly + Err(e) => { + warn!( + ?request_root, + validator_index, + %peer_id, + error = ?e, + "Error verifying execution proof for gossip" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_execution_proof", + ); + } + Ok(ProofStatus::Valid) => { + debug!( + ?request_root, + validator_index, proof_type, "Execution proof is valid" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } + Ok(ProofStatus::Invalid) => { + debug!( + ?request_root, + %peer_id, + validator_index, proof_type, "Execution proof is invalid banning peer" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::Fatal, "invalid_execution_proof"); + } + Ok(ProofStatus::Accepted) => { + debug!( + ?request_root, + validator_index, + proof_type, + "Execution proof is accepted but not fully verified" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } + Ok(ProofStatus::Syncing) => { + debug!( + ?request_root, + validator_index, + proof_type, + "Execution proof cannot be fully verified while syncing" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + // TODO: Should we do this check earlier. 
This is a quick and cheap check, so it may be better to do it before the more expensive verification steps. + Ok(ProofStatus::NotSupported) => { + debug!( + ?request_root, + validator_index, proof_type, "Execution proof type not supported" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + }; + } + + /// Process an execution proof received via RPC. + /// + /// Runs the same BLS + proof engine verification as the gossip path, but without gossip + /// propagation. Penalizes the serving peer if the proof is invalid. + pub async fn process_rpc_execution_proof( + self: &Arc, + peer_id: PeerId, + execution_proof: SignedExecutionProof, + ) { + let verification_result = self.chain.verify_execution_proof(execution_proof).await; + + match verification_result { + Err(e) => { + debug!(%peer_id, error = ?e, "Error verifying RPC execution proof"); + } + Ok(ProofStatus::Valid) => { + debug!(%peer_id, "RPC execution proof valid"); + } + Ok(ProofStatus::Invalid) => { + debug!(%peer_id, "RPC execution proof invalid, penalizing peer"); + self.send_network_message(NetworkMessage::ReportPeer { + peer_id, + action: PeerAction::HighToleranceError, + source: ReportSource::SyncService, + msg: "invalid_rpc_execution_proof", + }); + } + Ok(ProofStatus::Accepted) => { + debug!(%peer_id, "RPC execution proof accepted"); + } + Ok(ProofStatus::NotSupported) => { + debug!(%peer_id, "RPC execution proof type not supported by local engine"); + } + Ok(ProofStatus::Syncing) => { + debug!(%peer_id, "RPC execution proof received while block still syncing"); + } + } + } + /// Process the sync committee signature received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. 
diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index fd9c2c1e55c..6f00ef75cf6 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -423,6 +423,47 @@ impl NetworkBeaconProcessor { }) } + /// EIP-8025: Create a new `Work` event for some signed execution proof. + pub fn send_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + execution_proof: Box, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_gossip_execution_proof(message_id, peer_id, *execution_proof) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipExecutionProof(Box::pin(process_fn)), + }) + } + + /// EIP-8025: Verify an execution proof received over RPC. + /// + /// Reuses `GossipExecutionProof` work queue — the verification logic is identical. + /// Peer penalization on invalid proof uses `ReportSource::SyncService`. + pub fn send_rpc_execution_proof( + self: &Arc, + peer_id: PeerId, + execution_proof: Arc, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_rpc_execution_proof(peer_id, (*execution_proof).clone()) + .await + }; + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipExecutionProof(Box::pin(process_fn)), + }) + } + /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn send_rpc_beacon_block( @@ -667,6 +708,46 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to serve an `ExecutionProofsByRange` RPC request (EIP-8025). 
+ pub fn send_execution_proofs_by_range_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: lighthouse_network::rpc::methods::ExecutionProofsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_execution_proofs_by_range_request( + peer_id, + inbound_request_id, + request, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::ExecutionProofsByRangeRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to serve an `ExecutionProofsByRoot` RPC request (EIP-8025). + pub fn send_execution_proofs_by_root_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: lighthouse_network::rpc::methods::ExecutionProofsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_execution_proofs_by_root_request(peer_id, inbound_request_id, request) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::ExecutionProofsByRootRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. 
pub fn send_light_client_bootstrap_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index e443eb78d89..13a63eb10a7 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -7,6 +7,7 @@ use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessStatus, WhenS use itertools::{Itertools, process_results}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + ExecutionProofsByRangeRequest, ExecutionProofsByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; @@ -1323,6 +1324,113 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle an `ExecutionProofsByRange` request from the peer (EIP-8025). + /// + /// Streams all `SignedExecutionProof` objects known for the requested slot range. 
+ pub fn handle_execution_proofs_by_range_request( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRangeRequest, + ) { + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.handle_execution_proofs_by_range_request_inner(peer_id, inbound_request_id, req), + Response::ExecutionProofsByRange, + ); + } + + fn handle_execution_proofs_by_range_request_inner( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRangeRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + debug!( + %peer_id, + start_slot = req.start_slot, + count = req.count, + "Received ExecutionProofsByRange Request" + ); + + let block_roots = + self.get_block_roots_for_slot_range(req.start_slot, req.count, "ExecutionProofsByRange")?; + + let mut proofs_sent = 0usize; + for block_root in block_roots { + for proof in self.chain.get_execution_proofs_by_block_root(block_root) { + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + inbound_request_id, + response: Response::ExecutionProofsByRange(Some(Arc::new(proof))), + }); + proofs_sent += 1; + } + } + + debug!( + %peer_id, + start_slot = req.start_slot, + count = req.count, + returned = proofs_sent, + "ExecutionProofsByRange Response processed" + ); + + Ok(()) + } + + /// Handle an `ExecutionProofsByRoot` request from the peer (EIP-8025). + /// + /// Streams all `SignedExecutionProof` objects known for the requested beacon block roots. 
+ pub fn handle_execution_proofs_by_root_request( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRootRequest, + ) { + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.handle_execution_proofs_by_root_request_inner(peer_id, inbound_request_id, req), + Response::ExecutionProofsByRoot, + ); + } + + fn handle_execution_proofs_by_root_request_inner( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRootRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + debug!( + %peer_id, + num_roots = req.block_roots.len(), + "Received ExecutionProofsByRoot Request" + ); + + let mut proofs_sent = 0usize; + for block_root in req.block_roots.iter() { + for proof in self.chain.get_execution_proofs_by_block_root(*block_root) { + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + inbound_request_id, + response: Response::ExecutionProofsByRoot(Some(Arc::new(proof))), + }); + proofs_sent += 1; + } + } + + debug!( + %peer_id, + num_roots = req.block_roots.len(), + returned = proofs_sent, + "ExecutionProofsByRoot Response processed" + ); + + Ok(()) + } + /// Helper function to ensure single item protocol always end with either a single chunk or an /// error fn terminate_response_single_item Response>( diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 60fe094bb7c..be447cf1a29 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -25,6 +25,7 @@ use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; +use types::execution::eip8025::SignedExecutionProof; /// Handles messages from the network and routes them to the appropriate service to be handled. 
pub struct Router { @@ -272,6 +273,24 @@ impl Router { request, ), ), + RequestType::ExecutionProofsByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_range_request( + peer_id, + inbound_request_id, + request, + ), + ), + RequestType::ExecutionProofsByRoot(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_root_request( + peer_id, + inbound_request_id, + request, + ), + ), _ => {} } } @@ -309,6 +328,12 @@ impl Router { Response::DataColumnsByRange(data_column) => { self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } + Response::ExecutionProofsByRange(execution_proof) => { + self.on_execution_proofs_by_range_response(peer_id, app_request_id, execution_proof); + } + Response::ExecutionProofsByRoot(execution_proof) => { + self.on_execution_proofs_by_root_response(peer_id, app_request_id, execution_proof); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) @@ -486,6 +511,20 @@ impl Router { bls_to_execution_change, ), ), + // EIP-8025: Route execution proof messages to the gossip handler + PubsubMessage::ExecutionProof(execution_proof) => { + trace!( + %peer_id, + "Received execution proof" + ); + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_execution_proof( + message_id, + peer_id, + execution_proof, + ), + ) + } } } @@ -727,6 +766,42 @@ impl Router { } } + pub fn on_execution_proofs_by_range_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + execution_proof: Option>, + ) { + trace!(%peer_id, "Received ExecutionProofsByRange Response"); + if let AppRequestId::Sync(sync_request_id) = app_request_id { + self.send_to_sync(SyncMessage::RpcExecutionProof { + peer_id, + sync_request_id, + execution_proof, + }); + } else { + crit!("All execution proofs by range 
responses should belong to sync"); + } + } + + pub fn on_execution_proofs_by_root_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + execution_proof: Option>, + ) { + trace!(%peer_id, "Received ExecutionProofsByRoot Response"); + if let AppRequestId::Sync(sync_request_id) = app_request_id { + self.send_to_sync(SyncMessage::RpcExecutionProof { + peer_id, + sync_request_id, + execution_proof, + }); + } else { + crit!("All execution proofs by root responses should belong to sync"); + } + } + fn handle_beacon_processor_send_result( &mut self, result: Result<(), crate::network_beacon_processor::Error>, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 338f21ce987..5cd32b2efa4 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -39,6 +39,7 @@ use super::network_context::{ CustodyByRootResult, RangeBlockComponent, RangeRequestId, RpcEvent, SyncNetworkContext, }; use super::peer_sync_info::{PeerSyncType, remote_sync_type}; +use super::proof_sync::ProofSync; use super::range_sync::{EPOCHS_PER_BATCH, RangeSync, RangeSyncType}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; @@ -72,6 +73,7 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use tracing::{debug, error, info, trace}; +use types::execution::eip8025::SignedExecutionProof; use types::{ BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, Slot, }; @@ -132,6 +134,13 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// An execution proof has been received from the RPC. + RpcExecutionProof { + sync_request_id: SyncRequestId, + peer_id: PeerId, + execution_proof: Option>, + }, + /// A block with an unknown parent has been received. 
UnknownParentBlock(PeerId, Arc>, Hash256), @@ -248,6 +257,9 @@ pub struct SyncManager { /// The object handling long-range batch load-balanced syncing. range_sync: RangeSync, + /// EIP-8025: catch-up mechanism for missing execution proofs. + proof_sync: ProofSync, + /// Backfill syncing. backfill_sync: BackFillSync, @@ -314,11 +326,15 @@ impl SyncManager { ), range_sync: RangeSync::new(beacon_chain.clone()), backfill_sync: BackFillSync::new(beacon_chain.clone(), network_globals.clone()), - custody_backfill_sync: CustodyBackFillSync::new(beacon_chain.clone(), network_globals), + custody_backfill_sync: CustodyBackFillSync::new( + beacon_chain.clone(), + network_globals.clone(), + ), block_lookups: BlockLookups::new(), notified_unknown_roots: LRUTimeCache::new(Duration::from_secs( NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS, )), + proof_sync: ProofSync::new(beacon_chain.clone()), } } @@ -368,6 +384,44 @@ impl SyncManager { self.handle_new_execution_engine_state(state); } + #[cfg(test)] + pub(crate) fn poll_proof_sync(&mut self) { + self.proof_sync.poll(&mut self.network); + } + + #[cfg(test)] + pub(crate) fn proof_sync_state(&self) -> super::proof_sync::ProofSyncState { + self.proof_sync.state() + } + + #[cfg(test)] + pub(crate) fn proof_sync_in_flight_count(&self) -> usize { + self.proof_sync.in_flight_count() + } + + #[cfg(test)] + pub(crate) fn set_proof_sync_missing( + &mut self, + missing: Vec, + ) { + self.proof_sync.test_missing_proofs = Some(missing); + } + + #[cfg(test)] + pub(crate) fn start_proof_sync(&mut self) { + self.proof_sync.start(); + } + + #[cfg(test)] + pub(crate) fn pause_proof_sync(&mut self) { + self.proof_sync.pause(); + } + + #[cfg(test)] + pub(crate) fn force_proof_sync_fill_mode(&mut self) { + self.proof_sync.enter_fill_mode_for_testing(); + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } @@ -503,6 +557,12 @@ impl SyncManager { SyncRequestId::DataColumnsByRange(req_id) => { 
self.on_data_columns_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::ExecutionProofsByRange(req_id) => { + debug!(%peer_id, ?req_id, "Execution proofs by range request failed"); + } + SyncRequestId::ExecutionProofsByRoot(req_id) => { + debug!(%peer_id, ?req_id, "Execution proofs by root request failed"); + } } } @@ -697,6 +757,7 @@ impl SyncManager { self.backfill_sync.pause(); self.custody_backfill_sync .pause("Range sync in progress".to_string()); + self.proof_sync.pause(); SyncState::SyncingFinalized { start_slot, @@ -710,6 +771,7 @@ impl SyncManager { self.backfill_sync.pause(); self.custody_backfill_sync .pause("Range sync in progress".to_string()); + self.proof_sync.pause(); SyncState::SyncingHead { start_slot, @@ -726,15 +788,11 @@ impl SyncManager { // If we have become synced - Subscribe to all the core subnet topics // We don't need to subscribe if the old state is a state that would have already // invoked this call. - if new_state.is_synced() - && !matches!( - old_state, - SyncState::Synced - | SyncState::BackFillSyncing { .. } - | SyncState::CustodyBackFillSyncing { .. } - ) - { + if new_state.is_synced() && !old_state.is_synced() { self.network.subscribe_core_topics(); + if self.network_globals().config.enable_execution_proof { + self.proof_sync.start(); + } } } } @@ -750,7 +808,10 @@ impl SyncManager { .as_ref() .map(|el| el.get_responsiveness_watch()) .into(); - futures::stream::iter(ee_responsiveness_watch.await).flatten() + match ee_responsiveness_watch.await.flatten() { + Some(watch) => watch.left_stream(), + None => futures::stream::empty().right_stream(), + } }; // min(LOOKUP_MAX_DURATION_*) is 15 seconds. 
The cost of calling prune_lookups more often is @@ -767,6 +828,9 @@ impl SyncManager { self.chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch(); let mut epoch_interval = tokio::time::interval(Duration::from_secs(epoch_duration)); + // Poll ProofSync every slot to request any missing execution proofs. + let mut proof_sync_interval = tokio::time::interval(self.chain.slot_clock.slot_duration()); + // process any inbound messages loop { tokio::select! { @@ -788,6 +852,9 @@ impl SyncManager { _ = epoch_interval.tick() => { self.update_sync_state(); } + _ = proof_sync_interval.tick() => { + self.proof_sync.poll(&mut self.network); + } } } } @@ -833,6 +900,13 @@ impl SyncManager { } => { self.rpc_data_column_received(sync_request_id, peer_id, data_column, seen_timestamp) } + SyncMessage::RpcExecutionProof { + sync_request_id, + peer_id, + execution_proof, + } => { + self.rpc_execution_proof_received(sync_request_id, peer_id, execution_proof); + } SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -1186,6 +1260,41 @@ impl SyncManager { } } + fn rpc_execution_proof_received( + &mut self, + sync_request_id: SyncRequestId, + peer_id: PeerId, + execution_proof: Option>, + ) { + let Some(proof) = execution_proof else { + // Stream termination: clean up tracking map entry. + match &sync_request_id { + SyncRequestId::ExecutionProofsByRange(id) => { + self.network.on_execution_proofs_by_range_terminated(id); + self.proof_sync.on_range_request_terminated(id); + } + SyncRequestId::ExecutionProofsByRoot(id) => { + self.network.on_execution_proofs_by_root_terminated(id); + self.proof_sync.on_request_terminated(id); + } + other => { + debug!(%peer_id, ?other, "Unexpected sync_request_id for execution proof stream termination"); + } + } + return; + }; + + // Forward to the beacon processor for async BLS + proof engine verification. 
+ // Peer penalization for invalid proofs is handled inside `process_rpc_execution_proof`. + if let Err(e) = self + .network + .beacon_processor() + .send_rpc_execution_proof(peer_id, proof) + { + debug!(%peer_id, error = ?e, "Failed to send RPC execution proof to beacon processor"); + } + } + fn on_single_blob_response( &mut self, id: SingleLookupReqId, diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 054bab654c2..025bd9e792d 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -9,6 +9,7 @@ mod custody_backfill_sync; pub mod manager; mod network_context; mod peer_sync_info; +mod proof_sync; mod range_data_column_batch_request; mod range_sync; #[cfg(test)] diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 00d4bf865d8..ec59decd9e9 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -21,14 +21,19 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; +use lighthouse_network::Eth2Enr; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, ExecutionProofsByRangeRequest, + ExecutionProofsByRootRequest, +}; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; pub use lighthouse_network::service::api_types::RangeRequestId; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId, - DataColumnsByRootRequester, Id, 
SingleLookupReqId, SyncRequestId, + DataColumnsByRootRequester, ExecutionProofsByRangeRequestId, ExecutionProofsByRootRequestId, + Id, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; use lighthouse_tracing::{SPAN_OUTGOING_BLOCK_BY_ROOT_REQUEST, SPAN_OUTGOING_RANGE_REQUEST}; @@ -117,6 +122,8 @@ pub enum RpcRequestSendError { pub enum NoPeerError { BlockPeer, CustodyPeer(ColumnIndex), + /// No connected peer with `execution_proof_enabled()` in their ENR. + ProofPeer, } #[derive(Debug, PartialEq, Eq)] @@ -224,6 +231,11 @@ pub struct SyncNetworkContext { custody_backfill_data_column_batch_requests: FnvHashMap>, + /// Tracking map for active ExecutionProofsByRange requests (request ID → serving peer). + execution_proofs_by_range_requests: FnvHashMap, + /// Tracking map for active ExecutionProofsByRoot requests (request ID → serving peer). + execution_proofs_by_root_requests: FnvHashMap, + /// Whether the ee is online. If it's not, we don't allow access to the /// `beacon_processor_send`. 
execution_engine_state: EngineState, @@ -301,6 +313,8 @@ impl SyncNetworkContext { custody_by_root_requests: <_>::default(), components_by_range_requests: FnvHashMap::default(), custody_backfill_data_column_batch_requests: FnvHashMap::default(), + execution_proofs_by_range_requests: FnvHashMap::default(), + execution_proofs_by_root_requests: FnvHashMap::default(), network_beacon_processor, chain, fork_context, @@ -331,6 +345,8 @@ impl SyncNetworkContext { // components_by_range_requests is a meta request of various _by_range requests components_by_range_requests: _, custody_backfill_data_column_batch_requests: _, + execution_proofs_by_range_requests, + execution_proofs_by_root_requests, execution_engine_state: _, network_beacon_processor: _, chain: _, @@ -361,12 +377,26 @@ impl SyncNetworkContext { .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRange(*req_id)); + // Collect execution proof request IDs for this peer. These are soft requests and failures + // are handled gracefully (debug log only), so they don't block sync. + let ep_by_range_ids = execution_proofs_by_range_requests + .iter() + .filter(|(_, p)| *p == peer_id) + .map(|(id, _)| SyncRequestId::ExecutionProofsByRange(*id)) + .collect::>(); + let ep_by_root_ids = execution_proofs_by_root_requests + .iter() + .filter(|(_, p)| *p == peer_id) + .map(|(id, _)| SyncRequestId::ExecutionProofsByRoot(*id)) + .collect::>(); blocks_by_root_ids .chain(blobs_by_root_ids) .chain(data_column_by_root_ids) .chain(blocks_by_range_ids) .chain(blobs_by_range_ids) .chain(data_column_by_range_ids) + .chain(ep_by_range_ids) + .chain(ep_by_root_ids) .collect() } @@ -375,6 +405,103 @@ impl SyncNetworkContext { .custody_peers_for_column(column_index) } + /// Returns the first connected peer whose ENR advertises execution proof support (`ep = true`). 
+ fn find_any_proof_capable_peer(&self) -> Option { + let db = self.network_globals().peers.read(); + db.connected_peer_ids() + .find(|peer_id| { + db.peer_info(peer_id) + .and_then(|info| info.enr()) + .map(|enr| enr.execution_proof_enabled()) + .unwrap_or(false) + }) + .copied() + } + + /// Send a `ExecutionProofsByRange` request to any connected proof-capable peer. + /// + /// Returns `Err(NoPeer)` if no connected peer has `ep = true` in their ENR. Callers + /// treat this as a soft failure — gossip serves as the fallback. + pub fn request_execution_proofs_by_range( + &mut self, + start_slot: Slot, + count: u64, + ) -> Result { + let peer_id = self + .find_any_proof_capable_peer() + .ok_or(RpcRequestSendError::NoPeer(NoPeerError::ProofPeer))?; + let id = ExecutionProofsByRangeRequestId { id: self.next_id() }; + let request = ExecutionProofsByRangeRequest { + start_slot: start_slot.as_u64(), + count, + }; + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: RequestType::ExecutionProofsByRange(request), + app_request_id: AppRequestId::Sync(SyncRequestId::ExecutionProofsByRange(id)), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?; + debug!( + method = "ExecutionProofsByRange", + %start_slot, + count, + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + self.execution_proofs_by_range_requests.insert(id, peer_id); + Ok(id) + } + + /// Send a `ExecutionProofsByRoot` request for `block_root` to any connected proof-capable peer. + /// + /// Returns `Err(NoPeer)` if no connected peer has `ep = true` in their ENR. 
+ pub fn request_execution_proofs_by_root( + &mut self, + block_root: Hash256, + ) -> Result { + let peer_id = self + .find_any_proof_capable_peer() + .ok_or(RpcRequestSendError::NoPeer(NoPeerError::ProofPeer))?; + let max_request_blocks = self + .chain + .spec + .max_request_blocks(self.fork_context.current_fork_name()); + let request = ExecutionProofsByRootRequest::new(vec![block_root], max_request_blocks) + .map_err(RpcRequestSendError::InternalError)?; + let id = ExecutionProofsByRootRequestId { id: self.next_id() }; + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: RequestType::ExecutionProofsByRoot(request), + app_request_id: AppRequestId::Sync(SyncRequestId::ExecutionProofsByRoot(id)), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?; + debug!( + method = "ExecutionProofsByRoot", + block_root = %block_root, + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + self.execution_proofs_by_root_requests.insert(id, peer_id); + Ok(id) + } + + /// Remove a completed (or terminated) `ExecutionProofsByRange` request from the tracking map. + pub fn on_execution_proofs_by_range_terminated( + &mut self, + id: &ExecutionProofsByRangeRequestId, + ) { + self.execution_proofs_by_range_requests.remove(id); + } + + /// Remove a completed (or terminated) `ExecutionProofsByRoot` request from the tracking map. 
+ pub fn on_execution_proofs_by_root_terminated(&mut self, id: &ExecutionProofsByRootRequestId) { + self.execution_proofs_by_root_requests.remove(id); + } + pub fn network_globals(&self) -> &NetworkGlobals { &self.network_beacon_processor.network_globals } @@ -428,6 +555,9 @@ impl SyncNetworkContext { // components_by_range_requests is a meta request of various _by_range requests components_by_range_requests: _, custody_backfill_data_column_batch_requests: _, + // execution proof requests are soft, fire-and-forget; not counted for load balancing + execution_proofs_by_range_requests: _, + execution_proofs_by_root_requests: _, execution_engine_state: _, network_beacon_processor: _, chain: _, diff --git a/beacon_node/network/src/sync/proof_sync.rs b/beacon_node/network/src/sync/proof_sync.rs new file mode 100644 index 00000000000..8f9701aa578 --- /dev/null +++ b/beacon_node/network/src/sync/proof_sync.rs @@ -0,0 +1,230 @@ +//! ProofSync: catch-up mechanism for EIP-8025 execution proofs. +//! +//! After range sync completes, `ProofSync` issues an `ExecutionProofsByRange` request to +//! bootstrap proofs for the newly-synced window (bootstrap mode), then switches to +//! `FillingByRoot` mode where it issues targeted `ExecutionProofsByRoot` requests for any +//! individual blocks that are still missing proofs. + +use super::network_context::{RpcRequestSendError, SyncNetworkContext}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use execution_layer::MissingProofInfo; +use fnv::FnvHashMap; +use lighthouse_network::service::api_types::{ + ExecutionProofsByRangeRequestId, ExecutionProofsByRootRequestId, +}; +use std::collections::HashSet; +use std::sync::Arc; +use tracing::debug; +use types::{EthSpec, Hash256}; + +/// Maximum number of concurrent `ExecutionProofsByRoot` requests. +const DEFAULT_MAX_CONCURRENT: usize = 4; + +/// Operating mode for the proof sync subsystem. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum ProofSyncState { + /// Not running - range sync is active. + Idle, + /// Range sync is completed. Next poll will issue an `ExecutionProofsByRange` request. + PendingRangeRequest, + /// An `ExecutionProofsByRange` request is in-flight. Waiting for the stream to drain. + RangeRequestInFlight, + /// Bootstrap complete. Requesting any remaining missing proofs by root on each poll. + /// Terminal active state until range sync restarts, which resets to `Idle`. + FillingByRoot, +} + +/// Proof sync subsystem for EIP-8025. +/// +/// Operates as a state machine with four modes: +/// - `Idle`: no work to do (range sync active or not yet triggered). +/// - `PendingRangeRequest`: range sync is completed; next poll sends the bootstrap range request. +/// - `RangeRequestInFlight`: waiting for the bootstrap range stream to drain. +/// - `FillingByRoot`: terminal active state; issues per-block by-root requests each poll. +/// +/// Re-entering range sync resets state to `Idle` (via ProofSync::pause()), which cancels any in-flight requests and clears state. Proof sync will +/// automatically restart when range sync completes (via ProofSync::start()), which transitions to `PendingRangeRequest`. +pub struct ProofSync { + /// The beacon chain. + chain: Arc>, + /// The current state of the proof sync subsystem. + state: ProofSyncState, + /// Tracks the in-flight range request ID while in `RangeRequestInFlight` state. + /// `None` in all other states. + range_request_id: Option, + /// In-flight by-root request IDs → `MissingProofInfo` (fill mode). + /// Keeping the full info preserves `existing_proof_types` for awareness of what + /// proof types the remote peer should supply. + in_flight: FnvHashMap, + /// Maximum number of concurrent by-root requests in `FillingByRoot` state. + max_concurrent: usize, + /// Injected missing-proof list for unit testing fill-mode behaviour. 
+ #[cfg(test)]
+ pub test_missing_proofs: Option>,
+}
+
+impl ProofSync {
+ pub fn new(chain: Arc>) -> Self {
+ Self {
+ state: ProofSyncState::Idle,
+ range_request_id: None,
+ chain,
+ in_flight: FnvHashMap::default(),
+ max_concurrent: DEFAULT_MAX_CONCURRENT,
+ #[cfg(test)]
+ test_missing_proofs: None,
+ }
+ }
+
+ /// Returns the current state of the proof sync subsystem.
+ #[cfg(test)]
+ pub fn state(&self) -> ProofSyncState {
+ self.state
+ }
+
+ #[cfg(test)]
+ pub fn in_flight_count(&self) -> usize {
+ self.in_flight.len()
+ }
+
+ /// Force-enter `FillingByRoot` state for tests that need to exercise fill-mode
+ /// behaviour without going through the bootstrap range cycle.
+ #[cfg(test)]
+ pub fn enter_fill_mode_for_testing(&mut self) {
+ self.state = ProofSyncState::FillingByRoot;
+ }
+
+ /// Called by `SyncManager::update_sync_state()` when range sync completes.
+ ///
+ /// Transitions from `Idle` to `PendingRangeRequest`. The next `poll()` will send
+ /// the bootstrap `ExecutionProofsByRange` request.
+ pub fn start(&mut self) {
+ debug!("ProofSync: range sync complete, scheduling proof range request");
+ self.state = ProofSyncState::PendingRangeRequest;
+ }
+
+ /// Called by `SyncManager::update_sync_state()` when entering range sync.
+ ///
+ /// Stops any in-progress proof sync activity and resets to `Idle`.
+ /// Proof sync will automatically restart when range sync completes.
+ pub fn pause(&mut self) {
+ debug!("ProofSync: pausing and resetting to Idle");
+ self.state = ProofSyncState::Idle;
+ self.range_request_id = None;
+ self.in_flight.clear();
+ }
+
+ /// Drive one polling cycle.
+ ///
+ /// No-op while `Idle` (range sync has not completed) or while the bootstrap range
+ /// request is still in flight; otherwise dispatches work for the current state.
+ /// Note: re-entering range sync resets state via `pause()`, not via this method.
+ pub fn poll(&mut self, cx: &mut SyncNetworkContext) { + match &self.state { + ProofSyncState::Idle | ProofSyncState::RangeRequestInFlight => {} + ProofSyncState::PendingRangeRequest => { + self.request_proof_range(cx); + } + ProofSyncState::FillingByRoot => { + // Terminal active state: remain here until range sync restarts. + // On each poll, issue by-root requests for any missing proofs up to + // the concurrency limit. + #[cfg(not(test))] + let missing = self.chain.missing_execution_proofs(); + #[cfg(test)] + let missing = self + .test_missing_proofs + .clone() + .unwrap_or_else(|| self.chain.missing_execution_proofs()); + let in_flight_roots: HashSet = + self.in_flight.values().map(|i| i.root).collect(); + let available = self.max_concurrent.saturating_sub(self.in_flight.len()); + + for info in missing + .into_iter() + .filter(|info| !in_flight_roots.contains(&info.root)) + .take(available) + { + match cx.request_execution_proofs_by_root(info.root) { + Ok(id) => { + debug!( + block_root = %info.root, + existing_proof_types = ?info.existing_proof_types, + "ProofSync: requesting missing proof" + ); + self.in_flight.insert(id, info); + } + Err(RpcRequestSendError::NoPeer(_)) => { + debug!("ProofSync: no proof-capable peer, will retry next poll"); + break; + } + Err(e) => { + debug!(error = ?e, "ProofSync: failed to send proof request"); + } + } + } + } + } + } + + /// Called when an `ExecutionProofsByRange` RPC stream terminates (response `None`). + /// + /// Transitions from `RangeRequestInFlight` to `FillingByRoot`. 
+ pub fn on_range_request_terminated(&mut self, id: &ExecutionProofsByRangeRequestId) {
+ if matches!(&self.state, ProofSyncState::RangeRequestInFlight)
+ && self.range_request_id.as_ref() == Some(id)
+ {
+ debug!("ProofSync: bootstrap range stream complete, switching to fill mode");
+ self.range_request_id = None;
+ self.state = ProofSyncState::FillingByRoot;
+ }
+ }
+
+ /// Called when an `ExecutionProofsByRoot` RPC stream terminates (response `None`).
+ pub fn on_request_terminated(&mut self, id: &ExecutionProofsByRootRequestId) {
+ self.in_flight.remove(id);
+ }
+
+ /// Issue an `ExecutionProofsByRange` bootstrap request covering finalized+1 through
+ /// the current (slot-clock) slot.
+ ///
+ /// Transitions to `RangeRequestInFlight` on success, stays `PendingRangeRequest` if no
+ /// proof-capable peer is available.
+ fn request_proof_range(&mut self, cx: &mut SyncNetworkContext) {
+ let finalized_slot = self
+ .chain
+ .canonical_head
+ .cached_head()
+ .finalized_checkpoint()
+ .epoch
+ .start_slot(T::EthSpec::slots_per_epoch());
+ let start_slot = finalized_slot + 1;
+ // Use the current slot clock rather than the head block slot so that the range
+ // covers any slots after the head block that the EL may have processed.
+ let current_slot = self.chain.slot().unwrap_or_else(|_| self.chain.best_slot());
+ if current_slot < start_slot {
+ debug!("ProofSync: current slot is behind start_slot, switching directly to fill mode");
+ self.state = ProofSyncState::FillingByRoot;
+ return;
+ }
+ let count = current_slot.as_u64() - start_slot.as_u64() + 1;
+
+ match cx.request_execution_proofs_by_range(start_slot, count) {
+ Ok(id) => {
+ debug!(
+ %start_slot,
+ %current_slot,
+ count,
+ "ProofSync: bootstrap range request sent"
+ );
+ self.range_request_id = Some(id);
+ self.state = ProofSyncState::RangeRequestInFlight;
+ }
+ Err(RpcRequestSendError::NoPeer(_)) => {
+ debug!("ProofSync: no proof-capable peer for range request, will retry next poll");
+ // State stays PendingRangeRequest. 
+ } + Err(e) => { + debug!(error = ?e, "ProofSync: range request error"); + } + } + } +} diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 715928906ee..d071491da00 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -353,6 +353,17 @@ impl TestRig { k256::ecdsa::SigningKey::random(&mut self.rng_08).into() } + pub fn new_connected_proof_capable_peer(&mut self) -> PeerId { + let key = self.determinstic_key(); + let peer_id = self + .network_globals + .peers + .write() + .__add_connected_proof_capable_peer_testing_only(key); + self.log(&format!("Added proof-capable peer {peer_id:?}")); + peer_id + } + pub fn new_connected_peers_for_peerdas(&mut self) { // Enough sampling peers with few columns for _ in 0..100 { @@ -705,7 +716,7 @@ impl TestRig { self.send_sync_message(SyncMessage::Disconnect(peer_id)); } - fn drain_network_rx(&mut self) { + pub fn drain_network_rx(&mut self) { while let Ok(event) = self.network_rx.try_recv() { self.network_rx_queue.push(event); } diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index cb728a90c1b..1608532415e 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -4,11 +4,14 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use crate::sync::manager::SLOT_IMPORT_TOLERANCE; use crate::sync::network_context::RangeRequestId; +use crate::sync::proof_sync::ProofSyncState; use crate::sync::range_sync::RangeSyncType; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RpcBlock}; use beacon_processor::WorkType; +use bls::SignatureBytes; +use execution_layer::MissingProofInfo; use lighthouse_network::rpc::RequestType; use 
lighthouse_network::rpc::methods::{ BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, @@ -16,9 +19,10 @@ use lighthouse_network::rpc::methods::{ }; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, - SyncRequestId, + ExecutionProofsByRangeRequestId, ExecutionProofsByRootRequestId, SyncRequestId, }; use lighthouse_network::{PeerId, SyncInfo}; +use std::sync::Arc; use std::time::Duration; use types::{ BlobSidecarList, BlockImportSource, Epoch, EthSpec, Hash256, MinimalEthSpec as E, @@ -323,6 +327,82 @@ impl TestRig { blocks_req_id.parent_request_id.requester } + /// Assert an `ExecutionProofsByRange` RPC was sent to the network. + /// Returns `(request_id, peer_id)`. + fn find_execution_proofs_by_range_request( + &mut self, + ) -> (ExecutionProofsByRangeRequestId, PeerId) { + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::ExecutionProofsByRange(_), + app_request_id: AppRequestId::Sync(SyncRequestId::ExecutionProofsByRange(id)), + } => Some((*id, *peer_id)), + _ => None, + }) + .unwrap_or_else(|e| panic!("Expected ExecutionProofsByRange request: {e:?}")) + } + + /// Assert no `ExecutionProofsByRange` RPC exists in the queue. + #[track_caller] + fn expect_no_execution_proof_range_request(&mut self) { + self.drain_network_rx(); + let found = self.network_rx_queue.iter().any(|ev| { + matches!( + ev, + NetworkMessage::SendRequest { + request: RequestType::ExecutionProofsByRange(_), + .. + } + ) + }); + assert!( + !found, + "Expected no ExecutionProofsByRange request, but one was found" + ); + } + + /// Assert an `ExecutionProofsByRoot` RPC was sent; return `(id, peer_id)`. 
+ fn find_execution_proofs_by_root_request( + &mut self, + ) -> (ExecutionProofsByRootRequestId, PeerId) { + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::ExecutionProofsByRoot(_), + app_request_id: AppRequestId::Sync(SyncRequestId::ExecutionProofsByRoot(id)), + } => Some((*id, *peer_id)), + _ => None, + }) + .unwrap_or_else(|e| panic!("Expected ExecutionProofsByRoot request: {e:?}")) + } + + /// Send stream-termination for an `ExecutionProofsByRange` request. + fn terminate_execution_proofs_by_range( + &mut self, + req_id: ExecutionProofsByRangeRequestId, + peer_id: PeerId, + ) { + self.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(req_id), + peer_id, + execution_proof: None, + }); + } + + /// Send stream-termination for an `ExecutionProofsByRoot` request. + fn terminate_execution_proofs_by_root( + &mut self, + req_id: ExecutionProofsByRootRequestId, + peer_id: PeerId, + ) { + self.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRoot(req_id), + peer_id, + execution_proof: None, + }); + } + fn find_and_complete_processing_chain_segment(&mut self, id: ChainSegmentProcessId) { self.pop_received_processor_event(|ev| { (ev.work_type() == WorkType::ChainSegment).then_some(()) @@ -601,3 +681,478 @@ fn finalized_sync_not_enough_custody_peers_on_start() { let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; r.complete_and_process_range_sync_until(last_epoch, filter()); } + +// --- ProofSync state-machine tests --- + +// These tests exercise the `ProofSync` state machine directly, covering its full lifecycle: +// Bootstrap (Idle → PendingRangeRequest → RangeRequestInFlight → FillingByRoot), +// pause/resume semantics, concurrency cap, in-flight deduplication, and response forwarding. 
+ +/// Drive ProofSync through the full bootstrap cycle: +/// start() → PendingRangeRequest → poll() → RangeRequestInFlight → terminate → FillingByRoot. +/// Returns the `(req_id, peer_id)` of the range request that was terminated. +/// +/// Advances the harness slot clock by 1 so that `chain.slot()` > `finalized_start_slot`, +/// which is required for the range request to be sent (otherwise the genesis check triggers). +fn bootstrap_proof_sync_to_fill_mode( + rig: &mut TestRig, +) -> (ExecutionProofsByRangeRequestId, PeerId) { + // Advance the slot clock so current_slot >= start_slot (genesis check does not fire). + rig.harness.advance_slot(); + rig.sync_manager.start_proof_sync(); + rig.sync_manager.poll_proof_sync(); + let (req_id, peer_id) = rig.find_execution_proofs_by_range_request(); + rig.terminate_execution_proofs_by_range(req_id, peer_id); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::FillingByRoot, + "Expected FillingByRoot after range stream termination" + ); + (req_id, peer_id) +} + +/// Build a `MissingProofInfo` with a fresh random root for test seeding. +fn missing_proof(root: Hash256) -> MissingProofInfo { + MissingProofInfo { + root, + existing_proof_types: vec![], + } +} + +/// Build a minimal `SignedExecutionProof` suitable for RPC response messages. +fn make_signed_proof() -> Arc { + Arc::new(types::SignedExecutionProof { + message: types::ExecutionProof::default(), + validator_index: 0, + signature: SignatureBytes::empty(), + }) +} + +/// Test 1: The default initial state of ProofSync is Idle and polling it emits no network events. +#[test] +fn test_proof_sync_starts_in_idle() { + let mut rig = TestRig::test_setup(); + assert_eq!(rig.sync_manager.proof_sync_state(), ProofSyncState::Idle); + rig.sync_manager.poll_proof_sync(); + rig.expect_empty_network(); +} + +/// Test 2: After `start()`, the next `poll()` sends an `ExecutionProofsByRange` RPC and +/// transitions to `RangeRequestInFlight`. 
+#[test] +fn test_proof_sync_pending_range_issues_request_on_poll() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + rig.harness.advance_slot(); // current_slot must be >= start_slot for range request + + rig.sync_manager.start_proof_sync(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::PendingRangeRequest, + "Expected PendingRangeRequest after start()" + ); + + rig.sync_manager.poll_proof_sync(); + let _ = rig.find_execution_proofs_by_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight, + "Expected RangeRequestInFlight after polling" + ); +} + +/// Test 3: With no proof-capable peer, `poll()` in PendingRangeRequest stays in that state +/// and emits no request (soft failure). Adding a peer and polling again sends the request. +#[test] +fn test_proof_sync_no_peer_stays_pending() { + let mut rig = TestRig::test_setup(); + rig.harness.advance_slot(); // current_slot must be >= start_slot for range request + + rig.sync_manager.start_proof_sync(); + // No proof-capable peer yet — poll should be a no-op. + rig.sync_manager.poll_proof_sync(); + rig.expect_no_execution_proof_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::PendingRangeRequest, + "Should stay PendingRangeRequest when no proof-capable peer" + ); + + // Now add a proof-capable peer; the next poll should send the request. + let _proof_peer = rig.new_connected_proof_capable_peer(); + rig.sync_manager.poll_proof_sync(); + let _ = rig.find_execution_proofs_by_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight + ); +} + +/// Test 4: In `RangeRequestInFlight`, `poll()` must not send any requests. 
+#[test] +fn test_proof_sync_in_flight_poll_is_noop() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + rig.harness.advance_slot(); + + rig.sync_manager.start_proof_sync(); + rig.sync_manager.poll_proof_sync(); + let _ = rig.find_execution_proofs_by_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight + ); + + // A second poll while in-flight should produce nothing. + rig.sync_manager.poll_proof_sync(); + rig.expect_empty_network(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight + ); +} + +/// Test 5: Stream termination with the correct ID transitions from `RangeRequestInFlight` +/// to `FillingByRoot`. +#[test] +fn test_proof_sync_range_termination_enters_fill_mode() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + rig.harness.advance_slot(); + + rig.sync_manager.start_proof_sync(); + rig.sync_manager.poll_proof_sync(); + let (req_id, peer_id) = rig.find_execution_proofs_by_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight + ); + + rig.terminate_execution_proofs_by_range(req_id, peer_id); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::FillingByRoot, + "Should enter FillingByRoot after correct range termination" + ); +} + +/// Test 6: Stream termination with a wrong ID is ignored; state stays `RangeRequestInFlight`. 
+#[test] +fn test_proof_sync_wrong_id_termination_ignored() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + rig.harness.advance_slot(); + + rig.sync_manager.start_proof_sync(); + rig.sync_manager.poll_proof_sync(); + let (_req_id, peer_id) = rig.find_execution_proofs_by_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight + ); + + // Terminate with a different (fake) ID. + let fake_id = ExecutionProofsByRangeRequestId { id: 9999 }; + rig.terminate_execution_proofs_by_range(fake_id, peer_id); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight, + "Wrong ID should not trigger transition" + ); +} + +/// Test 7: In `FillingByRoot` with no missing proofs, `poll()` is a no-op. +#[test] +fn test_proof_sync_fill_mode_no_missing_proofs() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + + // Bootstrap to FillingByRoot; test_missing_proofs stays None → returns empty. + let _ = bootstrap_proof_sync_to_fill_mode(&mut rig); + + rig.sync_manager.poll_proof_sync(); + rig.expect_empty_network(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::FillingByRoot + ); +} + +/// Test 8: In `FillingByRoot` with seeded missing proofs, `poll()` sends one +/// `ExecutionProofsByRoot` request per missing proof. 
+#[test] +fn test_proof_sync_fill_mode_issues_by_root_requests() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + + let _ = bootstrap_proof_sync_to_fill_mode(&mut rig); + + let missing = vec![ + missing_proof(Hash256::random()), + missing_proof(Hash256::random()), + ]; + rig.sync_manager.set_proof_sync_missing(missing); + rig.sync_manager.poll_proof_sync(); + + let _ = rig.find_execution_proofs_by_root_request(); + let _ = rig.find_execution_proofs_by_root_request(); + assert_eq!(rig.sync_manager.proof_sync_in_flight_count(), 2); +} + +/// Test 9: `poll()` in `FillingByRoot` must not exceed `DEFAULT_MAX_CONCURRENT = 4` +/// in-flight requests even when more missing proofs are present. +#[test] +fn test_proof_sync_fill_mode_respects_max_concurrent() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + + let _ = bootstrap_proof_sync_to_fill_mode(&mut rig); + + // Seed 6 distinct missing proofs; only 4 should be requested. + let missing: Vec = (0..6).map(|_| missing_proof(Hash256::random())).collect(); + rig.sync_manager.set_proof_sync_missing(missing); + rig.sync_manager.poll_proof_sync(); + + // Consume exactly 4 requests. + for _ in 0..4 { + let _ = rig.find_execution_proofs_by_root_request(); + } + assert_eq!( + rig.sync_manager.proof_sync_in_flight_count(), + 4, + "Should have exactly 4 in-flight requests (max_concurrent)" + ); + // No 5th request should be present. + rig.expect_empty_network(); +} + +/// Test 10: In-flight roots must not be re-requested on a subsequent poll. 
+#[test] +fn test_proof_sync_fill_mode_skips_in_flight_roots() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + + let _ = bootstrap_proof_sync_to_fill_mode(&mut rig); + + let missing = vec![ + missing_proof(Hash256::random()), + missing_proof(Hash256::random()), + ]; + rig.sync_manager.set_proof_sync_missing(missing); + + // First poll: 2 requests sent, in_flight = 2. + rig.sync_manager.poll_proof_sync(); + let _ = rig.find_execution_proofs_by_root_request(); + let _ = rig.find_execution_proofs_by_root_request(); + assert_eq!(rig.sync_manager.proof_sync_in_flight_count(), 2); + + // Second poll with same missing list: roots already in-flight, no new requests. + rig.sync_manager.poll_proof_sync(); + rig.expect_empty_network(); + assert_eq!( + rig.sync_manager.proof_sync_in_flight_count(), + 2, + "In-flight count should be unchanged after second poll" + ); +} + +/// Test 11: `NoPeer` from `request_execution_proofs_by_root` must stop iteration +/// and leave in_flight at 0. +/// +/// Uses `force_proof_sync_fill_mode` to skip the bootstrap cycle so there is no +/// proof-capable peer in the peerDB when the fill poll fires. +#[test] +fn test_proof_sync_fill_mode_no_peer_breaks() { + let mut rig = TestRig::test_setup(); + // No proof-capable peer — find_any_proof_capable_peer returns None → NoPeer. + rig.sync_manager.force_proof_sync_fill_mode(); + + let missing = vec![ + missing_proof(Hash256::random()), + missing_proof(Hash256::random()), + ]; + rig.sync_manager.set_proof_sync_missing(missing); + rig.sync_manager.poll_proof_sync(); + + rig.expect_empty_network(); + assert_eq!( + rig.sync_manager.proof_sync_in_flight_count(), + 0, + "NoPeer should break iteration leaving in_flight empty" + ); +} + +/// Test 12: `on_request_terminated` removes the entry from `in_flight`. 
+#[test] +fn test_proof_sync_on_request_terminated_clears_in_flight() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + + let _ = bootstrap_proof_sync_to_fill_mode(&mut rig); + + let missing = vec![missing_proof(Hash256::random())]; + rig.sync_manager.set_proof_sync_missing(missing); + rig.sync_manager.poll_proof_sync(); + + let (req_id, peer_id) = rig.find_execution_proofs_by_root_request(); + assert_eq!(rig.sync_manager.proof_sync_in_flight_count(), 1); + + rig.terminate_execution_proofs_by_root(req_id, peer_id); + assert_eq!( + rig.sync_manager.proof_sync_in_flight_count(), + 0, + "in_flight should be empty after termination" + ); +} + +/// Test 13: `pause()` clears `in_flight`, clears `range_request_id`, and resets to `Idle`. +#[test] +fn test_proof_sync_pause_resets_to_idle() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + + let _ = bootstrap_proof_sync_to_fill_mode(&mut rig); + + // Seed some in-flight requests. + let missing = vec![ + missing_proof(Hash256::random()), + missing_proof(Hash256::random()), + ]; + rig.sync_manager.set_proof_sync_missing(missing); + rig.sync_manager.poll_proof_sync(); + let _ = rig.find_execution_proofs_by_root_request(); + let _ = rig.find_execution_proofs_by_root_request(); + assert!(rig.sync_manager.proof_sync_in_flight_count() > 0); + + // Pause resets everything. + rig.sync_manager.pause_proof_sync(); + assert_eq!(rig.sync_manager.proof_sync_state(), ProofSyncState::Idle); + assert_eq!(rig.sync_manager.proof_sync_in_flight_count(), 0); + + // Polling in Idle emits nothing. + rig.sync_manager.poll_proof_sync(); + rig.expect_empty_network(); +} + +/// Test 14: Re-entering range sync (pause) then completing again restarts the full bootstrap. 
+#[test] +fn test_proof_sync_re_enter_range_resets_then_restarts() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + rig.harness.advance_slot(); // current_slot must be >= start_slot for range request + + // First bootstrap cycle. + rig.sync_manager.start_proof_sync(); + rig.sync_manager.poll_proof_sync(); + let (req_id, peer_id) = rig.find_execution_proofs_by_range_request(); + rig.terminate_execution_proofs_by_range(req_id, peer_id); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::FillingByRoot + ); + + // Re-entering range sync pauses ProofSync. + rig.sync_manager.pause_proof_sync(); + assert_eq!(rig.sync_manager.proof_sync_state(), ProofSyncState::Idle); + + // Range sync completes again → start() → PendingRangeRequest. + rig.sync_manager.start_proof_sync(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::PendingRangeRequest + ); + + // New poll sends a fresh range request. + rig.sync_manager.poll_proof_sync(); + let _ = rig.find_execution_proofs_by_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight + ); +} + +/// Test 15: When `current_slot < start_slot` (slot clock behind finalized + 1), skip the +/// range request and transition directly to `FillingByRoot`. +/// +/// At genesis the slot clock is at slot 0, finalized epoch is 0, +/// so `start_slot = 1` and `current_slot (0) < start_slot (1)` → skip range request. +#[test] +fn test_proof_sync_count_zero_skips_to_fill() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + + // Chain starts at slot 0, finalized_epoch = 0 → count == 0. + rig.sync_manager.start_proof_sync(); + rig.sync_manager.poll_proof_sync(); + + // No range request should have been issued. 
+ rig.expect_no_execution_proof_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::FillingByRoot, + "Should skip directly to FillingByRoot when count == 0" + ); +} + +/// Test 16: A proof arriving on an `ExecutionProofsByRange` stream must be forwarded +/// to the beacon processor as `GossipExecutionProof` work. +#[test] +fn test_proof_sync_range_response_forwarded_to_processor() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + rig.harness.advance_slot(); + + rig.sync_manager.start_proof_sync(); + rig.sync_manager.poll_proof_sync(); + let (req_id, peer_id) = rig.find_execution_proofs_by_range_request(); + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight + ); + + // Send a proof (non-termination) response. + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(req_id), + peer_id, + execution_proof: Some(make_signed_proof()), + }); + + rig.pop_received_processor_event(|ev| { + (ev.work_type() == WorkType::GossipExecutionProof).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected GossipExecutionProof work event: {e:?}")); + + // State remains RangeRequestInFlight (stream not yet terminated). + assert_eq!( + rig.sync_manager.proof_sync_state(), + ProofSyncState::RangeRequestInFlight + ); +} + +/// Test 17: A proof arriving on an `ExecutionProofsByRoot` stream must be forwarded +/// to the beacon processor as `GossipExecutionProof` work. 
+#[test] +fn test_proof_sync_root_response_forwarded_to_processor() { + let mut rig = TestRig::test_setup(); + let _proof_peer = rig.new_connected_proof_capable_peer(); + + let _ = bootstrap_proof_sync_to_fill_mode(&mut rig); + + let missing = vec![missing_proof(Hash256::random())]; + rig.sync_manager.set_proof_sync_missing(missing); + rig.sync_manager.poll_proof_sync(); + + let (req_id, peer_id) = rig.find_execution_proofs_by_root_request(); + + // Send a proof (non-termination) response on the by-root stream. + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRoot(req_id), + peer_id, + execution_proof: Some(make_signed_proof()), + }); + + rig.pop_received_processor_event(|ev| { + (ev.work_type() == WorkType::GossipExecutionProof).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected GossipExecutionProof work event: {e:?}")); +} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e4c7c6ff1fe..c30aed7b50f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -817,8 +817,20 @@ pub fn cli_app() -> Command { .alias("execution-endpoints") .help("Server endpoint for an execution layer JWT-authenticated HTTP \ JSON-RPC connection. Uses the same endpoint to populate the \ - deposit cache.") - .required(true) + deposit cache. Optional - at least one of --execution-endpoint \ + or --proof-engine-endpoint must be provided.") + .required(false) + .action(ArgAction::Set) + .display_order(0) + ) + .arg( + Arg::new("proof-engine-endpoint") + .long("proof-engine-endpoint") + .value_name("PROOF-ENGINE-ENDPOINT") + .help("Server endpoint for an EIP-8025 proof engine HTTP JSON-RPC connection. \ + Does not require JWT authentication. 
Optional - at least one of \ + --execution-endpoint or --proof-engine-endpoint must be provided.") + .required(false) .action(ArgAction::Set) .display_order(0) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 26dd3b6642e..be6db3de06f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -269,42 +269,73 @@ pub fn get_config( client_config.http_metrics.allocator_metrics_enabled = false; } - // `--execution-endpoint` is required now. - let endpoints: String = clap_utils::parse_required(cli_args, "execution-endpoint")?; + // Parse execution endpoint (optional) + let execution_endpoint: Option = + if let Some(endpoints) = cli_args.get_one::("execution-endpoint") { + Some(parse_only_one_value( + endpoints.as_str(), + SensitiveUrl::parse, + "--execution-endpoint", + )?) + } else { + None + }; + + // Parse proof engine endpoint (optional) + let proof_engine_endpoint: Option = + if let Some(endpoints) = cli_args.get_one::("proof-engine-endpoint") { + Some(parse_only_one_value( + endpoints.as_str(), + SensitiveUrl::parse, + "--proof-engine-endpoint", + )?) + } else { + None + }; + + // Validation: at least one endpoint must be provided + if execution_endpoint.is_none() && proof_engine_endpoint.is_none() { + return Err( + "At least one of --execution-endpoint or --proof-engine-endpoint must be provided" + .to_string(), + ); + } + let mut el_config = execution_layer::Config::default(); - // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied. - let execution_endpoint = parse_only_one_value( - endpoints.as_str(), - SensitiveUrl::parse, - "--execution-endpoint", - )?; - - // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via - // file_path or directly as string. - let secret_file: PathBuf; - // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. 
- if let Some(secret_files) = cli_args.get_one::("execution-jwt") { - secret_file = parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt")?; - // Check if the JWT secret key is passed directly via cli flag and persist it to the default - // file location. - } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") { - use std::fs::File; - use std::io::Write; - secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); - let mut jwt_secret_key_file = File::create(secret_file.clone()) - .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; - jwt_secret_key_file - .write_all(jwt_secret_key.as_bytes()) - .map_err(|e| { - format!( - "Error occurred while writing to jwt_secret_key file: {:?}", - e - ) - })?; + // JWT is required only if execution_endpoint is provided + let secret_file: Option = if execution_endpoint.is_some() { + // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. + if let Some(secret_files) = cli_args.get_one::("execution-jwt") { + Some(parse_only_one_value( + secret_files, + PathBuf::from_str, + "--execution-jwt", + )?) + // Check if the JWT secret key is passed directly via cli flag and persist it to the default + // file location. + } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") + { + use std::fs::File; + use std::io::Write; + let secret_file_path = client_config.data_dir().join(DEFAULT_JWT_FILE); + let mut jwt_secret_key_file = File::create(secret_file_path.clone()) + .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; + jwt_secret_key_file + .write_all(jwt_secret_key.as_bytes()) + .map_err(|e| { + format!( + "Error occurred while writing to jwt_secret_key file: {:?}", + e + ) + })?; + Some(secret_file_path) + } else { + return Err("--execution-jwt or --execution-jwt-secret-key is required when using --execution-endpoint".to_string()); + } } else { - return Err("Error! 
Please set either --execution-jwt file_path or --execution-jwt-secret-key directly via cli when using --execution-endpoint".to_string()); - } + None + }; // Parse and set the payload builder, if any. if let Some(endpoint) = cli_args.get_one::("builder") { @@ -321,8 +352,11 @@ pub fn get_config( } // Set config values from parse values. - el_config.secret_file = Some(secret_file.clone()); - el_config.execution_endpoint = Some(execution_endpoint.clone()); + el_config.secret_file = secret_file; + el_config.execution_endpoint = execution_endpoint; + el_config.proof_engine_endpoint = proof_engine_endpoint; + // Gate execution proof gossip subscription on proof engine being configured. + client_config.network.enable_execution_proof = el_config.proof_engine_endpoint.is_some(); el_config.suggested_fee_recipient = clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8eec4d5eceb..588a24d0c3d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -44,6 +44,12 @@ use types::data::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; use types::*; use zstd::{Decoder, Encoder}; +/// Number of entries in the always-on EIP-8025 request root <-> block root mapping caches. +/// +/// Sized to cover several epochs of recent blocks: proofs are expected to arrive well within +/// this window after a block is imported. +const EIP8025_REQUEST_ROOT_CACHE_SIZE: usize = 512; + /// On-disk database that stores finalized states efficiently. /// /// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores @@ -73,6 +79,16 @@ pub struct HotColdDB, Cold: ItemStore> { pub hot_db: Hot, /// LRU cache of deserialized blocks and blobs. Updated whenever a block or blob is loaded. 
block_cache: Option>>, + /// EIP-8025: always-on cache mapping request_root -> block_root. + /// + /// Kept separate from `block_cache` so it is always available regardless of whether the + /// block cache is enabled. Required for proof verification to look up the beacon block root + /// associated with an execution payload. + request_root_to_block_root: Mutex>, + /// EIP-8025: always-on cache mapping block_root -> request_root. + /// + /// Used by the HTTP API to retrieve the request root for a given block root. + block_root_to_request_root: Mutex>, /// Cache of beacon states. /// /// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required. @@ -227,6 +243,8 @@ impl HotColdDB, MemoryStore> { // NOTE: Anchor slot is initialized to 0, which is only valid for new DBs. We shouldn't // be reusing memory stores, but if we want to do that we should redo this. + let eip8025_cache_size = NonZeroUsize::new(EIP8025_REQUEST_ROOT_CACHE_SIZE) + .expect("EIP8025_REQUEST_ROOT_CACHE_SIZE is non-zero"); let db = HotColdDB { split: RwLock::new(Split::default()), anchor_info: RwLock::new(ANCHOR_UNINITIALIZED), @@ -238,6 +256,8 @@ impl HotColdDB, MemoryStore> { block_cache: NonZeroUsize::new(config.block_cache_size) .map(BlockCache::new) .map(Mutex::new), + request_root_to_block_root: Mutex::new(LruCache::new(eip8025_cache_size)), + block_root_to_request_root: Mutex::new(LruCache::new(eip8025_cache_size)), state_cache: Mutex::new(StateCache::new( config.state_cache_size, config.state_cache_headroom, @@ -281,6 +301,8 @@ impl HotColdDB, BeaconNodeBackend> { let anchor_info = RwLock::new(Self::load_anchor_info(&hot_db)?); debug!(?anchor_info, "Loaded anchor info"); + let eip8025_cache_size = NonZeroUsize::new(EIP8025_REQUEST_ROOT_CACHE_SIZE) + .expect("EIP8025_REQUEST_ROOT_CACHE_SIZE is non-zero"); let db = HotColdDB { split: RwLock::new(Split::default()), anchor_info, @@ -292,6 +314,8 @@ impl HotColdDB, BeaconNodeBackend> { block_cache: 
NonZeroUsize::new(config.block_cache_size) .map(BlockCache::new) .map(Mutex::new), + request_root_to_block_root: Mutex::new(LruCache::new(eip8025_cache_size)), + block_root_to_request_root: Mutex::new(LruCache::new(eip8025_cache_size)), state_cache: Mutex::new(StateCache::new( config.state_cache_size, config.state_cache_headroom, @@ -1027,6 +1051,34 @@ impl, Cold: ItemStore> HotColdDB } } + /// Store bidirectional mapping between request_root and block_root (EIP-8025). + /// + /// This is in-memory only and not persisted to database in the initial implementation. + pub fn put_request_root_mapping(&self, request_root: Hash256, block_root: Hash256) { + self.request_root_to_block_root + .lock() + .put(request_root, block_root); + self.block_root_to_request_root + .lock() + .put(block_root, request_root); + } + + /// Look up block_root by request_root (EIP-8025, cache-only, no database). + pub fn get_block_root_by_request_root(&self, request_root: &Hash256) -> Option { + self.request_root_to_block_root + .lock() + .get(request_root) + .copied() + } + + /// Look up request_root by block_root (EIP-8025, cache-only, no database). + pub fn get_request_root_by_block_root(&self, block_root: &Hash256) -> Option { + self.block_root_to_request_root + .lock() + .get(block_root) + .copied() + } + /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { let mut ops: Vec = Vec::new(); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 8746e3c063c..7f3e98a86ce 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1755,6 +1755,36 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/execution_proofs` + /// + /// Submit signed execution proofs for EIP-8025 optional execution verification. 
+ pub async fn post_beacon_execution_proofs( + &self, + proofs: &[SignedExecutionProof], + ) -> Result<(), Error> { + use serde::Serialize; + + #[derive(Serialize)] + struct SubmitExecutionProofsRequest { + proofs: Vec, + } + + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("execution_proofs"); + + let request = SubmitExecutionProofsRequest { + proofs: proofs.to_vec(), + }; + + self.post(path, &request).await?; + + Ok(()) + } + /// `POST beacon/rewards/sync_committee` pub async fn post_beacon_rewards_sync_committee( &self, diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 3c850fcb052..eeac02dfc4d 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -681,4 +681,21 @@ impl ValidatorClientHttpClient { let url = self.make_graffiti_url(pubkey)?; self.delete(url).await } + + pub async fn post_execution_proof( + &self, + pubkey: &PublicKeyBytes, + req: SignExecutionProofRequest, + ) -> Result { + let mut path = self.server.expose_full().clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("validators") + .push(&pubkey.to_string()) + .push("execution_proofs"); + + self.post_with_raw_response(path, &req).await + } } diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 07f8421dc5c..18395eef67d 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -207,3 +207,10 @@ pub struct UpdateCandidatesRequest { pub struct UpdateCandidatesResponse { pub new_beacon_nodes_list: Vec, } + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct SignExecutionProofRequest { + pub execution_proof: types::ExecutionProof, + #[serde(default)] + pub epoch: Option, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index b1a61ce00cc..52fcee1184d 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -980,6 +980,38 @@ pub struct SseBlock { pub execution_optimistic: bool, } +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[serde(bound = "E: EthSpec")] +pub struct SseBlockFull { + pub slot: Slot, + pub block: BeaconBlock, + pub execution_optimistic: bool, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +struct SseBlockFullGeneric { + pub slot: Slot, + pub block: T, + pub execution_optimistic: bool, +} + +type VersionedSseBlockFull = ForkVersionedResponse>; + +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for SseBlockFull { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let helper = SseBlockFullGeneric::::deserialize(deserializer)?; + Ok(SseBlockFull { + slot: helper.slot, + block: BeaconBlock::context_deserialize(helper.block, context) + .map_err(serde::de::Error::custom)?, + execution_optimistic: helper.execution_optimistic, + }) + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseBlobSidecar { pub block_root: Hash256, @@ -1180,6 +1212,7 @@ pub enum EventKind { Attestation(Box>), 
SingleAttestation(Box), Block(SseBlock), + BlockFull(Box>), BlobSidecar(SseBlobSidecar), DataColumnSidecar(SseDataColumnSidecar), FinalizedCheckpoint(SseFinalizedCheckpoint), @@ -1204,6 +1237,7 @@ impl EventKind { match self { EventKind::Head(_) => "head", EventKind::Block(_) => "block", + EventKind::BlockFull(_) => "block_full", EventKind::BlobSidecar(_) => "blob_sidecar", EventKind::DataColumnSidecar(_) => "data_column_sidecar", EventKind::Attestation(_) => "attestation", @@ -1238,6 +1272,9 @@ impl EventKind { "block" => Ok(EventKind::Block(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block: {:?}", e)), )?)), + "block_full" => Ok(EventKind::BlockFull(serde_json::from_str(data).map_err( + |e| ServerError::InvalidServerSentEvent(format!("Block Full: {:?}", e)), + )?)), "blob_sidecar" => Ok(EventKind::BlobSidecar(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Blob Sidecar: {:?}", e)), )?)), @@ -1334,6 +1371,7 @@ pub struct EventQuery { pub enum EventTopic { Head, Block, + BlockFull, BlobSidecar, DataColumnSidecar, Attestation, @@ -1361,6 +1399,7 @@ impl FromStr for EventTopic { match s { "head" => Ok(EventTopic::Head), "block" => Ok(EventTopic::Block), + "block_full" => Ok(EventTopic::BlockFull), "blob_sidecar" => Ok(EventTopic::BlobSidecar), "data_column_sidecar" => Ok(EventTopic::DataColumnSidecar), "attestation" => Ok(EventTopic::Attestation), @@ -1389,6 +1428,7 @@ impl fmt::Display for EventTopic { match self { EventTopic::Head => write!(f, "head"), EventTopic::Block => write!(f, "block"), + EventTopic::BlockFull => write!(f, "block_full"), EventTopic::BlobSidecar => write!(f, "blob_sidecar"), EventTopic::DataColumnSidecar => write!(f, "data_column_sidecar"), EventTopic::Attestation => write!(f, "attestation"), @@ -2525,4 +2565,31 @@ mod test { let roundtrip = O::context_deserialize::(deserializer, fork_name).unwrap(); assert_eq!(original, roundtrip); } + + #[test] + fn 
test_versioned_sse_block_full_round_trip() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + for fork_name in ForkName::list_all() { + let beacon_block = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); + let slot = Slot::random_for_test(rng); + + let versioned_response = VersionedSseBlockFull:: { + version: fork_name, + metadata: EmptyMetadata {}, + data: SseBlockFull { + slot, + block: beacon_block, + execution_optimistic: true, + }, + }; + + let json_str = serde_json::to_string(&versioned_response).unwrap(); + let deserialized: VersionedSseBlockFull = + serde_json::from_str(&json_str).unwrap(); + + assert_eq!(versioned_response, deserialized); + assert!(deserialized.data.execution_optimistic); + println!("fork_name: {:?} PASSED", fork_name); + } + } } diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 1bdf6c2cb86..65a0b5a49be 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -36,6 +36,8 @@ pub enum Domain { SyncCommitteeSelectionProof, BeaconBuilder, PTCAttester, + /// EIP-8025: Domain for execution proof signatures. + ExecutionProof, ApplicationMask(ApplicationDomain), } @@ -302,6 +304,11 @@ pub struct ChainSpec { * Capella params */ pub(crate) domain_bls_to_execution_change: u32, + /* + * EIP-8025 params + */ + /// Domain for execution proof signatures (0x0D000000). 
+ pub(crate) domain_execution_proof: u32, } impl ChainSpec { @@ -505,6 +512,7 @@ impl ChainSpec { Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), Domain::BlsToExecutionChange => self.domain_bls_to_execution_change, + Domain::ExecutionProof => self.domain_execution_proof, } } @@ -812,9 +820,11 @@ impl ChainSpec { let blob_retention_epoch = current_epoch.saturating_sub(self.min_epochs_for_blob_sidecars_requests); match self.fulu_fork_epoch { + Some(fulu_fork_epoch) if self.min_epochs_for_data_column_sidecars_requests == 0 => None, Some(fulu_fork_epoch) if blob_retention_epoch > fulu_fork_epoch => Some( current_epoch.saturating_sub(self.min_epochs_for_data_column_sidecars_requests), ), + None if self.min_epochs_for_blob_sidecars_requests == 0 => None, _ => Some(std::cmp::max(fork_epoch, blob_retention_epoch)), } } @@ -1177,6 +1187,10 @@ impl ChainSpec { * Capella params */ domain_bls_to_execution_change: 10, + /* + * EIP-8025 params + */ + domain_execution_proof: 13, // 0x0D000000 } } @@ -1540,6 +1554,10 @@ impl ChainSpec { * Capella params */ domain_bls_to_execution_change: 10, + /* + * EIP-8025 params + */ + domain_execution_proof: 13, // 0x0D000000 } } } diff --git a/consensus/types/src/core/execution_block_hash.rs b/consensus/types/src/core/execution_block_hash.rs index 91c019ce040..41ea32a0a85 100644 --- a/consensus/types/src/core/execution_block_hash.rs +++ b/consensus/types/src/core/execution_block_hash.rs @@ -1,4 +1,4 @@ -use std::fmt; +use std::{fmt, ops::Deref}; use fixed_bytes::FixedBytesExtended; use rand::RngCore; @@ -12,6 +12,14 @@ use crate::{core::Hash256, test_utils::TestRandom}; #[serde(transparent)] pub struct ExecutionBlockHash(#[serde(with = "serde_utils::b256_hex")] pub Hash256); +impl Deref for ExecutionBlockHash { + type Target = Hash256; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + impl fmt::Debug for 
ExecutionBlockHash { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { std::fmt::Debug::fmt(&self.0, f) diff --git a/consensus/types/src/execution/eip8025.rs b/consensus/types/src/execution/eip8025.rs new file mode 100644 index 00000000000..1fcfc2c91b5 --- /dev/null +++ b/consensus/types/src/execution/eip8025.rs @@ -0,0 +1,352 @@ +//! EIP-8025: Optional Execution Proofs +//! +//! This module contains types for the EIP-8025 optional execution proofs feature. +//! See: https://eips.ethereum.org/EIPS/eip-8025 + +use crate::core::{Hash256, SignedRoot}; +use bls::SignatureBytes; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use tree_hash_derive::TreeHash; + +#[cfg(feature = "arbitrary")] +use arbitrary::Arbitrary; + +/// Maximum proof size: 300 KiB (307200 bytes) +/// +/// Product of U75 * U4096 +pub type MaxProofSize = typenum::Prod; + +/// Proof data type +/// +/// VariableList of bytes with max length [`MaxProofSize`]` +pub type ProofData = VariableList; + +/// Maximum execution proofs per payload +pub type MaxExecutionProofsPerPayload = typenum::U4; + +/// Proof generation identifier (8 bytes) +pub type ProofGenId = [u8; 8]; + +/// Proof type identifier +pub type ProofType = u8; + +/// List of execution proofs per payload +pub type ExecutionProofList = VariableList; + +/// Domain type for execution proof signatures (0x0D000000) +pub const DOMAIN_EXECUTION_PROOF: [u8; 4] = [0x0D, 0x00, 0x00, 0x00]; + +/// Minimum required execution proofs for payload verification +pub const MIN_REQUIRED_EXECUTION_PROOFS: usize = 1; + +/// Public input of an [`ExecutionProof`]. +/// +/// Contains the tree hash root of the new payload request that the proof is associated with. 
+#[derive( + Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, +)] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +pub struct PublicInput { + /// The tree hash root of the NewPayloadRequest associated with the proof. + pub new_payload_request_root: Hash256, +} + +/// The type of an execution proof. +/// +/// Contains the proof data, type, and public input that links it to a specific new payload request. +#[derive( + Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, +)] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +pub struct ExecutionProof { + /// The proof data. + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub proof_data: ProofData, + /// The type of proof. + pub proof_type: ProofType, + /// Public input linking the proof to a specific new payload request. + pub public_input: PublicInput, +} + +impl SignedRoot for ExecutionProof {} + +/// A signed execution proof from a validator. +/// +/// Contains the execution proof, the validator's index, and their BLS signature. +#[derive(Debug, Clone, PartialEq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash)] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +pub struct SignedExecutionProof { + /// The execution proof message + pub message: ExecutionProof, + /// Index of the validator who signed this proof + #[serde(with = "serde_utils::quoted_u64")] + pub validator_index: u64, + /// BLS signature over the execution proof + pub signature: SignatureBytes, +} + +/// Proof attributes for requesting proof generation. +/// +/// Specifies which types of proofs should be generated for a payload. 
+#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct ProofAttributes { + /// List of proof types to generate + pub proof_types: Vec, +} + +// ============================================================================= +// Status Types +// ============================================================================= + +/// Status returned from proof verification operations. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum ProofStatus { + /// The proof is valid. + Valid, + /// The proof/header verification failed. + Invalid, + /// The proof is valid but does not change the canonical head. + Accepted, + /// The proof type is not supported by this client. + NotSupported, + /// The request root that the proof is associated with is not yet known. + Syncing, +} + +impl ProofStatus { + /// Returns true if the status indicates successful verification. + pub fn is_valid(&self) -> bool { + matches!(self, ProofStatus::Valid) + } + + /// Returns true if the status indicates the node is still syncing proofs. + pub fn is_syncing(&self) -> bool { + matches!(self, ProofStatus::Syncing) + } + + /// Returns true if the status indicates the node has accepted the proof. + pub fn is_accepted(&self) -> bool { + matches!(self, ProofStatus::Accepted) + } +} + +impl std::fmt::Display for ProofStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ProofStatus::Valid => { + write!(f, "VALID") + } + ProofStatus::Invalid => write!(f, "INVALID"), + ProofStatus::Accepted => write!(f, "ACCEPTED"), + ProofStatus::NotSupported => write!(f, "NOT_SUPPORTED"), + ProofStatus::Syncing => write!(f, "SYNCING"), + } + } +} + +/// A generated proof with its tracking ID. +/// +/// Used when receiving proofs from the proof engine via the beacon API. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct GeneratedProof { + /// The proof generation ID for tracking + #[serde(with = "serde_utils::bytes_8_hex")] + pub proof_gen_id: ProofGenId, + /// The generated execution proof + pub execution_proof: ExecutionProof, +} + +// ============================================================================= +// Utility Implementations +// ============================================================================= + +impl ExecutionProof { + /// Returns true if the proof data is empty. + pub fn is_empty(&self) -> bool { + self.proof_data.is_empty() + } + + /// Returns the size of the proof data in bytes. + pub fn proof_size(&self) -> usize { + self.proof_data.len() + } +} + +impl SignedExecutionProof { + /// Returns a reference to the underlying execution proof. + pub fn proof(&self) -> &ExecutionProof { + &self.message + } + + /// Returns the new payload request root this proof validates. + pub fn request_root(&self) -> Hash256 { + self.message.public_input.new_payload_request_root + } + + /// Returns the proof type. + pub fn proof_type(&self) -> ProofType { + self.message.proof_type + } + + /// Returns the validator index that signed this proof. 
+ pub fn validator_index(&self) -> u64 { + self.validator_index + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ssz::{Decode, Encode}; + + #[test] + fn public_input_round_trip() { + let input = PublicInput { + new_payload_request_root: Hash256::repeat_byte(0xab), + }; + let encoded = input.as_ssz_bytes(); + let decoded = PublicInput::from_ssz_bytes(&encoded).unwrap(); + assert_eq!(input, decoded); + } + + #[test] + fn execution_proof_round_trip() { + let proof = ExecutionProof { + proof_data: VariableList::new(vec![1u8, 2, 3, 4]).unwrap(), + proof_type: 1, + public_input: PublicInput { + new_payload_request_root: Hash256::repeat_byte(0xcd), + }, + }; + let encoded = proof.as_ssz_bytes(); + let decoded = ExecutionProof::from_ssz_bytes(&encoded).unwrap(); + assert_eq!(proof, decoded); + } + + #[test] + fn signed_execution_proof_round_trip() { + let signed_proof = SignedExecutionProof { + message: ExecutionProof { + proof_data: VariableList::new(vec![5u8, 6, 7, 8]).unwrap(), + proof_type: 2, + public_input: PublicInput { + new_payload_request_root: Hash256::repeat_byte(0xef), + }, + }, + validator_index: 42, + signature: SignatureBytes::empty(), + }; + let encoded = signed_proof.as_ssz_bytes(); + let decoded = SignedExecutionProof::from_ssz_bytes(&encoded).unwrap(); + assert_eq!(signed_proof, decoded); + } + + #[test] + fn execution_proof_is_empty() { + let empty_proof = ExecutionProof { + proof_data: VariableList::new(vec![]).unwrap(), + proof_type: 1, + public_input: PublicInput { + new_payload_request_root: Hash256::ZERO, + }, + }; + assert!(empty_proof.is_empty()); + + let non_empty_proof = ExecutionProof { + proof_data: VariableList::new(vec![1u8, 2, 3]).unwrap(), + proof_type: 1, + public_input: PublicInput { + new_payload_request_root: Hash256::ZERO, + }, + }; + assert!(!non_empty_proof.is_empty()); + } + + #[test] + fn execution_proof_size() { + let proof = ExecutionProof { + proof_data: VariableList::new(vec![1u8, 2, 3, 4, 5]).unwrap(), + 
proof_type: 1, + public_input: PublicInput { + new_payload_request_root: Hash256::ZERO, + }, + }; + assert_eq!(proof.proof_size(), 5); + + let empty_proof = ExecutionProof::default(); + assert_eq!(empty_proof.proof_size(), 0); + } + + #[test] + fn signed_execution_proof_accessors() { + let request_root = Hash256::repeat_byte(0xab); + let proof_type = 42u8; + let validator_index = 123u64; + + let signed_proof = SignedExecutionProof { + message: ExecutionProof { + proof_data: VariableList::new(vec![1u8, 2, 3]).unwrap(), + proof_type, + public_input: PublicInput { + new_payload_request_root: request_root, + }, + }, + validator_index, + signature: SignatureBytes::empty(), + }; + + assert_eq!(signed_proof.request_root(), request_root); + assert_eq!(signed_proof.proof_type(), proof_type); + assert_eq!(signed_proof.validator_index(), validator_index); + assert_eq!(signed_proof.proof().proof_type, proof_type); + } + + #[test] + fn proof_status_is_valid() { + assert!(ProofStatus::Valid.is_valid()); + assert!(!ProofStatus::Invalid.is_valid()); + assert!(!ProofStatus::Accepted.is_valid()); + assert!(!ProofStatus::NotSupported.is_valid()); + } + + #[test] + fn proof_status_is_syncing() { + assert!(ProofStatus::Syncing.is_syncing()); + assert!(!ProofStatus::Accepted.is_syncing()); + assert!(!ProofStatus::Valid.is_syncing()); + assert!(!ProofStatus::Invalid.is_syncing()); + assert!(!ProofStatus::NotSupported.is_syncing()); + } + + #[test] + fn generated_proof_json_round_trip() { + let proof = GeneratedProof { + proof_gen_id: [1, 2, 3, 4, 5, 6, 7, 8], + execution_proof: ExecutionProof { + proof_data: VariableList::new(vec![0xaa, 0xbb, 0xcc]).unwrap(), + proof_type: 1, + public_input: PublicInput { + new_payload_request_root: Hash256::repeat_byte(0xde), + }, + }, + }; + + let json = serde_json::to_string(&proof).unwrap(); + let decoded: GeneratedProof = serde_json::from_str(&json).unwrap(); + assert_eq!(proof, decoded); + } + + #[test] + fn proof_attributes_default() { + let attrs 
= ProofAttributes::default(); + assert!(attrs.proof_types.is_empty()); + + let attrs_with_types = ProofAttributes { + proof_types: vec![1, 2, 3], + }; + assert_eq!(attrs_with_types.proof_types.len(), 3); + } +} diff --git a/consensus/types/src/execution/execution_requests.rs b/consensus/types/src/execution/execution_requests.rs index 92d717778e3..c14ac5ea0de 100644 --- a/consensus/types/src/execution/execution_requests.rs +++ b/consensus/types/src/execution/execution_requests.rs @@ -3,7 +3,7 @@ use context_deserialize::context_deserialize; use educe::Educe; use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; -use ssz::Encode; +use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use test_random_derive::TestRandom; @@ -71,6 +71,44 @@ impl ExecutionRequests { requests_list } + pub fn from_execution_requests_list(requests_list: Vec) -> Result { + let mut execution_requests = ExecutionRequests::default(); + + for request_bytes in requests_list { + let prefix = request_bytes + .first() + .copied() + .ok_or_else(|| "Empty request bytes".to_string())?; + let request_type = RequestType::from_u8(prefix) + .ok_or_else(|| format!("Invalid request type prefix: {}", prefix))?; + + let request_data = request_bytes + .get(1..) 
+ .ok_or_else(|| "Empty request data".to_string())?; + match request_type { + RequestType::Deposit => { + let deposits = DepositRequests::::from_ssz_bytes(request_data) + .map_err(|e| format!("Failed to decode deposit requests: {:?}", e))?; + execution_requests.deposits = deposits; + } + RequestType::Withdrawal => { + let withdrawals = WithdrawalRequests::::from_ssz_bytes(request_data) + .map_err(|e| format!("Failed to decode withdrawal requests: {:?}", e))?; + execution_requests.withdrawals = withdrawals; + } + RequestType::Consolidation => { + let consolidations = ConsolidationRequests::::from_ssz_bytes(request_data) + .map_err(|e| { + format!("Failed to decode consolidation requests: {:?}", e) + })?; + execution_requests.consolidations = consolidations; + } + } + } + + Ok(execution_requests) + } + /// Generate the execution layer `requests_hash` based on EIP-7685. /// /// `sha256(sha256(requests_0) ++ sha256(requests_1) ++ ...)` diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs index a3d4ed87301..e4caf43fc80 100644 --- a/consensus/types/src/execution/mod.rs +++ b/consensus/types/src/execution/mod.rs @@ -4,6 +4,7 @@ mod execution_block_header; mod execution_payload; mod bls_to_execution_change; mod dumb_macros; +pub mod eip8025; mod execution_payload_bid; mod execution_payload_envelope; mod execution_payload_header; @@ -41,3 +42,10 @@ pub use payload::{ pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use signed_execution_payload_bid::SignedExecutionPayloadBid; pub use signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; + +// EIP-8025: Optional Execution Proofs +pub use eip8025::{ + DOMAIN_EXECUTION_PROOF, ExecutionProof, ExecutionProofList, GeneratedProof, + MIN_REQUIRED_EXECUTION_PROOFS, MaxExecutionProofsPerPayload, ProofAttributes, ProofGenId, + ProofStatus, ProofType, PublicInput, SignedExecutionProof, +}; diff --git a/consensus/types/src/fork/fork_name.rs 
b/consensus/types/src/fork/fork_name.rs index e9ec5fbe41e..65c56bd4f14 100644 --- a/consensus/types/src/fork/fork_name.rs +++ b/consensus/types/src/fork/fork_name.rs @@ -217,7 +217,7 @@ impl ForkName { ║ III DECEMBER MMXXV ║ ║ ║ ╚═══════════════════════════════════════╝ - + ============================================================================= |||| |||| |---------------------------------------------------------------------------| diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 6d6ffa1725f..c5d831e1e10 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -23,3 +23,6 @@ types = { workspace = true } [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } + +[features] +test-utils = [] diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 6694c673ed5..b43b7744bd4 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -21,7 +21,7 @@ use task_executor::{ShutdownReason, TaskExecutor}; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; use tracing::{error, info, warn}; use tracing_subscriber::filter::LevelFilter; -use types::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; +use types::{ChainSpec, EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; #[cfg(target_family = "unix")] use { @@ -33,6 +33,9 @@ use { #[cfg(not(target_family = "unix"))] use {futures::channel::oneshot, std::cell::RefCell}; +#[cfg(feature = "test-utils")] +pub mod test_utils; + pub mod tracing_common; pub const SSE_LOG_CHANNEL_SIZE: usize = 2048; @@ -284,6 +287,14 @@ impl EnvironmentBuilder { Ok(self) } + /// Map the `ChainSpec` used for the environment using the provided function. 
+ pub fn map_spec(mut self, f: impl FnOnce(&mut ChainSpec)) -> Self { + let mut spec = Arc::unwrap_or_clone(self.eth2_config.spec); + f(&mut spec); + self.eth2_config.spec = spec.into(); + self + } + /// Consumes the builder, returning an `Environment`. pub fn build(self) -> Result, String> { let (signal, exit) = async_channel::bounded(1); @@ -302,6 +313,25 @@ impl EnvironmentBuilder { eth2_network_config: self.eth2_network_config.map(Arc::new), }) } + + #[cfg(feature = "test-utils")] + pub fn build_test_environment(self) -> Result, String> { + let (signal, exit) = async_channel::bounded(1); + let (signal_tx, signal_rx) = channel(1); + Ok(test_utils::TestEnvironment { + executor: TaskExecutor::new( + tokio::runtime::Handle::try_current().expect("failed to get tokio handle"), + exit.clone(), + signal_tx.clone(), + ), + signal_rx: Some(signal_rx), + signal: Some(signal), + sse_logging_components: self.sse_logging_components, + eth_spec_instance: self.eth_spec_instance, + eth2_config: self.eth2_config, + eth2_network_config: self.eth2_network_config.map(Arc::new), + }) + } } /// An environment where Lighthouse services can run. 
Used to start a production beacon node or diff --git a/lighthouse/environment/src/test_utils.rs b/lighthouse/environment/src/test_utils.rs new file mode 100644 index 00000000000..75d48b65f78 --- /dev/null +++ b/lighthouse/environment/src/test_utils.rs @@ -0,0 +1,24 @@ +use super::*; +use task_executor::TaskExecutor; + +pub struct TestEnvironment { + pub executor: TaskExecutor, + pub signal_rx: Option>, + pub signal: Option>, + pub sse_logging_components: Option, + pub eth_spec_instance: E, + pub eth2_config: Eth2Config, + pub eth2_network_config: Option>, +} + +impl TestEnvironment { + pub fn core_context(&self) -> RuntimeContext { + RuntimeContext { + executor: self.executor.clone(), + eth_spec_instance: self.eth_spec_instance.clone(), + eth2_config: self.eth2_config.clone(), + eth2_network_config: self.eth2_network_config.clone(), + sse_logging_components: self.sse_logging_components.clone(), + } + } +} diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 0d9db528da4..4eef3e25dc9 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -7,12 +7,23 @@ edition = { workspace = true } [dependencies] beacon_node = { workspace = true } beacon_node_fallback = { workspace = true } +bls = { workspace = true } environment = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["events"] } execution_layer = { workspace = true } +hex = { workspace = true } +mockito = { workspace = true } +parking_lot = { workspace = true } +reqwest = { workspace = true } sensitive_url = { workspace = true } +serde_json = { workspace = true } +ssz_types = { workspace = true } +task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } +tree_hash = { workspace = true} types = { workspace = true } validator_client = { workspace = true } validator_dir = { workspace = true, features = ["insecure_keys"] } +validator_store = { 
workspace = true } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index e49d11ee1eb..b167fb2dd02 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -4,6 +4,7 @@ use beacon_node::ProductionBeaconNode; use environment::RuntimeContext; +use eth2::lighthouse_vc::http_client::ValidatorClientHttpClient; use eth2::{BeaconNodeHttpClient, Timeouts, reqwest::ClientBuilder}; use sensitive_url::SensitiveUrl; use std::path::PathBuf; @@ -22,7 +23,12 @@ pub use eth2; pub use execution_layer::test_utils::{ Config as MockServerConfig, MockExecutionConfig, MockServer, }; -pub use validator_client::Config as ValidatorConfig; +pub use validator_client::{ApiSecret, Config as ValidatorConfig}; + +mod mock_proof_engine_server; +pub use mock_proof_engine_server::{ + MockProofEngineConfig, MockProofEngineServer, ProofEngineServerConfig, ProofRequestRecord, +}; /// The global timeout for HTTP requests to the beacon node. const HTTP_TIMEOUT: Duration = Duration::from_secs(8); @@ -227,6 +233,24 @@ impl LocalValidatorClient { .expect("should start validator services"); Ok(Self { client, files }) } + + pub fn http_client(&self) -> Result, String> { + let client = if let Some(listen_addr) = self.client.listen_addr() { + let token_path = self.client.config().http_api.http_token_path.clone(); + let api_secret = ApiSecret::create_or_open(token_path)?; + let validator_client_url: SensitiveUrl = SensitiveUrl::parse( + format!("http://{}:{}", listen_addr.ip(), listen_addr.port()).as_str(), + ) + .map_err(|e| format!("Unable to parse validator client URL: {:?}", e))?; + Some( + ValidatorClientHttpClient::new(validator_client_url, api_secret.api_token()) + .map_err(|e| format!("failed to create http client: {:?}", e))?, + ) + } else { + None + }; + Ok(client) + } } /// Provides an execution engine api server that is running in the current process on a given tokio executor (it @@ -254,3 +278,28 @@ impl LocalExecutionNode { } } } + 
+/// Provides a mock proof engine that is running in the current process. +/// +/// Intended for use in testing and simulation. Not for production. +pub struct LocalProofEngine { + pub server: MockProofEngineServer, + pub datadir: TempDir, +} + +impl LocalProofEngine { + pub async fn new(context: RuntimeContext, config: MockProofEngineConfig) -> Self { + let datadir = TempBuilder::new() + .prefix("lighthouse_proof_engine") + .tempdir() + .expect("should create temp directory for proof engine"); + + let server = MockProofEngineServer::new(config, context.executor.clone()).await; + + Self { server, datadir } + } + + pub fn set_validator_client(&mut self, client: ValidatorClientHttpClient) { + self.server.set_validator_callback(client.into()); + } +} diff --git a/testing/node_test_rig/src/mock_proof_engine_server.rs b/testing/node_test_rig/src/mock_proof_engine_server.rs new file mode 100644 index 00000000000..0534ce9a1ff --- /dev/null +++ b/testing/node_test_rig/src/mock_proof_engine_server.rs @@ -0,0 +1,420 @@ +//! Mock proof engine server for testing EIP-8025 execution proofs. +//! +//! Provides an HTTP JSON-RPC server that simulates an external proof engine backend +//! for integration testing. Uses mockito to mock the HTTP endpoints. 
+ +// TODO: Move this module into the execution_layer crate + +use super::ValidatorClientHttpClient; +use eth2::lighthouse_vc::types::SignExecutionProofRequest; +use execution_layer::NewPayloadRequestFulu; +use execution_layer::json_structures::JsonExecutionPayloadFulu; +use mockito::{Matcher, Mock, Server, ServerGuard}; +use parking_lot::{Mutex, RwLock}; +use sensitive_url::SensitiveUrl; +use serde_json::json; +use ssz_types::VariableList; +use std::sync::Arc; +use std::time::Duration; +use task_executor::TaskExecutor; +use tree_hash::TreeHash; +use types::execution::eip8025::{ + ExecutionProof, ProofAttributes, ProofGenId, ProofType, PublicInput, +}; +use types::{EthSpec, ExecutionPayloadFulu, ExecutionRequests, Hash256, VersionedHash}; + +/// Configuration for a mock proof engine. +#[derive(Clone)] +pub struct MockProofEngineConfig { + pub server_config: ProofEngineServerConfig, + pub callback_delay_ms: u64, + pub callback_url: Arc>>>, +} + +impl Default for MockProofEngineConfig { + fn default() -> Self { + Self { + server_config: ProofEngineServerConfig::default(), + callback_delay_ms: 200, + callback_url: Arc::new(RwLock::new(None)), + } + } +} + +/// Configuration for proof engine server. +#[derive(Clone)] +pub struct ProofEngineServerConfig { + pub listen_port: u16, + pub listen_addr: std::net::Ipv4Addr, +} + +impl Default for ProofEngineServerConfig { + fn default() -> Self { + Self { + listen_port: 0, + listen_addr: std::net::Ipv4Addr::LOCALHOST, + } + } +} + +/// Record of a proof request received by the mock server. +#[derive(Clone, Debug)] +pub struct ProofRequestRecord { + pub proof_gen_id: ProofGenId, + pub new_payload_request_root: Hash256, + pub proof_types: Vec, + pub timestamp: std::time::Instant, +} + +/// Mock proof engine HTTP server. 
+/// +/// Implements the JSON-RPC endpoints for: +/// - engine_requestProofsV1: Accept proof requests and return ProofGenId +/// - engine_verifyExecutionProofV1: Verify proof validity +pub struct MockProofEngineServer { + server: ServerGuard, + config: MockProofEngineConfig, + proof_requests: Arc>>, + executor: TaskExecutor, + _mocks: Vec, // Keep mocks alive + _phantom: std::marker::PhantomData, +} + +impl MockProofEngineServer { + /// Create a new mock proof engine server. + pub async fn new(config: MockProofEngineConfig, executor: TaskExecutor) -> Self { + // Use Server::new_async() to avoid starting a runtime within a runtime + let server = Server::new_async().await; + let proof_requests = Arc::new(Mutex::new(Vec::new())); + + let mut mock_server = Self { + server, + config, + proof_requests, + executor, + _mocks: Vec::new(), + _phantom: std::marker::PhantomData, + }; + + mock_server.setup_endpoints(); + mock_server + } + + pub fn set_validator_callback(&mut self, client: Arc) { + *self.config.callback_url.write() = Some(client); + } + + /// Setup all HTTP endpoints. + fn setup_endpoints(&mut self) { + self.setup_request_proofs_endpoint(); + self.setup_verify_proof_endpoint(); + } + + /// Setup the engine_requestProofsV1 endpoint. 
+ fn setup_request_proofs_endpoint(&mut self) { + let proof_requests = self.proof_requests.clone(); + let callback_delay = self.config.callback_delay_ms; + let validator_client_ref = self.config.callback_url.clone(); + let task_executor = self.executor.clone(); + + let mock = self + .server + .mock("POST", "/") + .match_body(Matcher::Regex( + r#".*"method"\s*:\s*"engine_requestProofsV1".*"#.to_string(), + )) + .with_status(200) + .with_body_from_request(move |request| { + // Helper function to return JSON-RPC error response + let error_response = |error_msg: &str| -> Vec { + serde_json::to_vec(&json!({ + "jsonrpc": "2.0", + "error": { + "code": -32602, + "message": format!("Invalid params: {}", error_msg) + }, + "id": 1 + })) + .unwrap_or_else(|_| b"{\"error\":\"internal error\"}".to_vec()) + }; + + // Parse JSON-RPC request with error handling + let body_bytes = match request.body() { + Ok(bytes) => bytes, + Err(e) => { + return error_response(&format!("failed to read request body: {}", e)); + } + }; + + let body: serde_json::Value = match serde_json::from_slice(body_bytes) { + Ok(v) => v, + Err(e) => return error_response(&format!("invalid JSON: {}", e)), + }; + + // Parse params array + let Some(params) = body["params"].as_array() else { + return error_response("params is not an array"); + }; + + if params.len() < 5 { + return error_response(&format!("expected 5 params, got {}", params.len())); + } + + // Parse execution payload + let execution_payload_json: JsonExecutionPayloadFulu = + match serde_json::from_value(params[0].clone()) { + Ok(v) => v, + Err(e) => { + return error_response(&format!("invalid execution payload: {}", e)); + } + }; + + let execution_payload: ExecutionPayloadFulu = match execution_payload_json + .try_into() + { + Ok(v) => v, + Err(e) => return error_response(&format!("failed to convert payload: {}", e)), + }; + + // Parse versioned hashes + let versioned_hashes: VariableList = + match serde_json::from_value(params[1].clone()) { + Ok(v) 
=> v, + Err(e) => { + return error_response(&format!("invalid versioned hashes: {}", e)); + } + }; + + // Parse parent beacon block root + let parent_beacon_block_root: Hash256 = + match serde_json::from_value(params[2].clone()) { + Ok(v) => v, + Err(e) => return error_response(&format!("invalid parent root: {}", e)), + }; + + // Deserialize execution requests from JSON with fork context + let execution_requests_bytes = match serde_json::from_value(params[3].clone()) { + Ok(v) => v, + Err(e) => { + return error_response(&format!("invalid execution requests: {}", e)); + } + }; + let execution_requests = match ExecutionRequests::::from_execution_requests_list( + execution_requests_bytes, + ) { + Ok(r) => r, + Err(e) => return error_response(&e), + }; + + // Parse proof attributes + let proof_attributes: ProofAttributes = + match serde_json::from_value(params[4].clone()) { + Ok(v) => v, + Err(e) => { + return error_response(&format!("invalid proof attributes: {}", e)); + } + }; + + // Compute request root with properly decoded execution_requests + let new_payload_request = NewPayloadRequestFulu { + execution_payload: &execution_payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests: &execution_requests, + }; + let request_root = new_payload_request.tree_hash_root(); + + // Trigger callback if validator client is configured + if let Some(validator) = validator_client_ref.read().as_ref() { + tracing::info!( + target: "simulator", + ?request_root, + proof_types = ?proof_attributes.proof_types, + "Triggering proof callback" + ); + let _ = Self::proof_callback( + validator.clone(), + callback_delay, + task_executor.clone(), + request_root, + proof_attributes.proof_types.clone(), + ); + } + + // Generate deterministic ProofGenId from request root + let mut proof_gen_id = [0u8; 8]; + proof_gen_id.copy_from_slice(&request_root.0[0..8]); + + // Store request + proof_requests.lock().push(ProofRequestRecord { + proof_gen_id, + new_payload_request_root: 
request_root, + proof_types: proof_attributes.proof_types.clone(), + timestamp: std::time::Instant::now(), + }); + + tracing::info!( + target: "simulator", + proof_gen_id = hex::encode(proof_gen_id), + ?request_root, + num_proof_types = proof_attributes.proof_types.len(), + "Proof request recorded" + ); + + // Return success response + serde_json::to_vec(&json!({ + "jsonrpc": "2.0", + "result": format!("0x{}", hex::encode(proof_gen_id)), + "id": 1 + })) + .unwrap_or_else(|_| b"{\"error\":\"internal error\"}".to_vec()) + }) + .create(); + + self._mocks.push(mock); + } + + /// Setup the engine_verifyExecutionProofV1 endpoint. + fn setup_verify_proof_endpoint(&mut self) { + let mock = self.server + .mock("POST", "/") + .match_body(Matcher::Regex( + r#".*"method"\s*:\s*"engine_verifyExecutionProofV1".*"#.to_string(), + )) + .with_status(200) + .with_body_from_request(move |request| { + // Validate the request has a body + let _body_bytes = match request.body() { + Ok(bytes) => bytes, + Err(e) => { + return serde_json::to_vec(&json!({ + "jsonrpc": "2.0", + "error": {"code": -32602, "message": format!("failed to read request body: {}", e)}, + "id": 1 + })) + .unwrap_or_else(|_| b"{\"error\":\"internal error\"}".to_vec()); + } + }; + + // For the verify endpoint, we just return VALID for all properly formatted requests + serde_json::to_vec(&json!({ + "jsonrpc": "2.0", + "result": {"status": "VALID"}, + "id": 1 + })) + .unwrap_or_else(|_| b"{\"error\":\"internal error\"}".to_vec()) + }) + .create(); + + self._mocks.push(mock); + } + + /// Get the URL of the mock server. + pub fn url(&self) -> SensitiveUrl { + SensitiveUrl::parse(&self.server.url()).unwrap() + } + + /// Get all proof requests received by the server. + pub fn get_proof_requests(&self) -> Vec { + self.proof_requests.lock().clone() + } + + /// Manually trigger a callback to the validator client with a generated proof. 
+ /// + /// This simulates the proof engine calling back to the validator client + /// after generating a proof asynchronously. + pub fn proof_callback( + client: Arc, + callback_delay: u64, + task_executor: TaskExecutor, + new_payload_request_root: Hash256, + proof_types: Vec, + ) -> Result<(), String> { + task_executor.spawn( + async move { + tracing::info!( + target: "simulator", + delay_ms = callback_delay, + "Proof callback task started, sleeping" + ); + + tokio::time::sleep(Duration::from_millis(callback_delay)).await; + + tracing::info!(target: "simulator", "Fetching validators for callback"); + + let validators = match client.get_lighthouse_validators().await { + Ok(v) => v, + Err(e) => { + tracing::error!(target: "simulator", error = ?e, "Failed to get validators"); + return; + } + }; + + let pubkey = match validators.data.first() { + Some(v) => v.voting_pubkey, + None => { + tracing::error!(target: "simulator", "No validators found"); + return; + } + }; + + tracing::info!( + target: "simulator", + ?pubkey, + num_proof_types = proof_types.len(), + "Generating and sending proofs" + ); + + let execution_proofs = + Self::generate_dummy_proofs(new_payload_request_root, proof_types); + + for execution_proof in execution_proofs { + tracing::info!( + target: "simulator", + proof_type = ?execution_proof.proof_type, + "Sending proof to validator client" + ); + + let request_body = SignExecutionProofRequest { + execution_proof, + epoch: None, + }; + + match client.post_execution_proof(&pubkey, request_body).await { + Ok(_) => { + tracing::info!(target: "simulator", "Proof sent successfully"); + } + Err(e) => { + tracing::error!(target: "simulator", error = ?e, "Failed to send proof"); + } + } + } + }, + "proof_callback", + ); + + Ok(()) + } + + /// Generate a dummy execution proof for testing. 
+ fn generate_dummy_proofs(root: Hash256, proof_types: Vec) -> Vec { + let mut proofs = vec![]; + + for proof_type in proof_types { + let mut proof_bytes = vec![0xDE, 0xAD, 0xBE, 0xEF]; + proof_bytes.extend_from_slice(&root.0[0..16]); + + let proof = ExecutionProof { + proof_data: VariableList::new(proof_bytes).unwrap(), + proof_type, + public_input: PublicInput { + new_payload_request_root: root, + }, + }; + + proofs.push(proof); + } + + proofs + } +} diff --git a/testing/proof_engine/Cargo.toml b/testing/proof_engine/Cargo.toml new file mode 100644 index 00000000000..c4d0718d6eb --- /dev/null +++ b/testing/proof_engine/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "proof_engine_test" +edition.workspace = true +version.workspace = true + +[dependencies] +simulator = { path = "../simulator", features = ["test-utils"] } +network = { workspace = true, features = ["disable-backfill"] } +tokio = { workspace = true } +tracing = { workspace = true } +anyhow = { workspace = true } + diff --git a/testing/proof_engine/src/lib.rs b/testing/proof_engine/src/lib.rs new file mode 100644 index 00000000000..bdb433980e1 --- /dev/null +++ b/testing/proof_engine/src/lib.rs @@ -0,0 +1,128 @@ +//! A test suite for the proof engine, using a local test network fixture. + +#[cfg(test)] +mod test { + use std::time::Duration; + + use simulator::test_utils::*; + + /// A base test network fixture builder for eip-8025 testing. 
+ /// + /// This fixture has: + /// - all forks up to and including fulu activate at genesis + /// - all nodes configured with 1 second slots to speed up tests + /// - a minimal genesis time to allow tests to start quickly + /// + /// - 1 vanilla beacon node + /// - 1 proof generator node + /// - 1 proof verifier node + fn test_fixture_builder_base() -> TestNetworkFixtureBuilder { + TestNetworkFixture::builder() + .map_spec(|spec| { + spec.seconds_per_slot = 1; + spec.slot_duration_ms = 1000; + spec.min_genesis_time = 0; + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + spec.electra_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(Epoch::new(0)); + }) + .with_network_params(LocalNetworkParams { + validator_count: 4, + node_count: 1, + proposer_nodes: 0, + extra_nodes: 0, + proof_generator_nodes: 1, + proof_verifier_nodes: 1, + genesis_delay: 20, + }) + } + + #[tokio::test] + async fn test_proof_engine_basic() -> anyhow::Result<()> { + let mut fixture = test_fixture_builder_base() + .with_log_level(LevelFilter::DEBUG) + .with_log_dir("proof-engine".into()) + .build() + .await?; + fixture.payloads_valid(); + fixture.wait_for_genesis().await?; + + // Verify continuous operation + tokio::time::sleep(Duration::from_secs(60)).await; + + let requests = fixture + .network + .proof_engines + .read() + .first() + .unwrap() + .server + .get_proof_requests(); + + assert!( + requests.len() >= 2, + "Should have received multiple proof requests" + ); + + // TODO: Add more assertions after we extend test framework. For now just check logs to ensure correctness. 
+ + Ok(()) + } + + #[tokio::test] + async fn test_proof_engine_sync() -> anyhow::Result<()> { + let mut fixture = test_fixture_builder_base() + .map_spec(|spec| { + // Collapse all columns onto a single subnet and reduce the total number of + // custody groups so the small 2-node network can fully cover them. + // + // - data_column_sidecar_subnet_count = 1: all custody groups map to subnet 0, + // so any connected peer satisfies `good_peers_on_sampling_subnets()`. + // + // - number_of_custody_groups = 8: with validator_custody_requirement = 8 (the + // spec default), nodes with validators always get cgc = min(max(units, 8), 8) + // = 8 = full custody of all groups. This avoids the "too many columns, not + // enough peers" problem that occurs with the default 128 groups across 2 nodes. + // Note: do NOT also set custody_requirement = 128; that shrinks the valid cgc + // range to 128..=128 and causes the joining node to ban existing peers whose + // validator-derived cgc is 8. + spec.data_column_sidecar_subnet_count = 1; + spec.number_of_custody_groups = 8; + }) + .map_network_params(|params| { + params.proof_verifier_nodes = 0; + }) + .with_log_level(LevelFilter::DEBUG) + .with_log_dir("proof-engine-sync".into()) + .build() + .await?; + fixture.payloads_valid(); + fixture.wait_for_genesis().await?; + + tokio::time::sleep(Duration::from_secs(60)).await; + + // Now let's add a new proof verifier node and observe the sync behaviour. 
+ let net = fixture.network.clone(); + info!(target: "simulator", "Adding 1 proof verifier beacon nodes to the network"); + fixture.network.executor().spawn( + async move { + net.add_beacon_node( + fixture.config.client.clone(), + fixture.config.execution.clone(), + NodeType::ProofVerifier, + ) + .await + .map_err(anyhow::Error::msg) + .expect("should not error"); + }, + "add_proof_verifier", + ); + + tokio::time::sleep(Duration::from_secs(60)).await; + + Ok(()) + } +} diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index a1b1b6f95d2..f916585ac86 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -6,10 +6,14 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +anyhow = { workspace = true } +beacon_chain = { workspace = true } clap = { workspace = true } -environment = { workspace = true } +environment = { workspace = true, features = ["test-utils"] } +eth2 = { workspace = true, features = ["events"] } execution_layer = { workspace = true } futures = { workspace = true } +lighthouse_network = { workspace = true } kzg = { workspace = true } logging = { workspace = true } node_test_rig = { path = "../node_test_rig" } @@ -17,8 +21,14 @@ parking_lot = { workspace = true } rayon = { workspace = true } sensitive_url = { workspace = true } serde_json = { workspace = true } +task_executor = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } typenum = { workspace = true } types = { workspace = true } +validator_http_api = { workspace = true } + +[features] +test-utils = [] diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 13bfcb5fc35..b5ade95c241 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -1,4 +1,5 @@ use 
crate::local_network::LocalNetworkParams; +use crate::local_network::NodeType; use crate::local_network::TERMINAL_BLOCK; use crate::{LocalNetwork, checks}; use clap::ArgMatches; @@ -35,7 +36,7 @@ const ELECTRA_FORK_EPOCH: u64 = 2; // const FULU_FORK_EPOCH: u64 = 3; // const GLOAS_FORK_EPOCH: u64 = 4; -const SUGGESTED_FEE_RECIPIENT: [u8; 20] = +pub const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; #[allow(clippy::large_stack_frames)] @@ -209,6 +210,8 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { extra_nodes, proposer_nodes, genesis_delay, + proof_generator_nodes: 0, + proof_verifier_nodes: 0, }, context.clone(), )) @@ -218,7 +221,11 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { // Add nodes to the network. for _ in 0..node_count { network - .add_beacon_node(beacon_config.clone(), mock_execution_config.clone(), false) + .add_beacon_node( + beacon_config.clone(), + mock_execution_config.clone(), + NodeType::Default, + ) .await?; } @@ -228,7 +235,11 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { for _ in 0..proposer_nodes { println!("Adding a proposer node"); network - .add_beacon_node(beacon_config.clone(), mock_execution_config.clone(), true) + .add_beacon_node( + beacon_config.clone(), + mock_execution_config.clone(), + NodeType::Proposer, + ) .await?; } @@ -259,7 +270,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { .await } else { network_1 - .add_validator_client(validator_config, i, files) + .add_validator_client(validator_config, i, files, NodeType::Default) .await } .expect("should add validator"); diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 3d9a60abc7b..7d2f68658d3 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -1,4 +1,4 @@ -use crate::local_network::LocalNetworkParams; +use 
crate::local_network::{LocalNetworkParams, NodeType}; use crate::{LocalNetwork, checks}; use clap::ArgMatches; @@ -215,6 +215,8 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { node_count, extra_nodes: 0, proposer_nodes: 0, + proof_generator_nodes: 0, + proof_verifier_nodes: 0, genesis_delay, }, context.clone(), @@ -225,7 +227,11 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { // Add nodes to the network. for _ in 0..node_count { network - .add_beacon_node(beacon_config.clone(), mock_execution_config.clone(), false) + .add_beacon_node( + beacon_config.clone(), + mock_execution_config.clone(), + NodeType::Default, + ) .await?; } diff --git a/testing/simulator/src/lib.rs b/testing/simulator/src/lib.rs new file mode 100644 index 00000000000..b6c70d44969 --- /dev/null +++ b/testing/simulator/src/lib.rs @@ -0,0 +1,26 @@ +//! This crate provides various simulations that create both beacon nodes and validator clients, +//! each with `v` validators. +//! +//! When a simulation runs, there are checks made to ensure that all components are operating +//! as expected. If any of these checks fail, the simulation will exit immediately. +//! +//! ## Future works +//! +//! Presently all the beacon nodes and validator clients all log to stdout. Additionally, the +//! simulation uses `println` to communicate some info. It might be nice if the nodes logged to +//! easy-to-find files and stdout only contained info from the simulation. +//! 
+pub mod basic_sim; +pub mod checks; +pub mod cli; +pub mod fallback_sim; +pub mod local_network; +pub mod retry; + +pub use local_network::LocalNetwork; +pub use types::MinimalEthSpec; + +pub type E = MinimalEthSpec; + +#[cfg(feature = "test-utils")] +pub mod test_utils; diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 58d7e1372fc..e4015d127fd 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,5 +1,7 @@ use crate::checks::epoch_delay; +use beacon_chain::custody_context::NodeCustodyType; use kzg::trusted_setup::get_trusted_setup; +use lighthouse_network::types::Enr; use node_test_rig::{ ClientConfig, ClientGenesis, LocalBeaconNode, LocalExecutionNode, LocalValidatorClient, MockExecutionConfig, MockServerConfig, ValidatorConfig, ValidatorFiles, @@ -7,6 +9,7 @@ use node_test_rig::{ eth2::{BeaconNodeHttpClient, types::StateId}, testing_client_config, }; +use node_test_rig::{LocalProofEngine, MockProofEngineConfig}; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use std::{ @@ -15,23 +18,89 @@ use std::{ sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}, }; +use task_executor::TaskExecutor; +use tempfile::tempdir; use types::{ChainSpec, Epoch, EthSpec}; +use validator_http_api::{Config as ValidatorHttpConfig, PK_FILENAME}; const BOOTNODE_PORT: u16 = 42424; const QUIC_PORT: u16 = 43424; pub const EXECUTION_PORT: u16 = 4000; +pub const PROOF_PORT: u16 = 6000; pub const TERMINAL_BLOCK: u64 = 0; +#[derive(Debug, Copy, Clone)] +pub enum NodeType { + Default, + Proposer, + ProofVerifier, + ProofGenerator, +} + +impl NodeType { + /// Returns true if this node is a proposer node. + pub fn is_proposer(&self) -> bool { + matches!(self, NodeType::Proposer) + } + + /// Returns true if this node is a proof verifier node. + pub fn is_proof_verifier(&self) -> bool { + matches!(self, NodeType::ProofVerifier) + } + + /// Returns true if this node is a proof generator node. 
+ pub fn is_proof_generator(&self) -> bool { + matches!(self, NodeType::ProofGenerator) + } + + /// Returns true if this node requires a proof node. + pub fn requires_proof_node(&self) -> bool { + matches!(self, NodeType::ProofVerifier | NodeType::ProofGenerator) + } + + /// Returns true if this node requires an execution node. + pub fn requires_execution_node(&self) -> bool { + matches!( + self, + NodeType::Default | NodeType::Proposer | NodeType::ProofGenerator + ) + } +} + +#[derive(Debug, Clone)] pub struct LocalNetworkParams { pub validator_count: usize, pub node_count: usize, pub proposer_nodes: usize, + pub proof_generator_nodes: usize, + pub proof_verifier_nodes: usize, pub extra_nodes: usize, pub genesis_delay: u64, } +impl LocalNetworkParams { + pub fn node_type(&self, node_idx: usize) -> NodeType { + if node_idx < self.node_count { + NodeType::Default + } else if node_idx < self.node_count + self.proposer_nodes { + NodeType::Proposer + } else if node_idx < self.node_count + self.proposer_nodes + self.proof_generator_nodes { + NodeType::ProofGenerator + } else if node_idx + < self.node_count + + self.proposer_nodes + + self.proof_generator_nodes + + self.proof_verifier_nodes + { + NodeType::ProofVerifier + } else { + panic!("Invalid node index: {}", node_idx); + } + } +} + fn default_client_config(network_params: LocalNetworkParams, genesis_time: u64) -> ClientConfig { let mut beacon_config = testing_client_config(); @@ -39,14 +108,19 @@ fn default_client_config(network_params: LocalNetworkParams, genesis_time: u64) validator_count: network_params.validator_count, genesis_time, }; - beacon_config.network.target_peers = - network_params.node_count + network_params.proposer_nodes + network_params.extra_nodes - 1; + beacon_config.network.target_peers = network_params.node_count + + network_params.proposer_nodes + + network_params.proof_generator_nodes + + network_params.proof_verifier_nodes + + network_params.extra_nodes + - 1; 
beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); beacon_config.network.enable_light_client_server = true; beacon_config.network.discv5_config.enable_packet_filter = false; beacon_config.chain.enable_light_client_server = true; beacon_config.chain.optimistic_finalized_sync = false; beacon_config.trusted_setup = get_trusted_setup(); + beacon_config.chain.node_custody_type = NodeCustodyType::Supernode; let el_config = execution_layer::Config { execution_endpoint: Some( @@ -103,6 +177,7 @@ pub struct Inner { pub proposer_nodes: RwLock>>, pub validator_clients: RwLock>>, pub execution_nodes: RwLock>>, + pub proof_engines: RwLock>>, } /// Represents a set of interconnected `LocalBeaconNode` and `LocalValidatorClient`. @@ -160,12 +235,18 @@ impl LocalNetwork { proposer_nodes: RwLock::new(vec![]), execution_nodes: RwLock::new(vec![]), validator_clients: RwLock::new(vec![]), + proof_engines: RwLock::new(vec![]), }), }; Ok((network, beacon_config, execution_config)) } + /// Returns the `TaskExecutor` used by this `LocalNetwork`. + pub fn executor(&self) -> &TaskExecutor { + &self.context.executor + } + /// Returns the number of beacon nodes in the network. /// /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected @@ -182,6 +263,11 @@ impl LocalNetwork { self.proposer_nodes.read().len() } + /// Returns the number of proof engines in the network. + pub fn proof_engine_count(&self) -> usize { + self.proof_engines.read().len() + } + /// Returns the number of validator clients in the network. /// /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected @@ -206,6 +292,15 @@ impl LocalNetwork { beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); beacon_config.network.discv5_config.table_filter = |_| true; + // The boot node is a full data-availability node and should custody all columns from + // genesis. 
Setting Supernode ensures cgc = number_of_custody_groups from startup so + // no validator-registration-triggered cgc jump occurs. Without this, the first proposer + // preparation call from the validator client causes cgc to increase from + // spec.custody_requirement → number_of_custody_groups, which stamps + // earliest_available_slot = current_slot and prevents late-joining nodes from syncing + // from epoch 0. + beacon_config.chain.node_custody_type = NodeCustodyType::Supernode; + let execution_node = LocalExecutionNode::new(self.context.clone(), mock_execution_config); beacon_config.execution_layer = Some(execution_layer::Config { @@ -224,8 +319,15 @@ impl LocalNetwork { &self, mut beacon_config: ClientConfig, mut mock_execution_config: MockExecutionConfig, - is_proposer: bool, - ) -> Result<(LocalBeaconNode, LocalExecutionNode), String> { + node_type: NodeType, + ) -> Result< + ( + LocalBeaconNode, + Option>, + Option>, + ), + String, + > { let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; // Set config. @@ -240,25 +342,73 @@ impl LocalNetwork { beacon_config.network.enr_udp4_port = Some(discv5_port.try_into().unwrap()); beacon_config.network.enr_tcp4_port = Some(libp2p_tcp_port.try_into().unwrap()); beacon_config.network.discv5_config.table_filter = |_| true; - beacon_config.network.proposer_only = is_proposer; - - mock_execution_config.server_config.listen_port = EXECUTION_PORT + count; + beacon_config.network.proposer_only = node_type.is_proposer(); + + let execution_node = if node_type.requires_execution_node() { + // Construct execution node. + mock_execution_config.server_config.listen_port = EXECUTION_PORT + count; + let execution_node = + LocalExecutionNode::new(self.context.clone(), mock_execution_config); + + // Pair the beacon node and execution node. 
+ beacon_config.execution_layer = Some(execution_layer::Config { + execution_endpoint: Some( + SensitiveUrl::parse(&execution_node.server.url()).unwrap(), + ), + default_datadir: execution_node.datadir.path().to_path_buf(), + secret_file: Some(execution_node.datadir.path().join("jwt.hex")), + ..Default::default() + }); + Some(execution_node) + } else { + beacon_config.execution_layer = None; + None + }; - // Construct execution node. - let execution_node = LocalExecutionNode::new(self.context.clone(), mock_execution_config); + let proof_node = if node_type.requires_proof_node() { + let mut config = MockProofEngineConfig::default(); + config.server_config.listen_port = PROOF_PORT + self.proof_engine_count() as u16; + let proof_engine = LocalProofEngine::new(self.context.clone(), config).await; + if let Some(execution_layer_config) = beacon_config.execution_layer.as_mut() { + execution_layer_config.proof_engine_endpoint = Some(proof_engine.server.url().clone()); + } else { + beacon_config.execution_layer = Some(execution_layer::Config { + proof_engine_endpoint: Some(proof_engine.server.url().clone()), + ..Default::default() + }); + } + // Subscribe to the execution_proof gossip topic for nodes with a proof engine. + beacon_config.network.enable_execution_proof = true; + Some(proof_engine) + } else { + None + }; - // Pair the beacon node and execution node. 
- beacon_config.execution_layer = Some(execution_layer::Config { - execution_endpoint: Some(SensitiveUrl::parse(&execution_node.server.url()).unwrap()), - default_datadir: execution_node.datadir.path().to_path_buf(), - secret_file: Some(execution_node.datadir.path().join("jwt.hex")), - ..Default::default() - }); + if node_type.is_proof_verifier() { + beacon_config.chain.optimistic_finalized_sync = true; + beacon_config.network.boot_nodes_enr.push(self.proof_generator_enr().ok_or_else(|| { + "Proof verifier node requires a proof generator node to connect to, but no proof generator node found in the network".to_string() + })?); + } // Construct beacon node using the config, let beacon_node = LocalBeaconNode::production(self.context.clone(), beacon_config).await?; - Ok((beacon_node, execution_node)) + Ok((beacon_node, execution_node, proof_node)) + } + + pub fn boot_node_enr(&self) -> Option { + self.beacon_nodes + .read() + .first() + .and_then(|bn| bn.client.enr()) + } + + pub fn proof_generator_enr(&self) -> Option { + self.beacon_nodes + .read() + .last() + .and_then(|bn| bn.client.enr()) } /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. @@ -266,40 +416,34 @@ impl LocalNetwork { &self, mut beacon_config: ClientConfig, mock_execution_config: MockExecutionConfig, - is_proposer: bool, + node_type: NodeType, ) -> Result<(), String> { - let first_bn_exists: bool; - { - let read_lock = self.beacon_nodes.read(); - let boot_node = read_lock.first(); - first_bn_exists = boot_node.is_some(); - - if let Some(boot_node) = boot_node { - // Modify beacon_config to add boot node details. - beacon_config.network.boot_nodes_enr.push( - boot_node - .client - .enr() - .expect("Bootnode must have a network."), - ); - } - } - let (beacon_node, execution_node) = if first_bn_exists { - // Network already exists. We construct a new node. - self.construct_beacon_node(beacon_config, mock_execution_config, is_proposer) - .await? 
- } else { - // Network does not exist. We construct a boot node. - self.construct_boot_node(beacon_config, mock_execution_config) - .await? - }; + let (beacon_node, execution_node, proof_node) = + if let Some(boot_node) = self.boot_node_enr() { + // Network already exists. We construct a new node. + beacon_config.network.boot_nodes_enr.push(boot_node); + self.construct_beacon_node(beacon_config, mock_execution_config, node_type) + .await? + } else { + // Network does not exist. We construct a boot node. + let (bn, en) = self + .construct_boot_node(beacon_config, mock_execution_config) + .await?; + (bn, Some(en), None) + }; + // Add nodes to the network. - self.execution_nodes.write().push(execution_node); - if is_proposer { - self.proposer_nodes.write().push(beacon_node); - } else { - self.beacon_nodes.write().push(beacon_node); + if let Some(execution_node) = execution_node { + self.execution_nodes.write().push(execution_node); + } + if let Some(proof_node) = proof_node { + self.proof_engines.write().push(proof_node); + } + match node_type { + NodeType::Proposer => self.proposer_nodes.write().push(beacon_node), + _ => self.beacon_nodes.write().push(beacon_node), } + Ok(()) } @@ -315,7 +459,7 @@ impl LocalNetwork { ) -> Result<(), String> { epoch_delay(Epoch::new(wait_until_epoch), slot_duration, slots_per_epoch).await; - self.add_beacon_node(beacon_config, mock_execution_config, false) + self.add_beacon_node(beacon_config, mock_execution_config, NodeType::Default) .await?; Ok(()) @@ -328,9 +472,9 @@ impl LocalNetwork { mut validator_config: ValidatorConfig, beacon_node: usize, validator_files: ValidatorFiles, + node_type: NodeType, ) -> Result<(), String> { let context = self.context.clone(); - let self_1 = self.clone(); let socket_addr = { let read_lock = self.beacon_nodes.read(); let beacon_node = read_lock @@ -358,6 +502,30 @@ impl LocalNetwork { .unwrap(); validator_config.beacon_nodes = vec![beacon_node]; + // If this is a proof generator node, we will set 
the proof engine endpoint to the first proof engine in the network. + if node_type.is_proof_generator() { + let proof_engine_url = self + .proof_engines + .read() + .first() + .map(|proof_engine| proof_engine.server.url()) + // use expect here to fail fast if the network has been instantiated incorrectly + // even though we wrap in Some(..) again in the line below. + .expect("Proof generator node must exist if validator is a proof generator"); + validator_config.proof_engine_endpoint = Some(proof_engine_url); + let token_path = tempdir().unwrap().path().join(PK_FILENAME); + validator_config.http_api = ValidatorHttpConfig { + enabled: true, + listen_addr: Ipv4Addr::LOCALHOST.into(), + listen_port: 0, // use random port + allow_origin: None, + allow_keystore_export: true, + store_passwords_in_secrets_dir: false, + http_token_path: token_path, + bn_long_timeouts: false, + }; + }; + // If we have a proposer node established, use it. if let Some(proposer_socket_addr) = proposer_socket_addr { let url = SensitiveUrl::parse( @@ -378,7 +546,20 @@ impl LocalNetwork { validator_files, ) .await?; - self_1.validator_clients.write().push(validator_client); + + // Set the callback url on the proof engine if this is a proof generator node. + if node_type.is_proof_generator() { + let validator_http_client = validator_client + .http_client()? 
+ .expect("HTTP client should be available for proof generator node"); + self.proof_engines + .write() + .first_mut() + .unwrap() + .set_validator_client(validator_http_client); + } + + self.validator_clients.write().push(validator_client); Ok(()) } @@ -389,7 +570,6 @@ impl LocalNetwork { validator_files: ValidatorFiles, ) -> Result<(), String> { let context = self.context.clone(); - let self_1 = self.clone(); let mut beacon_node_urls = vec![]; for beacon_node in beacon_nodes { let socket_addr = { @@ -417,7 +597,7 @@ impl LocalNetwork { validator_files, ) .await?; - self_1.validator_clients.write().push(validator_client); + self.validator_clients.write().push(validator_client); Ok(()) } diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index 7bd6e546f75..075040cbd20 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -1,27 +1,7 @@ -//! This crate provides various simulations that create both beacon nodes and validator clients, -//! each with `v` validators. -//! -//! When a simulation runs, there are checks made to ensure that all components are operating -//! as expected. If any of these checks fail, the simulation will exit immediately. -//! -//! ## Future works -//! -//! Presently all the beacon nodes and validator clients all log to stdout. Additionally, the -//! simulation uses `println` to communicate some info. It might be nice if the nodes logged to -//! easy-to-find files and stdout only contained info from the simulation. -//! 
-mod basic_sim; -mod checks; -mod cli; -mod fallback_sim; -mod local_network; -mod retry; +use simulator::{basic_sim, fallback_sim}; +pub mod cli; use cli::cli_app; -use local_network::LocalNetwork; -use types::MinimalEthSpec; - -pub type E = MinimalEthSpec; fn main() { let matches = cli_app().get_matches(); diff --git a/testing/simulator/src/test_utils/builder.rs b/testing/simulator/src/test_utils/builder.rs new file mode 100644 index 00000000000..ffa2f27ef63 --- /dev/null +++ b/testing/simulator/src/test_utils/builder.rs @@ -0,0 +1,371 @@ +use crate::local_network::NodeType; + +use super::*; + +/// Builder for creating test networks with configurable parameters. +pub struct TestNetworkFixtureBuilder { + env: EnvironmentBuilder, + network_params: LocalNetworkParams, + logger_config: LoggerConfig, + disable_stdout: bool, +} + +impl Default for TestNetworkFixtureBuilder { + fn default() -> Self { + Self { + env: EnvironmentBuilder::minimal(), + network_params: LocalNetworkParams { + validator_count: 4, + node_count: 2, + proposer_nodes: 0, + proof_generator_nodes: 0, + proof_verifier_nodes: 0, + extra_nodes: 0, + genesis_delay: 38, + }, + logger_config: LoggerConfig::default(), + disable_stdout: false, + } + } +} + +impl TestNetworkFixtureBuilder { + /// Set the `EnvironmentBuilder` to use for the network. + pub fn with_env(mut self, env: EnvironmentBuilder) -> Self { + self.env = env; + self + } + + /// Apply an arbitrary modification to the `EnvironmentBuilder` used for the network. + pub fn map_env(mut self, f: impl FnOnce(&mut EnvironmentBuilder)) -> Self { + f(&mut self.env); + self + } + + /// Apply an arbitrary modification to the `ChainSpec` used for the network. + pub fn map_spec(mut self, f: impl FnOnce(&mut ChainSpec)) -> Self { + self.env = self.env.map_spec(f); + self + } + + /// Set the log level. 
+ pub fn with_log_level(mut self, level: LevelFilter) -> Self { + self.logger_config.debug_level = level; + self.logger_config.logfile_debug_level = level; + self + } + + /// Set the log directory. + pub fn with_log_dir(mut self, log_dir: PathBuf) -> Self { + self.logger_config.path = Some(log_dir); + self + } + + /// Apply an arbitrary modification to the `LoggerConfig` used for the network. + pub fn map_logger_config(mut self, f: impl FnOnce(&mut LoggerConfig)) -> Self { + f(&mut self.logger_config); + self + } + + /// Set the network params. + pub fn with_network_params(mut self, network_params: LocalNetworkParams) -> Self { + self.network_params = network_params; + self + } + + /// Apply an arbitrary modification to the `LocalNetworkParams` used for the network. + pub fn map_network_params(mut self, f: impl FnOnce(&mut LocalNetworkParams)) -> Self { + f(&mut self.network_params); + self + } + + /// Build the test network fixture with the specified configuration. + pub async fn build(self) -> anyhow::Result> { + info!(target: "simulator", "Building test network fixture"); + + // initialize the network + let (env, network_params, network, beacon_config, mock_execution_config) = + self.init_network().await?; + + // Initialize beacon nodes + Self::init_beacon_nodes( + &network, + &network_params, + &beacon_config, + &mock_execution_config, + ) + .await?; + + // Initialize validator clients + Self::init_validators(&network, &network_params).await?; + + Ok(TestNetworkFixture { + env, + network, + config: TestConfig { + client: beacon_config, + execution: mock_execution_config, + }, + }) + } + + async fn init_validators( + network: &LocalNetwork, + network_params: &LocalNetworkParams, + ) -> anyhow::Result<()> { + info!(target: "simulator", "Building validator clients for {} validators", network_params.validator_count); + let network_params = network_params.clone(); + let task_executor = network.executor(); + + // Generate validator keystores in parallel to speed up 
setup time + let validator_files = task_executor + .spawn_blocking_handle( + move || -> anyhow::Result> { + let num_beacon_nodes = + network_params.node_count + network_params.proof_generator_nodes; + let validators_per_node = network_params.validator_count / num_beacon_nodes; + + (0..num_beacon_nodes) + .into_par_iter() + .map(|i| -> anyhow::Result { + info!(target: "simulator", + "Generating keystores for validator {} of {}", + i + 1, + num_beacon_nodes + ); + + let indices = (i * validators_per_node..(i + 1) * validators_per_node) + .collect::>(); + + ValidatorFiles::with_keystores(&indices).map_err(anyhow::Error::msg) + }) + .collect::>>() + }, + "validator_keystore_generation", + ) + .ok_or_else(|| anyhow::anyhow!("Failed to spawn blocking task"))? + .await??; + + for (i, files) in validator_files.into_iter().enumerate() { + let network = network.clone(); + let network_params = network_params.clone(); + + task_executor.spawn( + async move { + let mut validator_config = testing_validator_config(); + validator_config.validator_store.fee_recipient = + Some(Into::
::into(SUGGESTED_FEE_RECIPIENT)); + + // Enable broadcast on every 2nd node. + // TODO: do we need this? + if i % 4 == 0 { + validator_config.broadcast_topics = ApiTopic::all(); + let beacon_nodes = vec![i, (i + 1) % network_params.node_count]; + network + .add_validator_client_with_fallbacks( + validator_config, + beacon_nodes, + files, + ) + .await + } else { + let node_type = network_params.node_type(i); + network + .add_validator_client(validator_config, i, files, node_type) + .await + } + .expect("should add validator"); + }, + "validator_client_setup", + ) + } + + Ok(()) + } + + async fn init_beacon_nodes( + network: &LocalNetwork, + network_params: &LocalNetworkParams, + beacon_config: &ClientConfig, + mock_execution_config: &MockExecutionConfig, + ) -> anyhow::Result<()> { + // Add nodes to the network + info!(target: "simulator", "Adding {} beacon nodes to the network", network_params.node_count); + for _idx in 0..network_params.node_count { + let net = network.clone(); + let config = beacon_config.clone(); + let mock_config = mock_execution_config.clone(); + network + .executor() + .spawn_handle( + async move { + net.add_beacon_node(config.clone(), mock_config.clone(), NodeType::Default) + .await + .map_err(anyhow::Error::msg) + .expect("should add beacon node"); + }, + "beacon_node_setup", + ) + .expect("Failed to spawn blocking task") + .await?; + } + + info!(target: "simulator", "Adding {} proposer beacon nodes to the network", network_params.proposer_nodes); + for _idx in 0..network_params.proposer_nodes { + let net = network.clone(); + let config = beacon_config.clone(); + let mock_config = mock_execution_config.clone(); + network + .executor() + .spawn_handle( + async move { + net.add_beacon_node( + config.clone(), + mock_config.clone(), + NodeType::Proposer, + ) + .await + .map_err(anyhow::Error::msg) + .expect("should add beacon node"); + }, + "proposer_beacon_node_setup", + ) + .expect("Failed to spawn blocking task") + .await?; + } + + 
info!(target: "simulator", "Adding {} proof generator beacon nodes to the network", network_params.proof_generator_nodes); + for _idx in 0..network_params.proof_generator_nodes { + let net = network.clone(); + let config = beacon_config.clone(); + let mock_config = mock_execution_config.clone(); + network + .executor() + .spawn_handle( + async move { + net.add_beacon_node( + config.clone(), + mock_config.clone(), + NodeType::ProofGenerator, + ) + .await + .map_err(anyhow::Error::msg) + .expect("should add beacon node"); + }, + "proof_generator_beacon_node_setup", + ) + .expect("Failed to spawn blocking task") + .await?; + } + + info!(target: "simulator", "Adding {} proof verifier beacon nodes to the network", network_params.proof_verifier_nodes); + for _idx in 0..network_params.proof_verifier_nodes { + let net = network.clone(); + let config = beacon_config.clone(); + let mock_config = mock_execution_config.clone(); + network + .executor() + .spawn_handle( + async move { + net.add_beacon_node( + config.clone(), + mock_config.clone(), + NodeType::ProofVerifier, + ) + .await + .map_err(anyhow::Error::msg) + .expect("should add beacon node"); + }, + "proof_verifier_beacon_node_setup", + ) + .expect("Failed to spawn blocking task") + .await?; + } + + Ok(()) + } + + /// Initialize the network environment and create the local network instance. + async fn init_network( + self, + ) -> anyhow::Result<( + TestEnvironment, + LocalNetworkParams, + LocalNetwork, + ClientConfig, + MockExecutionConfig, + )> { + info!(target: "simulator", "Initializing test network environment and local network"); + let Self { + mut env, + network_params, + logger_config, + disable_stdout, + } = self; + + // Ensure the `ChainSpec` is configured with the correct genesis parameters based on the network params. 
+ env = env.map_spec(|spec| { + spec.genesis_delay = network_params.genesis_delay; + spec.min_genesis_active_validator_count = network_params.validator_count as u64; + }); + + // Initialize logging + info!(target: "simulator", "Initializing logging with config: {:?}", logger_config); + + let file_mode = if logger_config.is_restricted { + 0o600 + } else { + 0o644 + }; + let (env, stdout_logging_layer, file_logging_layer, _see_logging_layer) = + env.init_tracing(logger_config.clone(), "lighthouse", file_mode); + + //TODO: optionally add discv5 logging layer for network tests + // Instantiate logging layers + let filters = build_workspace_filter().expect("should build workspace filter"); + let mut layers = vec![]; + + if let Some(layer) = (!disable_stdout).then(|| { + stdout_logging_layer + .with_filter(logger_config.debug_level) + .with_filter(filters.clone()) + .boxed() + }) { + layers.push(layer); + } + if let Some(file_logging_layer) = file_logging_layer { + layers.push( + file_logging_layer + .with_filter(logger_config.logfile_debug_level) + .with_filter(filters.clone()) + .boxed(), + ); + } + + // Initialize the subscriber with the configured layers + tracing_subscriber::registry().with(layers).try_init()?; + + // Instantiate the environment + let env = env.build_test_environment().map_err(anyhow::Error::msg)?; + + // Instantiate the local network + info!(target: "simulator", "Initializing local network with params: {:?}", network_params); + let (network, beacon_config, mock_execution_config) = + Box::pin(LocalNetwork::create_local_network( + None, + None, + network_params.clone(), + env.core_context(), + )) + .await + .map_err(anyhow::Error::msg)?; + + Ok(( + env, + network_params, + network, + beacon_config, + mock_execution_config, + )) + } +} diff --git a/testing/simulator/src/test_utils/mod.rs b/testing/simulator/src/test_utils/mod.rs new file mode 100644 index 00000000000..d674a61386f --- /dev/null +++ b/testing/simulator/src/test_utils/mod.rs @@ -0,0 
+1,94 @@ +//! Test network builder for creating local beacon node networks. +//! +//! Provides a builder pattern for setting up test networks with beacon nodes, +//! validator clients, and execution nodes. Used by simulator tests like +//! `basic_sim` and `proof_service_sim`. + +pub use crate::basic_sim::SUGGESTED_FEE_RECIPIENT; +pub use crate::local_network::{LocalNetwork, LocalNetworkParams, NodeType}; +pub use environment::LoggerConfig; +pub use environment::test_utils::TestEnvironment; +pub use logging::build_workspace_filter; +pub use node_test_rig::ApiTopic; +pub use node_test_rig::{ + ClientConfig, MockExecutionConfig, ValidatorFiles, environment::EnvironmentBuilder, + testing_validator_config, +}; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; +use std::path::PathBuf; +pub use tracing::{info, level_filters::LevelFilter}; +use tracing_subscriber::{Layer, layer::SubscriberExt, util::SubscriberInitExt}; +pub use types::{Address, ChainSpec, Epoch, EthSpec, MinimalEthSpec}; + +mod builder; +pub use builder::TestNetworkFixtureBuilder; + +pub struct TestNetworkFixture { + pub env: TestEnvironment, + pub network: LocalNetwork, + pub config: TestConfig, +} + +pub struct TestConfig { + pub client: ClientConfig, + pub execution: MockExecutionConfig, +} + +impl TestNetworkFixture { + pub fn builder() -> TestNetworkFixtureBuilder { + TestNetworkFixtureBuilder::default() + } + + /// Mark all payloads as valid on execution nodes. + pub fn payloads_valid(&mut self) { + self.network + .execution_nodes + .write() + .iter() + .for_each(|node| { + node.server.all_payloads_valid(); + }); + } + + /// Wait for the network to reach genesis by sleeping until the genesis time. 
+ pub async fn wait_for_genesis(&self) -> anyhow::Result<()> { + let duration_to_genesis = self + .network + .duration_to_genesis() + .await + .map_err(anyhow::Error::msg)?; + tokio::time::sleep(duration_to_genesis).await; + Ok(()) + } +} + +// Ignore this for now because it conflicts with the `proof_engine` testing crate. +// We should migrate to defaulting to unused ports assigned by the OS instead of hardcoding ports. +#[tokio::test] +#[ignore] +async fn test_network_fixture_build() -> anyhow::Result<()> { + let mut fixture = TestNetworkFixtureBuilder::default() + .map_network_params(|params| { + params.genesis_delay = 20; + }) + .map_spec(|spec| { + spec.seconds_per_slot = 1; + spec.slot_duration_ms = 1000; + spec.min_genesis_time = 0; + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + spec.electra_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(Epoch::new(2)); + }) + .build() + .await?; + fixture.payloads_valid(); + + fixture.wait_for_genesis().await?; + + tokio::time::sleep(std::time::Duration::from_secs(60)).await; + + Ok(()) +} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 6990a2f61a7..65f5fb608c0 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -17,7 +17,8 @@ directory = { workspace = true } dirs = { workspace = true } doppelganger_service = { workspace = true } environment = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["events"] } +execution_layer = { workspace = true } fdlimit = "0.3.0" graffiti_file = { workspace = true } hyper = { workspace = true } diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index a35b4ec6c6d..2bf910cd1b8 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -32,7 +32,7 @@ use 
eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, SetGraffitiRequest, - UpdateCandidatesRequest, UpdateCandidatesResponse, + SignExecutionProofRequest, UpdateCandidatesRequest, UpdateCandidatesResponse, }, }; use health_metrics::observe::Observe; @@ -56,6 +56,7 @@ use tracing::{info, warn}; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; use validator_services::block_service::BlockService; +use validator_services::proof_service::ProofService; use warp::{Filter, reply::Response, sse::Event}; use warp_utils::reject::convert_rejection; use warp_utils::task::blocking_json_task; @@ -81,7 +82,7 @@ impl From for Error { /// A wrapper around all the items required to spawn the HTTP server. /// /// The server will gracefully handle the case where any fields are `None`. -pub struct Context { +pub struct Context { pub task_executor: TaskExecutor, pub api_secret: ApiSecret, pub block_service: Option, T>>, @@ -94,6 +95,7 @@ pub struct Context { pub config: Config, pub sse_logging_components: Option, pub slot_clock: T, + pub proof_service: Option, T>>>, } /// Configuration for the HTTP server. 
@@ -249,6 +251,19 @@ pub fn serve( let inner_spec = ctx.spec.clone(); let spec_filter = warp::any().map(move || inner_spec.clone()); + let inner_proof_service = ctx.proof_service.clone(); + let proof_service_filter = warp::any() + .map(move || inner_proof_service.clone()) + .and_then( + |service: Option, T>>>| async move { + service.ok_or_else(|| { + warp_utils::reject::custom_not_found( + "proof service is not initialized.".to_string(), + ) + }) + }, + ); + let api_token_path_inner = api_token_path.clone(); let api_token_path_filter = warp::any().map(move || api_token_path_inner.clone()); @@ -513,7 +528,7 @@ pub fn serve( .and(validator_dir_filter.clone()) .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) - .and(spec_filter) + .and(spec_filter.clone()) .and(task_executor_filter.clone()) .then( move |body: api_types::CreateValidatorsMnemonicRequest, @@ -1130,7 +1145,7 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(slot_clock_filter) + .and(slot_clock_filter.clone()) .and(task_executor_filter.clone()) .then( |pubkey: PublicKey, @@ -1157,6 +1172,38 @@ pub fn serve( }, ); + let post_execution_proofs = warp::path("lighthouse") + .and(warp::path("validators")) + .and(warp::path::param::()) + .and(warp::path("execution_proofs")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(proof_service_filter.clone()) + .and(task_executor_filter.clone()) + .then( + |pubkey: PublicKey, + request: SignExecutionProofRequest, + proof_service: Arc, T>>, + task_executor: TaskExecutor| { + blocking_json_task(move || { + if let Some(handle) = task_executor.handle() { + handle + .block_on(proof_service.handle_proof_request( + pubkey, + request.execution_proof, + request.epoch, + )) + .map_err(warp_utils::reject::custom_server_error) + } else { + Err(warp_utils::reject::custom_server_error( + "Lighthouse shutting down".into(), + )) + } + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, 
warp::http::StatusCode::ACCEPTED)); + // GET /eth/v1/validator/{pubkey}/graffiti let get_graffiti = eth_v1 .and(warp::path("validator")) @@ -1377,6 +1424,7 @@ pub fn serve( .or(post_std_remotekeys) .or(post_graffiti) .or(post_lighthouse_beacon_update) + .or(post_execution_proofs) .recover(warp_utils::reject::handle_rejection), )) .or(warp::patch() diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index f83d9f4d526..42e6b00ca90 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -142,6 +142,7 @@ impl ApiTester { config: http_config, sse_logging_components: None, slot_clock, + proof_service: None, }); let ctx = context; let (shutdown_tx, shutdown_rx) = oneshot::channel(); diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 5cb631983cc..404fc6c0f81 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -132,6 +132,7 @@ impl ApiTester { }, sse_logging_components: None, slot_clock: slot_clock.clone(), + proof_service: None, }); let ctx = context.clone(); let (listening_socket, server) = diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index 3bea21a05d8..ebd68a9747a 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -1,5 +1,5 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; -use bls::{PublicKeyBytes, Signature}; +use bls::{PublicKeyBytes, Signature, SignatureBytes}; use doppelganger_service::DoppelgangerService; use eth2::types::PublishBlockRequest; use initialized_validators::InitializedValidators; @@ -19,12 +19,12 @@ use task_executor::TaskExecutor; use tracing::{error, info, instrument, warn}; use types::{ AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, 
BlindedPayload, - ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, - SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, - SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, - graffiti::GraffitiString, + ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, ExecutionProof, Fork, Graffiti, + Hash256, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, + SignedContributionAndProof, SignedExecutionProof, SignedRoot, SignedValidatorRegistrationData, + SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, + SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + VoluntaryExit, graffiti::GraffitiString, }; use validator_store::{ DoppelgangerStatus, Error as ValidatorStoreError, ProposalData, SignedBlock, UnsignedBlock, @@ -1055,6 +1055,43 @@ impl ValidatorStore for LighthouseValidatorS Ok(SignedContributionAndProof { message, signature }) } + async fn sign_execution_proof( + &self, + validator_pubkey: PublicKeyBytes, + execution_proof: ExecutionProof, + signing_epoch: Epoch, + ) -> Result { + let signing_context = self.signing_context(Domain::ExecutionProof, signing_epoch); + let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?; + + let signature = signing_method + .get_signature::>( + SignableMessage::ExecutionProof(&execution_proof), + signing_context, + &self.spec, + &self.task_executor, + ) + .await?; + + let validator_index = self + .validator_index(&validator_pubkey) + .ok_or(Error::UnknownPubkey(validator_pubkey))?; + + let signature_bytes = SignatureBytes::deserialize(&signature.serialize()) + .map_err(|_| Error::Middleware("Failed to serialize signature".to_string()))?; + + validator_metrics::inc_counter_vec( + 
&validator_metrics::SIGNED_EXECUTION_PROOFS_TOTAL, + &[validator_metrics::SUCCESS], + ); + + Ok(SignedExecutionProof { + message: execution_proof, + validator_index, + signature: signature_bytes, + }) + } + /// Prune the slashing protection database so that it remains performant. /// /// This function will only do actual pruning periodically, so it should usually be diff --git a/validator_client/signing_method/src/lib.rs b/validator_client/signing_method/src/lib.rs index d0d98689526..4a19722263f 100644 --- a/validator_client/signing_method/src/lib.rs +++ b/validator_client/signing_method/src/lib.rs @@ -49,6 +49,8 @@ pub enum SignableMessage<'a, E: EthSpec, Payload: AbstractExecPayload = FullP SignedContributionAndProof(&'a ContributionAndProof), ValidatorRegistration(&'a ValidatorRegistrationData), VoluntaryExit(&'a VoluntaryExit), + /// EIP-8025: Execution proof for optional verification + ExecutionProof(&'a ExecutionProof), } impl> SignableMessage<'_, E, Payload> { @@ -70,6 +72,7 @@ impl> SignableMessage<'_, E, Payload SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain), SignableMessage::ValidatorRegistration(v) => v.signing_root(domain), SignableMessage::VoluntaryExit(exit) => exit.signing_root(domain), + SignableMessage::ExecutionProof(proof) => proof.signing_root(domain), } } } @@ -231,6 +234,7 @@ impl SigningMethod { Web3SignerObject::ValidatorRegistration(v) } SignableMessage::VoluntaryExit(e) => Web3SignerObject::VoluntaryExit(e), + SignableMessage::ExecutionProof(p) => Web3SignerObject::ExecutionProof(p), }; // Determine the Web3Signer message type. 
diff --git a/validator_client/signing_method/src/web3signer.rs b/validator_client/signing_method/src/web3signer.rs index 246d9e9e091..0a3823e4c5d 100644 --- a/validator_client/signing_method/src/web3signer.rs +++ b/validator_client/signing_method/src/web3signer.rs @@ -19,6 +19,8 @@ pub enum MessageType { SyncCommitteeSelectionProof, SyncCommitteeContributionAndProof, ValidatorRegistration, + /// EIP-8025: Execution proof signing + ExecutionProof, } #[derive(Debug, PartialEq, Copy, Clone, Serialize)] @@ -75,6 +77,8 @@ pub enum Web3SignerObject<'a, E: EthSpec, Payload: AbstractExecPayload> { SyncAggregatorSelectionData(&'a SyncAggregatorSelectionData), ContributionAndProof(&'a ContributionAndProof), ValidatorRegistration(&'a ValidatorRegistrationData), + /// EIP-8025: Execution proof for optional verification + ExecutionProof(&'a ExecutionProof), } impl<'a, E: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, E, Payload> { @@ -140,6 +144,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, E, Pa MessageType::SyncCommitteeContributionAndProof } Web3SignerObject::ValidatorRegistration(_) => MessageType::ValidatorRegistration, + Web3SignerObject::ExecutionProof(_) => MessageType::ExecutionProof, } } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 3e1c46097f0..ffc9ce30940 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -508,4 +508,25 @@ pub struct ValidatorClient { display_order = 0 )] pub web3_signer_max_idle_connections: Option, + + #[clap( + long, + value_name = "HTTP-JSON-RPC-URL", + help = "URL of the proof engine HTTP JSON-RPC endpoint for EIP-8025 execution proofs. 
\ + When set, the validator client will proactively monitor for new blocks and \ + request execution proofs from this endpoint.", + display_order = 0 + )] + pub proof_engine_endpoint: Option, + + #[clap( + long, + value_name = "TYPES", + value_delimiter = ',', + requires = "proof_engine_endpoint", + help = "Comma-separated list of proof type identifiers to request from the proof engine \ + (e.g., 0,1,2). If not specified, defaults to all available types.", + display_order = 0 + )] + pub proof_types: Option>, } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 1a286a74dc1..e868814f9d7 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -90,6 +90,10 @@ pub struct Config { #[serde(flatten)] pub initialized_validators: InitializedValidatorsConfig, pub disable_attesting: bool, + /// URL of the proof engine HTTP JSON-RPC endpoint for EIP-8025 execution proofs + pub proof_engine_endpoint: Option, + /// Proof type identifiers to request from the proof engine (e.g., 0, 1, 2) + pub proof_types: Option>, } impl Default for Config { @@ -136,6 +140,8 @@ impl Default for Config { distributed: false, initialized_validators: <_>::default(), disable_attesting: false, + proof_engine_endpoint: None, + proof_types: None, } } } @@ -281,6 +287,19 @@ impl Config { .web3_signer_max_idle_connections = Some(n); } + /* + * Proof Engine (EIP-8025) + */ + if let Some(proof_engine_endpoint) = validator_client_config.proof_engine_endpoint.as_ref() + { + config.proof_engine_endpoint = Some( + SensitiveUrl::parse(proof_engine_endpoint) + .map_err(|e| format!("Unable to parse proof engine URL: {:?}", e))?, + ); + } + + config.proof_types = validator_client_config.proof_types.clone(); + /* * Http API server */ diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index b3cd3425f3d..428476dcca9 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -36,7 +36,7 @@ use tokio::{ }; use 
tracing::{debug, error, info, warn}; use types::{EthSpec, Hash256}; -use validator_http_api::ApiSecret; +pub use validator_http_api::ApiSecret; use validator_services::notifier_service::spawn_notifier; use validator_services::{ attestation_service::{AttestationService, AttestationServiceBuilder}, @@ -44,6 +44,7 @@ use validator_services::{ duties_service::{self, DutiesService, DutiesServiceBuilder}, latency_service, preparation_service::{PreparationService, PreparationServiceBuilder}, + proof_service::ProofService, sync_committee_service::SyncCommitteeService, }; use validator_store::ValidatorStore as ValidatorStoreTrait; @@ -86,6 +87,7 @@ pub struct ProductionValidatorClient { http_api_listen_addr: Option, config: Config, genesis_time: u64, + proof_service: Option, SystemTimeSlotClock>>>, } impl ProductionValidatorClient { @@ -532,6 +534,24 @@ impl ProductionValidatorClient { context.executor.clone(), ); + // Create proof service (EIP-8025) if proof engine endpoint is configured + let proof_service = config.proof_engine_endpoint.as_ref().map(|endpoint| { + info!(endpoint = %endpoint, "Initializing proof engine client"); + let proof_engine_client = Arc::new(execution_layer::eip8025::HttpProofEngine::new( + endpoint.clone(), + None, // No custom timeout + )); + + Arc::new(ProofService::new( + validator_store.clone(), + beacon_nodes.clone(), + proof_engine_client, + slot_clock.clone(), + context.executor.clone(), + config.proof_types.clone(), + )) + }); + Ok(Self { context, duties_service, @@ -545,6 +565,7 @@ impl ProductionValidatorClient { slot_clock, http_api_listen_addr: None, genesis_time, + proof_service, }) } @@ -571,6 +592,7 @@ impl ProductionValidatorClient { config: self.config.http_api.clone(), sse_logging_components: self.context.sse_logging_components.clone(), slot_clock: self.slot_clock.clone(), + proof_service: self.proof_service.clone(), }); let exit = self.context.executor.exit(); @@ -627,6 +649,14 @@ impl ProductionValidatorClient { 
info!("Doppelganger protection disabled.") } + // Start proof service (EIP-8025) if configured + if let Some(proof_service) = &self.proof_service { + proof_service + .clone() + .start_service() + .map_err(|e| format!("Unable to start proof service: {}", e))?; + } + let context = self.context.clone(); spawn_notifier( self.duties_service.clone(), @@ -645,6 +675,16 @@ impl ProductionValidatorClient { Ok(()) } + + /// Returns the listen address of the HTTP API, if enabled. + pub fn listen_addr(&self) -> Option { + self.http_api_listen_addr + } + + /// Returns a reference to the validator client config. + pub fn config(&self) -> &Config { + &self.config + } } async fn init_from_beacon_node( diff --git a/validator_client/validator_metrics/src/lib.rs b/validator_client/validator_metrics/src/lib.rs index 060d8a4edd2..e7e286b91de 100644 --- a/validator_client/validator_metrics/src/lib.rs +++ b/validator_client/validator_metrics/src/lib.rs @@ -121,6 +121,14 @@ pub static SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: LazyLock> &["status"], ) }); +/// EIP-8025: Execution proof signing metric +pub static SIGNED_EXECUTION_PROOFS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "vc_signed_execution_proofs_total", + "Total count of ExecutionProof signings", + &["status"], + ) +}); pub static DUTIES_SERVICE_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram_vec( "vc_duties_service_task_times_seconds", diff --git a/validator_client/validator_services/Cargo.toml b/validator_client/validator_services/Cargo.toml index c9149409148..93ef421ea85 100644 --- a/validator_client/validator_services/Cargo.toml +++ b/validator_client/validator_services/Cargo.toml @@ -8,12 +8,15 @@ authors = ["Sigma Prime "] beacon_node_fallback = { workspace = true } bls = { workspace = true } either = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["events"] } +execution_layer = { workspace = true } futures = { workspace = true } graffiti_file = { 
workspace = true } +lighthouse_validator_store = { workspace = true } logging = { workspace = true } parking_lot = { workspace = true } safe_arith = { workspace = true } +serde_json = { workspace = true } slot_clock = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } @@ -22,3 +25,4 @@ tree_hash = { workspace = true } types = { workspace = true } validator_metrics = { workspace = true } validator_store = { workspace = true } +warp_utils = { workspace = true } diff --git a/validator_client/validator_services/src/lib.rs b/validator_client/validator_services/src/lib.rs index 3b8bd9ae14b..40bc1af7287 100644 --- a/validator_client/validator_services/src/lib.rs +++ b/validator_client/validator_services/src/lib.rs @@ -4,5 +4,6 @@ pub mod duties_service; pub mod latency_service; pub mod notifier_service; pub mod preparation_service; +pub mod proof_service; pub mod sync; pub mod sync_committee_service; diff --git a/validator_client/validator_services/src/proof_service.rs b/validator_client/validator_services/src/proof_service.rs new file mode 100644 index 00000000000..679df7cffe5 --- /dev/null +++ b/validator_client/validator_services/src/proof_service.rs @@ -0,0 +1,262 @@ +//! EIP-8025 Execution Proof Service +//! +//! This service handles both proactive and reactive execution proof workflows: +//! +//! 1. **Proactive Mode**: Monitors beacon chain for new blocks via SSE and requests +//! proofs from the configured proof engine +//! 2. **Reactive Mode**: Receives proof requests from HTTP API (proof engine callbacks) +//! and signs/submits them to the beacon chain +//! +//! The service bridges the gap between external proof engines, validator keys, and +//! beacon nodes, providing a complete end-to-end execution proof flow. 
+ +use beacon_node_fallback::BeaconNodeFallback; +use bls::PublicKey; +use eth2::types::EventTopic; +use execution_layer::NewPayloadRequest; +use execution_layer::eip8025::{HttpProofEngine, ProofEngine}; +use futures::StreamExt; +use slot_clock::SlotClock; +use std::sync::Arc; +use task_executor::TaskExecutor; +use tracing::{debug, error, info, warn}; +use types::execution::eip8025::ProofAttributes; +use types::{BeaconBlock, Epoch, EthSpec, ExecutionProof}; +use validator_store::ValidatorStore; + +/// Background service for execution proof handling +pub struct ProofService { + inner: Arc>, +} + +struct Inner { + validator_store: Arc, + beacon_nodes: Arc>, + proof_engine: Arc, + slot_clock: T, + executor: TaskExecutor, + proof_types: Vec, +} + +impl ProofService { + /// Create a new proof service + pub fn new( + validator_store: Arc, + beacon_nodes: Arc>, + proof_engine: Arc, + slot_clock: T, + executor: TaskExecutor, + proof_types: Option>, + ) -> Self { + // Default to all available proof types if not specified + // TODO: Update when proof types are standardized + let proof_types = proof_types.unwrap_or_else(|| vec![0, 1, 2]); + + Self { + inner: Arc::new(Inner { + validator_store, + beacon_nodes, + proof_engine, + slot_clock, + executor, + proof_types, + }), + } + } + + /// Start the proof service background task (proactive monitoring) + pub fn start_service(self: Arc) -> Result<(), String> { + // Only start monitoring if proof engine is configured + let inner = self.inner.clone(); + let service_fut = async move { + inner.monitor_blocks_task().await; + }; + self.inner + .executor + .spawn(service_fut, "proof_service_monitor"); + + info!("Proof service started - monitoring for new blocks"); + + Ok(()) + } + + /// Public method called by HTTP API when proof engine callbacks with unsigned proof + /// + /// This is the reactive endpoint that receives proofs from the proof engine + /// and signs them with validator keys before submitting to beacon nodes. 
+ pub async fn handle_proof_request( + &self, + pubkey: PublicKey, + execution_proof: ExecutionProof, + epoch: Option, + ) -> Result<(), String> { + self.inner + .sign_and_submit_proof(pubkey, execution_proof, epoch) + .await + } +} + +impl Inner { + /// Proactive: Monitor beacon node for new blocks and request proofs + async fn monitor_blocks_task(self: Arc) { + info!("Starting proof service block monitoring via SSE"); + + loop { + // Attempt to subscribe to block events from beacon node + match self.subscribe_to_blocks().await { + Ok(mut stream) => { + info!("Successfully subscribed to block events"); + + // Process events from the stream + while let Some(event_result) = stream.next().await { + match event_result { + Ok(eth2::types::EventKind::BlockFull(block_event)) => { + let block = block_event.data; + if block.execution_optimistic { + debug!( + slot = block.slot.as_u64(), + "Received execution optimistic block event" + ); + } + self.handle_block_event(&block.block, block.slot).await; + } + Ok(_) => { + // Ignore other event types (shouldn't happen with our topic filter) + debug!("Received non-block event in block_full stream"); + } + Err(e) => { + warn!( + error = %e, + "Error receiving block event, will reconnect" + ); + break; // Break inner loop to reconnect + } + } + } + + // Stream ended or errored - reconnect + warn!("Block event stream ended, reconnecting..."); + } + Err(e) => { + error!( + error = %e, + "Failed to subscribe to block events, retrying..." 
+ ); + } + } + } + } + + /// Helper method to establish SSE subscription with beacon node fallback + async fn subscribe_to_blocks( + &self, + ) -> Result< + impl futures::Stream, eth2::Error>>, + String, + > { + self.beacon_nodes + .first_success( + |node| async move { node.get_events::(&[EventTopic::BlockFull]).await }, + ) + .await + .map_err(|e| format!("All beacon nodes failed to provide event stream: {}", e)) + } + + /// Handle a new block event by requesting proofs from proof engine + async fn handle_block_event(&self, block: &BeaconBlock, slot: types::Slot) { + let block_root = block.canonical_root(); + + info!( + slot = slot.as_u64(), + block = %block_root, + "New block detected, requesting proofs from proof engine" + ); + + // Construct NewPayloadRequest from beacon block + let new_payload_request = match NewPayloadRequest::try_from(block.to_ref()) { + Ok(req) => req, + Err(e) => { + error!( + error = ?e, + block = %block_root, + "Failed to construct NewPayloadRequest from block" + ); + return; + } + }; + + // Use configured proof types + let proof_attributes = ProofAttributes { + proof_types: self.proof_types.clone(), + }; + + // Request proofs from proof engine - HttpProofEngine handles JSON serialization + match self + .proof_engine + .request_proofs(new_payload_request, proof_attributes) + .await + { + Ok(proof_gen_id) => { + debug!( + proof_gen_id = ?proof_gen_id, + block = %block_root, + "Proof generation requested, awaiting callback to HTTP API" + ); + } + Err(e) => { + error!( + error = ?e, + block = %block_root, + "Failed to request proofs from proof engine" + ); + } + } + } + + /// Reactive: Sign and submit proof (called by HTTP API) + async fn sign_and_submit_proof( + &self, + pubkey: PublicKey, + execution_proof: ExecutionProof, + epoch: Option, + ) -> Result<(), String> { + // Determine epoch for signing context + let epoch = epoch.unwrap_or_else(|| { + self.slot_clock + .now() + .map(|slot| slot.epoch(S::E::slots_per_epoch())) + 
.unwrap_or(Epoch::new(0)) + }); + + let pubkey_bytes = pubkey.clone(); + info!( + validator = %pubkey, + %epoch, + "Signing execution proof" + ); + + // Sign the proof + let signed_proof = self + .validator_store + .sign_execution_proof(pubkey_bytes.into(), execution_proof, epoch) + .await + .map_err(|e| format!("Failed to sign execution proof: {:?}", e))?; + + // Submit to beacon node + let signed_proof_for_submission = signed_proof.clone(); + self.beacon_nodes + .first_success(move |node| { + let proof_clone = signed_proof_for_submission.clone(); + async move { node.post_beacon_execution_proofs(&[proof_clone]).await } + }) + .await + .map_err(|e| format!("Failed to submit proof to beacon node: {}", e))?; + + info!( + validator = %pubkey, + "Successfully submitted signed execution proof to beacon node" + ); + + Ok(()) + } +} diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 2b472799d24..75cd191a19b 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -5,10 +5,11 @@ use std::fmt::Debug; use std::future::Future; use std::sync::Arc; use types::{ - Address, Attestation, AttestationError, BlindedBeaconBlock, Epoch, EthSpec, Graffiti, Hash256, - SelectionProof, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedContributionAndProof, - SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + Address, Attestation, AttestationError, BlindedBeaconBlock, Epoch, EthSpec, ExecutionProof, + Graffiti, Hash256, SelectionProof, SignedAggregateAndProof, SignedBlindedBeaconBlock, + SignedContributionAndProof, SignedExecutionProof, SignedValidatorRegistrationData, Slot, + SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, + ValidatorRegistrationData, }; #[derive(Debug, PartialEq, Clone)] @@ -160,6 +161,16 @@ pub trait ValidatorStore: Send 
+ Sync { selection_proof: SyncSelectionProof, ) -> impl Future, Error>> + Send; + /// Signs an execution proof for EIP-8025. + /// + /// This allows validators to sign execution proofs for optional execution verification. + fn sign_execution_proof( + &self, + validator_pubkey: PublicKeyBytes, + execution_proof: ExecutionProof, + signing_epoch: Epoch, + ) -> impl Future>> + Send; + /// Prune the slashing protection database so that it remains performant. /// /// This function will only do actual pruning periodically, so it should usually be