From 44e7eb4df5d4333de0d49d358b22a25d3f725e2e Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Mon, 16 Feb 2026 20:45:03 +0200 Subject: [PATCH 01/11] Improve bootstrapping - WIP --- CHANGELOG.md | 25 +- Cargo.lock | 10 +- blockprod/src/lib.rs | 2 +- chainstate/Cargo.toml | 1 + chainstate/launcher/src/lib.rs | 4 +- chainstate/src/config.rs | 33 +- chainstate/src/detail/bootstrap.rs | 246 ++++-- chainstate/src/detail/chainstateref/mod.rs | 8 + chainstate/src/detail/mod.rs | 75 +- chainstate/src/detail/query.rs | 9 +- .../interface/chainstate_interface_impl.rs | 29 +- .../chainstate_interface_impl_delegation.rs | 5 +- chainstate/src/lib.rs | 12 +- chainstate/src/rpc/mod.rs | 6 +- chainstate/storage/Cargo.toml | 2 + .../src/internal/blockchain_storage.rs | 72 ++ chainstate/storage/src/internal/mod.rs | 27 +- .../storage/src/internal/store_tx/mod.rs | 56 +- .../src/internal/store_tx/read_impls.rs | 10 + chainstate/storage/src/lib.rs | 10 +- chainstate/storage/src/mock/mock_impl.rs | 8 +- chainstate/test-framework/Cargo.toml | 1 + .../blockchain_storage_trivial_implementor.rs | 98 +++ chainstate/test-framework/src/lib.rs | 1 + chainstate/test-framework/src/storage.rs | 8 +- chainstate/test-suite/Cargo.toml | 1 + chainstate/test-suite/src/tests/bootstrap.rs | 705 +++++++++++++++--- .../test-suite/src/tests/syncing_tests.rs | 25 +- common/src/chain/config/mod.rs | 2 +- common/src/chain/currency.rs | 2 +- crypto/Cargo.toml | 1 + crypto/src/key/secp256k1/mod.rs | 27 +- node-lib/src/config_files/chainstate/mod.rs | 14 +- node-lib/src/config_files/mod.rs | 6 +- node-lib/src/options.rs | 17 + node-lib/tests/cli.rs | 11 + p2p/src/protocol.rs | 2 +- storage/core/src/adaptor/mod.rs | 2 +- storage/core/src/lib.rs | 4 +- storage/failing/src/backend.rs | 8 + storage/failing/src/lib.rs | 2 +- storage/inmemory/src/lib.rs | 14 +- storage/lmdb/Cargo.toml | 3 +- storage/lmdb/src/lib.rs | 24 +- storage/src/database/mod.rs | 8 + storage/src/lib.rs | 3 +- 46 files changed, 1322 
insertions(+), 317 deletions(-) create mode 100644 chainstate/storage/src/internal/blockchain_storage.rs create mode 100644 chainstate/test-framework/src/blockchain_storage_trivial_implementor.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index b8444b739a..997c4b43b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ ## [Unreleased] ### Added - - Node RPC: new method added - `chainstate_tokens_info`, `chainstate_orders_info_by_currencies`. + - Node RPC: new methods added - `chainstate_tokens_info`, `chainstate_orders_info_by_currencies`. - Wallet RPC: - new methods added: `node_get_tokens_info`, `order_list_own`, `order_list_all_active`. @@ -30,6 +30,12 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ - Now the wallet subscribes to events from the Mempool to include not yet confirmed transactions relevant to this wallet. + - Node: + - new options added to `node-daemon` and `node-gui`: + - `--enable-db-reckless-mode-in-ibd` - this enables the "reckless" mode of the chainstate + database during initial block download or bootstrapping, which significantly increases + its speed at the cost of a potential db corruption if the system crashes in the meantime. + ### Changed - Wallet RPC: - `wallet_info`: the structure of the returned field `extra_info` was changed. @@ -48,8 +54,14 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ - Documentation-only changes: - Certain parameters that were designated as "string" are now designated as "bech32 string". + - Node: + - The now redundant option `min_max_bootstrap_import_buffer_sizes` was removed from chainstate config. + + - Node bootstrapping: + - The format of the bootstrap file was changed and the legacy format is no longer supported. 
+ ### Fixed - - p2p: when a peer sends a message that can't be decoded, it will now be discouraged (which is what + - P2p: when a peer sends a message that can't be decoded, it will now be discouraged (which is what is normally done for misbehaving peers) and the node won't try connecting to it again.\ Also, the peer will be sent an appropriate `WillDisconnect` message prior to disconnection. @@ -66,6 +78,15 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ - Parameters and/or returned values having the "plain" `Destination` type were incorrectly designated as "bech32 string", while in reality they are "hexified destination". + - Node bootstrapping: + - Fixed a bug where importing a bootstrap file would truncate the file to zero length + instead of actually importing it. + + - Importing a bootstrap file will no longer fail if some of the blocks already exist in the + chainstate. + + - The speed of the import was improved. + - General - Fixed a bug that could lead to indefinite stalling of the node during initial sync when there are many peers and the host machine is slow. 
diff --git a/Cargo.lock b/Cargo.lock index f27659134c..5c0eb6f025 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1352,6 +1352,7 @@ dependencies = [ "serialization", "static_assertions", "storage", + "strum 0.26.3", "subsystem", "test-utils", "thiserror 1.0.69", @@ -1423,7 +1424,9 @@ dependencies = [ "rstest", "serialization", "storage", + "storage-failing", "storage-inmemory", + "storage-lmdb", "test-utils", "tokens-accounting", "utils", @@ -1449,6 +1452,7 @@ dependencies = [ "randomness", "rstest", "serialization", + "storage", "storage-failing", "storage-inmemory", "strum 0.26.3", @@ -1484,6 +1488,7 @@ dependencies = [ "randomness", "rstest", "serialization", + "strum 0.26.3", "test-utils", "tokens-accounting", "tokio", @@ -2103,6 +2108,7 @@ dependencies = [ "generic-array", "hex", "hmac", + "lazy_static", "merlin", "num", "num-derive", @@ -4735,7 +4741,7 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "lmdb-mintlayer" version = "0.16.2" -source = "git+https://github.com/mintlayer/lmdb-rs-mintlayer.git?tag=v0.16.2#fb33e56a76697dd6f484c6d21f89a99ef9145e93" +source = "git+https://github.com/mintlayer/lmdb-rs-mintlayer.git?rev=4427a41fc32e8b7a30bdae42ce48414a6a5d6920#4427a41fc32e8b7a30bdae42ce48414a6a5d6920" dependencies = [ "bitflags 1.3.2", "byteorder", @@ -4748,7 +4754,7 @@ dependencies = [ [[package]] name = "lmdb-rkv-sys" version = "0.11.3" -source = "git+https://github.com/mintlayer/lmdb-rs-mintlayer.git?tag=v0.16.2#fb33e56a76697dd6f484c6d21f89a99ef9145e93" +source = "git+https://github.com/mintlayer/lmdb-rs-mintlayer.git?rev=4427a41fc32e8b7a30bdae42ce48414a6a5d6920#4427a41fc32e8b7a30bdae42ce48414a6a5d6920" dependencies = [ "cc", "libc", diff --git a/blockprod/src/lib.rs b/blockprod/src/lib.rs index 073c855fc7..6070e9525b 100644 --- a/blockprod/src/lib.rs +++ b/blockprod/src/lib.rs @@ -272,8 +272,8 @@ mod tests { enable_heavy_checks: Some(false), max_db_commit_attempts: Default::default(), + 
enable_db_reckless_mode_in_ibd: Default::default(), max_orphan_blocks: Default::default(), - min_max_bootstrap_import_buffer_sizes: Default::default(), allow_checkpoints_mismatch: Default::default(), }; diff --git a/chainstate/Cargo.toml b/chainstate/Cargo.toml index adc79aca10..64cfe15c40 100644 --- a/chainstate/Cargo.toml +++ b/chainstate/Cargo.toml @@ -40,6 +40,7 @@ parity-scale-codec.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true static_assertions.workspace = true +strum.workspace = true thiserror.workspace = true tokio.workspace = true tracing.workspace = true diff --git a/chainstate/launcher/src/lib.rs b/chainstate/launcher/src/lib.rs index 8efc8adaf4..bd75d859af 100644 --- a/chainstate/launcher/src/lib.rs +++ b/chainstate/launcher/src/lib.rs @@ -21,7 +21,7 @@ mod storage_compatibility; use std::sync::Arc; use chainstate::InitializationError; -use chainstate_storage::Transactional; +use chainstate_storage::{BlockchainStorageBackend, Transactional}; use storage_lmdb::resize_callback::MapResizeCallback; // Some useful reexports @@ -37,7 +37,7 @@ pub const SUBDIRECTORY_LMDB: &str = "chainstate-lmdb"; pub use storage_compatibility::check_storage_compatibility; -fn make_chainstate_and_storage_impl( +fn make_chainstate_and_storage_impl( storage_backend: B, chain_config: Arc, chainstate_config: ChainstateConfig, diff --git a/chainstate/src/config.rs b/chainstate/src/config.rs index a68e091a5d..f13f956eb1 100644 --- a/chainstate/src/config.rs +++ b/chainstate/src/config.rs @@ -18,19 +18,8 @@ use std::time::Duration; use common::chain::{config::ChainType, ChainConfig}; use utils::make_config_setting; -const DEFAULT_MIN_IMPORT_BUFFER_SIZE: usize = 1 << 22; // 4 MB -const DEFAULT_MAX_IMPORT_BUFFER_SIZE: usize = 1 << 26; // 64 MB - make_config_setting!(MaxDbCommitAttempts, usize, 10); make_config_setting!(MaxOrphanBlocks, usize, 512); -make_config_setting!( - MinMaxBootstrapImportBufferSizes, - (usize, usize), - ( - 
DEFAULT_MIN_IMPORT_BUFFER_SIZE, - DEFAULT_MAX_IMPORT_BUFFER_SIZE, - ) -); make_config_setting!(MaxTipAge, Duration, Duration::from_secs(60 * 60 * 24)); /// The chainstate subsystem configuration. @@ -39,13 +28,15 @@ pub struct ChainstateConfig { /// The number of maximum attempts to process a block. pub max_db_commit_attempts: MaxDbCommitAttempts, + /// Whether to use the "reckless" mode during the initial block download or bootstrapping. + /// + /// In "reckless" mode the db contents is not synced to disk on each commit, which increases + /// performance at the cost of a potential db corruption if the system crashes. + pub enable_db_reckless_mode_in_ibd: Option, + /// The maximum capacity of the orphan blocks pool. pub max_orphan_blocks: MaxOrphanBlocks, - /// When importing bootstrap file, this controls the buffer sizes (min, max) - /// (see bootstrap import function for more information) - pub min_max_bootstrap_import_buffer_sizes: MinMaxBootstrapImportBufferSizes, - /// The initial block download is finished if the difference between the current time and the /// tip time is less than this value. 
pub max_tip_age: MaxTipAge, @@ -74,19 +65,15 @@ impl ChainstateConfig { self } - pub fn with_bootstrap_buffer_sizes( - mut self, - min_max_bootstrap_import_buffer_sizes: (usize, usize), - ) -> Self { - self.min_max_bootstrap_import_buffer_sizes = min_max_bootstrap_import_buffer_sizes.into(); - self - } - pub fn with_heavy_checks_enabled(mut self, enable: bool) -> Self { self.enable_heavy_checks = Some(enable); self } + pub fn db_reckless_mode_in_ibd_enabled(&self) -> bool { + self.enable_db_reckless_mode_in_ibd.unwrap_or(false) + } + pub fn heavy_checks_enabled(&self, chain_config: &ChainConfig) -> bool { if let Some(enable_heavy_checks) = self.enable_heavy_checks { return enable_heavy_checks; diff --git a/chainstate/src/detail/bootstrap.rs b/chainstate/src/detail/bootstrap.rs index e6a82baeed..d9221a6ca6 100644 --- a/chainstate/src/detail/bootstrap.rs +++ b/chainstate/src/detail/bootstrap.rs @@ -15,99 +15,198 @@ use std::io::{BufRead, Write}; +use strum::IntoEnumIterator; + use chainstate_storage::BlockchainStorageRead; use chainstate_types::{BlockIndex, PropertyQueryError}; -use common::{chain::Block, primitives::id::WithId}; -use serialization::{Decode, Encode}; +use common::{ + chain::{ + config::{ChainType, MagicBytes}, + Block, ChainConfig, + }, + primitives::id::WithId, +}; +use serialization::{Decode, DecodeAll as _, Encode}; +use utils::ensure; -use crate::{BlockError, ChainstateConfig}; +use crate::BlockError; use super::{query::ChainstateQuery, tx_verification_strategy::TransactionVerificationStrategy}; +// Note: bootstrapping used to have a legacy format, where the file didn't have any header and +// blocks were written one by one, prepended by the chain magic bytes corresponding to the +// appropriate chain. This format is no longer supported, the `BootstrapFileSubHeaderV0` struct +// below refers to version 0 of the new format. 
+ +const FILE_MAGIC_BYTES: &[u8; 8] = b"MLBTSTRP"; + +/// The bootstrap file will always start with this header (SCALE-encoded). +#[derive(Encode, Decode)] +struct BootstrapFileHeader { + /// This must be equal to FILE_MAGIC_BYTES + pub file_magic_bytes: [u8; 8], + /// Magic bytes of the chain this file belongs to. + pub chain_magic_bytes: MagicBytes, + /// This specifies the version of the file format and determines what + /// will go after the header. + pub file_format_version: u32, + /// The number of blocks in the file. + pub blocks_count: u64, +} + +const FILE_HEADER_SIZE: usize = 24; + +// In format v0, blocks go directly after the header, each block preceded by its length +// represented as a little-endian `u32`. + +type BlockSizeType = u32; + #[derive(thiserror::Error, Debug, Clone, Eq, PartialEq)] pub enum BootstrapError { - #[error("File error: {0}")] - File(String), + #[error("Block storage error: `{0}`")] + StorageError(#[from] chainstate_storage::Error), + + #[error("I/O error: {0}")] + IoError(String), + #[error("Deserialization error: {0}")] - Deserialization(#[from] serialization::Error), + DeserializationError(#[from] serialization::Error), + #[error("Block import error: {0}")] BlockProcessing(#[from] BlockError), - #[error("Block import error: {0}")] + + #[error("Property query error: {0}")] FailedToReadProperty(#[from] PropertyQueryError), + + // Note: integer conversions shouldn't happen here, so we don't bother including + // extra info in the error. 
+ #[error(transparent)] + TryFromIntError(#[from] std::num::TryFromIntError), + + #[error("Legacy file format no longer supported")] + LegacyFileFormat, + + #[error("File too small")] + FileTooSmall, + + #[error("Wrong file format")] + WrongFileFormat, + + #[error("Bad file format")] + BadFileFormat, + + #[error("This file belongs to a different chain")] + WrongChain, + + #[error( + "This seems to be some future version of bootstrap file that is not supported by this node" + )] + UnsupportedFutureFormatVersion, } impl From for BootstrapError { fn from(error: std::io::Error) -> Self { - Self::File(error.to_string()) + Self::IoError(error.to_string()) } } pub fn import_bootstrap_stream( - expected_magic_bytes: &[u8], + chain_config: &ChainConfig, file_reader: &mut std::io::BufReader, process_block_func: &mut P, - chainstate_config: &ChainstateConfig, ) -> Result<(), BootstrapError> where - P: FnMut(WithId) -> Result, BlockError>, + P: FnMut(WithId) -> Result, BootstrapError>, { - // min: The smallest buffer size, after which another read is triggered from the bootstrap file - // max: The largest buffer size, after which reading the file is stopped - // NOTE: both sizes MUST be larger than the largest block in the blockchain + 4 bytes for magic bytes - let (min_buffer_size, max_buffer_size) = - *chainstate_config.min_max_bootstrap_import_buffer_sizes; - - // It's more reasonable to use a VeqDeque, but it's incompatible with the windows() method which is needed to search for magic bytes - // There's a performance hit behind this, but we don't care. Anyone is free to optimize this. 
- let mut buffer_queue = Vec::::new(); - - loop { - if buffer_queue.len() < min_buffer_size + expected_magic_bytes.len() { - fill_buffer(&mut buffer_queue, file_reader, max_buffer_size)?; - } + let mut buffer_queue = Vec::::with_capacity(1024 * 1024); - // locate magic bytes to recognize the start of a block - let current_pos = buffer_queue - .windows(expected_magic_bytes.len()) - .position(|window| window == expected_magic_bytes); - - // read the block after the magic bytes - let block = match current_pos { - Some(v) => Block::decode(&mut &buffer_queue[v + expected_magic_bytes.len()..])?, - None => break, - }; - let block_len = block.encoded_size(); - process_block_func(block.into())?; + let header = { + fill_buffer(&mut buffer_queue, file_reader, FILE_HEADER_SIZE)?; + ensure!( + buffer_queue.len() == FILE_HEADER_SIZE, + BootstrapError::FileTooSmall + ); + check_for_legacy_format(&buffer_queue)?; + + BootstrapFileHeader::decode_all(&mut buffer_queue.as_slice())? + }; + + buffer_queue.clear(); + + ensure!( + &header.file_magic_bytes == FILE_MAGIC_BYTES, + BootstrapError::WrongFileFormat + ); + ensure!( + &header.chain_magic_bytes == chain_config.magic_bytes(), + BootstrapError::WrongChain + ); + ensure!( + header.file_format_version == 0, + BootstrapError::UnsupportedFutureFormatVersion + ); - // consume the buffer from the front - buffer_queue = buffer_queue[expected_magic_bytes.len() + block_len..].to_vec(); + for _ in 0..header.blocks_count { + fill_buffer(&mut buffer_queue, file_reader, size_of::())?; + ensure!( + buffer_queue.len() == size_of::(), + BootstrapError::BadFileFormat + ); + let block_size = BlockSizeType::from_le_bytes( + buffer_queue + .as_slice() + .try_into() + .expect("Buffer is known to have the correct size"), + ) + .try_into()?; + buffer_queue.clear(); + + fill_buffer(&mut buffer_queue, file_reader, block_size)?; + ensure!( + buffer_queue.len() == block_size, + BootstrapError::BadFileFormat + ); + + let block = Block::decode_all(&mut 
buffer_queue.as_slice())?; + process_block_func(block.into())?; + buffer_queue.clear(); } Ok(()) } +fn check_for_legacy_format(header_bytes: &[u8]) -> Result<(), BootstrapError> { + // In the legacy format the file starts with magic bytes of the corresponding chain. + for chain_type in ChainType::iter() { + if header_bytes.starts_with(&chain_type.magic_bytes().bytes()) { + return Err(BootstrapError::LegacyFileFormat); + } + } + Ok(()) +} + fn fill_buffer( buffer_queue: &mut Vec, reader: &mut std::io::BufReader, max_buffer_size: usize, ) -> Result<(), BootstrapError> { while buffer_queue.len() < max_buffer_size { - let buf_len = { - let data = reader.fill_buf()?; - if data.is_empty() { - break; - } - buffer_queue.extend(data.iter()); - data.len() - }; - reader.consume(buf_len); + let data = reader.fill_buf()?; + if data.is_empty() { + break; + } + + let remaining_len = max_buffer_size - buffer_queue.len(); + let len_to_consume = std::cmp::min(remaining_len, data.len()); + buffer_queue.extend_from_slice(&data[..len_to_consume]); + reader.consume(len_to_consume); } Ok(()) } pub fn export_bootstrap_stream<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy>( - magic_bytes: &[u8], + chain_config: &ChainConfig, writer: &mut std::io::BufWriter>, include_stale_blocks: bool, query_interface: &ChainstateQuery<'a, S, V>, @@ -120,10 +219,53 @@ where query_interface.get_mainchain_blocks_list()? 
}; + let header = BootstrapFileHeader { + file_magic_bytes: *FILE_MAGIC_BYTES, + chain_magic_bytes: *chain_config.magic_bytes(), + file_format_version: 0, + blocks_count: blocks_list.len().try_into()?, + }; + + header.encode_to(writer); + for block_id in blocks_list { - writer.write_all(magic_bytes)?; - let block = query_interface.get_existing_block(&block_id)?; - writer.write_all(&block.encode())?; + let encoded_block = query_interface.get_encoded_existing_block(&block_id)?; + let block_size: BlockSizeType = encoded_block.len().try_into()?; + writer.write_all(block_size.to_le_bytes().as_slice())?; + writer.write_all(&encoded_block)?; } + Ok(()) } + +#[cfg(test)] +mod tests { + use rstest::rstest; + + use randomness::Rng as _; + use test_utils::random::{make_seedable_rng, Seed}; + + use super::*; + + // Check that BootstrapFileHeader's encoded size if is always FILE_HEADER_SIZE, no matter the contents. + #[rstest] + #[trace] + #[case(Seed::from_entropy())] + fn header_encoding_size(#[case] seed: Seed) { + for _ in 0..100 { + let mut rng = make_seedable_rng(seed); + + { + let header = BootstrapFileHeader { + file_magic_bytes: rng.gen(), + chain_magic_bytes: MagicBytes::new(rng.gen()), + file_format_version: rng.gen(), + blocks_count: rng.gen(), + }; + + let encoded_size = header.encoded_size(); + assert_eq!(encoded_size, FILE_HEADER_SIZE); + } + } + } +} diff --git a/chainstate/src/detail/chainstateref/mod.rs b/chainstate/src/detail/chainstateref/mod.rs index 325812be76..ed5d0c2d18 100644 --- a/chainstate/src/detail/chainstateref/mod.rs +++ b/chainstate/src/detail/chainstateref/mod.rs @@ -270,6 +270,14 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat self.db_tx.get_block(block_id).map_err(PropertyQueryError::from) } + #[log_error] + pub fn get_encoded_block( + &self, + block_id: &Id, + ) -> Result>, PropertyQueryError> { + self.db_tx.get_encoded_block(block_id).map_err(PropertyQueryError::from) + } + #[log_error] pub fn 
block_exists(&self, block_id: &Id) -> Result { self.db_tx.block_exists(block_id).map_err(PropertyQueryError::from) diff --git a/chainstate/src/detail/mod.rs b/chainstate/src/detail/mod.rs index 391929d986..cce2f1a7ac 100644 --- a/chainstate/src/detail/mod.rs +++ b/chainstate/src/detail/mod.rs @@ -58,7 +58,7 @@ use pos_accounting::{ use tx_verifier::transaction_verifier; use utils::{ const_value::ConstValue, - ensure, + debug_assert_or_log, ensure, eventhandler::{EventHandler, EventsController}, log_error, set_flag::SetFlag, @@ -67,7 +67,10 @@ use utils::{ use utils_networking::broadcaster; use utxo::UtxosDB; -use crate::{BlockInvalidatorError, ChainstateConfig, ChainstateEvent}; +use crate::{ + detail::bootstrap::import_bootstrap_stream, BlockInvalidatorError, BootstrapError, + ChainstateConfig, ChainstateError, ChainstateEvent, +}; use self::{ block_invalidation::BlockInvalidator, @@ -182,9 +185,7 @@ impl Chainstate tx_verification_strategy: V, custom_orphan_error_hook: Option>, time_getter: TimeGetter, - ) -> Result { - use crate::ChainstateError; - + ) -> Result { let best_block_id = { let db_tx = chainstate_storage.transaction_ro()?; db_tx.get_best_block_id()? 
@@ -210,7 +211,13 @@ impl Chainstate .map_err(|e| ChainstateError::FailedToInitializeChainstate(e.into()))?; let best_block_index = chainstate.make_db_tx_ro()?.get_best_block_index()?; - chainstate.update_initial_block_download_flag(best_block_index.as_ref()); + chainstate.update_initial_block_download_flag(best_block_index.as_ref())?; + + if !chainstate.is_initial_block_download_finished.test() + && chainstate.chainstate_config.db_reckless_mode_in_ibd_enabled() + { + chainstate.chainstate_storage.set_reckless_mode(true)?; + } Ok(chainstate) } @@ -622,7 +629,7 @@ impl Chainstate best_block_index_after_orphans_opt.or(best_block_index_after_process_block_opt); if let Some(best_block_index) = &best_block_index_opt { - self.update_initial_block_download_flag(GenBlockIndexRef::Block(best_block_index)); + self.update_initial_block_download_flag(GenBlockIndexRef::Block(best_block_index))?; self.broadcast_new_tip_event( GenBlockIndexRef::Block(best_block_index), self.is_initial_block_download(), @@ -822,16 +829,30 @@ impl Chainstate } /// Update `is_initial_block_download_finished` when tip changes (can only be set once) - fn update_initial_block_download_flag(&mut self, best_block_index: GenBlockIndexRef<'_>) { + #[log_error] + fn update_initial_block_download_flag( + &mut self, + best_block_index: GenBlockIndexRef<'_>, + ) -> Result<(), chainstate_storage::Error> { if self.is_initial_block_download_finished.test() { - return; + return Ok(()); } let tip_timestamp = best_block_index.block_timestamp(); if self.is_fresh_block(&tip_timestamp) { self.is_initial_block_download_finished.set(); + + if self.chainstate_storage.in_reckless_mode()? 
{ + debug_assert_or_log!( + self.chainstate_config.db_reckless_mode_in_ibd_enabled(), + "The db was in a reckless mode even though it wasn't enabled" + ); + self.chainstate_storage.set_reckless_mode(false)?; + } } + + Ok(()) } #[log_error] @@ -866,6 +887,42 @@ impl Chainstate Err(err) => (*err).into(), } } + + #[log_error] + pub fn import_bootstrap_stream<'a>( + &mut self, + mut reader: std::io::BufReader>, + ) -> Result<(), BootstrapError> { + let enable_reckless_mode = !self.chainstate_storage.in_reckless_mode()? + && self.chainstate_config.db_reckless_mode_in_ibd_enabled(); + + if enable_reckless_mode { + self.chainstate_storage.set_reckless_mode(true)?; + } + + let chain_config = Arc::clone(&self.chain_config); + let mut block_processor = |block: WithId| -> Result<_, BootstrapError> { + let block_exists = self.make_db_tx_ro()?.block_exists(&block.get_id())?; + + if !block_exists { + Ok(self.process_block(block, BlockSource::Local)?) + } else { + Ok(None) + } + }; + + let result = import_bootstrap_stream(&chain_config, &mut reader, &mut block_processor); + + // FIXME write to log when reckless mode is turned on and off? + + // FIXME check if the db can become corrupted silently + + if enable_reckless_mode { + self.chainstate_storage.set_reckless_mode(false)?; + } + + result + } } /// The error type for integrate_block. diff --git a/chainstate/src/detail/query.rs b/chainstate/src/detail/query.rs index 6882a743d9..c30410189e 100644 --- a/chainstate/src/detail/query.rs +++ b/chainstate/src/detail/query.rs @@ -90,8 +90,13 @@ impl<'a, S: BlockchainStorageRead, V: TransactionVerificationStrategy> Chainstat self.chainstate_ref.get_block(id) } - pub fn get_existing_block(&self, id: &Id) -> Result { - self.chainstate_ref.get_block(id)?.ok_or(PropertyQueryError::BlockNotFound(*id)) + pub fn get_encoded_existing_block( + &self, + id: &Id, + ) -> Result, PropertyQueryError> { + self.chainstate_ref + .get_encoded_block(id)? 
+ .ok_or(PropertyQueryError::BlockNotFound(*id)) } pub fn get_mainchain_blocks( diff --git a/chainstate/src/interface/chainstate_interface_impl.rs b/chainstate/src/interface/chainstate_interface_impl.rs index 76f2e8ceed..982d0a5ca5 100644 --- a/chainstate/src/interface/chainstate_interface_impl.rs +++ b/chainstate/src/interface/chainstate_interface_impl.rs @@ -21,11 +21,8 @@ use std::{ use crate::{ detail::{ - self, - block_checking::BlockChecker, - bootstrap::{export_bootstrap_stream, import_bootstrap_stream}, - calculate_median_time_past, - tx_verification_strategy::TransactionVerificationStrategy, + self, block_checking::BlockChecker, bootstrap::export_bootstrap_stream, + calculate_median_time_past, tx_verification_strategy::TransactionVerificationStrategy, BlockSource, OrphanBlocksRef, CHAINSTATE_TRACING_TARGET_VERBOSE_BLOCK_IDS, }, ChainInfo, ChainstateConfig, ChainstateError, ChainstateEvent, ChainstateInterface, Locator, @@ -616,24 +613,7 @@ where &mut self, reader: std::io::BufReader>, ) -> Result<(), ChainstateError> { - let magic_bytes = *self.chainstate.chain_config().magic_bytes(); - - let mut reader = reader; - - // We clone because borrowing with the closure below prevents immutable borrows, - // and the cost of cloning is small compared to the bootstrapping - let chainstate_config = self.chainstate.chainstate_config().clone(); - - let mut block_processor = |block| self.chainstate.process_block(block, BlockSource::Local); - - import_bootstrap_stream( - &magic_bytes.bytes(), - &mut reader, - &mut block_processor, - &chainstate_config, - )?; - - Ok(()) + Ok(self.chainstate.import_bootstrap_stream(reader)?) 
} #[tracing::instrument(skip_all)] @@ -642,10 +622,9 @@ where writer: std::io::BufWriter>, include_stale_blocks: bool, ) -> Result<(), ChainstateError> { - let magic_bytes = self.chainstate.chain_config().magic_bytes(); let mut writer = writer; export_bootstrap_stream( - &magic_bytes.bytes(), + self.chainstate.chain_config(), &mut writer, include_stale_blocks, &self.chainstate.query().map_err(ChainstateError::from)?, diff --git a/chainstate/src/interface/chainstate_interface_impl_delegation.rs b/chainstate/src/interface/chainstate_interface_impl_delegation.rs index 8cb6c5701f..11d2966b3c 100644 --- a/chainstate/src/interface/chainstate_interface_impl_delegation.rs +++ b/chainstate/src/interface/chainstate_interface_impl_delegation.rs @@ -452,20 +452,19 @@ where #[cfg(test)] mod tests { - use std::sync::Arc; use chainstate_storage::inmemory::Store; use common::{ chain::{config::create_unit_test_config, ChainConfig}, primitives::BlockHeight, + time_getter::TimeGetter, }; use crate::{ chainstate_interface::ChainstateInterface, make_chainstate, ChainstateConfig, DefaultTransactionVerificationStrategy, }; - use common::time_getter::TimeGetter; fn test_interface_ref(chainstate: &C, chain_config: &ChainConfig) { assert_eq!( @@ -495,8 +494,8 @@ mod tests { let chain_config = Arc::new(create_unit_test_config()); let chainstate_config = ChainstateConfig { max_db_commit_attempts: 10.into(), + enable_db_reckless_mode_in_ibd: Default::default(), max_orphan_blocks: 0.into(), - min_max_bootstrap_import_buffer_sizes: Default::default(), max_tip_age: Default::default(), enable_heavy_checks: Some(true), allow_checkpoints_mismatch: Default::default(), diff --git a/chainstate/src/lib.rs b/chainstate/src/lib.rs index 4c98b0610f..9c7b5b1462 100644 --- a/chainstate/src/lib.rs +++ b/chainstate/src/lib.rs @@ -28,17 +28,17 @@ use common::{ primitives::{BlockHeight, Id}, time_getter::TimeGetter, }; -use detail::{bootstrap::BootstrapError, Chainstate}; +use detail::Chainstate; use 
interface::chainstate_interface_impl; pub use crate::{ config::{ChainstateConfig, MaxTipAge}, detail::{ - ban_score, block_invalidation::BlockInvalidatorError, calculate_median_time_past, - calculate_median_time_past_from_blocktimestamps, BlockError, BlockProcessingErrorClass, - BlockProcessingErrorClassification, BlockSource, ChainInfo, CheckBlockError, - CheckBlockTransactionsError, ConnectTransactionError, IOPolicyError, InitializationError, - Locator, NonZeroPoolBalances, OrphanCheckError, SpendStakeError, + ban_score, block_invalidation::BlockInvalidatorError, bootstrap::BootstrapError, + calculate_median_time_past, calculate_median_time_past_from_blocktimestamps, BlockError, + BlockProcessingErrorClass, BlockProcessingErrorClassification, BlockSource, ChainInfo, + CheckBlockError, CheckBlockTransactionsError, ConnectTransactionError, IOPolicyError, + InitializationError, Locator, NonZeroPoolBalances, OrphanCheckError, SpendStakeError, StorageCompatibilityCheckError, TokenIssuanceError, TokensError, TransactionVerifierStorageError, MEDIAN_TIME_SPAN, }, diff --git a/chainstate/src/rpc/mod.rs b/chainstate/src/rpc/mod.rs index 0f06f72bd2..449fa4ca83 100644 --- a/chainstate/src/rpc/mod.rs +++ b/chainstate/src/rpc/mod.rs @@ -525,7 +525,7 @@ impl ChainstateRpcServer for super::ChainstateHandle { file_path: &std::path::Path, include_stale_blocks: bool, ) -> RpcResult<()> { - // TODO: test this function in functional tests + // FIXME: test this function in functional tests let file_obj: std::fs::File = rpc::handle_result(std::fs::File::create(file_path))?; let writer: std::io::BufWriter> = std::io::BufWriter::new(Box::new(file_obj)); @@ -537,8 +537,8 @@ impl ChainstateRpcServer for super::ChainstateHandle { } async fn import_bootstrap_file(&self, file_path: &std::path::Path) -> RpcResult<()> { - // TODO: test this function in functional tests - let file_obj: std::fs::File = rpc::handle_result(std::fs::File::create(file_path))?; + // FIXME: test this function in 
functional tests + let file_obj: std::fs::File = rpc::handle_result(std::fs::File::open(file_path))?; let reader: std::io::BufReader> = std::io::BufReader::new(Box::new(file_obj)); diff --git a/chainstate/storage/Cargo.toml b/chainstate/storage/Cargo.toml index 4e5fe4ca67..cc36f0f421 100644 --- a/chainstate/storage/Cargo.toml +++ b/chainstate/storage/Cargo.toml @@ -17,7 +17,9 @@ pos-accounting = { path = "../../pos-accounting" } randomness = { path = "../../randomness" } serialization = { path = "../../serialization" } storage = { path = "../../storage" } +storage-failing = { path = "../../storage/failing" } storage-inmemory = { path = "../../storage/inmemory" } +storage-lmdb = { path = "../../storage/lmdb" } tokens-accounting = { path = "../../tokens-accounting" } utils = { path = "../../utils" } utxo = { path = "../../utxo" } diff --git a/chainstate/storage/src/internal/blockchain_storage.rs b/chainstate/storage/src/internal/blockchain_storage.rs new file mode 100644 index 0000000000..9d98e27040 --- /dev/null +++ b/chainstate/storage/src/internal/blockchain_storage.rs @@ -0,0 +1,72 @@ +// Copyright (c) 2021-2026 RBB S.r.l +// opensource@mintlayer.org +// SPDX-License-Identifier: MIT +// Licensed under the MIT License; +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://github.com/mintlayer/mintlayer-core/blob/master/LICENSE +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use storage_inmemory::{InMemory, InMemoryImpl}; +use storage_lmdb::{Lmdb, LmdbImpl}; + +use crate::Transactional; + +pub trait BlockchainStorageBackendImpl: storage::SharedBackendImpl { + fn set_reckless_mode(&self, set: bool) -> crate::Result<()>; + fn in_reckless_mode(&self) -> crate::Result; +} + +pub trait BlockchainStorageBackend: + storage::SharedBackend::ImplHelper> +{ + type ImplHelper: BlockchainStorageBackendImpl; +} + +pub trait BlockchainStorage: for<'tx> Transactional<'tx> + Send { + fn set_reckless_mode(&self, set: bool) -> crate::Result<()>; + fn in_reckless_mode(&self) -> crate::Result; +} + +impl BlockchainStorageBackendImpl for LmdbImpl { + fn set_reckless_mode(&self, set: bool) -> crate::Result<()> { + // When switching the reckless mode off, do a sync immediately. + let force_sync = self.in_reckless_mode()? && !set; + + self.set_no_sync_on_commit(set); + + if force_sync { + self.force_sync()?; + } + + Ok(()) + } + + fn in_reckless_mode(&self) -> crate::Result { + Ok(self.get_no_sync_on_commit()) + } +} + +impl BlockchainStorageBackend for Lmdb { + type ImplHelper = LmdbImpl; +} + +impl BlockchainStorageBackendImpl for InMemoryImpl { + fn set_reckless_mode(&self, _set: bool) -> crate::Result<()> { + Ok(()) + } + + fn in_reckless_mode(&self) -> crate::Result { + Ok(false) + } +} + +impl BlockchainStorageBackend for InMemory { + type ImplHelper = InMemoryImpl; +} diff --git a/chainstate/storage/src/internal/mod.rs b/chainstate/storage/src/internal/mod.rs index 3dd8c62ec7..6cc95aec3c 100644 --- a/chainstate/storage/src/internal/mod.rs +++ b/chainstate/storage/src/internal/mod.rs @@ -13,10 +13,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod store_tx; - +mod blockchain_storage; #[cfg(any(test, feature = "expensive-reads"))] mod expensive; +mod store_tx; +mod version; use std::collections::BTreeMap; @@ -25,19 +26,20 @@ use common::{ chain::{ChainConfig, DelegationId, PoolId}, primitives::Amount, }; +use logging::log; use pos_accounting::{ DelegationData, PoSAccountingStorageRead, PoSAccountingStorageWrite, PoolData, }; use utils::log_error; use crate::{ - schema::Schema, BlockchainStorage, BlockchainStorageRead, BlockchainStorageWrite, - TransactionRw, Transactional, + schema::Schema, BlockchainStorageRead, BlockchainStorageWrite, TransactionRw, Transactional, }; +pub use blockchain_storage::{ + BlockchainStorage, BlockchainStorageBackend, BlockchainStorageBackendImpl, +}; pub use store_tx::{StoreTxRo, StoreTxRw}; - -mod version; pub use version::ChainstateStorageVersion; /// Store for blockchain data, parametrized over the backend B @@ -113,7 +115,18 @@ impl<'tx, B: storage::SharedBackend + 'tx> Transactional<'tx> for Store { } } -impl BlockchainStorage for Store {} +impl BlockchainStorage for Store { + fn set_reckless_mode(&self, set: bool) -> crate::Result<()> { + let enabled = if set { "enabled" } else { "disabled" }; + log::warn!("Blockchain storage reckless mode {enabled}"); + + self.0.backend_impl().set_reckless_mode(set) + } + + fn in_reckless_mode(&self) -> crate::Result { + self.0.backend_impl().in_reckless_mode() + } +} impl PoSAccountingStorageRead for Store { type Error = crate::Error; diff --git a/chainstate/storage/src/internal/store_tx/mod.rs b/chainstate/storage/src/internal/store_tx/mod.rs index 265c927e39..046f1db4a9 100644 --- a/chainstate/storage/src/internal/store_tx/mod.rs +++ b/chainstate/storage/src/internal/store_tx/mod.rs @@ -70,6 +70,17 @@ pub struct StoreTxRw<'st, B: storage::SharedBackend> { } impl StoreTxRo<'_, B> { + // Read a value from the database + fn read_raw(&self, key: K) -> crate::Result>> + where + DbMap: schema::DbMap, + Schema: schema::HasDbMap, + K: 
EncodeLike, + { + let map = self.0.get::(); + Ok(map.get(key)?.map(|x| x.take_bytes().into_owned())) + } + // Read a value from the database and decode it fn read(&self, key: K) -> crate::Result> where @@ -78,7 +89,7 @@ impl StoreTxRo<'_, B> { K: EncodeLike, { let map = self.0.get::(); - map.get(key).map_err(crate::Error::from).map(|x| x.map(|x| x.decode())) + Ok(map.get(key)?.map(|x| x.decode())) } // Return true if an entry for the given key exists in the db. @@ -95,11 +106,9 @@ impl StoreTxRo<'_, B> { // Read a value for a well-known entry fn read_value(&self) -> crate::Result> { - self.read::(E::KEY).map(|x| { - x.map(|x| { - E::Value::decode_all(&mut x.as_ref()).expect("db values to be encoded correctly") - }) - }) + Ok(self.read::(E::KEY)?.map(|x| { + E::Value::decode_all(&mut x.as_ref()).expect("db values to be encoded correctly") + })) } } @@ -139,6 +148,17 @@ impl<'st, B: storage::SharedBackend> StoreTxRw<'st, B> { Ok(self.db_tx_ref()?.get::()) } + // Read a value from the database + fn read_raw(&self, key: K) -> crate::Result>> + where + DbMap: schema::DbMap, + Schema: schema::HasDbMap, + K: EncodeLike, + { + let map = self.db_tx_ref()?.get::(); + Ok(map.get(key)?.map(|x| x.take_bytes().into_owned())) + } + // Read a value from the database and decode it fn read(&self, key: K) -> crate::Result> where @@ -146,14 +166,8 @@ impl<'st, B: storage::SharedBackend> StoreTxRw<'st, B> { Schema: schema::HasDbMap, K: EncodeLike, { - logging::log::trace!( - "Reading {}/{}", - DbMap::NAME, - serialization::hex_encoded::HexEncoded::new(&key), - ); - let map = self.db_tx_ref()?.get::(); - map.get(key).map_err(crate::Error::from).map(|x| x.map(|x| x.decode())) + Ok(map.get(key)?.map(|x| x.decode())) } // Return true if an entry for the given key exists in the db. 
@@ -165,16 +179,14 @@ impl<'st, B: storage::SharedBackend> StoreTxRw<'st, B> { K: EncodeLike, { let map = self.db_tx_ref()?.get::(); - map.get(key).map_err(crate::Error::from).map(|x| x.is_some()) + Ok(map.get(key)?.is_some()) } // Read a value for a well-known entry fn read_value(&self) -> crate::Result> { - self.read::(E::KEY).map(|x| { - x.map(|x| { - E::Value::decode_all(&mut x.as_ref()).expect("db values to be encoded correctly") - }) - }) + Ok(self.read::(E::KEY)?.map(|x| { + E::Value::decode_all(&mut x.as_ref()).expect("db values to be encoded correctly") + })) } // Encode a value and write it to the database @@ -185,12 +197,6 @@ impl<'st, B: storage::SharedBackend> StoreTxRw<'st, B> { K: EncodeLike<::Key>, V: EncodeLike<::Value>, { - logging::log::trace!( - "Writing {}/{}", - DbMap::NAME, - serialization::hex_encoded::HexEncoded::new(&key), - ); - self.track_error(|tx| Ok(tx.get_mut::().put(key, value)?)) } diff --git a/chainstate/storage/src/internal/store_tx/read_impls.rs b/chainstate/storage/src/internal/store_tx/read_impls.rs index c244b195bf..90b8a8a650 100644 --- a/chainstate/storage/src/internal/store_tx/read_impls.rs +++ b/chainstate/storage/src/internal/store_tx/read_impls.rs @@ -118,6 +118,11 @@ impl BlockchainStorageRead for super::StoreTxRo<'_, B self.read::(id) } + #[log_error] + fn get_encoded_block(&self, id: &Id) -> crate::Result>> { + self.read_raw::(id) + } + #[log_error] fn block_exists(&self, id: &Id) -> crate::Result { self.entry_exists::(id) @@ -440,6 +445,11 @@ impl BlockchainStorageRead for super::StoreTxRw<'_, B self.read::(id) } + #[log_error] + fn get_encoded_block(&self, id: &Id) -> crate::Result>> { + self.read_raw::(id) + } + #[log_error] fn block_exists(&self, id: &Id) -> crate::Result { self.entry_exists::(id) diff --git a/chainstate/storage/src/lib.rs b/chainstate/storage/src/lib.rs index 2c62b0fa46..aac39b862a 100644 --- a/chainstate/storage/src/lib.rs +++ b/chainstate/storage/src/lib.rs @@ -48,7 +48,10 @@ use 
tokens_accounting::{ }; use utxo::{UtxosBlockUndo, UtxosStorageRead, UtxosStorageWrite}; -pub use internal::{ChainstateStorageVersion, Store}; +pub use internal::{ + BlockchainStorage, BlockchainStorageBackend, BlockchainStorageBackendImpl, + ChainstateStorageVersion, Store, +}; /// Possibly failing result of blockchain storage query pub type Result = chainstate_types::storage_result::Result; @@ -86,6 +89,9 @@ pub trait BlockchainStorageRead: /// Get block by its hash fn get_block(&self, id: &Id) -> crate::Result>; + /// Get block by its hash, in its encoded form + fn get_encoded_block(&self, id: &Id) -> crate::Result>>; + /// Return true if the block exists in the db and false otherwise. /// This is cheaper than calling `get_block` and checking for `is_some`. fn block_exists(&self, id: &Id) -> crate::Result; @@ -306,5 +312,3 @@ pub trait Transactional<'t> { /// Start a read-write transaction. fn transaction_rw<'s: 't>(&'s self, size: Option) -> Result; } - -pub trait BlockchainStorage: for<'tx> Transactional<'tx> + Send {} diff --git a/chainstate/storage/src/mock/mock_impl.rs b/chainstate/storage/src/mock/mock_impl.rs index 961d72a29a..2ba535aa4a 100644 --- a/chainstate/storage/src/mock/mock_impl.rs +++ b/chainstate/storage/src/mock/mock_impl.rs @@ -57,6 +57,7 @@ mockall::mock! { fn get_best_block_id(&self) -> crate::Result>>; fn get_block_index(&self, id: &Id) -> crate::Result>; fn get_block(&self, id: &Id) -> crate::Result>; + fn get_encoded_block(&self, id: &Id) -> crate::Result>>; fn block_exists(&self, id: &Id) -> crate::Result; fn get_block_reward(&self, block_index: &BlockIndex) -> crate::Result>; fn get_block_header(&self, id: &Id) -> crate::Result>; @@ -349,7 +350,10 @@ mockall::mock! 
{ fn transaction_rw<'st>(&'st self, size: Option) -> crate::Result where 'st: 'tx; } - impl crate::BlockchainStorage for Store {} + impl crate::BlockchainStorage for Store { + fn set_reckless_mode(&self, set: bool) -> crate::Result<()>; + fn in_reckless_mode(&self) -> crate::Result; + } } mockall::mock! { @@ -363,6 +367,7 @@ mockall::mock! { fn get_best_block_id(&self) -> crate::Result>>; fn get_block_index(&self, id: &Id) -> crate::Result>; fn get_block(&self, id: &Id) -> crate::Result>; + fn get_encoded_block(&self, id: &Id) -> crate::Result>>; fn block_exists(&self, id: &Id) -> crate::Result; fn get_block_reward(&self, block_index: &BlockIndex) -> crate::Result>; fn get_block_header(&self, id: &Id) -> crate::Result>; @@ -489,6 +494,7 @@ mockall::mock! { fn get_chain_type(&self) -> crate::Result>; fn get_best_block_id(&self) -> crate::Result>>; fn get_block(&self, id: &Id) -> crate::Result>; + fn get_encoded_block(&self, id: &Id) -> crate::Result>>; fn block_exists(&self, id: &Id) -> crate::Result; fn get_block_index(&self, id: &Id) -> crate::Result>; fn get_block_reward(&self, block_index: &BlockIndex) -> crate::Result>; diff --git a/chainstate/test-framework/Cargo.toml b/chainstate/test-framework/Cargo.toml index ca45d463ca..213c4eb550 100644 --- a/chainstate/test-framework/Cargo.toml +++ b/chainstate/test-framework/Cargo.toml @@ -20,6 +20,7 @@ orders-accounting = { path = "../../orders-accounting" } pos-accounting = { path = "../../pos-accounting" } randomness = { path = "../../randomness" } serialization = { path = "../../serialization" } +storage = { path = "../../storage" } storage-failing = { path = "../../storage/failing" } storage-inmemory = { path = "../../storage/inmemory" } test-utils = { path = "../../test-utils" } diff --git a/chainstate/test-framework/src/blockchain_storage_trivial_implementor.rs b/chainstate/test-framework/src/blockchain_storage_trivial_implementor.rs new file mode 100644 index 0000000000..3c711c5abc --- /dev/null +++ 
b/chainstate/test-framework/src/blockchain_storage_trivial_implementor.rs @@ -0,0 +1,98 @@ +// Copyright (c) 2021-2026 RBB S.r.l +// opensource@mintlayer.org +// SPDX-License-Identifier: MIT +// Licensed under the MIT License; +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://github.com/mintlayer/mintlayer-core/blob/master/LICENSE +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use chainstate_storage::{BlockchainStorageBackend, BlockchainStorageBackendImpl}; +use storage::{Backend, BackendImpl, DbDesc, SharedBackend, SharedBackendImpl}; +use utils::{atomics::RelaxedAtomicBool, shallow_clone::ShallowClone}; + +/// A struct that implements BlockchainStorageBackendImpl by simply storing the `in_reckless_mode` +/// flag in a field. 
+#[derive(Clone)] +pub struct BlockchainStorageImplTrivialImplementor { + internal: T, + in_reckless_mode: Arc, +} + +impl BackendImpl for BlockchainStorageImplTrivialImplementor { + type TxRo<'a> = ::TxRo<'a>; + type TxRw<'a> = ::TxRw<'a>; + + fn transaction_ro(&self) -> storage::Result> { + self.internal.transaction_ro() + } + + fn transaction_rw(&mut self, size: Option) -> storage::Result> { + self.internal.transaction_rw(size) + } +} + +impl SharedBackendImpl for BlockchainStorageImplTrivialImplementor { + fn transaction_rw(&self, size: Option) -> storage::Result> { + self.internal.transaction_rw(size) + } +} + +impl ShallowClone for BlockchainStorageImplTrivialImplementor { + fn shallow_clone(&self) -> Self { + Self { + internal: self.internal.shallow_clone(), + in_reckless_mode: Arc::clone(&self.in_reckless_mode), + } + } +} + +impl BlockchainStorageBackendImpl + for BlockchainStorageImplTrivialImplementor +{ + fn set_reckless_mode(&self, set: bool) -> chainstate_storage::Result<()> { + self.in_reckless_mode.store(set); + Ok(()) + } + + fn in_reckless_mode(&self) -> chainstate_storage::Result { + Ok(self.in_reckless_mode.load()) + } +} + +/// A struct that implements `BlockchainStorageBackend via `BlockchainStorageImplTrivialImplementor`. 
+#[derive(Default)] +pub struct BlockchainStorageTrivialImplementor(T); + +impl BlockchainStorageTrivialImplementor { + pub fn new(inner: T) -> Self { + Self(inner) + } +} + +impl Backend for BlockchainStorageTrivialImplementor { + type Impl = BlockchainStorageImplTrivialImplementor<::Impl>; + + fn open(self, desc: DbDesc) -> storage::Result { + Ok(BlockchainStorageImplTrivialImplementor { + internal: self.0.open(desc)?, + in_reckless_mode: Arc::new(RelaxedAtomicBool::new(false)), + }) + } +} + +impl SharedBackend for BlockchainStorageTrivialImplementor { + type ImplHelper = BlockchainStorageImplTrivialImplementor<::Impl>; +} + +impl BlockchainStorageBackend for BlockchainStorageTrivialImplementor { + type ImplHelper = BlockchainStorageImplTrivialImplementor<::Impl>; +} diff --git a/chainstate/test-framework/src/lib.rs b/chainstate/test-framework/src/lib.rs index 8f7cb15954..8b42f363ef 100644 --- a/chainstate/test-framework/src/lib.rs +++ b/chainstate/test-framework/src/lib.rs @@ -16,6 +16,7 @@ #![allow(clippy::unwrap_used)] mod block_builder; +mod blockchain_storage_trivial_implementor; mod framework; mod framework_builder; pub mod helpers; diff --git a/chainstate/test-framework/src/storage.rs b/chainstate/test-framework/src/storage.rs index c5955507f5..39d3424e14 100644 --- a/chainstate/test-framework/src/storage.rs +++ b/chainstate/test-framework/src/storage.rs @@ -17,9 +17,12 @@ use storage_failing::{Failing, FailureConfig}; use storage_inmemory::InMemory; use test_utils::random::Seed; +use crate::blockchain_storage_trivial_implementor::BlockchainStorageTrivialImplementor; + pub use storage_failing::StorageError; -pub type TestStore = chainstate_storage::Store>; +pub type TestStore = + chainstate_storage::Store>>; pub type ConfigBuilder = storage_failing::Builder; /// A builder for chainstate testing storage @@ -47,7 +50,8 @@ impl Builder { /// Build the storage. 
pub fn build(self, seed: Seed) -> TestStore { let Self { inner, config } = self; - let backend = Failing::new(inner, config.build(), seed); + let backend = + BlockchainStorageTrivialImplementor::new(Failing::new(inner, config.build(), seed)); TestStore::from_backend(backend).expect("backend creation to succeed") } diff --git a/chainstate/test-suite/Cargo.toml b/chainstate/test-suite/Cargo.toml index 0ffa6fe4cb..795583a79a 100644 --- a/chainstate/test-suite/Cargo.toml +++ b/chainstate/test-suite/Cargo.toml @@ -39,6 +39,7 @@ criterion.workspace = true expect-test.workspace = true rand.workspace = true rstest.workspace = true +strum.workspace = true [[bench]] name = "benches" diff --git a/chainstate/test-suite/src/tests/bootstrap.rs b/chainstate/test-suite/src/tests/bootstrap.rs index 10884ca955..310f0b169d 100644 --- a/chainstate/test-suite/src/tests/bootstrap.rs +++ b/chainstate/test-suite/src/tests/bootstrap.rs @@ -13,19 +13,28 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use serialization::Encode; -use std::collections::BTreeSet; -use std::io::BufWriter; +use std::{ + collections::{BTreeMap, BTreeSet}, + io::BufWriter, +}; -use chainstate::chainstate_interface::ChainstateInterface; -use chainstate::ChainstateConfig; -use chainstate_test_framework::TestFramework; -use common::chain::Block; -use common::primitives::Id; -use common::primitives::Idable; +use itertools::Itertools as _; use rstest::rstest; -use test_utils::random::make_seedable_rng; -use test_utils::random::Seed; +use strum::IntoEnumIterator; + +use chainstate::{ + chainstate_interface::ChainstateInterface, BootstrapError, ChainstateConfig, ChainstateError, +}; +use chainstate_storage::BlockchainStorage; +use chainstate_test_framework::TestFramework; +use common::{ + chain::{self, config::ChainType, Block, ChainConfig, Destination, NetUpgrades}, + primitives::{Id, Idable}, +}; +use logging::log; +use rand::{seq::IteratorRandom as _, CryptoRng, Rng}; +use serialization::Encode as _; +use test_utils::random::{gen_random_bytes, make_seedable_rng, Seed}; /// Ensure that the blocks vector put blocks in order with height in the blockchain fn check_height_order(blocks: &[Id], tf: &TestFramework) { @@ -38,36 +47,119 @@ fn check_height_order(blocks: &[Id], tf: &TestFramework) { } } +const EXPECTED_MAGIC_BYTES: &str = "MLBTSTRP"; + +fn make_header_data(chain_config: &ChainConfig, version: u32, blocks_count: u64) -> Vec { + let mut data = Vec::new(); + data.extend_from_slice(EXPECTED_MAGIC_BYTES.as_bytes()); + data.extend_from_slice(&chain_config.magic_bytes().bytes()); + data.extend_from_slice(&version.to_le_bytes()); + data.extend_from_slice(&blocks_count.to_le_bytes()); + + data +} + +fn append_block_data_for_v0(dest: &mut Vec, encoded_block_data: &[u8]) { + dest.extend_from_slice(&(encoded_block_data.len() as u32).to_le_bytes()); + dest.extend_from_slice(encoded_block_data); +} + +fn gen_blocks( + chain_config: ChainConfig, + blocks_count: usize, + mut rng: impl Rng + 
CryptoRng, +) -> Vec { + let mut tf = TestFramework::builder(&mut rng).with_chain_config(chain_config).build(); + let genesis_id = tf.genesis().get_id(); + tf.create_chain(&genesis_id.into(), blocks_count, &mut rng).unwrap(); + + tf.chainstate + .get_block_id_tree_as_list() + .unwrap() + .iter() + .map(|block_id| tf.chainstate.get_block(block_id).unwrap().unwrap()) + .collect_vec() +} + +fn export_to_vec(tf: &TestFramework, with_stale_blocks: bool) -> Vec { + let mut write_buffer = Vec::new(); + + let writer: BufWriter> = + BufWriter::new(Box::new(&mut write_buffer)); + + tf.chainstate.export_bootstrap_stream(writer, with_stale_blocks).unwrap(); + + assert!(write_buffer.starts_with(EXPECTED_MAGIC_BYTES.as_bytes())); + + write_buffer +} + +fn import_from_slice_with_callback std::io::Result<()> + Send>( + tf: &mut TestFramework, + bytes: &[u8], + callback: C, +) -> Result<(), ChainstateError> { + let reader = ReaderWithCallback { + inner: bytes, + callback, + }; + let reader: std::io::BufReader> = + std::io::BufReader::new(Box::new(reader)); + + tf.chainstate.import_bootstrap_stream(reader) +} + +fn import_from_slice(tf: &mut TestFramework, bytes: &[u8]) -> Result<(), ChainstateError> { + import_from_slice_with_callback(tf, bytes, || Ok(())) +} + +struct ReaderWithCallback { + inner: R, + callback: C, +} + +impl std::io::Result<()>> std::io::Read for ReaderWithCallback { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + (self.callback)()?; + self.inner.read(buf) + } +} + #[rstest] #[trace] #[case(Seed::from_entropy())] -fn bootstrap_tests(#[case] seed: Seed) { +fn successful_import_export(#[case] seed: Seed) { utils::concurrency::model(move || { let mut rng = make_seedable_rng(seed); - let mut tf1 = TestFramework::builder(&mut rng).build(); - let genesis_id = tf1.genesis().get_id(); + + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); + + let mut source_tf = + TestFramework::builder(&mut 
rng).with_chain_config(chain_config.clone()).build(); + let genesis_id = source_tf.genesis().get_id(); let chain1 = { - tf1.create_chain(&genesis_id.into(), 5, &mut rng).unwrap(); + source_tf.create_chain(&genesis_id.into(), 5, &mut rng).unwrap(); - let tree_vec = tf1.chainstate.get_block_id_tree_as_list().unwrap(); - let mainchain_vec = tf1.chainstate.get_mainchain_blocks_list().unwrap(); + let tree_vec = source_tf.chainstate.get_block_id_tree_as_list().unwrap(); + let mainchain_vec = source_tf.chainstate.get_mainchain_blocks_list().unwrap(); // since there's only one chain, both should be equal assert_eq!(mainchain_vec, tree_vec); assert_eq!(mainchain_vec.len(), 5); - check_height_order(&mainchain_vec, &tf1); - check_height_order(&tree_vec, &tf1); + check_height_order(&mainchain_vec, &source_tf); + check_height_order(&tree_vec, &source_tf); mainchain_vec }; let chain2 = { - tf1.create_chain(&genesis_id.into(), 15, &mut rng).unwrap(); + source_tf.create_chain(&genesis_id.into(), 15, &mut rng).unwrap(); - let tree_vec = tf1.chainstate.get_block_id_tree_as_list().unwrap(); - let mainchain_vec = tf1.chainstate.get_mainchain_blocks_list().unwrap(); + let tree_vec = source_tf.chainstate.get_block_id_tree_as_list().unwrap(); + let mainchain_vec = source_tf.chainstate.get_mainchain_blocks_list().unwrap(); // since now we have more than one chain, we have to ensure that all blocks exist assert!(mainchain_vec.iter().all(|item| tree_vec.contains(item))); @@ -75,17 +167,17 @@ fn bootstrap_tests(#[case] seed: Seed) { assert_eq!(mainchain_vec.len(), 15); assert_eq!(tree_vec.len(), 20); - check_height_order(&mainchain_vec, &tf1); - check_height_order(&tree_vec, &tf1); + check_height_order(&mainchain_vec, &source_tf); + check_height_order(&tree_vec, &source_tf); mainchain_vec }; let chain3 = { - tf1.create_chain(&genesis_id.into(), 25, &mut rng).unwrap(); + source_tf.create_chain(&genesis_id.into(), 25, &mut rng).unwrap(); - let tree_vec = 
tf1.chainstate.get_block_id_tree_as_list().unwrap(); - let mainchain_vec = tf1.chainstate.get_mainchain_blocks_list().unwrap(); + let tree_vec = source_tf.chainstate.get_block_id_tree_as_list().unwrap(); + let mainchain_vec = source_tf.chainstate.get_mainchain_blocks_list().unwrap(); // since now we have more than one chain, we have to ensure that all blocks exist assert!(mainchain_vec.iter().all(|item| tree_vec.contains(item))); @@ -94,8 +186,8 @@ fn bootstrap_tests(#[case] seed: Seed) { assert_eq!(mainchain_vec.len(), 25); assert_eq!(tree_vec.len(), 45); - check_height_order(&mainchain_vec, &tf1); - check_height_order(&tree_vec, &tf1); + check_height_order(&mainchain_vec, &source_tf); + check_height_order(&tree_vec, &source_tf); mainchain_vec }; @@ -103,15 +195,16 @@ fn bootstrap_tests(#[case] seed: Seed) { let chain4 = { let len_to_cut_from_branch = 5; let new_branch_len = 35; - tf1.create_chain( - &chain2[chain2.len() - 1 - len_to_cut_from_branch].to_hash().into(), - new_branch_len, - &mut rng, - ) - .unwrap(); + source_tf + .create_chain( + &chain2[chain2.len() - 1 - len_to_cut_from_branch].to_hash().into(), + new_branch_len, + &mut rng, + ) + .unwrap(); - let tree_vec = tf1.chainstate.get_block_id_tree_as_list().unwrap(); - let mainchain_vec = tf1.chainstate.get_mainchain_blocks_list().unwrap(); + let tree_vec = source_tf.chainstate.get_block_id_tree_as_list().unwrap(); + let mainchain_vec = source_tf.chainstate.get_mainchain_blocks_list().unwrap(); // since now we have more than one chain, we have to ensure that all blocks exist assert!(mainchain_vec.iter().all(|item| tree_vec.contains(item))); @@ -124,18 +217,18 @@ fn bootstrap_tests(#[case] seed: Seed) { ); assert_eq!(tree_vec.len(), 45 + new_branch_len); - check_height_order(&mainchain_vec, &tf1); - check_height_order(&tree_vec, &tf1); + check_height_order(&mainchain_vec, &source_tf); + check_height_order(&tree_vec, &source_tf); mainchain_vec }; // from now on, the original TestFramework cannot be 
modified - let tf1 = tf1; + let source_tf = source_tf; // double-check that we have all blocks of all chains - let all_blocks = { - let tree_vec = tf1.chainstate.get_block_id_tree_as_list().unwrap(); + { + let tree_vec = source_tf.chainstate.get_block_id_tree_as_list().unwrap(); let all_blocks = chain1 .into_iter() @@ -152,114 +245,486 @@ fn bootstrap_tests(#[case] seed: Seed) { tree_vec }; - // get max block size to test buffers on threshold - let largest_block_size = { - all_blocks - .iter() - .map(|id| tf1.block(*id).encoded_size()) - .max() - .expect("The list can't be empty so this must have something in it") - }; + let bootstrap_with_stale_blocks = export_to_vec(&source_tf, true); + let bootstrap_no_stale_blocks = export_to_vec(&source_tf, false); - // bootstrap export - // Note: here "orphans" means "blocks not on mainchain" - let make_bootstrap_as_vec = |with_orphans: bool| { - let mut write_buffer = Vec::new(); + // Test importing all blocks, including stale ones + { + let mut dest_tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + + // Import the blocks + { + import_from_slice(&mut dest_tf, &bootstrap_with_stale_blocks).unwrap(); + + assert_eq!( + dest_tf.chainstate.get_block_id_tree_as_list().unwrap(), + source_tf.chainstate.get_block_id_tree_as_list().unwrap(), + ); + } + + // Do it again; it shouldn't fail and just skip known blocks instead. 
+ { + import_from_slice(&mut dest_tf, &bootstrap_with_stale_blocks).unwrap(); + + assert_eq!( + dest_tf.chainstate.get_block_id_tree_as_list().unwrap(), + source_tf.chainstate.get_block_id_tree_as_list().unwrap(), + ); + } + } - let writer: BufWriter> = - BufWriter::new(Box::new(&mut write_buffer)); + // Test importing all non-stale blocks + { + let mut dest_tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + + // Import the blocks + { + import_from_slice(&mut dest_tf, &bootstrap_no_stale_blocks).unwrap(); + + // The dest_tf chain will only contain the mainchain + assert_eq!( + dest_tf.chainstate.get_block_id_tree_as_list().unwrap(), + source_tf.chainstate.get_mainchain_blocks_list().unwrap(), + ); + + assert_eq!( + dest_tf.chainstate.get_mainchain_blocks_list().unwrap(), + source_tf.chainstate.get_mainchain_blocks_list().unwrap(), + ); + } + + // Now import the entire tree into the same test framework; it should skip already + // existing blocks and import the rest. 
+ { + import_from_slice(&mut dest_tf, &bootstrap_with_stale_blocks).unwrap(); + + // The dest_tf chain will only contain the mainchain + assert_eq!( + dest_tf.chainstate.get_block_id_tree_as_list().unwrap(), + source_tf.chainstate.get_block_id_tree_as_list().unwrap(), + ); + } + } + }); +} - tf1.chainstate.export_bootstrap_stream(writer, with_orphans).unwrap(); +// Construct v0 data manually and import it +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn exact_v0_format(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); - write_buffer - }; + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); - let bootstrap_with_orphans = make_bootstrap_as_vec(true); - let bootstrap_no_orphans = make_bootstrap_as_vec(false); + let mut source_tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + let source_tf_genesis_id = source_tf.genesis().get_id(); + let blocks_count = rng.gen_range(5..10); + source_tf + .create_chain(&source_tf_genesis_id.into(), blocks_count, &mut rng) + .unwrap(); + let source_tf_block_ids = source_tf.chainstate.get_block_id_tree_as_list().unwrap(); - // test importing all blocks with orphans - { - let mut tf2 = TestFramework::builder(&mut rng) - .with_chainstate_config( - ChainstateConfig::new() - .with_max_orphan_blocks(0) - .with_bootstrap_buffer_sizes((largest_block_size, 2 * largest_block_size)), - ) - .build(); + let mut data = make_header_data(&chain_config, 0, blocks_count as u64); - let reader: std::io::BufReader> = - std::io::BufReader::new(Box::new(bootstrap_with_orphans.as_slice())); + for block_id in &source_tf_block_ids { + let encoded_block = source_tf.chainstate.get_block(block_id).unwrap().unwrap().encode(); + append_block_data_for_v0(&mut data, &encoded_block); + } - tf2.chainstate.import_bootstrap_stream(reader).unwrap(); + let mut dest_tf = + TestFramework::builder(&mut 
rng).with_chain_config(chain_config.clone()).build(); - assert_eq!( - tf2.chainstate.get_block_id_tree_as_list().unwrap(), - tf1.chainstate.get_block_id_tree_as_list().unwrap(), - ); - } + import_from_slice(&mut dest_tf, &data).unwrap(); - // test importing all blocks with no orphans - { - let mut tf3 = TestFramework::builder(&mut rng) - .with_chainstate_config( - ChainstateConfig::new() - .with_max_orphan_blocks(0) - .with_bootstrap_buffer_sizes((largest_block_size, 2 * largest_block_size)), - ) - .build(); + assert_eq!( + dest_tf.chainstate.get_block_id_tree_as_list().unwrap(), + source_tf_block_ids, + ); + }); +} + +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn wrong_chain(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); + + let chain_type1 = ChainType::iter().choose(&mut rng).unwrap(); + let chain_type2 = + ChainType::iter().filter(|ct| ct != &chain_type1).choose(&mut rng).unwrap(); + + let chain_config1 = make_chain_config(chain_type1); + let chain_config2 = make_chain_config(chain_type2); + + let mut tf1 = TestFramework::builder(&mut rng).with_chain_config(chain_config1).build(); + let genesis1_id = tf1.genesis().get_id(); + + tf1.create_chain(&genesis1_id.into(), 5, &mut rng).unwrap(); + let tf1_orig_block_ids = tf1.chainstate.get_block_id_tree_as_list().unwrap(); + let tf1_export = export_to_vec(&tf1, false); + + let mut tf2 = TestFramework::builder(&mut rng).with_chain_config(chain_config2).build(); + let genesis2_id = tf2.genesis().get_id(); + + tf2.create_chain(&genesis2_id.into(), 5, &mut rng).unwrap(); + let tf2_orig_block_ids = tf2.chainstate.get_block_id_tree_as_list().unwrap(); + let tf2_export = export_to_vec(&tf2, false); + + // Import tf2's blocks into tf1 + let err = import_from_slice(&mut tf1, &tf2_export).unwrap_err(); + assert_eq!( + err, + ChainstateError::BootstrapError(BootstrapError::WrongChain) + ); + assert_eq!( + tf1.chainstate.get_block_id_tree_as_list().unwrap(), + 
tf1_orig_block_ids + ); + + // Import tf1's blocks into tf2 + let err = import_from_slice(&mut tf2, &tf1_export).unwrap_err(); + assert_eq!( + err, + ChainstateError::BootstrapError(BootstrapError::WrongChain) + ); + assert_eq!( + tf2.chainstate.get_block_id_tree_as_list().unwrap(), + tf2_orig_block_ids + ); + }); +} + +// If the imported data starts with some chain's magic bytes, it should be recognized as +// the legacy file format. +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn legacy_file_format(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); - let reader: std::io::BufReader> = - std::io::BufReader::new(Box::new(bootstrap_no_orphans.as_slice())); + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); - tf3.chainstate.import_bootstrap_stream(reader).unwrap(); + let mut tf = TestFramework::builder(&mut rng).with_chain_config(chain_config).build(); + let genesis_id = tf.genesis().get_id(); + tf.create_chain(&genesis_id.into(), 5, &mut rng).unwrap(); + let orig_block_ids = tf.chainstate.get_block_id_tree_as_list().unwrap(); - //Without the orphans the tf3 chain will only contain the mainchain + for chain_type in ChainType::iter() { + let data = [ + chain_type.magic_bytes().bytes().as_slice(), + gen_random_bytes(&mut rng, 100, 1000).as_slice(), + ] + .concat(); + + let err = import_from_slice(&mut tf, &data).unwrap_err(); assert_eq!( - tf3.chainstate.get_block_id_tree_as_list().unwrap(), - tf1.chainstate.get_mainchain_blocks_list().unwrap(), + err, + ChainstateError::BootstrapError(BootstrapError::LegacyFileFormat) ); assert_eq!( - tf3.chainstate.get_mainchain_blocks_list().unwrap(), - tf1.chainstate.get_mainchain_blocks_list().unwrap(), + tf.chainstate.get_block_id_tree_as_list().unwrap(), + orig_block_ids ); } + }); +} - // test importing all blocks with orphans with default buffer size - { - let mut tf4 = TestFramework::builder(&mut rng) - 
.with_chainstate_config(ChainstateConfig::new().with_max_orphan_blocks(0)) - .build(); +// Import data that only contains an incomplete header. +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn file_too_small(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); + + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); + + let mut tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + let genesis_id = tf.genesis().get_id(); + tf.create_chain(&genesis_id.into(), 5, &mut rng).unwrap(); + let orig_block_ids = tf.chainstate.get_block_id_tree_as_list().unwrap(); + + let header_data = make_header_data(&chain_config, 0, rng.gen()); + + let incomplete_header_data = + &header_data[0..rng.gen_range(EXPECTED_MAGIC_BYTES.len()..header_data.len() - 1)]; + + let err = import_from_slice(&mut tf, incomplete_header_data).unwrap_err(); + assert_eq!( + err, + ChainstateError::BootstrapError(BootstrapError::FileTooSmall) + ); - let reader: std::io::BufReader> = - std::io::BufReader::new(Box::new(bootstrap_with_orphans.as_slice())); + assert_eq!( + tf.chainstate.get_block_id_tree_as_list().unwrap(), + orig_block_ids + ); + }); +} + +// The header's version field contains a non-supported version number (i.e. something other than 0). 
+#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn unsupported_version(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); + + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); - tf4.chainstate.import_bootstrap_stream(reader).unwrap(); + let mut tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + let genesis_id = tf.genesis().get_id(); + tf.create_chain(&genesis_id.into(), 5, &mut rng).unwrap(); + let orig_block_ids = tf.chainstate.get_block_id_tree_as_list().unwrap(); + let valid_blocks_count = rng.gen_range(1..5); + let valid_blocks = gen_blocks(chain_config.clone(), valid_blocks_count, &mut rng); + + // Make a header with an unsupported version, followed by valid v0 data. + let format_version = rng.gen_range(1..10); + let mut data = make_header_data(&chain_config, format_version, valid_blocks_count as u64); + for valid_block in &valid_blocks { + let encoded_block = valid_block.encode(); + append_block_data_for_v0(&mut data, &encoded_block); + } + + // Importing should fail right away and no blocks should be imported. + let err = import_from_slice(&mut tf, &data).unwrap_err(); + assert_eq!( + err, + ChainstateError::BootstrapError(BootstrapError::UnsupportedFutureFormatVersion) + ); + + assert_eq!( + tf.chainstate.get_block_id_tree_as_list().unwrap(), + orig_block_ids + ); + + // Sanity check - if we reset the version to zero, the import will succeed. 
+ data[12..16].fill(0); + import_from_slice(&mut tf, &data).unwrap(); + let expected_block_ids = BTreeSet::from_iter( + orig_block_ids + .into_iter() + .chain(valid_blocks.iter().map(|block| block.get_id())), + ); + let actual_block_ids = + BTreeSet::from_iter(tf.chainstate.get_block_id_tree_as_list().unwrap()); + assert_eq!(actual_block_ids, expected_block_ids); + }); +} + +fn make_chain_config(chain_type: ChainType) -> ChainConfig { + chain::config::Builder::new(chain_type) + .consensus_upgrades(NetUpgrades::unit_tests()) + .data_in_no_signature_witness_allowed(true) + .genesis_unittest(Destination::AnyoneCanSpend) + // Force empty checkpoints because a custom genesis is used. + .checkpoints(BTreeMap::new()) + .build() +} + +// The data starts with wrong format magic bytes. +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn wrong_format(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); + + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); + + let mut tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + let genesis_id = tf.genesis().get_id(); + tf.create_chain(&genesis_id.into(), 5, &mut rng).unwrap(); + let orig_block_ids = tf.chainstate.get_block_id_tree_as_list().unwrap(); + + let valid_blocks_count = rng.gen_range(1..5); + let valid_blocks = gen_blocks(chain_config.clone(), valid_blocks_count, &mut rng); + + // Make a header with wrong format magic bytes, followed by valid v0 data. 
+ let mut data = make_header_data(&chain_config, 0, valid_blocks_count as u64); + let byte_idx_to_mutate = rng.gen_range(0..EXPECTED_MAGIC_BYTES.len()); + data[byte_idx_to_mutate] = data[byte_idx_to_mutate].wrapping_add(rng.gen_range(1..255)); + for valid_block in &valid_blocks { + let encoded_block = valid_block.encode(); + append_block_data_for_v0(&mut data, &encoded_block); + } + + // Importing should fail right away and no blocks should be imported. + let err = import_from_slice(&mut tf, &data).unwrap_err(); + assert_eq!( + err, + ChainstateError::BootstrapError(BootstrapError::WrongFileFormat) + ); + + assert_eq!( + tf.chainstate.get_block_id_tree_as_list().unwrap(), + orig_block_ids + ); + + // Sanity check - if we overwrite format magic bytes with correct ones, the import will succeed. + data[0..EXPECTED_MAGIC_BYTES.len()].copy_from_slice(EXPECTED_MAGIC_BYTES.as_bytes()); + import_from_slice(&mut tf, &data).unwrap(); + let expected_block_ids = BTreeSet::from_iter( + orig_block_ids + .into_iter() + .chain(valid_blocks.iter().map(|block| block.get_id())), + ); + let actual_block_ids = + BTreeSet::from_iter(tf.chainstate.get_block_id_tree_as_list().unwrap()); + assert_eq!(actual_block_ids, expected_block_ids); + }); +} + +// The file format is correct to some point, but then the data ends abruptly. 
+#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn bad_v0_file(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); + + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); + + #[derive(Debug, strum::EnumIter)] + enum TestKind { + BadBlockLenLen, + BadBlockDataLen, + } + + let valid_blocks_count = rng.gen_range(1..5); + let valid_blocks = gen_blocks(chain_config.clone(), valid_blocks_count, &mut rng); + + for kind in TestKind::iter() { + log::debug!("Kind is {kind:?}"); + + let mut tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + let genesis_id = tf.genesis().get_id(); + tf.create_chain(&genesis_id.into(), 5, &mut rng).unwrap(); + let orig_block_ids = tf.chainstate.get_block_id_tree_as_list().unwrap(); + + // Make a header, followed by `valid_blocks_count-1` valid blocks. + let mut data = make_header_data(&chain_config, 0, valid_blocks_count as u64); + + for valid_block in valid_blocks.iter().take(valid_blocks.len() - 1) { + let encoded_block = valid_block.encode(); + append_block_data_for_v0(&mut data, &encoded_block); + } + + let last_block_data = { + let mut data = Vec::new(); + let encoded_block = valid_blocks.last().unwrap().encode(); + append_block_data_for_v0(&mut data, &encoded_block); + data + }; + + let last_block_cutoff_pos = match kind { + TestKind::BadBlockLenLen => { + // Either the file ends right after the previously written valid_blocks_count-1 blocks, + // or some portion of the block length field is present. + rng.gen_range(0..3) + } + TestKind::BadBlockDataLen => { + // The block length is correct, but the data after it is incomplete. + rng.gen_range(4..last_block_data.len() - 1) + } + }; + data.extend_from_slice(&last_block_data[0..last_block_cutoff_pos]); + + // Importing should fail when it reaches the bad block; + // the correct ones should have been imported. 
+ let err = import_from_slice(&mut tf, &data).unwrap_err(); assert_eq!( - tf4.chainstate.get_block_id_tree_as_list().unwrap(), - tf1.chainstate.get_block_id_tree_as_list().unwrap(), + err, + ChainstateError::BootstrapError(BootstrapError::BadFileFormat) ); - } - // test importing all blocks with orphans with huge buffers - { - let mut tf5 = TestFramework::builder(&mut rng) - .with_chainstate_config( - ChainstateConfig::new() - .with_max_orphan_blocks(0) - .with_bootstrap_buffer_sizes((usize::MAX / 2, usize::MAX / 2)), - ) - .build(); + let expected_block_ids = BTreeSet::from_iter(orig_block_ids.into_iter().chain( + valid_blocks.iter().take(valid_blocks.len() - 1).map(|block| block.get_id()), + )); + let actual_block_ids = + BTreeSet::from_iter(tf.chainstate.get_block_id_tree_as_list().unwrap()); + assert_eq!(actual_block_ids, expected_block_ids); + } + }); +} - let reader: std::io::BufReader> = - std::io::BufReader::new(Box::new(bootstrap_with_orphans.as_slice())); +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn check_reckless_mode( + #[case] seed: Seed, + #[values(None, Some(false), Some(true))] enable_db_reckless_mode_in_ibd: Option, + #[values(false, true)] fail_on_read: bool, +) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); - tf5.chainstate.import_bootstrap_stream(reader).unwrap(); + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); + + let mut source_tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + let genesis_id = source_tf.genesis().get_id(); + let blocks_count = rng.gen_range(5..10); + source_tf.create_chain(&genesis_id.into(), blocks_count, &mut rng).unwrap(); + + let block_ids = source_tf.chainstate.get_mainchain_blocks_list().unwrap(); + let exported_blocks = export_to_vec(&source_tf, true); + + let mut dest_tf = TestFramework::builder(&mut rng) + .with_chain_config(chain_config.clone()) + 
.with_chainstate_config(ChainstateConfig { + enable_db_reckless_mode_in_ibd, + + max_db_commit_attempts: Default::default(), + max_orphan_blocks: Default::default(), + max_tip_age: Default::default(), + enable_heavy_checks: Default::default(), + allow_checkpoints_mismatch: Default::default(), + }) + .build(); + let use_reckless_mode = enable_db_reckless_mode_in_ibd.unwrap_or(false); + let dest_tf_store = dest_tf.storage.clone(); + + let result = import_from_slice_with_callback(&mut dest_tf, &exported_blocks, || { + assert_eq!(dest_tf_store.in_reckless_mode().unwrap(), use_reckless_mode); + + if fail_on_read { + Err(std::io::ErrorKind::Other.into()) + } else { + Ok(()) + } + }); + assert!(!dest_tf_store.in_reckless_mode().unwrap()); + + if fail_on_read { + assert!(result.is_err()); + } else { + assert!(result.is_ok()); assert_eq!( - tf5.chainstate.get_block_id_tree_as_list().unwrap(), - tf1.chainstate.get_block_id_tree_as_list().unwrap(), + dest_tf.chainstate.get_block_id_tree_as_list().unwrap(), + block_ids ); } }); diff --git a/chainstate/test-suite/src/tests/syncing_tests.rs b/chainstate/test-suite/src/tests/syncing_tests.rs index e2a0e295df..769c4a66c4 100644 --- a/chainstate/test-suite/src/tests/syncing_tests.rs +++ b/chainstate/test-suite/src/tests/syncing_tests.rs @@ -15,13 +15,14 @@ use std::{collections::BTreeMap, iter, num::NonZeroUsize, time::Duration}; +use chainstate_storage::BlockchainStorage; use rstest::rstest; use chainstate::{ BlockError, BlockSource, ChainstateConfig, ChainstateError, CheckBlockError, CheckBlockTransactionsError, }; -use chainstate_test_framework::TestFramework; +use chainstate_test_framework::{TestFramework, TestFrameworkBuilder}; use chainstate_types::{BlockStatus, BlockValidationStage, PropertyQueryError}; use common::{ chain::{ @@ -642,23 +643,29 @@ fn split_off_leading_known_headers(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn initial_block_download(#[case] seed: Seed) { +fn 
initial_block_download( + #[case] seed: Seed, + #[values(None, Some(false), Some(true))] enable_db_reckless_mode_in_ibd: Option, + #[values(false, true)] recreate_chainstate: bool, +) { utils::concurrency::model(move || { let mut rng = make_seedable_rng(seed); let mut tf = TestFramework::builder(&mut rng) .with_chainstate_config(ChainstateConfig { max_db_commit_attempts: Default::default(), + enable_db_reckless_mode_in_ibd, max_orphan_blocks: Default::default(), - min_max_bootstrap_import_buffer_sizes: Default::default(), max_tip_age: Duration::from_secs(1).into(), enable_heavy_checks: Some(true), allow_checkpoints_mismatch: Default::default(), }) .with_initial_time_since_genesis(2) .build(); + let use_reckless_mode = enable_db_reckless_mode_in_ibd.unwrap_or(false); // We are two seconds after genesis timestamp, so in the IBD state assert!(tf.chainstate.is_initial_block_download()); + assert_eq!(tf.storage.in_reckless_mode().unwrap(), use_reckless_mode); // Create a block with an "old" timestamp. let now = tf.current_time(); @@ -668,14 +675,25 @@ fn initial_block_download(#[case] seed: Seed) { .build_and_process(&mut rng) .unwrap(); assert!(tf.chainstate.is_initial_block_download()); + assert_eq!(tf.storage.in_reckless_mode().unwrap(), use_reckless_mode); // Create a block with fresh timestamp. tf.make_block_builder().build_and_process(&mut rng).unwrap(); assert!(!tf.chainstate.is_initial_block_download()); + assert!(!tf.storage.in_reckless_mode().unwrap()); // Add one more block. tf.make_block_builder().build_and_process(&mut rng).unwrap(); assert!(!tf.chainstate.is_initial_block_download()); + assert!(!tf.storage.in_reckless_mode().unwrap()); + + if recreate_chainstate { + // Create a new framework from the existing one; this will re-create Chainstate re-using + // the storage. The resulting chainstate should not be in ibd. 
+ tf = TestFrameworkBuilder::from_existing_framework(tf).build(); + assert!(!tf.chainstate.is_initial_block_download()); + assert!(!tf.storage.in_reckless_mode().unwrap()); + } // Check that receiving an "old" block does not revert `is_initial_block_download` back tf.progress_time_seconds_since_epoch(5); @@ -687,6 +705,7 @@ fn initial_block_download(#[case] seed: Seed) { tf.progress_time_seconds_since_epoch(10); tf.process_block(block, BlockSource::Local).unwrap(); assert!(!tf.chainstate.is_initial_block_download()); + assert!(!tf.storage.in_reckless_mode().unwrap()); }); } diff --git a/common/src/chain/config/mod.rs b/common/src/chain/config/mod.rs index c688a29c5a..b4811ff9ae 100644 --- a/common/src/chain/config/mod.rs +++ b/common/src/chain/config/mod.rs @@ -154,7 +154,7 @@ impl ChainType { } } - const fn magic_bytes(&self) -> MagicBytes { + pub const fn magic_bytes(&self) -> MagicBytes { match self { ChainType::Mainnet => Self::MAINNET_MAGIC_BYTES, ChainType::Testnet => Self::TESTNET_MAGIC_BYTES, diff --git a/common/src/chain/currency.rs b/common/src/chain/currency.rs index be3de7797e..344157295a 100644 --- a/common/src/chain/currency.rs +++ b/common/src/chain/currency.rs @@ -23,7 +23,7 @@ use crate::{ primitives::Amount, }; -// TODO: currently out RPC types are a bit of a mess and we need to revamp them. +// TODO: currently our RPC types are a bit of a mess and we need to revamp them. 
// The reason for having RPC types in the first place is that in RPC we'd like for certain things to have a more // human-readable representation, namely: // 1) Destinations, VRF public keys and ids of pools/delegations/tokens/orders should be bech32-encoded instead diff --git a/crypto/Cargo.toml b/crypto/Cargo.toml index 70d64d39d7..c4ea564d98 100644 --- a/crypto/Cargo.toml +++ b/crypto/Cargo.toml @@ -20,6 +20,7 @@ blake2.workspace = true chacha20poly1305.workspace = true generic-array.workspace = true hmac.workspace = true +lazy_static.workspace = true merlin = { workspace = true, default-features = false } num-derive.workspace = true num-traits.workspace = true diff --git a/crypto/src/key/secp256k1/mod.rs b/crypto/src/key/secp256k1/mod.rs index a4f36f0c65..df3ed41f52 100644 --- a/crypto/src/key/secp256k1/mod.rs +++ b/crypto/src/key/secp256k1/mod.rs @@ -179,16 +179,29 @@ impl Secp256k1PublicKey { signature: &secp256k1::schnorr::Signature, msg_hashed: &secp256k1::Message, ) -> bool { - let secp = secp256k1::Secp256k1::new(); - secp.verify_schnorr( - signature, - msg_hashed, - &self.pubkey_data.x_only_public_key().0, - ) - .is_ok() + // Note: `Secp256k1::new()` introduces extra overhead due to context randomization, + // so we re-use the same "verification_only" object to avoid it. + // Also note that the overhead is noticeable when bootstrapping the chainstate using + // the "reckless" mode, e.g. importing the first ~640k blocks on Testnet took 708s + // when re-using the verifier object and 756s when creating a new one every time + // (in the non-"reckless" mode, however, the I/O dominates and the difference is barely + // noticeable, as both cases take more than 90 minutes to complete). + VERIFIER + .verify_schnorr( + signature, + msg_hashed, + &self.pubkey_data.x_only_public_key().0, + ) + .is_ok() } } +lazy_static::lazy_static! 
{ + static ref VERIFIER: secp256k1::Secp256k1 = { + secp256k1::Secp256k1::verification_only() + }; +} + #[cfg(test)] mod test { use super::*; diff --git a/node-lib/src/config_files/chainstate/mod.rs b/node-lib/src/config_files/chainstate/mod.rs index f40b7552de..4c6336329a 100644 --- a/node-lib/src/config_files/chainstate/mod.rs +++ b/node-lib/src/config_files/chainstate/mod.rs @@ -25,13 +25,15 @@ pub struct ChainstateConfigFile { /// The number of maximum attempts to process a block. pub max_db_commit_attempts: Option, + /// Whether to use the "reckless" mode during the initial block download or bootstrapping. + /// + /// In "reckless" mode the db contents is not synced to disk on each commit, which increases + /// performance at the cost of a potential db corruption if the system crashes. + pub enable_db_reckless_mode_in_ibd: Option, + /// The maximum capacity of the orphan blocks pool. pub max_orphan_blocks: Option, - /// When importing bootstrap file, this controls the buffer sizes (min, max) - /// (see bootstrap import function for more information) - pub min_max_bootstrap_import_buffer_sizes: Option<(usize, usize)>, - /// A maximum tip age in seconds. 
/// /// The initial block download is finished if the difference between the current time and the @@ -49,8 +51,8 @@ impl From for ChainstateConfig { fn from(config_file: ChainstateConfigFile) -> Self { let ChainstateConfigFile { max_db_commit_attempts, + enable_db_reckless_mode_in_ibd, max_orphan_blocks, - min_max_bootstrap_import_buffer_sizes, max_tip_age, enable_heavy_checks, allow_checkpoints_mismatch, @@ -58,8 +60,8 @@ impl From for ChainstateConfig { ChainstateConfig { max_db_commit_attempts: max_db_commit_attempts.into(), + enable_db_reckless_mode_in_ibd, max_orphan_blocks: max_orphan_blocks.into(), - min_max_bootstrap_import_buffer_sizes: min_max_bootstrap_import_buffer_sizes.into(), max_tip_age: max_tip_age.map(Duration::from_secs).into(), enable_heavy_checks, allow_checkpoints_mismatch, diff --git a/node-lib/src/config_files/mod.rs b/node-lib/src/config_files/mod.rs index c5f49d26f3..3ed9d884ce 100644 --- a/node-lib/src/config_files/mod.rs +++ b/node-lib/src/config_files/mod.rs @@ -148,8 +148,8 @@ fn chainstate_config( let ChainstateConfigFile { max_db_commit_attempts, + enable_db_reckless_mode_in_ibd, max_orphan_blocks, - min_max_bootstrap_import_buffer_sizes, max_tip_age, enable_heavy_checks, allow_checkpoints_mismatch, @@ -157,6 +157,8 @@ fn chainstate_config( let storage_backend = options.storage_backend.clone().unwrap_or(storage_backend); let max_db_commit_attempts = options.max_db_commit_attempts.or(max_db_commit_attempts); + let enable_db_reckless_mode_in_ibd = + options.enable_db_reckless_mode_in_ibd.or(enable_db_reckless_mode_in_ibd); let max_orphan_blocks = options.max_orphan_blocks.or(max_orphan_blocks); let max_tip_age = options.max_tip_age.or(max_tip_age); let enable_heavy_checks = options.enable_chainstate_heavy_checks.or(enable_heavy_checks); @@ -165,8 +167,8 @@ fn chainstate_config( let chainstate_config = ChainstateConfigFile { max_db_commit_attempts, + enable_db_reckless_mode_in_ibd, max_orphan_blocks, - 
min_max_bootstrap_import_buffer_sizes, max_tip_age, enable_heavy_checks, allow_checkpoints_mismatch, diff --git a/node-lib/src/options.rs b/node-lib/src/options.rs index 55e3d5146a..ff6bf750f1 100644 --- a/node-lib/src/options.rs +++ b/node-lib/src/options.rs @@ -246,6 +246,22 @@ pub struct RunOptions { #[clap(long, value_name = "COUNT")] pub max_db_commit_attempts: Option, + /// Whether to switch to the "reckless" mode during the initial block download or bootstrapping. + /// + /// In the "reckless" mode the chainstate db contents is not synced to disk on each commit, which + /// increases performance at the cost of potential db corruption on a system crash. + /// + /// Once the initial block download or bootstrapping is complete, the node will automatically + /// switch to the normal mode of operation. + /// + /// Note: if a system crash does occur during syncing in the reckless mode and the chainstate + /// db gets corrupted, you will need to delete it manually and re-sync again. + /// Additionally, the corruption may not be detectable by the db engine, in which case you'll + /// end up having a malfunctioning node. Therefore, if you are using the reckless mode and + /// the system crashes in the process, always delete the db and re-sync, just in case. + #[clap(long, action = clap::ArgAction::SetTrue)] + pub enable_db_reckless_mode_in_ibd: Option, + /// The maximum capacity of the orphan blocks pool in blocks. 
#[clap(long, value_name = "COUNT")] pub max_orphan_blocks: Option, @@ -420,6 +436,7 @@ mod tests { node_type: Default::default(), mock_time: Default::default(), max_db_commit_attempts: Default::default(), + enable_db_reckless_mode_in_ibd: Default::default(), max_orphan_blocks: Default::default(), p2p_networking_enabled: Default::default(), p2p_bind_addresses: Default::default(), diff --git a/node-lib/tests/cli.rs b/node-lib/tests/cli.rs index 1d58b3a261..fe19bce8aa 100644 --- a/node-lib/tests/cli.rs +++ b/node-lib/tests/cli.rs @@ -98,6 +98,7 @@ fn read_config_override_values() { let blockprod_skip_ibd_check = true; let blockprod_use_current_time_if_non_pos = true; let max_db_commit_attempts = 1; + let enable_db_reckless_mode_in_ibd = true; let max_orphan_blocks = 2; let p2p_networking_enabled = false; let p2p_bind_addr = "127.0.0.1:44444".parse::().unwrap(); @@ -134,6 +135,7 @@ fn read_config_override_values() { node_type: Some(node_type), mock_time: None, max_db_commit_attempts: Some(max_db_commit_attempts), + enable_db_reckless_mode_in_ibd: Some(enable_db_reckless_mode_in_ibd), max_orphan_blocks: Some(max_orphan_blocks), p2p_networking_enabled: Some(p2p_networking_enabled), p2p_bind_addresses: Some(vec![p2p_bind_addr]), @@ -193,6 +195,15 @@ fn read_config_override_values() { config.chainstate.as_ref().unwrap().chainstate_config.max_db_commit_attempts, Some(max_db_commit_attempts) ); + assert_eq!( + config + .chainstate + .as_ref() + .unwrap() + .chainstate_config + .enable_db_reckless_mode_in_ibd, + Some(enable_db_reckless_mode_in_ibd) + ); assert_eq!( config.chainstate.as_ref().unwrap().chainstate_config.max_orphan_blocks, Some(max_orphan_blocks) diff --git a/p2p/src/protocol.rs b/p2p/src/protocol.rs index dab43882d5..34a167f055 100644 --- a/p2p/src/protocol.rs +++ b/p2p/src/protocol.rs @@ -113,6 +113,6 @@ pub struct ProtocolConfig { pub msg_max_locator_count: MaxLocatorSize, /// The maximum size of a p2p message in bytes. 
pub max_message_size: MaxMessageSize, - /// The maximum number of announcements (hashes) for which we haven't receive transactions. + /// The maximum number of announcements (hashes) for which we haven't received transactions. pub max_peer_tx_announcements: MaxPeerTxAnnouncements, } diff --git a/storage/core/src/adaptor/mod.rs b/storage/core/src/adaptor/mod.rs index e4c20eadd1..8a604c45d0 100644 --- a/storage/core/src/adaptor/mod.rs +++ b/storage/core/src/adaptor/mod.rs @@ -17,7 +17,7 @@ mod locking; -pub use locking::Locking; +pub use locking::{Locking, TransactionLockImpl}; use crate::backend; diff --git a/storage/core/src/lib.rs b/storage/core/src/lib.rs index a45aa33ad0..56c9211619 100644 --- a/storage/core/src/lib.rs +++ b/storage/core/src/lib.rs @@ -91,7 +91,9 @@ pub mod types; pub mod util; // Re-export some commonly used items -pub use backend::{Backend, BackendWithSendableTransactions, SharedBackend}; +pub use backend::{ + Backend, BackendImpl, BackendWithSendableTransactions, SharedBackend, SharedBackendImpl, +}; pub use error::Error; pub use types::{DbDesc, DbMapCount, DbMapDesc, DbMapId, DbMapsData}; diff --git a/storage/failing/src/backend.rs b/storage/failing/src/backend.rs index f8578bdbcf..b0e1a9425a 100644 --- a/storage/failing/src/backend.rs +++ b/storage/failing/src/backend.rs @@ -107,6 +107,14 @@ impl FailingImpl { transaction_failures: 0, } } + + pub fn inner(&self) -> &T { + &self.inner + } + + pub fn inner_mut(&mut self) -> &mut T { + &mut self.inner + } } impl Clone for FailingImpl { diff --git a/storage/failing/src/lib.rs b/storage/failing/src/lib.rs index 04b58c72b8..5b972ab7ea 100644 --- a/storage/failing/src/lib.rs +++ b/storage/failing/src/lib.rs @@ -18,6 +18,6 @@ mod backend; mod config; -pub use backend::Failing; +pub use backend::{Failing, FailingImpl}; pub use config::{builder::Builder, ErrorGeneration, ErrorSet, FailureConfig}; pub use storage_core::error::Recoverable as StorageError; diff --git a/storage/inmemory/src/lib.rs 
b/storage/inmemory/src/lib.rs index 4e83214733..15414305df 100644 --- a/storage/inmemory/src/lib.rs +++ b/storage/inmemory/src/lib.rs @@ -13,7 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use storage_core::{adaptor, backend, util::MapPrefixIter, Data, DbDesc, DbMapId, DbMapsData}; +use storage_core::{ + adaptor::{self, TransactionLockImpl}, + backend, + util::MapPrefixIter, + Data, DbDesc, DbMapId, DbMapsData, +}; use std::{borrow::Cow, collections::BTreeMap}; @@ -66,8 +71,13 @@ impl adaptor::Construct for StorageMaps { #[derive(Clone)] pub struct InMemory(adaptor::Locking); +// Note: it'd be better if this was defined simply as ` as backend::Backend>::Impl`, +// but then it'd be impossible to impl a trait for it due to an ancient Rust bug (e.g. see +// https://github.com/rust-lang/rust/issues/150143) +pub type InMemoryImpl = TransactionLockImpl; + impl backend::Backend for InMemory { - type Impl = as backend::Backend>::Impl; + type Impl = InMemoryImpl; fn open(self, desc: DbDesc) -> storage_core::Result { self.0.open(desc) diff --git a/storage/lmdb/Cargo.toml b/storage/lmdb/Cargo.toml index 2a7762d990..b6534a48a0 100644 --- a/storage/lmdb/Cargo.toml +++ b/storage/lmdb/Cargo.toml @@ -10,7 +10,8 @@ logging = { path = '../../logging' } storage-core = { path = '../core' } utils = { path = '../../utils' } -lmdb-mintlayer = { git = 'https://github.com/mintlayer/lmdb-rs-mintlayer.git', tag = 'v0.16.2' } +# Commit "Attempt to fix CI" from the branch "specify_nosync_per_tx" +lmdb-mintlayer = { git = 'https://github.com/mintlayer/lmdb-rs-mintlayer.git', rev = '4427a41fc32e8b7a30bdae42ce48414a6a5d6920' } [dev-dependencies] rstest.workspace = true diff --git a/storage/lmdb/src/lib.rs b/storage/lmdb/src/lib.rs index a9a564e606..56b08a5497 100644 --- a/storage/lmdb/src/lib.rs +++ b/storage/lmdb/src/lib.rs @@ -156,6 +156,11 @@ pub struct LmdbImpl { /// Schedule a database resize of the database map 
map_resize_scheduled: Arc, + + /// If true, LMDB transactions will be created with `no_sync` parameter set to true, + which means that filesystem sync will not be performed on commit. This increases performance, + but the db may become corrupted if a system crash occurs before the data is flushed to disk. + no_sync_on_commit: Arc, } impl LmdbImpl { @@ -209,6 +214,18 @@ impl LmdbImpl { } err } + + pub fn set_no_sync_on_commit(&self, set: bool) { + self.no_sync_on_commit.store(set, Ordering::Release); + } + + pub fn get_no_sync_on_commit(&self) -> bool { + self.no_sync_on_commit.load(Ordering::Acquire) + } + + pub fn force_sync(&self) -> storage_core::Result<()> { + self.env.sync(true).or_else(error::process_with_err) + } } impl utils::shallow_clone::ShallowClone for LmdbImpl { @@ -217,6 +234,7 @@ impl utils::shallow_clone::ShallowClone for LmdbImpl { env: self.env.shallow_clone(), dbs: self.dbs.shallow_clone(), map_resize_scheduled: self.map_resize_scheduled.shallow_clone(), + no_sync_on_commit: self.no_sync_on_commit.shallow_clone(), } } } @@ -237,8 +255,11 @@ impl backend::BackendImpl for LmdbImpl { impl backend::SharedBackendImpl for LmdbImpl { fn transaction_rw(&self, size: Option) -> storage_core::Result> { + let no_sync_on_commit = self.no_sync_on_commit.load(Ordering::Acquire); self.resize_if_resize_scheduled(); - self.start_transaction(|env| lmdb::Environment::begin_rw_txn(env, size)) + self.start_transaction(|env| { + lmdb::Environment::begin_rw_txn_generic(env, size, no_sync_on_commit, false) + }) } } @@ -343,6 +364,7 @@ impl backend::Backend for Lmdb { env: Arc::new(environment), dbs, map_resize_scheduled: Arc::new(AtomicBool::new(false)), + no_sync_on_commit: Arc::new(AtomicBool::new(false)), }) } } diff --git a/storage/src/database/mod.rs b/storage/src/database/mod.rs index 8c79322b41..d8318c7fe4 100644 --- a/storage/src/database/mod.rs +++ b/storage/src/database/mod.rs @@ -67,6 +67,14 @@ impl Storage { let _schema = std::marker::PhantomData;
Ok(Self { backend, _schema }) } + + pub fn backend_impl(&self) -> &B::Impl { + &self.backend + } + + pub fn backend_impl_mut(&mut self) -> &mut B::Impl { + &mut self.backend + } } impl Storage { diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 84b05604f9..2376616b33 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -100,7 +100,8 @@ pub mod schema; // Re-export user-facing items from core pub use order_preserving_value::OrderPreservingValue; pub use storage_core::{ - error, Backend, BackendWithSendableTransactions, Error, Result, SharedBackend, + error, Backend, BackendImpl, BackendWithSendableTransactions, DbDesc, Error, Result, + SharedBackend, SharedBackendImpl, }; // Re-export the interface types From 8516e428215eda452281018529cb39d7913500bd Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Thu, 12 Feb 2026 12:19:32 +0200 Subject: [PATCH 02/11] Pass shutdown flag to chainstate --- Cargo.lock | 1 + blockprod/src/lib.rs | 1 + chainstate/db-dumper/src/dumper_lib/utils.rs | 1 + chainstate/launcher/Cargo.toml | 2 + chainstate/launcher/src/lib.rs | 139 ++++++++++++------ chainstate/src/detail/bootstrap.rs | 14 +- chainstate/src/detail/median_time.rs | 2 + chainstate/src/detail/mod.rs | 17 ++- chainstate/src/detail/test.rs | 2 + .../chainstate_interface_impl_delegation.rs | 1 + chainstate/src/lib.rs | 5 + chainstate/src/rpc/mod.rs | 1 + .../test-framework/src/framework_builder.rs | 3 + .../test-suite/src/tests/processing_tests.rs | 1 + mempool/src/pool/tx_pool/tests/utils.rs | 1 + node-lib/src/runner.rs | 7 +- p2p/src/sync/tests/helpers/mod.rs | 1 + p2p/src/tests/helpers/test_node.rs | 1 + p2p/test-utils/src/lib.rs | 1 + p2p/tests/shutdown.rs | 1 + wallet/wallet-node-client/tests/call_tests.rs | 1 + wallet/wallet-test-node/src/lib.rs | 1 + 22 files changed, 152 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c0eb6f025..b09eeb5ac3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1402,6 +1402,7 @@ dependencies = [ 
"storage-inmemory", "storage-lmdb", "subsystem", + "tokio", "utils", ] diff --git a/blockprod/src/lib.rs b/blockprod/src/lib.rs index 6070e9525b..c5b47d8128 100644 --- a/blockprod/src/lib.rs +++ b/blockprod/src/lib.rs @@ -286,6 +286,7 @@ mod tests { DefaultTransactionVerificationStrategy::new(), None, time_getter.clone(), + None, ) .expect("Error initializing chainstate"); diff --git a/chainstate/db-dumper/src/dumper_lib/utils.rs b/chainstate/db-dumper/src/dumper_lib/utils.rs index 637ab6fd41..5d2f0e7b67 100644 --- a/chainstate/db-dumper/src/dumper_lib/utils.rs +++ b/chainstate/db-dumper/src/dumper_lib/utils.rs @@ -80,6 +80,7 @@ pub fn create_chainstate( DefaultTransactionVerificationStrategy::new(), None, Default::default(), + None, )?; Ok(chainstate) diff --git a/chainstate/launcher/Cargo.toml b/chainstate/launcher/Cargo.toml index 06108ce0c4..b59554915a 100644 --- a/chainstate/launcher/Cargo.toml +++ b/chainstate/launcher/Cargo.toml @@ -15,3 +15,5 @@ storage-inmemory = { path = "../../storage/inmemory" } storage-lmdb = { path = "../../storage/lmdb" } subsystem = { path = "../../subsystem" } utils = { path = "../../utils" } + +tokio.workspace = true diff --git a/chainstate/launcher/src/lib.rs b/chainstate/launcher/src/lib.rs index bd75d859af..f7f291a17e 100644 --- a/chainstate/launcher/src/lib.rs +++ b/chainstate/launcher/src/lib.rs @@ -20,9 +20,12 @@ mod storage_compatibility; use std::sync::Arc; +use tokio::sync::watch; + use chainstate::InitializationError; -use chainstate_storage::{BlockchainStorageBackend, Transactional}; +use chainstate_storage::{BlockchainStorage, BlockchainStorageBackend, Transactional}; use storage_lmdb::resize_callback::MapResizeCallback; +use utils::set_flag::SetFlag; // Some useful reexports pub use chainstate::{ @@ -32,68 +35,116 @@ pub use chainstate::{ pub use common::chain::ChainConfig; pub use config::{ChainstateLauncherConfig, StorageBackendConfig}; +pub use storage_compatibility::check_storage_compatibility; + /// Subdirectory 
under `datadir` where LMDB chainstate database is placed pub const SUBDIRECTORY_LMDB: &str = "chainstate-lmdb"; -pub use storage_compatibility::check_storage_compatibility; +pub type ChainstateMaker = Box< + dyn FnOnce( + /*shutdown_initiated_rx*/ Option>, + ) -> Result + + Send, +>; -fn make_chainstate_and_storage_impl( - storage_backend: B, +/// Return a closure that will make the chainstate given the `shutdown_initiated_rx` parameter. +/// +/// Note: the storage is created right away, so the corresponding errors (including compatibility +/// check failures) will cause `create_chainstate_maker` itself to fail and not the returned maker. +pub fn create_chainstate_maker( + datadir: &std::path::Path, chain_config: Arc, - chainstate_config: ChainstateConfig, -) -> Result { - let storage = chainstate_storage::Store::new(storage_backend, &chain_config) - .map_err(|e| Error::FailedToInitializeChainstate(e.into()))?; + config: ChainstateLauncherConfig, +) -> Result { + let ChainstateLauncherConfig { + storage_backend, + chainstate_config, + } = config; - let db_tx = storage - .transaction_ro() - .map_err(|e| Error::FailedToInitializeChainstate(e.into()))?; + let maker: ChainstateMaker = match storage_backend { + StorageBackendConfig::Lmdb => { + let storage = create_lmdb_storage(datadir, &chain_config)?; - check_storage_compatibility(&db_tx, chain_config.as_ref()) - .map_err(InitializationError::StorageCompatibilityCheckError)?; - drop(db_tx); + Box::new(|shutdown_initiated_rx| { + make_chainstate_impl( + storage, + chain_config, + chainstate_config, + shutdown_initiated_rx, + ) + }) + } + StorageBackendConfig::InMemory => { + let storage = create_inmemory_storage(&chain_config)?; + + Box::new(|shutdown_initiated_rx| { + make_chainstate_impl( + storage, + chain_config, + chainstate_config, + shutdown_initiated_rx, + ) + }) + } + }; + + Ok(maker) +} - let chainstate = chainstate::make_chainstate( +fn make_chainstate_impl( + storage: impl BlockchainStorage + Sync + 
'static, + chain_config: Arc, + chainstate_config: ChainstateConfig, + shutdown_initiated_rx: Option>, +) -> Result { + chainstate::make_chainstate( chain_config, chainstate_config, storage, DefaultTransactionVerificationStrategy::new(), None, Default::default(), - )?; - Ok(chainstate) + shutdown_initiated_rx, + ) } -/// Create chainstate together with its storage -pub fn make_chainstate( +fn create_lmdb_storage( datadir: &std::path::Path, - chain_config: Arc, - config: ChainstateLauncherConfig, -) -> Result { - let ChainstateLauncherConfig { - storage_backend, - chainstate_config, - } = config; - + chain_config: &ChainConfig, +) -> Result { let lmdb_resize_callback = MapResizeCallback::new(Box::new(|resize_info| { logging::log::info!("Lmdb resize happened: {:?}", resize_info) })); - // There is some code duplication because `make_chainstate_and_storage_impl` is called with - // a different set of generic parameters in each case. - match storage_backend { - StorageBackendConfig::Lmdb => { - let storage = storage_lmdb::Lmdb::new( - datadir.join(SUBDIRECTORY_LMDB), - Default::default(), - Default::default(), - lmdb_resize_callback, - ); - make_chainstate_and_storage_impl(storage, chain_config, chainstate_config) - } - StorageBackendConfig::InMemory => { - let storage = storage_inmemory::InMemory::new(); - make_chainstate_and_storage_impl(storage, chain_config, chainstate_config) - } - } + let backend = storage_lmdb::Lmdb::new( + datadir.join(SUBDIRECTORY_LMDB), + Default::default(), + Default::default(), + lmdb_resize_callback, + ); + + create_storage(backend, chain_config) +} + +fn create_inmemory_storage(chain_config: &ChainConfig) -> Result { + create_storage(storage_inmemory::InMemory::new(), chain_config) +} + +fn create_storage( + storage_backend: impl BlockchainStorageBackend + 'static, + chain_config: &ChainConfig, +) -> Result { + let storage = chainstate_storage::Store::new(storage_backend, chain_config) + .map_err(|e| 
Error::FailedToInitializeChainstate(e.into()))?; + + let db_tx = storage + .transaction_ro() + .map_err(|e| Error::FailedToInitializeChainstate(e.into()))?; + + check_storage_compatibility(&db_tx, chain_config) + .map_err(InitializationError::StorageCompatibilityCheckError)?; + + drop(db_tx); + + Ok(storage) } diff --git a/chainstate/src/detail/bootstrap.rs b/chainstate/src/detail/bootstrap.rs index d9221a6ca6..0058ac1fd7 100644 --- a/chainstate/src/detail/bootstrap.rs +++ b/chainstate/src/detail/bootstrap.rs @@ -18,7 +18,7 @@ use std::io::{BufRead, Write}; use strum::IntoEnumIterator; use chainstate_storage::BlockchainStorageRead; -use chainstate_types::{BlockIndex, PropertyQueryError}; +use chainstate_types::PropertyQueryError; use common::{ chain::{ config::{ChainType, MagicBytes}, @@ -110,13 +110,17 @@ impl From for BootstrapError { } } +/// Import blocks from the provided bootstrap stream. +/// +/// `process_block_func` must return true if importing should continue and false if it should +/// stop. 
pub fn import_bootstrap_stream( chain_config: &ChainConfig, file_reader: &mut std::io::BufReader, process_block_func: &mut P, ) -> Result<(), BootstrapError> where - P: FnMut(WithId) -> Result, BootstrapError>, + P: FnMut(WithId) -> Result, { let mut buffer_queue = Vec::::with_capacity(1024 * 1024); @@ -168,8 +172,12 @@ where ); let block = Block::decode_all(&mut buffer_queue.as_slice())?; - process_block_func(block.into())?; + let should_continue = process_block_func(block.into())?; buffer_queue.clear(); + + if !should_continue { + break; + } } Ok(()) diff --git a/chainstate/src/detail/median_time.rs b/chainstate/src/detail/median_time.rs index 7b28898ceb..69e183bd92 100644 --- a/chainstate/src/detail/median_time.rs +++ b/chainstate/src/detail/median_time.rs @@ -108,6 +108,7 @@ mod test { DefaultTransactionVerificationStrategy::new(), None, Default::default(), + None, ) .unwrap(); @@ -184,6 +185,7 @@ mod test { DefaultTransactionVerificationStrategy::new(), None, time_getter, + None, ) .unwrap(); diff --git a/chainstate/src/detail/mod.rs b/chainstate/src/detail/mod.rs index cce2f1a7ac..acc0ac87f7 100644 --- a/chainstate/src/detail/mod.rs +++ b/chainstate/src/detail/mod.rs @@ -31,6 +31,7 @@ use std::{collections::VecDeque, sync::Arc}; use itertools::Itertools; use thiserror::Error; +use tokio::sync::watch; use chainstate_storage::{ BlockchainStorage, BlockchainStorageRead, BlockchainStorageWrite, TransactionRw, Transactional, @@ -119,6 +120,7 @@ pub struct Chainstate { rpc_events: broadcaster::Broadcaster, time_getter: TimeGetter, is_initial_block_download_finished: SetFlag, + shutdown_initiated_rx: Option>, } #[derive(Copy, Clone, Eq, Debug, PartialEq)] @@ -185,6 +187,7 @@ impl Chainstate tx_verification_strategy: V, custom_orphan_error_hook: Option>, time_getter: TimeGetter, + shutdown_initiated_rx: Option>, ) -> Result { let best_block_id = { let db_tx = chainstate_storage.transaction_ro()?; @@ -198,6 +201,7 @@ impl Chainstate tx_verification_strategy, 
custom_orphan_error_hook, time_getter, + shutdown_initiated_rx, ); if best_block_id.is_none() { @@ -229,6 +233,7 @@ impl Chainstate tx_verification_strategy: V, custom_orphan_error_hook: Option>, time_getter: TimeGetter, + shutdown_initiated_rx: Option>, ) -> Self { let orphan_blocks = OrphansProxy::new(*chainstate_config.max_orphan_blocks); let subsystem_events = EventsController::new(); @@ -244,6 +249,7 @@ impl Chainstate rpc_events, time_getter, is_initial_block_download_finished: SetFlag::new(), + shutdown_initiated_rx, } } @@ -902,13 +908,18 @@ impl Chainstate let chain_config = Arc::clone(&self.chain_config); let mut block_processor = |block: WithId| -> Result<_, BootstrapError> { + // If chainstate is being shutdown, stop immediately. + if self.shutdown_initiated_rx.as_ref().is_some_and(|rx| rx.borrow().test()) { + return Ok(false); + } + let block_exists = self.make_db_tx_ro()?.block_exists(&block.get_id())?; if !block_exists { - Ok(self.process_block(block, BlockSource::Local)?) 
- } else { - Ok(None) + self.process_block(block, BlockSource::Local)?; } + + Ok(true) }; let result = import_bootstrap_stream(&chain_config, &mut reader, &mut block_processor); diff --git a/chainstate/src/detail/test.rs b/chainstate/src/detail/test.rs index 641d20ec84..b59b2f685e 100644 --- a/chainstate/src/detail/test.rs +++ b/chainstate/src/detail/test.rs @@ -57,6 +57,7 @@ fn process_genesis_block() { DefaultTransactionVerificationStrategy::new(), None, time_getter, + None, ); chainstate.process_genesis().unwrap(); @@ -103,6 +104,7 @@ fn empty_chainstate_no_genesis() { DefaultTransactionVerificationStrategy::new(), None, time_getter, + None, ); // This panics let _ = chainstate.query().unwrap().get_best_block_id(); diff --git a/chainstate/src/interface/chainstate_interface_impl_delegation.rs b/chainstate/src/interface/chainstate_interface_impl_delegation.rs index 11d2966b3c..395ee869c5 100644 --- a/chainstate/src/interface/chainstate_interface_impl_delegation.rs +++ b/chainstate/src/interface/chainstate_interface_impl_delegation.rs @@ -509,6 +509,7 @@ mod tests { DefaultTransactionVerificationStrategy::new(), None, TimeGetter::default(), + None, ) .unwrap(); diff --git a/chainstate/src/lib.rs b/chainstate/src/lib.rs index 9c7b5b1462..bf556e3bec 100644 --- a/chainstate/src/lib.rs +++ b/chainstate/src/lib.rs @@ -21,6 +21,8 @@ pub mod rpc; use std::sync::Arc; +use tokio::sync::watch; + use chainstate_interface::ChainstateInterface; use chainstate_interface_impl::ChainstateInterfaceImpl; use common::{ @@ -30,6 +32,7 @@ use common::{ }; use detail::Chainstate; use interface::chainstate_interface_impl; +use utils::set_flag::SetFlag; pub use crate::{ config::{ChainstateConfig, MaxTipAge}, @@ -107,6 +110,7 @@ pub fn make_chainstate( tx_verification_strategy: V, custom_orphan_error_hook: Option>, time_getter: TimeGetter, + shutdown_initiated_rx: Option>, ) -> Result where S: chainstate_storage::BlockchainStorage + Sync + 'static, @@ -119,6 +123,7 @@ where 
tx_verification_strategy, custom_orphan_error_hook, time_getter, + shutdown_initiated_rx, )?; let chainstate_interface = ChainstateInterfaceImpl::new(chainstate); Ok(Box::new(chainstate_interface)) diff --git a/chainstate/src/rpc/mod.rs b/chainstate/src/rpc/mod.rs index 449fa4ca83..37873c9b9a 100644 --- a/chainstate/src/rpc/mod.rs +++ b/chainstate/src/rpc/mod.rs @@ -589,6 +589,7 @@ mod test { DefaultTransactionVerificationStrategy::new(), None, Default::default(), + None, ) .unwrap(), ); diff --git a/chainstate/test-framework/src/framework_builder.rs b/chainstate/test-framework/src/framework_builder.rs index 18b3fce883..c4042a245a 100644 --- a/chainstate/test-framework/src/framework_builder.rs +++ b/chainstate/test-framework/src/framework_builder.rs @@ -208,6 +208,7 @@ impl TestFrameworkBuilder { DefaultTransactionVerificationStrategy::new(), self.custom_orphan_error_hook, time_getter.clone(), + None, ), TxVerificationStrategy::Disposable => chainstate::make_chainstate( Arc::new(self.chain_config), @@ -216,6 +217,7 @@ impl TestFrameworkBuilder { DisposableTransactionVerificationStrategy::new(), self.custom_orphan_error_hook, time_getter.clone(), + None, ), TxVerificationStrategy::Randomized(seed) => chainstate::make_chainstate( Arc::new(self.chain_config), @@ -224,6 +226,7 @@ impl TestFrameworkBuilder { RandomizedTransactionVerificationStrategy::new(seed), self.custom_orphan_error_hook, time_getter.clone(), + None, ), }?; diff --git a/chainstate/test-suite/src/tests/processing_tests.rs b/chainstate/test-suite/src/tests/processing_tests.rs index 67eecc756c..c409f41b42 100644 --- a/chainstate/test-suite/src/tests/processing_tests.rs +++ b/chainstate/test-suite/src/tests/processing_tests.rs @@ -1126,6 +1126,7 @@ fn mainnet_initialization() { DefaultTransactionVerificationStrategy::new(), None, Default::default(), + None, ) .unwrap(); } diff --git a/mempool/src/pool/tx_pool/tests/utils.rs b/mempool/src/pool/tx_pool/tests/utils.rs index abbe6d85fc..e6da783530 100644 --- 
a/mempool/src/pool/tx_pool/tests/utils.rs +++ b/mempool/src/pool/tx_pool/tests/utils.rs @@ -393,6 +393,7 @@ pub fn start_chainstate_with_config( DefaultTransactionVerificationStrategy::new(), None, Default::default(), + None, ) .unwrap(); start_chainstate(chainstate) diff --git a/node-lib/src/runner.rs b/node-lib/src/runner.rs index 8896d597e7..dd1d4de48c 100644 --- a/node-lib/src/runner.rs +++ b/node-lib/src/runner.rs @@ -85,12 +85,15 @@ async fn initialize( let mut manager = subsystem::Manager::new_with_config(manager_config); // Chainstate subsystem - let chainstate = chainstate_launcher::make_chainstate( + let chainstate_maker = chainstate_launcher::create_chainstate_maker( data_dir, Arc::clone(&chain_config), node_config.chainstate.unwrap_or_default().into(), )?; - let chainstate = manager.add_subsystem("chainstate", chainstate); + let chainstate = manager + .add_custom_subsystem("chainstate", async move |_, shutdown_initiated_rx| { + chainstate_maker(Some(shutdown_initiated_rx)) + }); // Mempool subsystem let mempool_init = MempoolInit::new( diff --git a/p2p/src/sync/tests/helpers/mod.rs b/p2p/src/sync/tests/helpers/mod.rs index 142ed310ba..d05584f128 100644 --- a/p2p/src/sync/tests/helpers/mod.rs +++ b/p2p/src/sync/tests/helpers/mod.rs @@ -600,6 +600,7 @@ impl TestNodeBuilder { DefaultTransactionVerificationStrategy::new(), None, time_getter.clone(), + None, ) .unwrap() }); diff --git a/p2p/src/tests/helpers/test_node.rs b/p2p/src/tests/helpers/test_node.rs index 179f03f2ca..7eabce09fe 100644 --- a/p2p/src/tests/helpers/test_node.rs +++ b/p2p/src/tests/helpers/test_node.rs @@ -137,6 +137,7 @@ where DefaultTransactionVerificationStrategy::new(), None, time_getter.get_time_getter(), + None, ) .unwrap(); diff --git a/p2p/test-utils/src/lib.rs b/p2p/test-utils/src/lib.rs index 7b998cba1d..bc1b882265 100644 --- a/p2p/test-utils/src/lib.rs +++ b/p2p/test-utils/src/lib.rs @@ -58,6 +58,7 @@ pub fn start_subsystems( DefaultTransactionVerificationStrategy::new(), 
None, time_getter.clone(), + None, ) .unwrap(); let mempool_config = MempoolConfig::new(); diff --git a/p2p/tests/shutdown.rs b/p2p/tests/shutdown.rs index 905ef005e1..2133663efc 100644 --- a/p2p/tests/shutdown.rs +++ b/p2p/tests/shutdown.rs @@ -49,6 +49,7 @@ async fn shutdown_timeout() { DefaultTransactionVerificationStrategy::new(), None, Default::default(), + None, ) .unwrap(); let chainstate = manager.add_subsystem("shutdown-test-chainstate", chainstate); diff --git a/wallet/wallet-node-client/tests/call_tests.rs b/wallet/wallet-node-client/tests/call_tests.rs index f4ce936d58..fce4570bba 100644 --- a/wallet/wallet-node-client/tests/call_tests.rs +++ b/wallet/wallet-node-client/tests/call_tests.rs @@ -81,6 +81,7 @@ pub async fn start_subsystems( DefaultTransactionVerificationStrategy::new(), None, Default::default(), + None, ) .unwrap(); diff --git a/wallet/wallet-test-node/src/lib.rs b/wallet/wallet-test-node/src/lib.rs index c39df02a97..b396e4aa12 100644 --- a/wallet/wallet-test-node/src/lib.rs +++ b/wallet/wallet-test-node/src/lib.rs @@ -203,6 +203,7 @@ pub async fn start_node(chain_config: Arc) -> (subsystem::Manager, DefaultTransactionVerificationStrategy::new(), None, Default::default(), + None, ) .unwrap(); From 21e9dd2f81fa014445ed9ce6f61122bc8aeec6d8 Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Fri, 13 Feb 2026 14:34:25 +0200 Subject: [PATCH 03/11] Implement DbRecklessModeCounter in chainstate --- chainstate/src/detail/mod.rs | 109 ++++++++++++++++++++++++----------- 1 file changed, 76 insertions(+), 33 deletions(-) diff --git a/chainstate/src/detail/mod.rs b/chainstate/src/detail/mod.rs index acc0ac87f7..8ab6e26c6a 100644 --- a/chainstate/src/detail/mod.rs +++ b/chainstate/src/detail/mod.rs @@ -59,7 +59,7 @@ use pos_accounting::{ use tx_verifier::transaction_verifier; use utils::{ const_value::ConstValue, - debug_assert_or_log, ensure, + debug_panic_or_log, ensure, eventhandler::{EventHandler, EventsController}, log_error, set_flag::SetFlag, 
@@ -121,6 +121,7 @@ pub struct Chainstate { time_getter: TimeGetter, is_initial_block_download_finished: SetFlag, shutdown_initiated_rx: Option>, + reckless_mode_counter: DbRecklessModeCounter, } #[derive(Copy, Clone, Eq, Debug, PartialEq)] @@ -215,13 +216,7 @@ impl Chainstate .map_err(|e| ChainstateError::FailedToInitializeChainstate(e.into()))?; let best_block_index = chainstate.make_db_tx_ro()?.get_best_block_index()?; - chainstate.update_initial_block_download_flag(best_block_index.as_ref())?; - - if !chainstate.is_initial_block_download_finished.test() - && chainstate.chainstate_config.db_reckless_mode_in_ibd_enabled() - { - chainstate.chainstate_storage.set_reckless_mode(true)?; - } + chainstate.update_initial_block_download_flag(best_block_index.as_ref(), true)?; Ok(chainstate) } @@ -250,6 +245,7 @@ impl Chainstate time_getter, is_initial_block_download_finished: SetFlag::new(), shutdown_initiated_rx, + reckless_mode_counter: DbRecklessModeCounter::new(), } } @@ -635,11 +631,9 @@ impl Chainstate best_block_index_after_orphans_opt.or(best_block_index_after_process_block_opt); if let Some(best_block_index) = &best_block_index_opt { - self.update_initial_block_download_flag(GenBlockIndexRef::Block(best_block_index))?; - self.broadcast_new_tip_event( - GenBlockIndexRef::Block(best_block_index), - self.is_initial_block_download(), - ); + let best_block_index_ref = GenBlockIndexRef::Block(best_block_index); + self.update_initial_block_download_flag(best_block_index_ref, false)?; + self.broadcast_new_tip_event(best_block_index_ref, self.is_initial_block_download()); let compact_target = match best_block_index.block_header().consensus_data() { ConsensusData::None => Compact::from(Uint256::ZERO), @@ -839,22 +833,26 @@ impl Chainstate fn update_initial_block_download_flag( &mut self, best_block_index: GenBlockIndexRef<'_>, + initial_update: bool, ) -> Result<(), chainstate_storage::Error> { if self.is_initial_block_download_finished.test() { return Ok(()); } let 
tip_timestamp = best_block_index.block_timestamp(); + let is_fresh_block = self.is_fresh_block(&tip_timestamp); - if self.is_fresh_block(&tip_timestamp) { + if !is_fresh_block { + if initial_update { + self.reckless_mode_counter + .inc(&self.chainstate_config, &self.chainstate_storage)?; + } + } else { self.is_initial_block_download_finished.set(); - if self.chainstate_storage.in_reckless_mode()? { - debug_assert_or_log!( - self.chainstate_config.db_reckless_mode_in_ibd_enabled(), - "The db was in a reckless mode even though it wasn't enabled" - ); - self.chainstate_storage.set_reckless_mode(false)?; + if !initial_update { + self.reckless_mode_counter + .dec(&self.chainstate_config, &self.chainstate_storage)?; } } @@ -899,12 +897,8 @@ impl Chainstate &mut self, mut reader: std::io::BufReader>, ) -> Result<(), BootstrapError> { - let enable_reckless_mode = !self.chainstate_storage.in_reckless_mode()? - && self.chainstate_config.db_reckless_mode_in_ibd_enabled(); - - if enable_reckless_mode { - self.chainstate_storage.set_reckless_mode(true)?; - } + self.reckless_mode_counter + .inc(&self.chainstate_config, &self.chainstate_storage)?; let chain_config = Arc::clone(&self.chain_config); let mut block_processor = |block: WithId| -> Result<_, BootstrapError> { @@ -924,13 +918,8 @@ impl Chainstate let result = import_bootstrap_stream(&chain_config, &mut reader, &mut block_processor); - // FIXME write to log when reckless mode is turned on and off? - - // FIXME check if the db can become corrupted silently - - if enable_reckless_mode { - self.chainstate_storage.set_reckless_mode(false)?; - } + self.reckless_mode_counter + .dec(&self.chainstate_config, &self.chainstate_storage)?; result } @@ -1000,5 +989,59 @@ where .map_err(|err| BlockError::IsBlockInMainChainQueryError(*block_id, err)) } +/// A counter that enters the reckless db mode (if enabled) when going from 0 to 1 and exits it +/// when going from 1 to 0, i.e. 
to exit the reckless mode one has to make as many decrements as +/// there were increments. +/// +/// The main purpose is to make sure the reckless mode stays enabled during bootstrapping for +/// the entire duration (i.e. for fresh blocks too). +struct DbRecklessModeCounter { + counter: u32, +} + +impl DbRecklessModeCounter { + fn new() -> Self { + Self { counter: 0 } + } + + fn inc( + &mut self, + config: &ChainstateConfig, + storage: &S, + ) -> Result<(), chainstate_storage::Error> { + if config.db_reckless_mode_in_ibd_enabled() { + if self.counter == 0 { + storage.set_reckless_mode(true)?; + } + + self.counter = self.counter.checked_add(1).unwrap_or_else(|| { + debug_panic_or_log!("Reckless mode counter overflow"); + self.counter + }); + } + + Ok(()) + } + + fn dec( + &mut self, + config: &ChainstateConfig, + storage: &S, + ) -> Result<(), chainstate_storage::Error> { + if config.db_reckless_mode_in_ibd_enabled() { + self.counter = self.counter.checked_sub(1).unwrap_or_else(|| { + debug_panic_or_log!("Reckless mode counter underflow"); + self.counter + }); + + if self.counter == 0 { + storage.set_reckless_mode(false)?; + } + } + + Ok(()) + } +} + #[cfg(test)] mod test; From d1b26f8d3515234a3c79bace42ba0108dddea13e Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Wed, 11 Feb 2026 12:21:50 +0200 Subject: [PATCH 04/11] Add import_bootstrap_file option to the node --- CHANGELOG.md | 3 ++ chainstate/src/detail/ban_score.rs | 1 + chainstate/src/lib.rs | 35 ++++++++++++++++++- chainstate/src/rpc/mod.rs | 26 ++++---------- mempool/src/error/ban_score.rs | 1 + node-daemon/src/main.rs | 14 ++++++++ node-gui/backend/src/lib.rs | 8 ++++- node-gui/src/main.rs | 42 +++++++++++++++++++--- node-lib/src/lib.rs | 2 +- node-lib/src/options.rs | 7 ++++ node-lib/src/runner.rs | 56 +++++++++++++++++++++++++++--- node-lib/tests/cli.rs | 2 ++ test/src/bin/test_node.rs | 13 ++++--- 13 files changed, 176 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
997c4b43b4..77f75749c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,9 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ - Node: - new options added to `node-daemon` and `node-gui`: + - `--import-bootstrap-file` - import a bootstrap file on start (previously bootstrapping + was only available via node RPC). + - `--enable-db-reckless-mode-in-ibd` - this enables the "reckless" mode of the chainstate database during initial block download or bootstrapping, which significantly increases its speed at the cost of a potential db corruption if the system crashes in the meantime. diff --git a/chainstate/src/detail/ban_score.rs b/chainstate/src/detail/ban_score.rs index 687d0719fa..2506b664e5 100644 --- a/chainstate/src/detail/ban_score.rs +++ b/chainstate/src/detail/ban_score.rs @@ -574,6 +574,7 @@ impl BanScore for ChainstateError { ChainstateError::FailedToReadProperty(_) => 0, ChainstateError::BootstrapError(_) => 0, ChainstateError::BlockInvalidatorError(_) => 0, + ChainstateError::IoError(_) => 0, } } } diff --git a/chainstate/src/lib.rs b/chainstate/src/lib.rs index bf556e3bec..e1d9a23fd0 100644 --- a/chainstate/src/lib.rs +++ b/chainstate/src/lib.rs @@ -19,7 +19,7 @@ mod interface; pub mod rpc; -use std::sync::Arc; +use std::{path::Path, sync::Arc}; use tokio::sync::watch; @@ -87,16 +87,24 @@ impl std::fmt::Display for ChainstateEventTracingWrapper<'_> { pub enum ChainstateError { #[error("Block storage error: `{0}`")] StorageError(#[from] chainstate_storage::Error), + #[error("Initialization error: {0}")] FailedToInitializeChainstate(#[from] InitializationError), + #[error("Block processing failed: `{0}`")] ProcessBlockError(#[from] BlockError), + #[error("Property read error: `{0}`")] FailedToReadProperty(#[from] PropertyQueryError), + #[error("Block import error {0}")] BootstrapError(#[from] BootstrapError), + #[error("Error invoking block invalidator: {0}")] BlockInvalidatorError(#[from] BlockInvalidatorError), + + 
#[error("I/O error: {0}")] + IoError(String), } pub type ChainstateSubsystem = Box; @@ -128,3 +136,28 @@ where let chainstate_interface = ChainstateInterfaceImpl::new(chainstate); Ok(Box::new(chainstate_interface)) } + +pub fn export_bootstrap_file( + chainsate: &CS, + file_path: &Path, + include_stale_blocks: bool, +) -> Result<(), ChainstateError> { + let file_obj = std::fs::File::create(file_path) + .map_err(|err| ChainstateError::IoError(err.to_string()))?; + let writer: std::io::BufWriter> = + std::io::BufWriter::new(Box::new(file_obj)); + + chainsate.export_bootstrap_stream(writer, include_stale_blocks) +} + +pub fn import_bootstrap_file( + chainsate: &mut CS, + file_path: &Path, +) -> Result<(), ChainstateError> { + let file_obj = + std::fs::File::open(file_path).map_err(|err| ChainstateError::IoError(err.to_string()))?; + let reader: std::io::BufReader> = + std::io::BufReader::new(Box::new(file_obj)); + + chainsate.import_bootstrap_stream(reader) +} diff --git a/chainstate/src/rpc/mod.rs b/chainstate/src/rpc/mod.rs index 37873c9b9a..0e3f87fe1b 100644 --- a/chainstate/src/rpc/mod.rs +++ b/chainstate/src/rpc/mod.rs @@ -17,13 +17,7 @@ mod types; -use std::{ - collections::BTreeMap, - convert::Infallible, - io::{Read, Write}, - num::NonZeroUsize, - sync::Arc, -}; +use std::{collections::BTreeMap, convert::Infallible, num::NonZeroUsize, sync::Arc}; use chainstate_types::BlockIndex; use common::{ @@ -41,8 +35,8 @@ use rpc::{subscription, RpcResult}; use serialization::hex_encoded::HexEncoded; use crate::{ - chainstate_interface::ChainstateInterface, Block, BlockSource, ChainInfo, ChainstateError, - GenBlock, + chainstate_interface::ChainstateInterface, export_bootstrap_file, import_bootstrap_file, Block, + BlockSource, ChainInfo, ChainstateError, GenBlock, }; use self::types::{block::RpcBlock, event::RpcEvent}; @@ -526,23 +520,17 @@ impl ChainstateRpcServer for super::ChainstateHandle { include_stale_blocks: bool, ) -> RpcResult<()> { // FIXME: test this function 
in functional tests - let file_obj: std::fs::File = rpc::handle_result(std::fs::File::create(file_path))?; - let writer: std::io::BufWriter> = - std::io::BufWriter::new(Box::new(file_obj)); - + let file_path = file_path.to_owned(); rpc::handle_result( - self.call(move |this| this.export_bootstrap_stream(writer, include_stale_blocks)) + self.call(move |this| export_bootstrap_file(this, &file_path, include_stale_blocks)) .await, ) } async fn import_bootstrap_file(&self, file_path: &std::path::Path) -> RpcResult<()> { // FIXME: test this function in functional tests - let file_obj: std::fs::File = rpc::handle_result(std::fs::File::open(file_path))?; - let reader: std::io::BufReader> = - std::io::BufReader::new(Box::new(file_obj)); - - rpc::handle_result(self.call_mut(move |this| this.import_bootstrap_stream(reader)).await) + let file_path = file_path.to_owned(); + rpc::handle_result(self.call_mut(move |this| import_bootstrap_file(this, &file_path)).await) } async fn info(&self) -> RpcResult { diff --git a/mempool/src/error/ban_score.rs b/mempool/src/error/ban_score.rs index 0414fa1d5f..6fab80f674 100644 --- a/mempool/src/error/ban_score.rs +++ b/mempool/src/error/ban_score.rs @@ -118,6 +118,7 @@ impl MempoolBanScore for ChainstateError { ChainstateError::FailedToReadProperty(_) => 0, ChainstateError::BootstrapError(_) => 0, ChainstateError::BlockInvalidatorError(_) => 0, + ChainstateError::IoError(_) => 0, } } } diff --git a/node-daemon/src/main.rs b/node-daemon/src/main.rs index 5866323c3f..ef8a17f743 100644 --- a/node-daemon/src/main.rs +++ b/node-daemon/src/main.rs @@ -26,6 +26,20 @@ pub async fn run() -> anyhow::Result<()> { node_lib::CLEAN_DATA_OPTION_LONG_NAME ); } + node_lib::NodeSetupResult::BootstrapFileImported(bootstrap_result) => { + match bootstrap_result { + Ok(()) => { + logging::log::info!( + "Node was bootstrapped successfully. 
Please restart the node without the `--{}` flag", + node_lib::IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME + ); + } + Err(err) => { + logging::log::error!("Node bootstrapping failed: {err}"); + std::process::exit(1) + } + } + } }; Ok(()) diff --git a/node-gui/backend/src/lib.rs b/node-gui/backend/src/lib.rs index 214b892def..85ce33ff6c 100644 --- a/node-gui/backend/src/lib.rs +++ b/node-gui/backend/src/lib.rs @@ -26,7 +26,7 @@ use std::{fmt::Debug, sync::Arc}; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; -use chainstate::ChainInfo; +use chainstate::{BootstrapError, ChainInfo}; use common::{ address::{Address, AddressError}, chain::{ChainConfig, Destination}, @@ -103,6 +103,7 @@ pub struct InitializedNode { pub enum NodeInitializationOutcome { BackendControls(BackendControls), DataDirCleanedUp, + BootstrapFileImported(Result<(), BootstrapError>), } pub async fn node_initialize( @@ -146,6 +147,11 @@ pub async fn node_initialize( node_lib::NodeSetupResult::DataDirCleanedUp => { return Ok(NodeInitializationOutcome::DataDirCleanedUp); } + node_lib::NodeSetupResult::BootstrapFileImported(bootstrap_result) => { + return Ok(NodeInitializationOutcome::BootstrapFileImported( + bootstrap_result, + )); + } }; let controller = node.controller().clone(); diff --git a/node-gui/src/main.rs b/node-gui/src/main.rs index fc3650439a..598b1dfd70 100644 --- a/node-gui/src/main.rs +++ b/node-gui/src/main.rs @@ -20,6 +20,7 @@ mod widgets; use std::{convert::identity, env}; +use chainstate::BootstrapError; use heck::ToUpperCamelCase as _; use iced::{ advanced::graphics::core::window, @@ -37,7 +38,7 @@ use node_gui_backend::{ node_initialize, BackendControls, BackendSender, InitNetwork, NodeInitializationOutcome, WalletMode, }; -use node_lib::CLEAN_DATA_OPTION_LONG_NAME; +use node_lib::{CLEAN_DATA_OPTION_LONG_NAME, IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME}; const COLD_WALLET_TOOLTIP_TEXT: &str = "Start the wallet in Cold mode without connecting to the network or 
any nodes. The Cold mode is made to run the wallet on an air-gapped machine without internet connection for storage of keys of high-value. For example, pool decommission keys."; @@ -103,6 +104,7 @@ impl From for GuiState { enum InitializationInterruptionReason { Failure(InitializationFailure), DataDirCleanedUp, + BootstrapFileImported(Result<(), BootstrapError>), } struct InitializationFailure { @@ -168,6 +170,9 @@ fn title(state: &GuiState) -> String { InitializationInterruptionReason::DataDirCleanedUp => { "Mintlayer data directory cleaned up".into() } + InitializationInterruptionReason::BootstrapFileImported(_) => { + "Mintlayer node bootstrapping complete".into() + } }, } } @@ -196,9 +201,11 @@ fn update(state: &mut GuiState, message: Message) -> Task { command: command.clone(), }; - if resolved_options.clean_data_option_set() { - // If "clean data" option was set, selecting the wallet mode makes no sense; - // since the option is ignored in the cold mode, we use the hot one. + let run_options = resolved_options.command.run_options(); + if run_options.clean_data || run_options.import_bootstrap_file.is_some() { + // If either the "clean data" or "import bootstrap file" option was set, + // selecting the wallet mode makes no sense. + // Since these options don't work in the cold mode, we use the hot one. 
update_on_mode_selected(state, WalletMode::Hot, resolved_options) } else { *state = GuiState::SelectWalletMode { resolved_options }; @@ -320,6 +327,12 @@ fn update(state: &mut GuiState, message: Message) -> Task { ); Task::none() } + NodeInitializationOutcome::BootstrapFileImported(bootstrap_result) => { + *state = GuiState::InitializationInterrupted( + InitializationInterruptionReason::BootstrapFileImported(bootstrap_result), + ); + Task::none() + } }, Message::Loaded(Err(e)) => { *state = InitializationFailure { @@ -552,6 +565,27 @@ fn view(state: &GuiState) -> Element<'_, Message> { .size(text_font_size) ] } + InitializationInterruptionReason::BootstrapFileImported(bootstrap_result) => { + match bootstrap_result { + Ok(()) => { + column![ + iced::widget::text("Bootstrap file has been imported") + .size(header_font_size), + iced::widget::text(format!( + "Please restart the node without the `--{}` flag", + IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME + )) + .size(text_font_size) + ] + } + Err(err) => { + column![ + iced::widget::text("Bootstrapping failed").size(header_font_size), + iced::widget::text(err.to_string()).size(text_font_size) + ] + } + } + } }; let error_box = error_box .extend([iced::widget::button(text("Close")).on_press(()).into()]) diff --git a/node-lib/src/lib.rs b/node-lib/src/lib.rs index eb74795d80..0f359dd855 100644 --- a/node-lib/src/lib.rs +++ b/node-lib/src/lib.rs @@ -32,7 +32,7 @@ pub use config_files::{ }; pub use options::{ Command, Options, OptionsWithResolvedCommand, RunOptions, TopLevelOptions, - CLEAN_DATA_OPTION_LONG_NAME, + CLEAN_DATA_OPTION_LONG_NAME, IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME, }; pub use runner::{setup, NodeSetupResult}; diff --git a/node-lib/src/options.rs b/node-lib/src/options.rs index ff6bf750f1..51a2a4309f 100644 --- a/node-lib/src/options.rs +++ b/node-lib/src/options.rs @@ -192,6 +192,7 @@ pub struct RegtestOptions { } pub const CLEAN_DATA_OPTION_LONG_NAME: &str = "clean-data"; +pub const 
IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME: &str = "import-bootstrap-file"; #[derive(Args, Clone, Debug, Default)] pub struct RunOptions { @@ -394,6 +395,11 @@ pub struct RunOptions { /// Path to a CSV file with custom checkpoints that must be used instead of the predefined ones. #[clap(long, hide = true)] pub custom_checkpoints_csv_file: Option, + + /// Start the node with networking disabled, import blocks from the specified bootstrap file + /// and exit. + #[clap(long = IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME, value_name = "FILE", conflicts_with("clean_data"))] + pub import_bootstrap_file: Option, } pub fn default_data_dir(chain_type: ChainType) -> PathBuf { @@ -466,6 +472,7 @@ mod tests { enable_chainstate_heavy_checks: Default::default(), allow_checkpoints_mismatch: Default::default(), custom_checkpoints_csv_file, + import_bootstrap_file: Default::default(), }; let make_cmd = |run_options| match chain_type { ChainType::Mainnet => Command::Mainnet(run_options), diff --git a/node-lib/src/runner.rs b/node-lib/src/runner.rs index dd1d4de48c..858e7cbc95 100644 --- a/node-lib/src/runner.rs +++ b/node-lib/src/runner.rs @@ -25,7 +25,10 @@ use anyhow::{anyhow, Context, Result}; use file_rotate::{compression::Compression, suffix::AppendCount, ContentLimit, FileRotate}; use blockprod::rpc::BlockProductionRpcServer; -use chainstate::{rpc::ChainstateRpcServer, ChainstateError, InitializationError}; +use chainstate::{ + import_bootstrap_file, rpc::ChainstateRpcServer, BootstrapError, ChainstateError, + InitializationError, +}; use chainstate_launcher::{ChainConfig, StorageBackendConfig}; use common::chain::config::{assert_no_ignore_consensus_in_chain_config, ChainType}; use logging::log; @@ -35,6 +38,7 @@ use rpc::rpc_creds::RpcCreds; use test_rpc_functions::{ empty::make_empty_rpc_test_functions, make_rpc_test_functions, rpc::RpcTestFunctionsRpcServer, }; +use utils::{shallow_clone::ShallowClone, tokio_spawn}; use crate::{ config_files::{NodeConfigFile, 
DEFAULT_P2P_NETWORKING_ENABLED, DEFAULT_RPC_ENABLED}, @@ -50,6 +54,7 @@ const DEFAULT_LOG_FILE_NAME: &str = "mintlayer.log"; pub enum NodeSetupResult { Node(Node), DataDirCleanedUp, + BootstrapFileImported(Result<(), BootstrapError>), } pub struct Node { @@ -235,7 +240,6 @@ async fn initialize( /// Processes options and potentially runs the node. pub async fn setup(options: OptionsWithResolvedCommand) -> Result { - let run_options = options.command.run_options(); let chain_config = options.command.create_chain_config()?; // Prepare data dir @@ -296,6 +300,15 @@ pub async fn setup(options: OptionsWithResolvedCommand) -> Result Result { + return Ok(NodeSetupResult::BootstrapFileImported(Ok(()))); + } + Err(err) => match err { + ChainstateError::BootstrapError(err) => { + return Ok(NodeSetupResult::BootstrapFileImported(Err(err))); + } + err @ (ChainstateError::StorageError(_) + | ChainstateError::FailedToInitializeChainstate(_) + | ChainstateError::ProcessBlockError(_) + | ChainstateError::FailedToReadProperty(_) + | ChainstateError::BlockInvalidatorError(_) + | ChainstateError::IoError(_)) => { + return Err(err.into()); + } + }, + } + } + + Ok(NodeSetupResult::Node(node)) } /// Creates an exclusive lock file in the specified directory. diff --git a/node-lib/tests/cli.rs b/node-lib/tests/cli.rs index fe19bce8aa..50aa0fb1f6 100644 --- a/node-lib/tests/cli.rs +++ b/node-lib/tests/cli.rs @@ -173,6 +173,8 @@ fn read_config_override_values() { // Note: there is no correspondence to this option inside NodeConfigFile; // the contents of the csv file will become part of ChainConfig. custom_checkpoints_csv_file: Some("foo.csv".to_owned().into()), + // Note: there is no correspondence to this option inside NodeConfigFile. 
+ import_bootstrap_file: Some("foo.bin".to_owned().into()), }; let config = NodeConfigFile::read(&chain_config, &config_path, &options).unwrap(); diff --git a/test/src/bin/test_node.rs b/test/src/bin/test_node.rs index d583dae28e..090aec3744 100644 --- a/test/src/bin/test_node.rs +++ b/test/src/bin/test_node.rs @@ -19,12 +19,17 @@ use std::env; async fn main() -> Result<(), node_lib::Error> { let opts = node_lib::Options::from_args(env::args_os()); let setup_result = node_lib::setup(opts.with_resolved_command()).await?; - let node = match setup_result { - node_lib::NodeSetupResult::Node(node) => node, + + match setup_result { + node_lib::NodeSetupResult::Node(node) => { + node.main().await; + } node_lib::NodeSetupResult::DataDirCleanedUp => { panic!("Data dir cleanup option was passed to the test node"); } - }; - node.main().await; + node_lib::NodeSetupResult::BootstrapFileImported(_) => { + logging::log::debug!("Node bootstrap option was passed to the test node"); + } + } Ok(()) } From 2a7cde6230ecdf2d6571e1b7ec20454a9a55834c Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Mon, 16 Feb 2026 16:59:19 +0200 Subject: [PATCH 05/11] Functional tests for node bootstrapping --- chainstate/src/rpc/mod.rs | 2 - test/functional/node_bootstrapping.py | 179 ++++++++++++++++++++++++++ test/functional/test_runner.py | 3 +- 3 files changed, 181 insertions(+), 3 deletions(-) create mode 100755 test/functional/node_bootstrapping.py diff --git a/chainstate/src/rpc/mod.rs b/chainstate/src/rpc/mod.rs index 0e3f87fe1b..92ec3d8443 100644 --- a/chainstate/src/rpc/mod.rs +++ b/chainstate/src/rpc/mod.rs @@ -519,7 +519,6 @@ impl ChainstateRpcServer for super::ChainstateHandle { file_path: &std::path::Path, include_stale_blocks: bool, ) -> RpcResult<()> { - // FIXME: test this function in functional tests let file_path = file_path.to_owned(); rpc::handle_result( self.call(move |this| export_bootstrap_file(this, &file_path, include_stale_blocks)) @@ -528,7 +527,6 @@ impl 
ChainstateRpcServer for super::ChainstateHandle { } async fn import_bootstrap_file(&self, file_path: &std::path::Path) -> RpcResult<()> { - // FIXME: test this function in functional tests let file_path = file_path.to_owned(); rpc::handle_result(self.call_mut(move |this| import_bootstrap_file(this, &file_path)).await) } diff --git a/test/functional/node_bootstrapping.py b/test/functional/node_bootstrapping.py new file mode 100755 index 0000000000..7676208ab5 --- /dev/null +++ b/test/functional/node_bootstrapping.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 +# Copyright (c) 2023 RBB S.r.l +# opensource@mintlayer.org +# SPDX-License-Identifier: MIT +# Licensed under the MIT License; +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://github.com/mintlayer/mintlayer-core/blob/master/LICENSE +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Test node bootstrapping, both via RPC and the command line option. 
+""" + +import os +import shutil +import tempfile + +from scalecodec.base import RuntimeConfiguration +from test_framework.authproxy import JSONRPCException +from test_framework.mintlayer import make_tx, reward_input +from test_framework.test_framework import BitcoinTestFramework +from test_framework.test_node import TestNode +from test_framework.util import assert_equal, assert_in + + +BLOCK_INPUT_DATA = RuntimeConfiguration().create_scale_object('GenerateBlockInputData').encode( + {"PoW": {"reward_destination": "AnyoneCanSpend"}} +).to_hex()[2:] + + +class NodeBootstrappingTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [ + ["--blockprod-min-peers-to-produce-blocks=0"] + ] + + # Create the specified number of blocks; the first block will contain a transaction that + # transfers the specified amount to AnyoneCanSpend. + def create_blocks(self, blocks_count: int, initial_transfer_amount_atoms: int) -> list[str]: + node = self.nodes[0] + tip_id = node.chainstate_best_block_id() + tx_outputs = [ + {'Transfer': [ + {'Coin': initial_transfer_amount_atoms}, + "AnyoneCanSpend" + ]} + ] + encoded_tx, _ = make_tx([reward_input(tip_id)], tx_outputs, 0) + + block_ids = [] + for i in range(blocks_count): + txs = [encoded_tx] if i == 0 else [] + + block = node.blockprod_generate_block( + BLOCK_INPUT_DATA, txs, [], "LeaveEmptySpace") + node.chainstate_submit_block(block) + + tip_height = node.chainstate_best_block_height() + assert_equal(tip_height, i + 1) + + block_id = node.chainstate_best_block_id() + block_ids.append(block_id) + + return block_ids + + def run_test(self): + node = self.nodes[0] + stale_blocks_count = 5 + blocks_count = 10 + + # Create the shorter chain (which will be the stale one) and invalidate it immediately, + # so that the next chain starts from generis as well. 
+ stale_block_ids = self.create_blocks(stale_blocks_count, 111) + node.chainstate_invalidate_block(stale_block_ids[0]) + # Sanity check + tip_height = node.chainstate_best_block_height() + assert_equal(tip_height, 0) + + # Create the longer chain. + block_ids = self.create_blocks(blocks_count, 222) + + # Reset the failure flags on the shorter chain, just in case. + node.chainstate_reset_block_failure_flags(stale_block_ids[0]) + + # Export all blocks to a file. + bootstrap_file_full = os.path.join( + self.options.tmpdir, 'bootstrap_full.bin') + node.chainstate_export_bootstrap_file( + file_path=bootstrap_file_full, include_stale_blocks=True) + + # Export mainchain blocks to a file. + bootstrap_file_mainchain = os.path.join( + self.options.tmpdir, 'bootstrap_mainchain.bin') + node.chainstate_export_bootstrap_file( + file_path=bootstrap_file_mainchain, include_stale_blocks=False) + + def assert_blocks_exist(node: TestNode, block_ids: list[str]): + for block_id in block_ids: + block = node.chainstate_get_block(block_id) + assert block is not None + + def assert_blocks_missing(node: TestNode, block_ids: list[str]): + for block_id in block_ids: + block = node.chainstate_get_block(block_id) + assert block is None + + # Reset the node's data directory. The previous directory is backed up, just in case + # it's needed for debugging later. + def reset_datadir(backup_suffix: str): + data_dir = self.get_node_datadir(0) + shutil.move(data_dir, data_dir + backup_suffix) + self.init_node_datadir(0) + + ############################################################################################ + # Test importing via RPC. 
+ + self.stop_node(0) + reset_datadir('.bak0') + self.start_node(0) + + assert_blocks_missing(node, block_ids) + assert_blocks_missing(node, stale_block_ids) + node.chainstate_import_bootstrap_file(file_path=bootstrap_file_full) + assert_blocks_exist(node, block_ids) + assert_blocks_exist(node, stale_block_ids) + + self.stop_node(0) + reset_datadir('.bak1') + self.start_node(0) + + assert_blocks_missing(node, block_ids) + assert_blocks_missing(node, stale_block_ids) + node.chainstate_import_bootstrap_file( + file_path=bootstrap_file_mainchain) + assert_blocks_exist(node, block_ids) + assert_blocks_missing(node, stale_block_ids) + + ############################################################################################ + # Test importing via the command line argument. + # Note that in this case the node will exit immediately after importing, so we can't + # call `self.start_node`, because it also waits for the RPC to come up. + # Instead, we call `start` on the node object directly and wait for the process to finish. + + self.stop_node(0) + reset_datadir('.bak2') + + # Import bootstrap_file_full + node.start(["--import-bootstrap-file", bootstrap_file_full]) + node_ret_code = node.process.wait(timeout=10) + assert node_ret_code == 0 + + # Start the node normally and check the blocks. + self.start_node(0) + assert_blocks_exist(node, block_ids) + assert_blocks_exist(node, stale_block_ids) + + self.stop_node(0) + reset_datadir('.bak3') + + # Import bootstrap_file_mainchain + node.start(["--import-bootstrap-file", bootstrap_file_mainchain]) + node_ret_code = node.process.wait(timeout=10) + assert node_ret_code == 0 + + # Start the node normally and check the blocks. 
+ self.start_node(0) + assert_blocks_exist(node, block_ids) + assert_blocks_missing(node, stale_block_ids) + + +if __name__ == '__main__': + NodeBootstrappingTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index b0a1065c3b..e33d11b27a 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -131,6 +131,7 @@ class UnicodeOnWindowsError(ValueError): 'blockprod_generate_pow_blocks.py', 'blockprod_ibd.py', 'blockprod_ibd_genesis.py', + 'node_bootstrapping.py', 'example_test.py', 'p2p_ping.py', 'p2p_syncing_test.py', @@ -599,7 +600,7 @@ def was_successful(self): def check_script_prefixes(): """Check that test scripts start with one of the allowed name prefixes.""" - good_prefixes_re = re.compile("^(blockprod|example|feature|framework|interface|mempool|p2p|rpc|wallet|tool)_") + good_prefixes_re = re.compile("^(blockprod|example|feature|framework|interface|mempool|node|p2p|rpc|wallet|tool)_") bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None] if bad_script_names: From 75dbf862f39c016c4b4120af9bd582288583d693 Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Fri, 13 Feb 2026 20:49:10 +0200 Subject: [PATCH 06/11] Bootstrapping: fail early if block size is too big; don't "succeed" if the app was shutdown during bootstrapping. --- chainstate/src/detail/bootstrap.rs | 16 ++++++++ chainstate/src/detail/mod.rs | 2 +- chainstate/test-suite/src/tests/bootstrap.rs | 39 ++++++++++++++++++++ 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/chainstate/src/detail/bootstrap.rs b/chainstate/src/detail/bootstrap.rs index 0058ac1fd7..b6f8336cd2 100644 --- a/chainstate/src/detail/bootstrap.rs +++ b/chainstate/src/detail/bootstrap.rs @@ -61,6 +61,12 @@ const FILE_HEADER_SIZE: usize = 24; type BlockSizeType = u32; +// Bootstrapping will fail if a block size value is bigger than this. 
+// Note: the main purpose of this is to avoid reading a potentially unlimited amount of data from +// a corrupted file. Since we don't have an exact limit on the encoded size of a block, we just use +// a value large enough so that any real block would fit. +const MAX_BLOCK_SIZE: usize = 10 * 1024 * 1024; + #[derive(thiserror::Error, Debug, Clone, Eq, PartialEq)] pub enum BootstrapError { #[error("Block storage error: `{0}`")] @@ -102,6 +108,12 @@ pub enum BootstrapError { "This seems to be some future version of bootstrap file that is not supported by this node" )] UnsupportedFutureFormatVersion, + + #[error("Block size too big: {0}")] + BlockSizeTooBig(usize), + + #[error("Bootstrapping was interrupted")] + Interrupted, } impl From for BootstrapError { @@ -163,6 +175,10 @@ where .expect("Buffer is known to have the correct size"), ) .try_into()?; + ensure!( + block_size <= MAX_BLOCK_SIZE, + BootstrapError::BlockSizeTooBig(block_size) + ); buffer_queue.clear(); fill_buffer(&mut buffer_queue, file_reader, block_size)?; diff --git a/chainstate/src/detail/mod.rs b/chainstate/src/detail/mod.rs index 8ab6e26c6a..79ed332e7b 100644 --- a/chainstate/src/detail/mod.rs +++ b/chainstate/src/detail/mod.rs @@ -904,7 +904,7 @@ impl Chainstate let mut block_processor = |block: WithId| -> Result<_, BootstrapError> { // If chainstate is being shutdown, stop immediately. if self.shutdown_initiated_rx.as_ref().is_some_and(|rx| rx.borrow().test()) { - return Ok(false); + return Err(BootstrapError::Interrupted); } let block_exists = self.make_db_tx_ro()?.block_exists(&block.get_id())?; diff --git a/chainstate/test-suite/src/tests/bootstrap.rs b/chainstate/test-suite/src/tests/bootstrap.rs index 310f0b169d..eeb6ea0a93 100644 --- a/chainstate/test-suite/src/tests/bootstrap.rs +++ b/chainstate/test-suite/src/tests/bootstrap.rs @@ -668,6 +668,45 @@ fn bad_v0_file(#[case] seed: Seed) { }); } +// The recorded block size is too big. 
+#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn block_size_too_big(#[case] seed: Seed) { + utils::concurrency::model(move || { + let mut rng = make_seedable_rng(seed); + + let chain_type = ChainType::iter().choose(&mut rng).unwrap(); + let chain_config = make_chain_config(chain_type); + + let valid_blocks_count = rng.gen_range(0..5); + let valid_blocks = gen_blocks(chain_config.clone(), valid_blocks_count, &mut rng); + + let mut tf = + TestFramework::builder(&mut rng).with_chain_config(chain_config.clone()).build(); + + let mut data = make_header_data(&chain_config, 0, (valid_blocks_count + 1) as u64); + + for valid_block in valid_blocks { + let encoded_block = valid_block.encode(); + append_block_data_for_v0(&mut data, &encoded_block); + } + + let bad_block_size: u32 = 100 * 1024 * 1024; + data.extend_from_slice(&bad_block_size.to_le_bytes()); + + // Importing should fail with this specific error, meaning that we didn't attempt to + // actually read the data. + let err = import_from_slice(&mut tf, &data).unwrap_err(); + assert_eq!( + err, + ChainstateError::BootstrapError(BootstrapError::BlockSizeTooBig( + bad_block_size as usize + )) + ); + }); +} + #[rstest] #[trace] #[case(Seed::from_entropy())] From 859e6f76d32b21df7be412ee729deac6a94ca0e9 Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Mon, 16 Feb 2026 16:59:30 +0200 Subject: [PATCH 07/11] Extract bootstrapping logic `from node_lib::setup`; move node-daemon's `run` to node-lib to re-use it in test node. Remove `--import-bootstrap-file` option from node-gui. Make `--clean-data` and `--import-bootstrap-file` top-level options. Update bootstrapping functional tests to also check bad file import. 
--- CHANGELOG.md | 16 +-- Cargo.lock | 1 + chainstate/src/lib.rs | 2 +- node-daemon/Cargo.toml | 1 + node-daemon/src/main.rs | 38 +------ node-gui/backend/src/lib.rs | 16 +-- node-gui/src/main.rs | 50 ++-------- node-lib/src/lib.rs | 4 +- node-lib/src/node_daemon_runner.rs | 90 +++++++++++++++++ node-lib/src/options.rs | 69 ++++++++----- node-lib/src/runner.rs | 54 +++------- node-lib/tests/cli.rs | 4 - test/functional/node_bootstrapping.py | 38 ++++++- test/functional/test_framework/test_node.py | 105 +++++++++++--------- test/src/bin/test_node.rs | 23 ++--- 15 files changed, 290 insertions(+), 221 deletions(-) create mode 100644 node-lib/src/node_daemon_runner.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 77f75749c8..1b204aa6d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,13 +31,15 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ relevant to this wallet. - Node: - - new options added to `node-daemon` and `node-gui`: - - `--import-bootstrap-file` - import a bootstrap file on start (previously bootstrapping - was only available via node RPC). + - new options added: + - `node-daemon`: + - `--import-bootstrap-file` - import a bootstrap file on start (previously bootstrapping + was only available via node RPC). - - `--enable-db-reckless-mode-in-ibd` - this enables the "reckless" mode of the chainstate - database during initial block download or bootstrapping, which significantly increases - its speed at the cost of a potential db corruption if the system crashes in the meantime. + `node-daemon` and `node-gui`: + - `--enable-db-reckless-mode-in-ibd` - this enables the "reckless" mode of the chainstate + database during initial block download or bootstrapping, which significantly increases + its speed at the cost of a potential db corruption if the system crashes in the meantime. 
### Changed - Wallet RPC: @@ -59,6 +61,8 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ - Node: - The now redundant option `min_max_bootstrap_import_buffer_sizes` was removed from chainstate config. + - The option `--clean-data` is now a top-level option, i.e. instead of writing + `node-daemon testnet --clean-data` you have to write `node-daemon --clean-data testnet`. - Node bootstrapping: - The format of the bootstrap file was changed and the legacy format is no longer supported. diff --git a/Cargo.lock b/Cargo.lock index b09eeb5ac3..ebc447f204 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5356,6 +5356,7 @@ version = "1.2.0" dependencies = [ "anyhow", "assert_cmd", + "chainstate", "expect-test", "logging", "node-lib", diff --git a/chainstate/src/lib.rs b/chainstate/src/lib.rs index e1d9a23fd0..38b7e943a3 100644 --- a/chainstate/src/lib.rs +++ b/chainstate/src/lib.rs @@ -97,7 +97,7 @@ pub enum ChainstateError { #[error("Property read error: `{0}`")] FailedToReadProperty(#[from] PropertyQueryError), - #[error("Block import error {0}")] + #[error("Bootstrap error {0}")] BootstrapError(#[from] BootstrapError), #[error("Error invoking block invalidator: {0}")] diff --git a/node-daemon/Cargo.toml b/node-daemon/Cargo.toml index 05d89fadbc..a81550d5fe 100644 --- a/node-daemon/Cargo.toml +++ b/node-daemon/Cargo.toml @@ -9,6 +9,7 @@ authors = ["Samer Afach ", "Ben Marsh anyhow::Result<()> { - let opts = node_lib::Options::from_args(std::env::args_os()); - let setup_result = node_lib::setup(opts.with_resolved_command()).await?; - match setup_result { - node_lib::NodeSetupResult::Node(node) => { - node.main().await; - } - node_lib::NodeSetupResult::DataDirCleanedUp => { - logging::log::info!( - "Data directory is now clean. 
Please restart the node without the `--{}` flag", - node_lib::CLEAN_DATA_OPTION_LONG_NAME - ); - } - node_lib::NodeSetupResult::BootstrapFileImported(bootstrap_result) => { - match bootstrap_result { - Ok(()) => { - logging::log::info!( - "Node was bootstrapped successfully. Please restart the node without the `--{}` flag", - node_lib::IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME - ); - } - Err(err) => { - logging::log::error!("Node bootstrapping failed: {err}"); - std::process::exit(1) - } - } - } - }; - - Ok(()) -} +use node_lib::run_node_daemon; #[tokio::main] async fn main() { @@ -53,8 +23,10 @@ async fn main() { std::env::set_var("RUST_LOG", "info"); } - run().await.unwrap_or_else(|err| { + let exit_code = run_node_daemon().await.unwrap_or_else(|err| { eprintln!("Mintlayer node launch failed: {err:?}"); std::process::exit(1) - }) + }); + + std::process::exit(exit_code.0) } diff --git a/node-gui/backend/src/lib.rs b/node-gui/backend/src/lib.rs index 85ce33ff6c..4b9ca1a377 100644 --- a/node-gui/backend/src/lib.rs +++ b/node-gui/backend/src/lib.rs @@ -24,9 +24,10 @@ mod wallet_events; use std::{fmt::Debug, sync::Arc}; +use anyhow::anyhow; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; -use chainstate::{BootstrapError, ChainInfo}; +use chainstate::ChainInfo; use common::{ address::{Address, AddressError}, chain::{ChainConfig, Destination}, @@ -103,7 +104,6 @@ pub struct InitializedNode { pub enum NodeInitializationOutcome { BackendControls(BackendControls), DataDirCleanedUp, - BootstrapFileImported(Result<(), BootstrapError>), } pub async fn node_initialize( @@ -143,15 +143,15 @@ pub async fn node_initialize( WalletMode::Hot => { let setup_result = node_lib::setup(opts).await?; let node = match setup_result { - node_lib::NodeSetupResult::Node(node) => node, + node_lib::NodeSetupResult::RunNode(node) => node, + node_lib::NodeSetupResult::Bootstrap(_, _) => { + return Err(anyhow!( + "Bootstrapping is not supported by node-gui, use 
node-daemon instead" + )); + } node_lib::NodeSetupResult::DataDirCleanedUp => { return Ok(NodeInitializationOutcome::DataDirCleanedUp); } - node_lib::NodeSetupResult::BootstrapFileImported(bootstrap_result) => { - return Ok(NodeInitializationOutcome::BootstrapFileImported( - bootstrap_result, - )); - } }; let controller = node.controller().clone(); diff --git a/node-gui/src/main.rs b/node-gui/src/main.rs index 598b1dfd70..2eba0ce2a2 100644 --- a/node-gui/src/main.rs +++ b/node-gui/src/main.rs @@ -20,7 +20,6 @@ mod widgets; use std::{convert::identity, env}; -use chainstate::BootstrapError; use heck::ToUpperCamelCase as _; use iced::{ advanced::graphics::core::window, @@ -38,7 +37,7 @@ use node_gui_backend::{ node_initialize, BackendControls, BackendSender, InitNetwork, NodeInitializationOutcome, WalletMode, }; -use node_lib::{CLEAN_DATA_OPTION_LONG_NAME, IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME}; +use node_lib::{NodeType, CLEAN_DATA_OPTION_LONG_NAME}; const COLD_WALLET_TOOLTIP_TEXT: &str = "Start the wallet in Cold mode without connecting to the network or any nodes. The Cold mode is made to run the wallet on an air-gapped machine without internet connection for storage of keys of high-value. 
For example, pool decommission keys."; @@ -54,7 +53,7 @@ const INITIAL_MAIN_WINDOW_HEIGHT: f32 = 768.0; pub fn main() -> iced::Result { utils::rust_backtrace::enable(); - let initial_opts = node_lib::Options::from_args(std::env::args_os()); + let initial_opts = node_lib::Options::from_args(std::env::args_os(), NodeType::NodeGui); iced::application(title, update, view) .executor::() @@ -104,7 +103,6 @@ impl From for GuiState { enum InitializationInterruptionReason { Failure(InitializationFailure), DataDirCleanedUp, - BootstrapFileImported(Result<(), BootstrapError>), } struct InitializationFailure { @@ -170,9 +168,6 @@ fn title(state: &GuiState) -> String { InitializationInterruptionReason::DataDirCleanedUp => { "Mintlayer data directory cleaned up".into() } - InitializationInterruptionReason::BootstrapFileImported(_) => { - "Mintlayer node bootstrapping complete".into() - } }, } } @@ -201,11 +196,15 @@ fn update(state: &mut GuiState, message: Message) -> Task { command: command.clone(), }; - let run_options = resolved_options.command.run_options(); - if run_options.clean_data || run_options.import_bootstrap_file.is_some() { - // If either the "clean data" or "import bootstrap file" option was set, - // selecting the wallet mode makes no sense. - // Since these options don't work in the cold mode, we use the hot one. + if resolved_options.top_level.clean_data + || resolved_options.top_level.import_bootstrap_file.is_some() + { + // If "clean data" option was set, selecting the wallet mode makes no sense; + // since the option is ignored in the cold mode, we use the hot one. + // Same applies to "import bootstrap file" - even though we don't support + // bootstrapping in node-gui at this moment, we still need to show the + // corresponding error, and asking for wallet mode before doing it makes + // no sense either. 
update_on_mode_selected(state, WalletMode::Hot, resolved_options) } else { *state = GuiState::SelectWalletMode { resolved_options }; @@ -327,12 +326,6 @@ fn update(state: &mut GuiState, message: Message) -> Task { ); Task::none() } - NodeInitializationOutcome::BootstrapFileImported(bootstrap_result) => { - *state = GuiState::InitializationInterrupted( - InitializationInterruptionReason::BootstrapFileImported(bootstrap_result), - ); - Task::none() - } }, Message::Loaded(Err(e)) => { *state = InitializationFailure { @@ -565,27 +558,6 @@ fn view(state: &GuiState) -> Element<'_, Message> { .size(text_font_size) ] } - InitializationInterruptionReason::BootstrapFileImported(bootstrap_result) => { - match bootstrap_result { - Ok(()) => { - column![ - iced::widget::text("Bootstrap file has been imported") - .size(header_font_size), - iced::widget::text(format!( - "Please restart the node without the `--{}` flag", - IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME - )) - .size(text_font_size) - ] - } - Err(err) => { - column![ - iced::widget::text("Bootstrapping failed").size(header_font_size), - iced::widget::text(err.to_string()).size(text_font_size) - ] - } - } - } }; let error_box = error_box .extend([iced::widget::button(text("Close")).on_press(()).into()]) diff --git a/node-lib/src/lib.rs b/node-lib/src/lib.rs index 0f359dd855..2491f1775e 100644 --- a/node-lib/src/lib.rs +++ b/node-lib/src/lib.rs @@ -19,6 +19,7 @@ mod checkpoints_from_file; mod config_files; mod mock_time; pub mod node_controller; +mod node_daemon_runner; mod options; pub mod rpc; mod runner; @@ -30,11 +31,12 @@ use chainstate_launcher::ChainConfig; pub use config_files::{ NodeConfigFile, NodeTypeConfigFile, RpcConfigFile, StorageBackendConfigFile, }; +pub use node_daemon_runner::{run_node_daemon, ExitCode}; pub use options::{ Command, Options, OptionsWithResolvedCommand, RunOptions, TopLevelOptions, CLEAN_DATA_OPTION_LONG_NAME, IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME, }; -pub use runner::{setup, 
NodeSetupResult}; +pub use runner::{setup, NodeSetupResult, NodeType}; pub fn default_rpc_config(chain_config: &ChainConfig) -> RpcConfigFile { RpcConfigFile::with_run_options( diff --git a/node-lib/src/node_daemon_runner.rs b/node-lib/src/node_daemon_runner.rs new file mode 100644 index 0000000000..9e100c60b3 --- /dev/null +++ b/node-lib/src/node_daemon_runner.rs @@ -0,0 +1,90 @@ +// Copyright (c) 2021-2026 RBB S.r.l +// opensource@mintlayer.org +// SPDX-License-Identifier: MIT +// Licensed under the MIT License; +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://github.com/mintlayer/mintlayer-core/blob/master/LICENSE +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use anyhow::Result; + +use chainstate::{import_bootstrap_file, BootstrapError, ChainstateError}; +use utils::{shallow_clone::ShallowClone as _, tokio_spawn}; + +use crate::{ + setup, NodeSetupResult, NodeType, Options, CLEAN_DATA_OPTION_LONG_NAME, + IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME, +}; + +pub struct ExitCode(pub i32); + +pub async fn run_node_daemon() -> anyhow::Result { + let opts = Options::from_args(std::env::args_os(), NodeType::NodeDaemon); + let setup_result = setup(opts.with_resolved_command()).await?; + match setup_result { + NodeSetupResult::RunNode(node) => { + node.main().await; + } + NodeSetupResult::Bootstrap(node, bootstrap_file) => { + let chainstate_handle = node.controller().chainstate.shallow_clone(); + let shutdown_trigger = node.controller().shutdown_trigger.clone(); + let node_main_join_handle = tokio_spawn(node.main(), "Node main"); + + let bootstrap_result = chainstate_handle + .call_mut(move |cs| import_bootstrap_file(cs, &bootstrap_file)) + .await; + + shutdown_trigger.initiate(); + node_main_join_handle.await?; + + match extract_bootstrap_error(bootstrap_result?)? { + Ok(()) => { + logging::log::info!( + "Node was bootstrapped successfully. Please restart the node without the `--{}` flag", + IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME + ); + } + Err(err) => { + // Note: we don't return it as a generic error, because bootstrapping will + // likely fail due to user's mistake rather than node's malfunction, so we + // don't want for e.g. the stack trace to be printed (which would happen with + // a "normal" error when backtrace is enabled). + logging::log::error!("Node bootstrapping failed: {err}"); + return Ok(ExitCode(1)); + } + } + } + NodeSetupResult::DataDirCleanedUp => { + logging::log::info!( + "Data directory is now clean. 
Please restart the node without the `--{}` flag", + CLEAN_DATA_OPTION_LONG_NAME + ); + } + }; + + Ok(ExitCode(0)) +} + +fn extract_bootstrap_error( + bootstrap_result: Result<(), ChainstateError>, +) -> Result, ChainstateError> { + match bootstrap_result { + Ok(()) => Ok(Ok(())), + Err(err) => match err { + ChainstateError::BootstrapError(err) => Ok(Err(err)), + err @ (ChainstateError::StorageError(_) + | ChainstateError::FailedToInitializeChainstate(_) + | ChainstateError::ProcessBlockError(_) + | ChainstateError::FailedToReadProperty(_) + | ChainstateError::BlockInvalidatorError(_) + | ChainstateError::IoError(_)) => Err(err), + }, + } +} diff --git a/node-lib/src/options.rs b/node-lib/src/options.rs index 51a2a4309f..08245c1e4a 100644 --- a/node-lib/src/options.rs +++ b/node-lib/src/options.rs @@ -22,7 +22,7 @@ use std::{ path::{Path, PathBuf}, }; -use clap::{Args, Parser, Subcommand}; +use clap::{Args, CommandFactory, FromArgMatches, Parser, Subcommand}; use chainstate_launcher::ChainConfig; use common::chain::{ @@ -40,10 +40,15 @@ use utils_networking::IpOrSocketAddress; use crate::{ checkpoints_from_file::read_checkpoints_from_csv_file, config_files::{NodeTypeConfigFile, StorageBackendConfigFile}, + NodeType, }; const CONFIG_NAME: &str = "config.toml"; +pub const CLEAN_DATA_OPTION_LONG_NAME: &str = "clean-data"; +pub const IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME: &str = "import-bootstrap-file"; +pub const IMPORT_BOOTSTRAP_FILE_OPTION_ID: &str = "import_bootstrap_file"; + /// Mintlayer node executable // Note: this struct is shared between different node executables, namely, node-daemon and node-gui, // so the env vars for both of them will use the same infix; this is intended. @@ -60,8 +65,26 @@ pub struct Options { impl Options { /// Constructs an instance by parsing the given arguments. 
- pub fn from_args + Clone>(args: impl IntoIterator) -> Self { - Parser::parse_from(args) + pub fn from_args + Clone>( + args: impl IntoIterator, + node_type: NodeType, + ) -> Self { + // Here we do the same that `Parser::parse_from` does, but also hide the bootstrapping + // option for node-gui. + + let mut cmd = ::command(); + + match node_type { + NodeType::NodeDaemon => {} + NodeType::NodeGui => { + cmd = cmd.mut_arg(IMPORT_BOOTSTRAP_FILE_OPTION_ID, |arg| arg.hide(true)); + } + } + + let mut matches = cmd.try_get_matches_from_mut(args).unwrap_or_else(|err| err.exit()); + + ::from_arg_matches_mut(&mut matches) + .unwrap_or_else(|err| err.format(&mut cmd).exit()) } /// Returns a different representation of `self`, where `command` is no longer optional. @@ -82,7 +105,7 @@ pub struct OptionsWithResolvedCommand { impl OptionsWithResolvedCommand { pub fn clean_data_option_set(&self) -> bool { - self.command.run_options().clean_data + self.top_level.clean_data } pub fn log_to_file_option_set(&self) -> bool { @@ -112,6 +135,25 @@ pub struct TopLevelOptions { /// By default, the option is enabled for node-gui and disabled for node-daemon. #[clap(long, action = clap::ArgAction::Set)] pub log_to_file: Option, + + /// If specified, the application will clean the data directory and exit immediately. + #[clap( + long = CLEAN_DATA_OPTION_LONG_NAME, + short, + action = clap::ArgAction::SetTrue, + default_value_t = false + )] + pub clean_data: bool, + + /// Start the node with networking disabled, import blocks from the specified bootstrap file + /// and exit. 
+ #[clap( + long = IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME, + id = IMPORT_BOOTSTRAP_FILE_OPTION_ID, + value_name = "FILE", + conflicts_with("clean_data") + )] + pub import_bootstrap_file: Option, } impl TopLevelOptions { @@ -191,20 +233,8 @@ pub struct RegtestOptions { pub chain_config: ChainConfigOptions, } -pub const CLEAN_DATA_OPTION_LONG_NAME: &str = "clean-data"; -pub const IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME: &str = "import-bootstrap-file"; - #[derive(Args, Clone, Debug, Default)] pub struct RunOptions { - /// If specified, the application will clean the data directory and exit immediately. - #[clap( - long = CLEAN_DATA_OPTION_LONG_NAME, - short, - action = clap::ArgAction::SetTrue, - default_value_t = false - )] - pub clean_data: bool, - /// Minimum number of connected peers to enable block production. #[clap(long, value_name = "COUNT")] pub blockprod_min_peers_to_produce_blocks: Option, @@ -395,11 +425,6 @@ pub struct RunOptions { /// Path to a CSV file with custom checkpoints that must be used instead of the predefined ones. #[clap(long, hide = true)] pub custom_checkpoints_csv_file: Option, - - /// Start the node with networking disabled, import blocks from the specified bootstrap file - /// and exit. 
- #[clap(long = IMPORT_BOOTSTRAP_FILE_OPTION_LONG_NAME, value_name = "FILE", conflicts_with("clean_data"))] - pub import_bootstrap_file: Option, } pub fn default_data_dir(chain_type: ChainType) -> PathBuf { @@ -434,7 +459,6 @@ mod tests { }; let make_run_options = |custom_checkpoints_csv_file: Option| RunOptions { - clean_data: Default::default(), blockprod_min_peers_to_produce_blocks: Default::default(), blockprod_skip_ibd_check: Default::default(), blockprod_use_current_time_if_non_pos: Default::default(), @@ -472,7 +496,6 @@ mod tests { enable_chainstate_heavy_checks: Default::default(), allow_checkpoints_mismatch: Default::default(), custom_checkpoints_csv_file, - import_bootstrap_file: Default::default(), }; let make_cmd = |run_options| match chain_type { ChainType::Mainnet => Command::Mainnet(run_options), diff --git a/node-lib/src/runner.rs b/node-lib/src/runner.rs index 858e7cbc95..13e93189ee 100644 --- a/node-lib/src/runner.rs +++ b/node-lib/src/runner.rs @@ -25,10 +25,7 @@ use anyhow::{anyhow, Context, Result}; use file_rotate::{compression::Compression, suffix::AppendCount, ContentLimit, FileRotate}; use blockprod::rpc::BlockProductionRpcServer; -use chainstate::{ - import_bootstrap_file, rpc::ChainstateRpcServer, BootstrapError, ChainstateError, - InitializationError, -}; +use chainstate::{rpc::ChainstateRpcServer, ChainstateError, InitializationError}; use chainstate_launcher::{ChainConfig, StorageBackendConfig}; use common::chain::config::{assert_no_ignore_consensus_in_chain_config, ChainType}; use logging::log; @@ -38,7 +35,6 @@ use rpc::rpc_creds::RpcCreds; use test_rpc_functions::{ empty::make_empty_rpc_test_functions, make_rpc_test_functions, rpc::RpcTestFunctionsRpcServer, }; -use utils::{shallow_clone::ShallowClone, tokio_spawn}; use crate::{ config_files::{NodeConfigFile, DEFAULT_P2P_NETWORKING_ENABLED, DEFAULT_RPC_ENABLED}, @@ -51,10 +47,15 @@ use crate::{ const LOCK_FILE_NAME: &str = ".lock"; const DEFAULT_LOG_FILE_NAME: &str = 
"mintlayer.log"; +pub enum NodeType { + NodeDaemon, + NodeGui, +} + pub enum NodeSetupResult { - Node(Node), + RunNode(Node), + Bootstrap(Node, PathBuf), DataDirCleanedUp, - BootstrapFileImported(Result<(), BootstrapError>), } pub struct Node { @@ -302,7 +303,7 @@ pub async fn setup(options: OptionsWithResolvedCommand) -> Result Result { - return Ok(NodeSetupResult::BootstrapFileImported(Ok(()))); - } - Err(err) => match err { - ChainstateError::BootstrapError(err) => { - return Ok(NodeSetupResult::BootstrapFileImported(Err(err))); - } - err @ (ChainstateError::StorageError(_) - | ChainstateError::FailedToInitializeChainstate(_) - | ChainstateError::ProcessBlockError(_) - | ChainstateError::FailedToReadProperty(_) - | ChainstateError::BlockInvalidatorError(_) - | ChainstateError::IoError(_)) => { - return Err(err.into()); - } - }, - } + if let Some(file_path) = &options.top_level.import_bootstrap_file { + Ok(NodeSetupResult::Bootstrap(node, file_path.clone())) + } else { + Ok(NodeSetupResult::RunNode(node)) } - - Ok(NodeSetupResult::Node(node)) } /// Creates an exclusive lock file in the specified directory. diff --git a/node-lib/tests/cli.rs b/node-lib/tests/cli.rs index 50aa0fb1f6..580df0fcfc 100644 --- a/node-lib/tests/cli.rs +++ b/node-lib/tests/cli.rs @@ -164,8 +164,6 @@ fn read_config_override_values() { rpc_username: Some(rpc_username.to_owned()), rpc_password: Some(rpc_password.to_owned()), rpc_cookie_file: Some(rpc_cookie_file.to_owned()), - // Note: there is no correspondence to this option inside NodeConfigFile. - clean_data: false, min_tx_relay_fee_rate: Some(min_tx_relay_fee_rate), force_allow_run_as_root_outer: Default::default(), enable_chainstate_heavy_checks: Some(enable_chainstate_heavy_checks), @@ -173,8 +171,6 @@ fn read_config_override_values() { // Note: there is no correspondence to this option inside NodeConfigFile; // the contents of the csv file will become part of ChainConfig. 
custom_checkpoints_csv_file: Some("foo.csv".to_owned().into()), - // Note: there is no correspondence to this option inside NodeConfigFile. - import_bootstrap_file: Some("foo.bin".to_owned().into()), }; let config = NodeConfigFile::read(&chain_config, &config_path, &options).unwrap(); diff --git a/test/functional/node_bootstrapping.py b/test/functional/node_bootstrapping.py index 7676208ab5..20d6c02c44 100755 --- a/test/functional/node_bootstrapping.py +++ b/test/functional/node_bootstrapping.py @@ -75,6 +75,10 @@ def run_test(self): stale_blocks_count = 5 blocks_count = 10 + bogus_file = os.path.join(self.options.tmpdir, 'bogus.bin') + with open(bogus_file, 'w') as f: + f.write('bogus data') + # Create the shorter chain (which will be the stale one) and invalidate it immediately, # so that the next chain starts from generis as well. stale_block_ids = self.create_blocks(stale_blocks_count, 111) @@ -142,6 +146,16 @@ def reset_datadir(backup_suffix: str): assert_blocks_exist(node, block_ids) assert_blocks_missing(node, stale_block_ids) + # Try importing bogus_file; the RPC call should fail and the chainstate should remain + # in the same state. + try: + node.chainstate_import_bootstrap_file(file_path=bogus_file) + except JSONRPCException as e: + assert_in("Bootstrap error", str(e)) + + assert_blocks_exist(node, block_ids) + assert_blocks_missing(node, stale_block_ids) + ############################################################################################ # Test importing via the command line argument. 
# Note that in this case the node will exit immediately after importing, so we can't @@ -152,7 +166,7 @@ def reset_datadir(backup_suffix: str): reset_datadir('.bak2') # Import bootstrap_file_full - node.start(["--import-bootstrap-file", bootstrap_file_full]) + node.start(extra_top_level_args=["--import-bootstrap-file", bootstrap_file_full]) node_ret_code = node.process.wait(timeout=10) assert node_ret_code == 0 @@ -165,7 +179,7 @@ def reset_datadir(backup_suffix: str): reset_datadir('.bak3') # Import bootstrap_file_mainchain - node.start(["--import-bootstrap-file", bootstrap_file_mainchain]) + node.start(extra_top_level_args=["--import-bootstrap-file", bootstrap_file_mainchain]) node_ret_code = node.process.wait(timeout=10) assert node_ret_code == 0 @@ -174,6 +188,26 @@ def reset_datadir(backup_suffix: str): assert_blocks_exist(node, block_ids) assert_blocks_missing(node, stale_block_ids) + self.stop_node(0) + + # Try importing bogus_file; the node should exit with non-zero code and the chainstate + # should remain in the same state. + # Just in case, specify stderr_file explicitly, to make sure it's not re-used from + # previous runs (though `start` would create a new file anyway). + stderr_file = tempfile.NamedTemporaryFile(dir=node.stderr_dir, mode='w+', delete=False) + node.start(extra_top_level_args=["--import-bootstrap-file", bogus_file], stderr=stderr_file) + node_ret_code = node.process.wait(timeout=10) + assert node_ret_code != 0 + + stderr_file.seek(0) + stderr = stderr_file.read() + assert_in("Node bootstrapping failed", stderr) + + # Start the node normally and check the blocks. 
+ self.start_node(0) + assert_blocks_exist(node, block_ids) + assert_blocks_missing(node, stale_block_ids) + if __name__ == '__main__': NodeBootstrappingTest().main() diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index b1d12e386b..cbc134d920 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -89,36 +89,7 @@ def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitc # For those callers that need more flexibility, they can just set the args property directly. self.extra_args = extra_args self.version = version - - # Calculate RPC address to be passed into the command line - rpc_bind_address = rpc_addr(self.index) - p2p_bind_address = p2p_url(self.index) - - # For functional tests, we don't want to fail when blocks are too old - max_tip_age = 60 * 60 * 24 * 365 * 100 - - # Note: some tests depend on this value being relatively small; they'll fail if the - # current default value is used. 
- min_tx_relay_fee_rate = 1000 - - self.args = [ - self.binary, - f"--datadir={datadir}", - "regtest", - f"--rpc-bind-address={rpc_bind_address}", - f"--p2p-bind-addresses={p2p_bind_address}", - f"--max-tip-age={max_tip_age}", - f"--min-tx-relay-fee-rate={min_tx_relay_fee_rate}", - ] - if use_valgrind: - default_suppressions_file = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - "..", "..", "..", "contrib", "valgrind.supp") - suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", - default_suppressions_file) - self.args = ["valgrind", "--suppressions={}".format(suppressions_file), - "--gen-suppressions=all", "--exit-on-first-error=yes", - "--error-exitcode=1", "--quiet"] + self.args + self.use_valgrind = use_valgrind self.cli = TestNodeCLI(bitcoin_cli, self.datadir) self.use_cli = use_cli @@ -130,7 +101,7 @@ def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitc self.rpc = None self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) - self.cleanup_on_exit = True # Whether to kill the node when this object goes away + self.cleanup_on_exit = True # Whether to kill the node when this object goes away # Cache perf subprocesses here by their data output filename. 
self.perf_subprocesses = {} @@ -139,19 +110,19 @@ def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitc AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key']) PRIV_KEYS = [ - # address , privkey - AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), - AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), - AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), - AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), - AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), - AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), - AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), - AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), - AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), - AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), - AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), - AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), + # address , privkey + AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), + AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), + AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), + 
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), + AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), + AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), + AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), + AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), + AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), + AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), + AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), + AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ') ] def get_deterministic_priv_key(self): @@ -185,7 +156,47 @@ def __getattr__(self, name): assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection") return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name) - def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): + def _make_args(self, extra_top_level_args: list[str] | None = None): + # Calculate RPC address to be passed into the command line + rpc_bind_address = rpc_addr(self.index) + p2p_bind_address = p2p_url(self.index) + + # For functional tests, we don't want to fail when blocks are too old + max_tip_age = 60 * 60 * 24 * 365 * 100 + + # Note: some tests depend on this value being relatively small; they'll fail if the + # current default value is used. 
+ min_tx_relay_fee_rate = 1000 + + args = [ + self.binary, + f"--datadir={self.datadir}", + ] + + if extra_top_level_args is not None: + args += extra_top_level_args + + args += [ + "regtest", + f"--rpc-bind-address={rpc_bind_address}", + f"--p2p-bind-addresses={p2p_bind_address}", + f"--max-tip-age={max_tip_age}", + f"--min-tx-relay-fee-rate={min_tx_relay_fee_rate}", + ] + + if self.use_valgrind: + default_suppressions_file = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "..", "..", "..", "contrib", "valgrind.supp") + suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", + default_suppressions_file) + args = ["valgrind", "--suppressions={}".format(suppressions_file), + "--gen-suppressions=all", "--exit-on-first-error=yes", + "--error-exitcode=1", "--quiet"] + args + + return args + + def start(self, extra_args=None, extra_top_level_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args @@ -214,8 +225,10 @@ def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs # Create a dictionary to store the arguments arg_dict = {} + args = self._make_args(extra_top_level_args) + # Process args list - for arg in self.args: + for arg in args: if '=' in arg: name, value = arg.split('=') arg_dict[name] = value diff --git a/test/src/bin/test_node.rs b/test/src/bin/test_node.rs index 090aec3744..5f8eabbf6e 100644 --- a/test/src/bin/test_node.rs +++ b/test/src/bin/test_node.rs @@ -13,23 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::env; - #[tokio::main] -async fn main() -> Result<(), node_lib::Error> { - let opts = node_lib::Options::from_args(env::args_os()); - let setup_result = node_lib::setup(opts.with_resolved_command()).await?; +async fn main() { + let exit_code = node_lib::run_node_daemon().await.unwrap_or_else(|err| { + eprintln!("Mintlayer node launch failed: {err:?}"); + std::process::exit(1) + }); - match setup_result { - node_lib::NodeSetupResult::Node(node) => { - node.main().await; - } - node_lib::NodeSetupResult::DataDirCleanedUp => { - panic!("Data dir cleanup option was passed to the test node"); - } - node_lib::NodeSetupResult::BootstrapFileImported(_) => { - logging::log::debug!("Node bootstrap option was passed to the test node"); - } - } - Ok(()) + std::process::exit(exit_code.0) } From 18d748350fb17aaf19cc54643fa1a1336f072892 Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Tue, 17 Feb 2026 12:07:46 +0200 Subject: [PATCH 08/11] Hide chainstate-storage's dependency on lmdb behind a feature to fix compilation of wasm bindings --- Cargo.lock | 1 - chainstate/Cargo.toml | 2 +- chainstate/storage/Cargo.toml | 4 ++-- chainstate/storage/src/internal/blockchain_storage.rs | 6 ++++++ 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ebc447f204..822d66c1d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1425,7 +1425,6 @@ dependencies = [ "rstest", "serialization", "storage", - "storage-failing", "storage-inmemory", "storage-lmdb", "test-utils", diff --git a/chainstate/Cargo.toml b/chainstate/Cargo.toml index 64cfe15c40..93d014d72a 100644 --- a/chainstate/Cargo.toml +++ b/chainstate/Cargo.toml @@ -7,7 +7,7 @@ rust-version.workspace = true [dependencies] accounting = { path = "../accounting" } -chainstate-storage = { path = "./storage", features = ["mock"] } +chainstate-storage = { path = "./storage", features = ["lmdb", "mock"] } chainstate-types = { path = "./types" } common = { path = "../common" } consensus = { path = 
"../consensus" } diff --git a/chainstate/storage/Cargo.toml b/chainstate/storage/Cargo.toml index cc36f0f421..06b35cb5d7 100644 --- a/chainstate/storage/Cargo.toml +++ b/chainstate/storage/Cargo.toml @@ -17,9 +17,8 @@ pos-accounting = { path = "../../pos-accounting" } randomness = { path = "../../randomness" } serialization = { path = "../../serialization" } storage = { path = "../../storage" } -storage-failing = { path = "../../storage/failing" } storage-inmemory = { path = "../../storage/inmemory" } -storage-lmdb = { path = "../../storage/lmdb" } +storage-lmdb = { path = "../../storage/lmdb", optional = true } tokens-accounting = { path = "../../tokens-accounting" } utils = { path = "../../utils" } utxo = { path = "../../utxo" } @@ -40,6 +39,7 @@ num-traits.workspace = true [features] mock = [ "mockall" ] expensive-reads = [] +lmdb = ["dep:storage-lmdb"] [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(loom)'] } diff --git a/chainstate/storage/src/internal/blockchain_storage.rs b/chainstate/storage/src/internal/blockchain_storage.rs index 9d98e27040..eb92ab8342 100644 --- a/chainstate/storage/src/internal/blockchain_storage.rs +++ b/chainstate/storage/src/internal/blockchain_storage.rs @@ -14,6 +14,7 @@ // limitations under the License. use storage_inmemory::{InMemory, InMemoryImpl}; +#[cfg(feature = "lmdb")] use storage_lmdb::{Lmdb, LmdbImpl}; use crate::Transactional; @@ -34,6 +35,10 @@ pub trait BlockchainStorage: for<'tx> Transactional<'tx> + Send { fn in_reckless_mode(&self) -> crate::Result; } +// Note: since chainstate-storage is also used (indirectly) by wasm-bindings, we can't have +// an unconditional dependency on LMDB here, because it won't compile for WASM. So we put it +// behind a feature. +#[cfg(feature = "lmdb")] impl BlockchainStorageBackendImpl for LmdbImpl { fn set_reckless_mode(&self, set: bool) -> crate::Result<()> { // When switching the reckless mode off, do a sync immediately. 
@@ -53,6 +58,7 @@ impl BlockchainStorageBackendImpl for LmdbImpl { } } +#[cfg(feature = "lmdb")] impl BlockchainStorageBackend for Lmdb { type ImplHelper = LmdbImpl; } From 3a042868b21161654e1ee28e620ca9af6f0cd601 Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Tue, 17 Feb 2026 14:04:58 +0200 Subject: [PATCH 09/11] Some cleanup; fix spurious failures in bootstrapping tests --- CHANGELOG.md | 2 + Cargo.lock | 1 - chainstate/src/detail/bootstrap.rs | 43 ++++++++---------- chainstate/src/detail/mod.rs | 4 ++ chainstate/src/lib.rs | 4 +- chainstate/test-suite/src/tests/bootstrap.rs | 46 +++++++++++--------- node-daemon/Cargo.toml | 1 - node-lib/src/node_daemon_runner.rs | 8 ++-- storage/failing/src/backend.rs | 8 ---- storage/src/database/mod.rs | 4 -- test/functional/node_bootstrapping.py | 19 ++++++-- 11 files changed, 73 insertions(+), 67 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b204aa6d6..bac98e1ff8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,8 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ - Importing a bootstrap file will no longer fail if some of the blocks already exist in the chainstate. + - Bootstrapping can now be interrupted via Ctrl-C. + - The speed of the import was improved. - General diff --git a/Cargo.lock b/Cargo.lock index 822d66c1d9..a597c08851 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5355,7 +5355,6 @@ version = "1.2.0" dependencies = [ "anyhow", "assert_cmd", - "chainstate", "expect-test", "logging", "node-lib", diff --git a/chainstate/src/detail/bootstrap.rs b/chainstate/src/detail/bootstrap.rs index b6f8336cd2..2259890108 100644 --- a/chainstate/src/detail/bootstrap.rs +++ b/chainstate/src/detail/bootstrap.rs @@ -54,6 +54,7 @@ struct BootstrapFileHeader { pub blocks_count: u64, } +/// Size of the encoded BootstrapFileHeader. 
const FILE_HEADER_SIZE: usize = 24; // In format v0, blocks go directly after the header, each block preceded by its length @@ -134,20 +135,20 @@ pub fn import_bootstrap_stream( where P: FnMut(WithId) -> Result, { - let mut buffer_queue = Vec::::with_capacity(1024 * 1024); + let mut buffer = Vec::::with_capacity(1024 * 1024); let header = { - fill_buffer(&mut buffer_queue, file_reader, FILE_HEADER_SIZE)?; + fill_buffer(&mut buffer, file_reader, FILE_HEADER_SIZE)?; ensure!( - buffer_queue.len() == FILE_HEADER_SIZE, + buffer.len() == FILE_HEADER_SIZE, BootstrapError::FileTooSmall ); - check_for_legacy_format(&buffer_queue)?; + check_for_legacy_format(&buffer)?; - BootstrapFileHeader::decode_all(&mut buffer_queue.as_slice())? + BootstrapFileHeader::decode_all(&mut buffer.as_slice())? }; - buffer_queue.clear(); + buffer.clear(); ensure!( &header.file_magic_bytes == FILE_MAGIC_BYTES, @@ -163,33 +164,27 @@ where ); for _ in 0..header.blocks_count { - fill_buffer(&mut buffer_queue, file_reader, size_of::())?; + fill_buffer(&mut buffer, file_reader, size_of::())?; ensure!( - buffer_queue.len() == size_of::(), + buffer.len() == size_of::(), BootstrapError::BadFileFormat ); let block_size = BlockSizeType::from_le_bytes( - buffer_queue - .as_slice() - .try_into() - .expect("Buffer is known to have the correct size"), + buffer.as_slice().try_into().expect("Buffer is known to have the correct size"), ) .try_into()?; ensure!( block_size <= MAX_BLOCK_SIZE, BootstrapError::BlockSizeTooBig(block_size) ); - buffer_queue.clear(); + buffer.clear(); - fill_buffer(&mut buffer_queue, file_reader, block_size)?; - ensure!( - buffer_queue.len() == block_size, - BootstrapError::BadFileFormat - ); + fill_buffer(&mut buffer, file_reader, block_size)?; + ensure!(buffer.len() == block_size, BootstrapError::BadFileFormat); - let block = Block::decode_all(&mut buffer_queue.as_slice())?; + let block = Block::decode_all(&mut buffer.as_slice())?; let should_continue = 
process_block_func(block.into())?; - buffer_queue.clear(); + buffer.clear(); if !should_continue { break; @@ -210,19 +205,19 @@ fn check_for_legacy_format(header_bytes: &[u8]) -> Result<(), BootstrapError> { } fn fill_buffer( - buffer_queue: &mut Vec, + buffer: &mut Vec, reader: &mut std::io::BufReader, max_buffer_size: usize, ) -> Result<(), BootstrapError> { - while buffer_queue.len() < max_buffer_size { + while buffer.len() < max_buffer_size { let data = reader.fill_buf()?; if data.is_empty() { break; } - let remaining_len = max_buffer_size - buffer_queue.len(); + let remaining_len = max_buffer_size - buffer.len(); let len_to_consume = std::cmp::min(remaining_len, data.len()); - buffer_queue.extend_from_slice(&data[..len_to_consume]); + buffer.extend_from_slice(&data[..len_to_consume]); reader.consume(len_to_consume); } diff --git a/chainstate/src/detail/mod.rs b/chainstate/src/detail/mod.rs index 79ed332e7b..1439ce7960 100644 --- a/chainstate/src/detail/mod.rs +++ b/chainstate/src/detail/mod.rs @@ -903,6 +903,8 @@ impl Chainstate let chain_config = Arc::clone(&self.chain_config); let mut block_processor = |block: WithId| -> Result<_, BootstrapError> { // If chainstate is being shutdown, stop immediately. + // Note that we return an error instead of Ok(false) to avoid an interrupted import + // being treated as successful. if self.shutdown_initiated_rx.as_ref().is_some_and(|rx| rx.borrow().test()) { return Err(BootstrapError::Interrupted); } @@ -995,6 +997,8 @@ where /// /// The main purpose is to make sure the reckless mode stays enabled during bootstrapping for /// the entire duration (i.e. for fresh blocks too). +/// +/// Note: it is assumed that inc/dec are only called during bootstrapping or ibd. 
struct DbRecklessModeCounter { counter: u32, } diff --git a/chainstate/src/lib.rs b/chainstate/src/lib.rs index 38b7e943a3..e310cb79ed 100644 --- a/chainstate/src/lib.rs +++ b/chainstate/src/lib.rs @@ -30,10 +30,10 @@ use common::{ primitives::{BlockHeight, Id}, time_getter::TimeGetter, }; -use detail::Chainstate; -use interface::chainstate_interface_impl; use utils::set_flag::SetFlag; +use crate::{detail::Chainstate, interface::chainstate_interface_impl}; + pub use crate::{ config::{ChainstateConfig, MaxTipAge}, detail::{ diff --git a/chainstate/test-suite/src/tests/bootstrap.rs b/chainstate/test-suite/src/tests/bootstrap.rs index eeb6ea0a93..071d77e762 100644 --- a/chainstate/test-suite/src/tests/bootstrap.rs +++ b/chainstate/test-suite/src/tests/bootstrap.rs @@ -47,6 +47,16 @@ fn check_height_order(blocks: &[Id], tf: &TestFramework) { } } +fn make_chain_config(chain_type: ChainType) -> ChainConfig { + chain::config::Builder::new(chain_type) + .consensus_upgrades(NetUpgrades::unit_tests()) + .data_in_no_signature_witness_allowed(true) + .genesis_unittest(Destination::AnyoneCanSpend) + // Force empty checkpoints because a custom genesis is used. 
+ .checkpoints(BTreeMap::new()) + .build() +} + const EXPECTED_MAGIC_BYTES: &str = "MLBTSTRP"; fn make_header_data(chain_config: &ChainConfig, version: u32, blocks_count: u64) -> Vec { @@ -69,16 +79,20 @@ fn gen_blocks( blocks_count: usize, mut rng: impl Rng + CryptoRng, ) -> Vec { - let mut tf = TestFramework::builder(&mut rng).with_chain_config(chain_config).build(); - let genesis_id = tf.genesis().get_id(); - tf.create_chain(&genesis_id.into(), blocks_count, &mut rng).unwrap(); - - tf.chainstate - .get_block_id_tree_as_list() - .unwrap() - .iter() - .map(|block_id| tf.chainstate.get_block(block_id).unwrap().unwrap()) - .collect_vec() + if blocks_count > 0 { + let mut tf = TestFramework::builder(&mut rng).with_chain_config(chain_config).build(); + let genesis_id = tf.genesis().get_id(); + tf.create_chain(&genesis_id.into(), blocks_count, &mut rng).unwrap(); + + tf.chainstate + .get_block_id_tree_as_list() + .unwrap() + .iter() + .map(|block_id| tf.chainstate.get_block(block_id).unwrap().unwrap()) + .collect_vec() + } else { + Vec::new() + } } fn export_to_vec(tf: &TestFramework, with_stale_blocks: bool) -> Vec { @@ -528,16 +542,6 @@ fn unsupported_version(#[case] seed: Seed) { }); } -fn make_chain_config(chain_type: ChainType) -> ChainConfig { - chain::config::Builder::new(chain_type) - .consensus_upgrades(NetUpgrades::unit_tests()) - .data_in_no_signature_witness_allowed(true) - .genesis_unittest(Destination::AnyoneCanSpend) - // Force empty checkpoints because a custom genesis is used. - .checkpoints(BTreeMap::new()) - .build() -} - // The data starts with wrong format magic bytes. #[rstest] #[trace] @@ -707,6 +711,8 @@ fn block_size_too_big(#[case] seed: Seed) { }); } +// If chainstate reckless mode is enabled, it should be switched on when bootstrapping starts +// and off when it ends, including the case when it ends due to an error. 
#[rstest] #[trace] #[case(Seed::from_entropy())] diff --git a/node-daemon/Cargo.toml b/node-daemon/Cargo.toml index a81550d5fe..05d89fadbc 100644 --- a/node-daemon/Cargo.toml +++ b/node-daemon/Cargo.toml @@ -9,7 +9,6 @@ authors = ["Samer Afach ", "Ben Marsh anyhow::Result { ); } Err(err) => { - // Note: we don't return it as a generic error, because bootstrapping will - // likely fail due to user's mistake rather than node's malfunction, so we - // don't want for e.g. the stack trace to be printed (which would happen with - // a "normal" error when backtrace is enabled). + // Note: we don't return an error here, because bootstrapping will likely fail + // due to a user mistake rather than node malfunction, so we don't want for + // e.g. the stack trace to be printed in this case (anyhow::Error does this + // when backtrace is enabled). logging::log::error!("Node bootstrapping failed: {err}"); return Ok(ExitCode(1)); } diff --git a/storage/failing/src/backend.rs b/storage/failing/src/backend.rs index b0e1a9425a..f8578bdbcf 100644 --- a/storage/failing/src/backend.rs +++ b/storage/failing/src/backend.rs @@ -107,14 +107,6 @@ impl FailingImpl { transaction_failures: 0, } } - - pub fn inner(&self) -> &T { - &self.inner - } - - pub fn inner_mut(&mut self) -> &mut T { - &mut self.inner - } } impl Clone for FailingImpl { diff --git a/storage/src/database/mod.rs b/storage/src/database/mod.rs index d8318c7fe4..41425362a4 100644 --- a/storage/src/database/mod.rs +++ b/storage/src/database/mod.rs @@ -71,10 +71,6 @@ impl Storage { pub fn backend_impl(&self) -> &B::Impl { &self.backend } - - pub fn backend_impl_mut(&mut self) -> &mut B::Impl { - &mut self.backend - } } impl Storage { diff --git a/test/functional/node_bootstrapping.py b/test/functional/node_bootstrapping.py index 20d6c02c44..306f104991 100755 --- a/test/functional/node_bootstrapping.py +++ b/test/functional/node_bootstrapping.py @@ -68,25 +68,37 @@ def create_blocks(self, blocks_count: int, 
initial_transfer_amount_atoms: int) - block_id = node.chainstate_best_block_id() block_ids.append(block_id) + self.wait_for_mempool_update(block_id) + return block_ids + # Need to call this function after the tip has changed, if a new block is to be generated + # afterwards. Otherwise the block generation may fail with "Recoverable mempool error". + def wait_for_mempool_update(self, tip_id: str): + node = self.nodes[0] + self.wait_until(lambda: node.mempool_local_best_block_id() == tip_id, timeout=5) + def run_test(self): node = self.nodes[0] stale_blocks_count = 5 blocks_count = 10 + genesis_id = node.chainstate_best_block_id() + bogus_file = os.path.join(self.options.tmpdir, 'bogus.bin') with open(bogus_file, 'w') as f: f.write('bogus data') # Create the shorter chain (which will be the stale one) and invalidate it immediately, - # so that the next chain starts from generis as well. + # so that the next chain starts from genesis as well. stale_block_ids = self.create_blocks(stale_blocks_count, 111) node.chainstate_invalidate_block(stale_block_ids[0]) # Sanity check tip_height = node.chainstate_best_block_height() assert_equal(tip_height, 0) + self.wait_for_mempool_update(genesis_id) + # Create the longer chain. 
block_ids = self.create_blocks(blocks_count, 222) @@ -129,6 +141,7 @@ def reset_datadir(backup_suffix: str): reset_datadir('.bak0') self.start_node(0) + # Import bootstrap_file_full assert_blocks_missing(node, block_ids) assert_blocks_missing(node, stale_block_ids) node.chainstate_import_bootstrap_file(file_path=bootstrap_file_full) @@ -139,10 +152,10 @@ def reset_datadir(backup_suffix: str): reset_datadir('.bak1') self.start_node(0) + # Import bootstrap_file_mainchain assert_blocks_missing(node, block_ids) assert_blocks_missing(node, stale_block_ids) - node.chainstate_import_bootstrap_file( - file_path=bootstrap_file_mainchain) + node.chainstate_import_bootstrap_file(file_path=bootstrap_file_mainchain) assert_blocks_exist(node, block_ids) assert_blocks_missing(node, stale_block_ids) From 50c0a4b5c734076c8380129ce527d052073e1d92 Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Wed, 18 Feb 2026 18:17:54 +0200 Subject: [PATCH 10/11] Fix bootstrapping functional test failure on Windows --- chainstate/src/rpc/mod.rs | 10 ++++------ rpc/description/src/value_hint.rs | 8 +++++++- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/chainstate/src/rpc/mod.rs b/chainstate/src/rpc/mod.rs index 92ec3d8443..30c7182fcd 100644 --- a/chainstate/src/rpc/mod.rs +++ b/chainstate/src/rpc/mod.rs @@ -198,13 +198,13 @@ trait ChainstateRpc { #[method(name = "export_bootstrap_file")] async fn export_bootstrap_file( &self, - file_path: &std::path::Path, + file_path: std::path::PathBuf, include_stale_blocks: bool, ) -> RpcResult<()>; /// Imports a bootstrap file's blocks to this node #[method(name = "import_bootstrap_file")] - async fn import_bootstrap_file(&self, file_path: &std::path::Path) -> RpcResult<()>; + async fn import_bootstrap_file(&self, file_path: std::path::PathBuf) -> RpcResult<()>; /// Return generic information about the chain, including the current best block, best block height and more. 
#[method(name = "info")] @@ -516,18 +516,16 @@ impl ChainstateRpcServer for super::ChainstateHandle { async fn export_bootstrap_file( &self, - file_path: &std::path::Path, + file_path: std::path::PathBuf, include_stale_blocks: bool, ) -> RpcResult<()> { - let file_path = file_path.to_owned(); rpc::handle_result( self.call(move |this| export_bootstrap_file(this, &file_path, include_stale_blocks)) .await, ) } - async fn import_bootstrap_file(&self, file_path: &std::path::Path) -> RpcResult<()> { - let file_path = file_path.to_owned(); + async fn import_bootstrap_file(&self, file_path: std::path::PathBuf) -> RpcResult<()> { rpc::handle_result(self.call_mut(move |this| import_bootstrap_file(this, &file_path)).await) } diff --git a/rpc/description/src/value_hint.rs b/rpc/description/src/value_hint.rs index f8dad1c404..69701120b3 100644 --- a/rpc/description/src/value_hint.rs +++ b/rpc/description/src/value_hint.rs @@ -181,7 +181,13 @@ impl_value_hint!({ NonZeroIsize => VH::NONZERO_NUMBER; NonZeroUsize => VH::NONZERO_NUMBER; String => VH::STRING; - std::path::Path => VH::STRING; + // Note: we implement `HasValueHint` for `PathBuf` but not for `Path`, because the latter + // won't work if used as a parameter and a native Windows path is passed to it, producing + // weird errors like: + // jsonrpsee_core::proc_macros_support: Failed to parse JSON-RPC params as object: ErrorObject + // { code: InvalidParams, message: "Invalid params", data: Some(RawValue("invalid type: string + // \"C:\\\\aaa\\\\bbb\\\\file.txt\", expected a borrowed path at line 1 column xxx")) } + std::path::PathBuf => VH::STRING; std::net::SocketAddr => VH::STRING; std::net::IpAddr => VH::STRING; std::net::Ipv4Addr => VH::STRING; From 9e024fe95e2e87c1f13c98179515ec045187ce3e Mon Sep 17 00:00:00 2001 From: Mykhailo Kremniov Date: Mon, 23 Feb 2026 18:37:41 +0200 Subject: [PATCH 11/11] Change lmdb-mintlayer git revision to point to master instead of a PR branch --- Cargo.lock | 4 ++-- 
storage/lmdb/Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a597c08851..5c77b0bce6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4741,7 +4741,7 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "lmdb-mintlayer" version = "0.16.2" -source = "git+https://github.com/mintlayer/lmdb-rs-mintlayer.git?rev=4427a41fc32e8b7a30bdae42ce48414a6a5d6920#4427a41fc32e8b7a30bdae42ce48414a6a5d6920" +source = "git+https://github.com/mintlayer/lmdb-rs-mintlayer.git?rev=4b894d5b8e3c0b25d05517d7c5c37128d3a737bb#4b894d5b8e3c0b25d05517d7c5c37128d3a737bb" dependencies = [ "bitflags 1.3.2", "byteorder", @@ -4754,7 +4754,7 @@ dependencies = [ [[package]] name = "lmdb-rkv-sys" version = "0.11.3" -source = "git+https://github.com/mintlayer/lmdb-rs-mintlayer.git?rev=4427a41fc32e8b7a30bdae42ce48414a6a5d6920#4427a41fc32e8b7a30bdae42ce48414a6a5d6920" +source = "git+https://github.com/mintlayer/lmdb-rs-mintlayer.git?rev=4b894d5b8e3c0b25d05517d7c5c37128d3a737bb#4b894d5b8e3c0b25d05517d7c5c37128d3a737bb" dependencies = [ "cc", "libc", diff --git a/storage/lmdb/Cargo.toml b/storage/lmdb/Cargo.toml index b6534a48a0..39e30a41d9 100644 --- a/storage/lmdb/Cargo.toml +++ b/storage/lmdb/Cargo.toml @@ -10,8 +10,8 @@ logging = { path = '../../logging' } storage-core = { path = '../core' } utils = { path = '../../utils' } -# Commit "Attempt to fix CI" from the branch "specify_nosync_per_tx" -lmdb-mintlayer = { git = 'https://github.com/mintlayer/lmdb-rs-mintlayer.git', rev = '4427a41fc32e8b7a30bdae42ce48414a6a5d6920' } +# Commit "Merge pull request #26 from mintlayer/specify_nosync_per_tx" +lmdb-mintlayer = { git = 'https://github.com/mintlayer/lmdb-rs-mintlayer.git', rev = '4b894d5b8e3c0b25d05517d7c5c37128d3a737bb' } [dev-dependencies] rstest.workspace = true