Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
91b883a
perf(db): replace disabled compaction with bounded bulk-load mode
philippem Mar 4, 2026
1c01e40
perf(db): cache index/filter blocks in block cache to bound memory
philippem Mar 2, 2026
ba8f849
perf(db): incremental bytes_per_sync for SST writes
philippem Feb 20, 2026
5f1eda5
perf(db): parallel subcompactions for initial sync
philippem Feb 23, 2026
208d727
perf(db): forced bottommost compaction
philippem Mar 5, 2026
f378638
perf(db): switch compression from Snappy to LZ4, Zstd for bottommost
philippem Mar 2, 2026
defab78
perf(db): larger compaction readahead
philippem Mar 5, 2026
8860625
perf(db): parallel sort in write_rows/delete_rows
philippem Feb 23, 2026
c355522
perf(db): add Bloom filters for faster point lookups
philippem Mar 4, 2026
a3db816
test(db): add iterator tests for short and full-length prefix scans
philippem Mar 10, 2026
fb5451f
perf(db): add prefix extractor for prefix Bloom filters on range scans
philippem Mar 10, 2026
ad72179
perf(index): merge two-pass add+index into single per-batch pass
philippem Feb 19, 2026
62d182a
perf(fetch): make initial sync batch size configurable (default 250)
philippem Feb 23, 2026
9fb6c81
perf(fetch): reuse rayon thread pool across blk*.dat files
philippem Feb 24, 2026
b3e8bf3
perf(fetch,index): pre-compute txids in BlockEntry
philippem Feb 24, 2026
97b2d79
perf(fetch): blkfiles pipeline lookahead
philippem Mar 5, 2026
04795af
feat(metrics): add batch_total timer to index_duration histogram
philippem Mar 5, 2026
2251fb5
feat(metrics): export per-level SST file counts to Prometheus
philippem Mar 5, 2026
5533400
refactor(logging): show block height instead of batch count in progress
philippem Mar 7, 2026
e9f56c3
feat(metrics): export initial sync height as Prometheus gauge
philippem Mar 7, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 20 additions & 15 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,17 +43,14 @@ pub struct Config {
pub rpc_logging: RpcLogging,
pub zmq_addr: Option<SocketAddr>,

/// Enable compaction during initial sync
///
/// By default compaction is off until initial sync is finished for performance reasons,
/// however, this requires much more disk space.
pub initial_sync_compaction: bool,

/// RocksDB block cache size in MB (per database)
/// Caches decompressed blocks in memory to avoid repeated decompression (CPU intensive)
/// Caches decompressed data blocks, plus index and filter blocks (via cache_index_and_filter_blocks).
/// Total memory usage = cache_size * 3_databases (txstore, history, cache)
/// Recommendation: Start with 1024MB for production
/// Higher values reduce CPU load from cache misses but use more RAM
/// Recommendation: 1024 MB for steady-state; 4096 MB+ for initial sync (L0 SST
/// files accumulate up to the compaction trigger — their index, filter (Bloom),
/// and data blocks must fit in this cache). With 10 bits/key bloom filters and
/// a 512 MB write buffer, each L0 file's filter block is ~9.75 MB, so 64 L0
/// files need ~625 MB of filter blocks on top of index blocks.
pub db_block_cache_mb: usize,

/// RocksDB parallelism level (background compaction and flush threads)
Expand All @@ -67,6 +64,12 @@ pub struct Config {
/// Larger buffers = fewer flushes (less CPU) but more RAM usage
pub db_write_buffer_size_mb: usize,

/// Number of blocks per batch during initial sync (bitcoind fetch mode).
/// Larger batches keep more txo rows in the write buffer when index() runs lookup_txos(),
/// improving cache hit rate for outputs spent within the same batch window.
/// The batch's total write volume must stay under db_write_buffer_size_mb to avoid mid-batch flushes.
pub initial_sync_batch_size: usize,

#[cfg(feature = "liquid")]
pub parent_network: BNetwork,
#[cfg(feature = "liquid")]
Expand Down Expand Up @@ -229,14 +232,10 @@ impl Config {
.long("anonymize-json-rpc-logging-source-ip")
.help("enables ip anonymization in rpc logs")
.takes_value(false)
).arg(
Arg::with_name("initial_sync_compaction")
.long("initial-sync-compaction")
.help("Perform compaction during initial sync (slower but less disk space required)")
).arg(
Arg::with_name("db_block_cache_mb")
.long("db-block-cache-mb")
.help("RocksDB block cache size in MB per database")
.help("RocksDB block cache size in MB per database. Bounds index/filter block memory; use 4096+ for initial sync to avoid table-reader heap growth.")
.takes_value(true)
.default_value("8")
).arg(
Expand All @@ -251,6 +250,12 @@ impl Config {
.help("RocksDB write buffer size in MB per database. RAM usage = size * max_write_buffers(2) * 3_databases")
.takes_value(true)
.default_value("256")
).arg(
Arg::with_name("initial_sync_batch_size")
.long("initial-sync-batch-size")
.help("Number of blocks per batch during initial sync. Larger values keep more txo rows in the write buffer during indexing, improving lookup_txos cache hit rate for recently-created outputs.")
.takes_value(true)
.default_value("250")
).arg(
Arg::with_name("zmq_addr")
.long("zmq-addr")
Expand Down Expand Up @@ -487,10 +492,10 @@ impl Config {
index_unspendables: m.is_present("index_unspendables"),
cors: m.value_of("cors").map(|s| s.to_string()),
precache_scripts: m.value_of("precache_scripts").map(|s| s.to_string()),
initial_sync_compaction: m.is_present("initial_sync_compaction"),
db_block_cache_mb: value_t_or_exit!(m, "db_block_cache_mb", usize),
db_parallelism: value_t_or_exit!(m, "db_parallelism", usize),
db_write_buffer_size_mb: value_t_or_exit!(m, "db_write_buffer_size_mb", usize),
initial_sync_batch_size: value_t_or_exit!(m, "initial_sync_batch_size", usize),
zmq_addr,

#[cfg(feature = "liquid")]
Expand Down
6 changes: 6 additions & 0 deletions src/metrics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,12 @@ impl Metrics {
g
}

/// Create a floating-point `Gauge` from `opts` and register it with this
/// metrics registry, returning the gauge for the caller to update.
///
/// Panics if the options are invalid or if a collector with the same
/// fully-qualified name is already registered (mirrors the sibling
/// `gauge`/`gauge_vec` constructors, which treat that as a bug).
pub fn float_gauge(&self, opts: prometheus::Opts) -> prometheus::Gauge {
    let gauge = prometheus::Gauge::with_opts(opts).unwrap();
    // Register a clone; the registry keeps one handle, the caller the other.
    self.reg.register(Box::new(gauge.clone())).unwrap();
    gauge
}

pub fn gauge_vec(&self, opts: prometheus::Opts, labels: &[&str]) -> GaugeVec {
let g = GaugeVec::new(opts, labels).unwrap();
self.reg.register(Box::new(g.clone())).unwrap();
Expand Down
Loading
Loading