From 0caad0071a22a0254d1f8ac5de5b47510aeb12de Mon Sep 17 00:00:00 2001 From: ananas Date: Tue, 17 Mar 2026 16:54:37 +0000 Subject: [PATCH 01/16] feat: nullify_2 shared proof node and 1-byte discriminator Deduplicate the level-15 proof node shared by both leaves (saves 32B) and shrink the discriminator from 4 bytes to 1 byte (saves 3B). Total instruction data drops from 1042B to 1007B, fitting the v0 transaction within the 1232-byte limit with 4 bytes margin. --- forester/tests/test_nullify_2_tx_size.rs | 108 ++++++++ .../registry-test/tests/nullify_2.rs | 232 ++++++++++++++++++ .../src/account_compression_cpi/nullify.rs | 74 ++++++ .../src/account_compression_cpi/sdk.rs | 185 ++++++++++++++ programs/registry/src/lib.rs | 42 ++++ 5 files changed, 641 insertions(+) create mode 100644 forester/tests/test_nullify_2_tx_size.rs create mode 100644 program-tests/registry-test/tests/nullify_2.rs diff --git a/forester/tests/test_nullify_2_tx_size.rs b/forester/tests/test_nullify_2_tx_size.rs new file mode 100644 index 0000000000..e3c1e5f84a --- /dev/null +++ b/forester/tests/test_nullify_2_tx_size.rs @@ -0,0 +1,108 @@ +use light_registry::{ + account_compression_cpi::sdk::{ + create_nullify_2_instruction, nullify_2_lookup_table_accounts, + CreateNullify2InstructionInputs, + }, + utils::get_forester_epoch_pda_from_authority, +}; +use solana_sdk::{ + hash::Hash, + message::{v0, AddressLookupTableAccount, VersionedMessage}, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + transaction::VersionedTransaction, +}; + +/// Validates that a nullify_2 v0 transaction with ALT fits within the +/// 1232-byte Solana transaction size limit. +/// +/// This is a pure serialization check -- no validator needed. +/// If the serialized size is <= 1232 bytes, it will work on any validator. 
+#[test] +fn test_nullify_2_v0_transaction_size() { + let authority = Keypair::new(); + let merkle_tree = Pubkey::new_unique(); + let nullifier_queue = Pubkey::new_unique(); + let epoch = 0u64; + let forester_pda = get_forester_epoch_pda_from_authority(&authority.pubkey(), epoch).0; + + let ix = create_nullify_2_instruction( + CreateNullify2InstructionInputs { + authority: authority.pubkey(), + nullifier_queue, + merkle_tree, + change_log_index: 1400, + queue_index_0: 100, + queue_index_1: 200, + leaf_index_0: 67_000_000, + leaf_index_1: 67_000_001, + proof_0: [[0xAA; 32]; 15], + proof_1: [[0xBB; 32]; 15], + shared_proof_node: [0xCC; 32], + derivation: authority.pubkey(), + is_metadata_forester: false, + }, + 0, + ); + + // Build synthetic ALT with the known accounts + let alt_accounts = + nullify_2_lookup_table_accounts(merkle_tree, nullifier_queue, Some(forester_pda)); + let alt_address = Pubkey::new_unique(); + let alt = AddressLookupTableAccount { + key: alt_address, + addresses: alt_accounts, + }; + + // Compile v0 message + let blockhash = Hash::default(); + let message = v0::Message::try_compile(&authority.pubkey(), &[ix], &[alt], blockhash) + .expect("Failed to compile v0 message"); + + // Create signed transaction + let versioned_message = VersionedMessage::V0(message); + let tx = VersionedTransaction::try_new(versioned_message, &[&authority]) + .expect("Failed to create versioned transaction"); + + let serialized = tx.message.serialize(); + // Full tx = compact-u16 sig count (1) + signatures (64 * n) + serialized message + let tx_size = 1 + tx.signatures.len() * 64 + serialized.len(); + + println!( + "nullify_2 v0 transaction size: {} bytes (limit: 1232)", + tx_size + ); + println!(" instruction data: {} bytes", 1007); + println!(" margin: {} bytes", 1232_i64 - tx_size as i64); + + // Breakdown + println!("\nTransaction breakdown:"); + println!(" signatures: {}", tx.signatures.len() * 64 + 1); + let static_keys = tx.message.static_account_keys(); + 
println!(" static account keys: {}", static_keys.len()); + for (i, key) in static_keys.iter().enumerate() { + let label = if *key == authority.pubkey() { + "authority (signer)" + } else if *key == light_registry::ID { + "registry program" + } else { + "unknown" + }; + println!(" [{}] {} ({})", i, key, label); + } + if let VersionedMessage::V0(m) = &tx.message { + println!(" address table lookups: {}", m.address_table_lookups.len()); + for alt_lookup in &m.address_table_lookups { + println!(" writable indices: {:?}", alt_lookup.writable_indexes); + println!(" readonly indices: {:?}", alt_lookup.readonly_indexes); + } + }; + + assert!( + tx_size <= 1232, + "nullify_2 v0 transaction is {} bytes, exceeds 1232 byte limit by {} bytes", + tx_size, + tx_size - 1232 + ); +} diff --git a/program-tests/registry-test/tests/nullify_2.rs b/program-tests/registry-test/tests/nullify_2.rs new file mode 100644 index 0000000000..4e919ab0a0 --- /dev/null +++ b/program-tests/registry-test/tests/nullify_2.rs @@ -0,0 +1,232 @@ +use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; +use forester_utils::account_zero_copy::{get_concurrent_merkle_tree, get_hash_set}; +use light_client::rpc::Rpc; +use light_compressed_account::TreeType; +use light_hasher::Poseidon; +use light_program_test::{program_test::LightProgramTest, ProgramTestConfig}; +use light_registry::account_compression_cpi::sdk::{ + create_nullify_2_instruction, CreateNullify2InstructionInputs, +}; +use light_test_utils::e2e_test_env::init_program_test_env; +use serial_test::serial; +use solana_sdk::signature::{Keypair, Signer}; + +/// Tests that nullify_2 correctly nullifies two leaves in a single instruction +/// using two sequential CPIs to account_compression::nullify_leaves. +/// Uses LiteSVM (light-program-test) for fast logic testing. +/// Note: LiteSVM allows 10KB transactions, so this does NOT validate tx size. 
+#[serial] +#[tokio::test] +async fn test_nullify_2() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let (mut state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + // Create V1 state merkle tree with custom forester + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + // Create 2 compressed accounts by compressing + transferring twice. + // Each transfer nullifies the input, putting it in the nullifier queue. 
+ e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic( + &forester_keypair, + &Keypair::new().pubkey(), + None, + ) + .await + .unwrap(); + + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic( + &forester_keypair, + &Keypair::new().pubkey(), + None, + ) + .await + .unwrap(); + + ( + e2e_env.indexer.state_merkle_trees[0].clone(), + e2e_env.rpc, + ) + }; + + // Read on-chain state + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + + let pre_root = onchain_tree.root(); + let change_log_index = onchain_tree.changelog_index(); + + // Collect 2 unmarked items from the queue + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!( + items_to_nullify.len() >= 2, + "Need at least 2 items in nullifier queue, got {}", + items_to_nullify.len() + ); + + let (queue_idx_0, leaf_0) = items_to_nullify[0]; + let (queue_idx_1, leaf_1) = items_to_nullify[1]; + + let leaf_index_0 = state_tree_bundle + .merkle_tree + .get_leaf_index(&leaf_0) + .unwrap(); + let leaf_index_1 = state_tree_bundle + .merkle_tree + .get_leaf_index(&leaf_1) + .unwrap(); + + let proof_0: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index_0, false) + .unwrap(); + let proof_1: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index_1, false) + .unwrap(); + + // Split proofs: first 15 nodes are unique per leaf, node at index 15 is shared. 
+ // Both leaves are in the same 2^16 subtree so they share the proof node at level 15. + let proof_0_arr: [[u8; 32]; 15] = proof_0[..15].try_into().unwrap(); + let proof_1_arr: [[u8; 32]; 15] = proof_1[..15].try_into().unwrap(); + let shared_proof_node: [u8; 32] = proof_0[15]; + // Verify the shared node is the same in both proofs. + assert_eq!( + proof_0[15], proof_1[15], + "Level 15 proof node must be shared between both leaves in the same subtree" + ); + + // Build nullify_2 instruction + let ix = create_nullify_2_instruction( + CreateNullify2InstructionInputs { + authority: forester_keypair.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_index_0: queue_idx_0 as u16, + queue_index_1: queue_idx_1 as u16, + leaf_index_0: leaf_index_0 as u32, + leaf_index_1: leaf_index_1 as u32, + proof_0: proof_0_arr, + proof_1: proof_1_arr, + shared_proof_node, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + // Send transaction + rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await + .unwrap(); + + // Verify: both queue items should be marked + let nullifier_queue_post = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + + let bucket_0 = nullifier_queue_post + .get_bucket(queue_idx_0) + .unwrap() + .unwrap(); + assert!( + bucket_0.sequence_number.is_some(), + "First queue item should be marked after nullify_2" + ); + + let bucket_1 = nullifier_queue_post + .get_bucket(queue_idx_1) + .unwrap() + .unwrap(); + assert!( + bucket_1.sequence_number.is_some(), + "Second queue item should be marked after nullify_2" + ); + + // Verify: tree root changed + let onchain_tree_post = + get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + assert_ne!( + pre_root, + 
onchain_tree_post.root(), + "Root should have changed after nullify_2" + ); + + // Locally update the merkle tree and verify roots match + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], leaf_index_0) + .unwrap(); + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], leaf_index_1) + .unwrap(); + + assert_eq!( + onchain_tree_post.root(), + state_tree_bundle.merkle_tree.root(), + "On-chain root should match local tree after nullifying both leaves" + ); +} diff --git a/programs/registry/src/account_compression_cpi/nullify.rs b/programs/registry/src/account_compression_cpi/nullify.rs index 818e2b43a8..9e0e5d8f4e 100644 --- a/programs/registry/src/account_compression_cpi/nullify.rs +++ b/programs/registry/src/account_compression_cpi/nullify.rs @@ -61,3 +61,77 @@ pub fn process_nullify( proofs, ) } + +#[allow(clippy::too_many_arguments)] +pub fn process_nullify_2( + ctx: &Context, + change_log_index: u16, + queue_index_0: u16, + queue_index_1: u16, + leaf_index_0: u32, + leaf_index_1: u32, + proof_0: [[u8; 32]; 15], + proof_1: [[u8; 32]; 15], + shared_proof_node: [u8; 32], +) -> Result<()> { + let bump = ctx.bumps.cpi_authority; + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + + // Reconstruct full 16-node proofs by appending the shared node (level 15). 
+ let mut full_proof_0: Vec<[u8; 32]> = proof_0.to_vec(); + full_proof_0.push(shared_proof_node); + let mut full_proof_1: Vec<[u8; 32]> = proof_1.to_vec(); + full_proof_1.push(shared_proof_node); + + // First CPI: nullify leaf 0 + { + let accounts = account_compression::cpi::accounts::NullifyLeaves { + authority: ctx.accounts.cpi_authority.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.to_account_info()), + log_wrapper: ctx.accounts.log_wrapper.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + nullifier_queue: ctx.accounts.nullifier_queue.to_account_info(), + fee_payer: Some(ctx.accounts.authority.to_account_info()), + }; + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + account_compression::cpi::nullify_leaves( + cpi_ctx, + vec![change_log_index as u64], + vec![queue_index_0], + vec![leaf_index_0 as u64], + vec![full_proof_0], + )?; + } + + // Second CPI: nullify leaf 1 (same change_log_index -- proof is patched via changelog replay) + { + let accounts = account_compression::cpi::accounts::NullifyLeaves { + authority: ctx.accounts.cpi_authority.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.to_account_info()), + log_wrapper: ctx.accounts.log_wrapper.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + nullifier_queue: ctx.accounts.nullifier_queue.to_account_info(), + fee_payer: Some(ctx.accounts.authority.to_account_info()), + }; + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + account_compression::cpi::nullify_leaves( + cpi_ctx, + vec![change_log_index as u64], + vec![queue_index_1], + vec![leaf_index_1 as u64], + vec![full_proof_1], + )?; + } + + Ok(()) +} diff --git a/programs/registry/src/account_compression_cpi/sdk.rs 
b/programs/registry/src/account_compression_cpi/sdk.rs index f002c35499..96fa63416b 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -62,6 +62,87 @@ pub fn create_nullify_instruction( } } +#[derive(Clone, Debug, PartialEq)] +pub struct CreateNullify2InstructionInputs { + pub authority: Pubkey, + pub nullifier_queue: Pubkey, + pub merkle_tree: Pubkey, + pub change_log_index: u16, + pub queue_index_0: u16, + pub queue_index_1: u16, + pub leaf_index_0: u32, + pub leaf_index_1: u32, + pub proof_0: [[u8; 32]; 15], + pub proof_1: [[u8; 32]; 15], + pub shared_proof_node: [u8; 32], + pub derivation: Pubkey, + pub is_metadata_forester: bool, +} + +pub fn create_nullify_2_instruction( + inputs: CreateNullify2InstructionInputs, + epoch: u64, +) -> Instruction { + let register_program_pda = get_registered_program_pda(&crate::ID); + let registered_forester_pda = if inputs.is_metadata_forester { + None + } else { + Some(get_forester_epoch_pda_from_authority(&inputs.derivation, epoch).0) + }; + let (cpi_authority, _bump) = get_cpi_authority_pda(); + let instruction_data = crate::instruction::Nullify2 { + change_log_index: inputs.change_log_index, + queue_index_0: inputs.queue_index_0, + queue_index_1: inputs.queue_index_1, + leaf_index_0: inputs.leaf_index_0, + leaf_index_1: inputs.leaf_index_1, + proof_0: inputs.proof_0, + proof_1: inputs.proof_1, + shared_proof_node: inputs.shared_proof_node, + }; + + let accounts = crate::accounts::NullifyLeaves { + authority: inputs.authority, + registered_forester_pda, + registered_program_pda: register_program_pda, + nullifier_queue: inputs.nullifier_queue, + merkle_tree: inputs.merkle_tree, + log_wrapper: NOOP_PUBKEY.into(), + cpi_authority, + account_compression_program: account_compression::ID, + }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} + +/// Returns the known 
accounts for populating an address lookup table +/// for nullify_2 v0 transactions. These are the accounts that don't change +/// between nullify_2 calls on the same tree. +pub fn nullify_2_lookup_table_accounts( + merkle_tree: Pubkey, + nullifier_queue: Pubkey, + forester_pda: Option, +) -> Vec { + let (cpi_authority, _) = get_cpi_authority_pda(); + let registered_program_pda = get_registered_program_pda(&crate::ID); + let mut accounts = vec![ + cpi_authority, + registered_program_pda, + account_compression::ID, + Pubkey::new_from_array(NOOP_PUBKEY), + merkle_tree, + nullifier_queue, + crate::ID, + ]; + if let Some(pda) = forester_pda { + accounts.push(pda); + } + accounts +} + #[derive(Clone, Debug, PartialEq)] pub struct CreateMigrateStateInstructionInputs { pub authority: Pubkey, @@ -545,3 +626,107 @@ pub fn create_rollover_batch_address_tree_instruction( data: instruction_data.data(), } } + +#[cfg(test)] +mod tests { + use super::*; + use anchor_lang::Discriminator; + + #[test] + fn test_nullify_2_instruction_data_size() { + let instruction_data = crate::instruction::Nullify2 { + change_log_index: 0, + queue_index_0: 0, + queue_index_1: 0, + leaf_index_0: 0, + leaf_index_1: 0, + proof_0: [[0u8; 32]; 15], + proof_1: [[0u8; 32]; 15], + shared_proof_node: [0u8; 32], + }; + let data = instruction_data.data(); + assert_eq!( + data.len(), + 1007, + "nullify_2 instruction data must be exactly 1007 bytes \ + (1 disc + 2 changelog + 2+2 queue + 4+4 leaf + 480+480 proofs + 32 shared), got {}", + data.len() + ); + } + + #[test] + fn test_nullify_2_instruction_accounts() { + let authority = Pubkey::new_unique(); + let inputs = CreateNullify2InstructionInputs { + authority, + nullifier_queue: Pubkey::new_unique(), + merkle_tree: Pubkey::new_unique(), + change_log_index: 0, + queue_index_0: 0, + queue_index_1: 1, + leaf_index_0: 0, + leaf_index_1: 1, + proof_0: [[0u8; 32]; 15], + proof_1: [[0u8; 32]; 15], + shared_proof_node: [0u8; 32], + derivation: authority, + 
is_metadata_forester: false, + }; + let ix = create_nullify_2_instruction(inputs, 0); + assert_eq!(ix.data.len(), 1007); + // 8 accounts: forester_pda, authority, cpi_authority, registered_program_pda, + // account_compression_program, log_wrapper, merkle_tree, nullifier_queue + assert_eq!(ix.accounts.len(), 8, "expected 8 accounts"); + } + + #[test] + fn test_nullify_2_discriminator_no_collision() { + let disc = crate::instruction::Nullify2::DISCRIMINATOR; + assert_eq!(disc.len(), 1, "nullify_2 discriminator must be 1 byte"); + + let existing: &[(&str, &[u8])] = &[ + ("InitializeProtocolConfig", crate::instruction::InitializeProtocolConfig::DISCRIMINATOR), + ("UpdateProtocolConfig", crate::instruction::UpdateProtocolConfig::DISCRIMINATOR), + ("RegisterSystemProgram", crate::instruction::RegisterSystemProgram::DISCRIMINATOR), + ("DeregisterSystemProgram", crate::instruction::DeregisterSystemProgram::DISCRIMINATOR), + ("RegisterForester", crate::instruction::RegisterForester::DISCRIMINATOR), + ("UpdateForesterPda", crate::instruction::UpdateForesterPda::DISCRIMINATOR), + ("UpdateForesterPdaWeight", crate::instruction::UpdateForesterPdaWeight::DISCRIMINATOR), + ("RegisterForesterEpoch", crate::instruction::RegisterForesterEpoch::DISCRIMINATOR), + ("FinalizeRegistration", crate::instruction::FinalizeRegistration::DISCRIMINATOR), + ("ReportWork", crate::instruction::ReportWork::DISCRIMINATOR), + ("InitializeAddressMerkleTree", crate::instruction::InitializeAddressMerkleTree::DISCRIMINATOR), + ("InitializeStateMerkleTree", crate::instruction::InitializeStateMerkleTree::DISCRIMINATOR), + ("Nullify", crate::instruction::Nullify::DISCRIMINATOR), + ("UpdateAddressMerkleTree", crate::instruction::UpdateAddressMerkleTree::DISCRIMINATOR), + ("RolloverAddressMerkleTreeAndQueue", crate::instruction::RolloverAddressMerkleTreeAndQueue::DISCRIMINATOR), + ("RolloverStateMerkleTreeAndQueue", crate::instruction::RolloverStateMerkleTreeAndQueue::DISCRIMINATOR), + 
("InitializeBatchedStateMerkleTree", crate::instruction::InitializeBatchedStateMerkleTree::DISCRIMINATOR), + ("BatchNullify", crate::instruction::BatchNullify::DISCRIMINATOR), + ("BatchAppend", crate::instruction::BatchAppend::DISCRIMINATOR), + ("InitializeBatchedAddressMerkleTree", crate::instruction::InitializeBatchedAddressMerkleTree::DISCRIMINATOR), + ("BatchUpdateAddressTree", crate::instruction::BatchUpdateAddressTree::DISCRIMINATOR), + ("RolloverBatchedAddressMerkleTree", crate::instruction::RolloverBatchedAddressMerkleTree::DISCRIMINATOR), + ("RolloverBatchedStateMerkleTree", crate::instruction::RolloverBatchedStateMerkleTree::DISCRIMINATOR), + ("MigrateState", crate::instruction::MigrateState::DISCRIMINATOR), + ("CreateConfigCounter", crate::instruction::CreateConfigCounter::DISCRIMINATOR), + ("CreateCompressibleConfig", crate::instruction::CreateCompressibleConfig::DISCRIMINATOR), + ("UpdateCompressibleConfig", crate::instruction::UpdateCompressibleConfig::DISCRIMINATOR), + ("PauseCompressibleConfig", crate::instruction::PauseCompressibleConfig::DISCRIMINATOR), + ("UnpauseCompressibleConfig", crate::instruction::UnpauseCompressibleConfig::DISCRIMINATOR), + ("DeprecateCompressibleConfig", crate::instruction::DeprecateCompressibleConfig::DISCRIMINATOR), + ("WithdrawFundingPool", crate::instruction::WithdrawFundingPool::DISCRIMINATOR), + ("Claim", crate::instruction::Claim::DISCRIMINATOR), + ("CompressAndClose", crate::instruction::CompressAndClose::DISCRIMINATOR), + ]; + + for (name, existing_disc) in existing { + assert!( + !existing_disc.starts_with(disc), + "nullify_2 1-byte discriminator {:?} collides with {name} discriminator prefix {:?}", + disc, + &existing_disc[..disc.len().min(existing_disc.len())] + ); + } + } +} diff --git a/programs/registry/src/lib.rs b/programs/registry/src/lib.rs index a21b58cd4b..38ddc738a8 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -420,6 +420,48 @@ pub mod light_registry { ) } + /// 
Nullifies two leaves in a single instruction via two sequential CPIs. + /// Uses a 1-byte custom discriminator + shared proof node to fit within + /// the 1232-byte transaction limit when combined with an address lookup table. + /// The two leaves must share the same subtree at level 15 (highest proof + /// level below canopy 10), so the forester pairs leaves whose indices + /// agree on bits 16-25. + /// Bump is derived on-chain via ctx.bumps.cpi_authority. + #[allow(clippy::too_many_arguments)] + #[instruction(discriminator = [78])] + pub fn nullify_2<'info>( + ctx: Context<'_, '_, '_, 'info, NullifyLeaves<'info>>, + change_log_index: u16, + queue_index_0: u16, + queue_index_1: u16, + leaf_index_0: u32, + leaf_index_1: u32, + proof_0: [[u8; 32]; 15], + proof_1: [[u8; 32]; 15], + shared_proof_node: [u8; 32], + ) -> Result<()> { + let metadata = ctx.accounts.merkle_tree.load()?.metadata; + check_forester( + &metadata, + ctx.accounts.authority.key(), + ctx.accounts.nullifier_queue.key(), + &mut ctx.accounts.registered_forester_pda, + 2 * DEFAULT_WORK_V1, + )?; + + process_nullify_2( + &ctx, + change_log_index, + queue_index_0, + queue_index_1, + leaf_index_0, + leaf_index_1, + proof_0, + proof_1, + shared_proof_node, + ) + } + #[allow(clippy::too_many_arguments)] pub fn update_address_merkle_tree( ctx: Context, From 3b7291d3ebb461aaa8af9e14e75d36d7d7f1196e Mon Sep 17 00:00:00 2001 From: ananas Date: Tue, 17 Mar 2026 17:54:55 +0000 Subject: [PATCH 02/16] feat: nullify_dedup instruction for 2-4 nullifications with proof deduplication Add nullify_dedup instruction that packs 2-4 nullifications into a single transaction using proof node deduplication. Nearby Merkle tree leaves share sibling nodes at common ancestor levels; the encoding stores each unique node once and uses bitvecs/2-bit source fields to reconstruct all proofs on-chain. 
- 1-byte custom discriminator [79], reuses NullifyLeaves accounts struct - Encoding: shared_top_node (level 15) + bitvec for proof_2 + 2-bit source fields for proof_3/proof_4, with u32::MAX sentinels for count < 4 - MAX_NODES=28 verified by tx size test (1230 bytes with ALT + compute budget ix) - SDK: compress_proofs() encoder, create_nullify_dedup_instruction() builder, nullify_dedup_lookup_table_accounts() helper - Unit tests: data size, accounts, discriminator collision, round-trip, edge cases - Integration tests: 4-leaf, 3-leaf, 2-leaf success + 1-leaf rejection --- forester/tests/test_nullify_dedup_tx_size.rs | 122 ++++ .../registry-test/tests/nullify_dedup.rs | 661 ++++++++++++++++++ .../src/account_compression_cpi/nullify.rs | 183 +++++ .../src/account_compression_cpi/sdk.rs | 497 ++++++++++++- programs/registry/src/errors.rs | 2 + programs/registry/src/lib.rs | 42 ++ 6 files changed, 1503 insertions(+), 4 deletions(-) create mode 100644 forester/tests/test_nullify_dedup_tx_size.rs create mode 100644 program-tests/registry-test/tests/nullify_dedup.rs diff --git a/forester/tests/test_nullify_dedup_tx_size.rs b/forester/tests/test_nullify_dedup_tx_size.rs new file mode 100644 index 0000000000..60168bdcb5 --- /dev/null +++ b/forester/tests/test_nullify_dedup_tx_size.rs @@ -0,0 +1,122 @@ +use light_registry::{ + account_compression_cpi::sdk::{ + create_nullify_dedup_instruction, nullify_dedup_lookup_table_accounts, + CreateNullifyDedupInstructionInputs, NULLIFY_DEDUP_MAX_NODES, + }, + utils::get_forester_epoch_pda_from_authority, +}; +use solana_sdk::{ + compute_budget::ComputeBudgetInstruction, + hash::Hash, + message::{v0, AddressLookupTableAccount, VersionedMessage}, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + transaction::VersionedTransaction, +}; + +/// Validates that a nullify_dedup v0 transaction with ALT and SetComputeUnitLimit +/// fits within the 1232-byte Solana transaction size limit. 
+/// +/// This is a pure serialization check -- no validator needed. +#[test] +fn test_nullify_dedup_v0_transaction_size() { + let authority = Keypair::new(); + let merkle_tree = Pubkey::new_unique(); + let nullifier_queue = Pubkey::new_unique(); + let epoch = 0u64; + let forester_pda = get_forester_epoch_pda_from_authority(&authority.pubkey(), epoch).0; + + // Worst case: MAX_NODES unique nodes + let nullify_ix = create_nullify_dedup_instruction( + CreateNullifyDedupInstructionInputs { + authority: authority.pubkey(), + nullifier_queue, + merkle_tree, + change_log_index: 1400, + queue_indices: [100, 200, 300, 400], + leaf_indices: [67_000_000, 67_000_001, 67_000_002, 67_000_003], + proof_2_shared: 0, + proof_3_source: 0, + proof_4_source: 0, + shared_top_node: [0xCC; 32], + nodes: vec![[0xAA; 32]; NULLIFY_DEDUP_MAX_NODES], + derivation: authority.pubkey(), + is_metadata_forester: false, + }, + 0, + ); + + // SetComputeUnitLimit instruction + let compute_ix = ComputeBudgetInstruction::set_compute_unit_limit(600_000); + + // Build synthetic ALT with the known accounts (includes ComputeBudget program ID) + let alt_accounts = + nullify_dedup_lookup_table_accounts(merkle_tree, nullifier_queue, Some(forester_pda)); + let alt_address = Pubkey::new_unique(); + let alt = AddressLookupTableAccount { + key: alt_address, + addresses: alt_accounts, + }; + + // Compile v0 message with both instructions + let blockhash = Hash::default(); + let message = + v0::Message::try_compile(&authority.pubkey(), &[compute_ix, nullify_ix], &[alt], blockhash) + .expect("Failed to compile v0 message"); + + // Create signed transaction + let versioned_message = VersionedMessage::V0(message); + let tx = VersionedTransaction::try_new(versioned_message, &[&authority]) + .expect("Failed to create versioned transaction"); + + let serialized = tx.message.serialize(); + // Full tx = compact-u16 sig count (1) + signatures (64 * n) + serialized message + let tx_size = 1 + tx.signatures.len() * 64 + 
serialized.len(); + + let ix_data_size = 1 + 2 + 8 + 16 + 2 + 4 + 4 + 32 + 4 + NULLIFY_DEDUP_MAX_NODES * 32; + + println!( + "nullify_dedup v0 transaction size: {} bytes (limit: 1232)", + tx_size + ); + println!(" nullify_dedup instruction data: {} bytes", ix_data_size); + println!( + " max_nodes: {} ({} bytes payload)", + NULLIFY_DEDUP_MAX_NODES, + NULLIFY_DEDUP_MAX_NODES * 32 + ); + println!(" margin: {} bytes", 1232_i64 - tx_size as i64); + + // Breakdown + println!("\nTransaction breakdown:"); + println!(" signatures: {}", tx.signatures.len() * 64 + 1); + let static_keys = tx.message.static_account_keys(); + println!(" static account keys: {}", static_keys.len()); + for (i, key) in static_keys.iter().enumerate() { + let label = if *key == authority.pubkey() { + "authority (signer)" + } else if *key == light_registry::ID { + "registry program" + } else if *key == solana_sdk::compute_budget::ID { + "compute budget program" + } else { + "unknown" + }; + println!(" [{}] {} ({})", i, key, label); + } + if let VersionedMessage::V0(m) = &tx.message { + println!(" address table lookups: {}", m.address_table_lookups.len()); + for alt_lookup in &m.address_table_lookups { + println!(" writable indices: {:?}", alt_lookup.writable_indexes); + println!(" readonly indices: {:?}", alt_lookup.readonly_indexes); + } + }; + + assert!( + tx_size <= 1232, + "nullify_dedup v0 transaction is {} bytes, exceeds 1232 byte limit by {} bytes", + tx_size, + tx_size - 1232 + ); +} diff --git a/program-tests/registry-test/tests/nullify_dedup.rs b/program-tests/registry-test/tests/nullify_dedup.rs new file mode 100644 index 0000000000..b2e19b3477 --- /dev/null +++ b/program-tests/registry-test/tests/nullify_dedup.rs @@ -0,0 +1,661 @@ +use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; +use forester_utils::account_zero_copy::{get_concurrent_merkle_tree, get_hash_set}; +use light_client::rpc::Rpc; +use light_compressed_account::TreeType; +use light_hasher::Poseidon; +use 
light_program_test::{program_test::LightProgramTest, ProgramTestConfig}; +use light_registry::account_compression_cpi::sdk::{ + compress_proofs, create_nullify_dedup_instruction, CreateNullifyDedupInstructionInputs, +}; +use light_test_utils::e2e_test_env::init_program_test_env; +use serial_test::serial; +use solana_sdk::signature::{Keypair, Signer}; + +#[serial] +#[tokio::test] +async fn test_nullify_dedup_4_leaves() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let (mut state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + for _ in 0..4 { + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic( + &forester_keypair, + &Keypair::new().pubkey(), + None, + ) + .await + .unwrap(); + } + + ( + e2e_env.indexer.state_merkle_trees[0].clone(), + e2e_env.rpc, + ) + }; + + // Read on-chain state + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let pre_root = onchain_tree.root(); + let change_log_index = onchain_tree.changelog_index(); + + // 
Collect 4 unmarked items + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!( + items_to_nullify.len() >= 4, + "Need at least 4 items in nullifier queue, got {}", + items_to_nullify.len() + ); + + // Get proofs + let mut leaf_indices = Vec::new(); + let mut proofs = Vec::new(); + for (_, leaf) in items_to_nullify.iter().take(4) { + let leaf_index = state_tree_bundle.merkle_tree.get_leaf_index(leaf).unwrap(); + leaf_indices.push(leaf_index); + let proof: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + let proof_arr: [[u8; 32]; 16] = proof.try_into().unwrap(); + proofs.push(proof_arr); + } + + // Verify shared top node + for i in 1..4 { + assert_eq!( + proofs[0][15], proofs[i][15], + "Level 15 proof node must be shared between all leaves" + ); + } + + let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); + let (proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes) = + compress_proofs(&proof_refs).expect("compress_proofs should succeed for 4 leaves"); + + let queue_indices: [u16; 4] = [ + items_to_nullify[0].0 as u16, + items_to_nullify[1].0 as u16, + items_to_nullify[2].0 as u16, + items_to_nullify[3].0 as u16, + ]; + let leaf_indices_arr: [u32; 4] = [ + leaf_indices[0] as u32, + leaf_indices[1] as u32, + leaf_indices[2] as u32, + leaf_indices[3] as u32, + ]; + + let ix = create_nullify_dedup_instruction( + CreateNullifyDedupInstructionInputs { + authority: forester_keypair.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_indices, + leaf_indices: leaf_indices_arr, + proof_2_shared, + proof_3_source, + proof_4_source, + 
shared_top_node, + nodes, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await + .unwrap(); + + // Verify all 4 queue items marked + let nullifier_queue_post = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + for (idx, (queue_idx, _)) in items_to_nullify.iter().take(4).enumerate() { + let bucket = nullifier_queue_post + .get_bucket(*queue_idx) + .unwrap() + .unwrap(); + assert!( + bucket.sequence_number.is_some(), + "Queue item {} should be marked after nullify_dedup", + idx + ); + } + + // Verify root changed + let onchain_tree_post = + get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + assert_ne!( + pre_root, + onchain_tree_post.root(), + "Root should have changed after nullify_dedup" + ); + + // Locally update and verify root match + for &li in &leaf_indices { + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], li) + .unwrap(); + } + assert_eq!( + onchain_tree_post.root(), + state_tree_bundle.merkle_tree.root(), + "On-chain root should match local tree after nullifying all 4 leaves" + ); +} + +#[serial] +#[tokio::test] +async fn test_nullify_dedup_3_leaves() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let (mut state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + 
e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + for _ in 0..3 { + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic( + &forester_keypair, + &Keypair::new().pubkey(), + None, + ) + .await + .unwrap(); + } + + ( + e2e_env.indexer.state_merkle_trees[0].clone(), + e2e_env.rpc, + ) + }; + + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let change_log_index = onchain_tree.changelog_index(); + + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!(items_to_nullify.len() >= 3); + + let mut leaf_indices = Vec::new(); + let mut proofs = Vec::new(); + for (_, leaf) in items_to_nullify.iter().take(3) { + let leaf_index = state_tree_bundle.merkle_tree.get_leaf_index(leaf).unwrap(); + leaf_indices.push(leaf_index); + let proof: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + proofs.push(<[[u8; 32]; 16]>::try_from(proof).unwrap()); + } + + let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); + let (proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes) = + compress_proofs(&proof_refs).expect("compress_proofs should succeed for 3 leaves"); + + let ix = create_nullify_dedup_instruction( + CreateNullifyDedupInstructionInputs { + authority: forester_keypair.pubkey(), + nullifier_queue: 
state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_indices: [ + items_to_nullify[0].0 as u16, + items_to_nullify[1].0 as u16, + items_to_nullify[2].0 as u16, + 0, + ], + leaf_indices: [ + leaf_indices[0] as u32, + leaf_indices[1] as u32, + leaf_indices[2] as u32, + u32::MAX, + ], + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await + .unwrap(); + + // Verify 3 queue items marked + let nullifier_queue_post = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + for (idx, (queue_idx, _)) in items_to_nullify.iter().take(3).enumerate() { + let bucket = nullifier_queue_post + .get_bucket(*queue_idx) + .unwrap() + .unwrap(); + assert!( + bucket.sequence_number.is_some(), + "Queue item {} should be marked", + idx + ); + } + + // Locally update and verify root match + for &li in &leaf_indices { + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], li) + .unwrap(); + } + let onchain_tree_post = + get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + assert_eq!( + onchain_tree_post.root(), + state_tree_bundle.merkle_tree.root(), + ); +} + +#[serial] +#[tokio::test] +async fn test_nullify_dedup_2_leaves() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let 
(mut state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + for _ in 0..2 { + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic( + &forester_keypair, + &Keypair::new().pubkey(), + None, + ) + .await + .unwrap(); + } + + ( + e2e_env.indexer.state_merkle_trees[0].clone(), + e2e_env.rpc, + ) + }; + + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let change_log_index = onchain_tree.changelog_index(); + + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!(items_to_nullify.len() >= 2); + + let mut leaf_indices = Vec::new(); + let mut proofs = Vec::new(); + for (_, leaf) in items_to_nullify.iter().take(2) { + let leaf_index = state_tree_bundle.merkle_tree.get_leaf_index(leaf).unwrap(); + leaf_indices.push(leaf_index); + let proof: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + proofs.push(<[[u8; 32]; 16]>::try_from(proof).unwrap()); + } + + let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); + let (proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes) = + compress_proofs(&proof_refs).expect("compress_proofs 
should succeed for 2 leaves"); + + let ix = create_nullify_dedup_instruction( + CreateNullifyDedupInstructionInputs { + authority: forester_keypair.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_indices: [ + items_to_nullify[0].0 as u16, + items_to_nullify[1].0 as u16, + 0, + 0, + ], + leaf_indices: [ + leaf_indices[0] as u32, + leaf_indices[1] as u32, + u32::MAX, + u32::MAX, + ], + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await + .unwrap(); + + // Verify 2 queue items marked + let nullifier_queue_post = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + for (idx, (queue_idx, _)) in items_to_nullify.iter().take(2).enumerate() { + let bucket = nullifier_queue_post + .get_bucket(*queue_idx) + .unwrap() + .unwrap(); + assert!( + bucket.sequence_number.is_some(), + "Queue item {} should be marked", + idx + ); + } + + // Locally update and verify root match + for &li in &leaf_indices { + state_tree_bundle + .merkle_tree + .update(&[0u8; 32], li) + .unwrap(); + } + let onchain_tree_post = + get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + assert_eq!( + onchain_tree_post.root(), + state_tree_bundle.merkle_tree.root(), + ); +} + +#[serial] +#[tokio::test] +async fn test_nullify_dedup_1_leaf_fails() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + + let forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) + .await + .unwrap(); + + let 
merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + + let (state_tree_bundle, mut rpc) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester_keypair.pubkey()), + TreeType::StateV1, + ) + .await; + + e2e_env + .compress_sol_deterministic(&forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic( + &forester_keypair, + &Keypair::new().pubkey(), + None, + ) + .await + .unwrap(); + + ( + e2e_env.indexer.state_merkle_trees[0].clone(), + e2e_env.rpc, + ) + }; + + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) + .await + .unwrap() + }; + let onchain_tree = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let change_log_index = onchain_tree.changelog_index(); + + let mut items_to_nullify = Vec::new(); + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + items_to_nullify.push((i, bucket.value_bytes())); + } + } + } + assert!(!items_to_nullify.is_empty()); + + let leaf_index = state_tree_bundle + .merkle_tree + .get_leaf_index(&items_to_nullify[0].1) + .unwrap(); + let proof: Vec<[u8; 32]> = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index, false) + .unwrap(); + let proof_arr: [[u8; 32]; 16] = proof.try_into().unwrap(); + + let nodes: Vec<[u8; 32]> = proof_arr[..15].to_vec(); + let shared_top_node = proof_arr[15]; + + let ix = create_nullify_dedup_instruction( + CreateNullifyDedupInstructionInputs { + authority: forester_keypair.pubkey(), + 
nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index: change_log_index as u16, + queue_indices: [items_to_nullify[0].0 as u16, 0, 0, 0], + leaf_indices: [leaf_index as u32, u32::MAX, u32::MAX, u32::MAX], + proof_2_shared: 0, + proof_3_source: 0, + proof_4_source: 0, + shared_top_node, + nodes, + derivation: forester_keypair.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + let result = rpc + .create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) + .await; + + assert!( + result.is_err(), + "nullify_dedup with 1 leaf should fail with InvalidProofEncoding" + ); +} diff --git a/programs/registry/src/account_compression_cpi/nullify.rs b/programs/registry/src/account_compression_cpi/nullify.rs index 9e0e5d8f4e..91d021ef81 100644 --- a/programs/registry/src/account_compression_cpi/nullify.rs +++ b/programs/registry/src/account_compression_cpi/nullify.rs @@ -3,6 +3,8 @@ use account_compression::{ }; use anchor_lang::prelude::*; +use crate::errors::RegistryError; + use crate::epoch::register_epoch::ForesterEpochPda; #[derive(Accounts)] @@ -62,6 +64,38 @@ pub fn process_nullify( ) } +/// Issues a single nullify_leaves CPI for one leaf. 
+#[inline(always)] +fn nullify_single_leaf_cpi( + ctx: &Context, + signer_seeds: &[&[&[u8]]], + change_log_index: u64, + queue_index: u16, + leaf_index: u64, + proof: Vec<[u8; 32]>, +) -> Result<()> { + let accounts = account_compression::cpi::accounts::NullifyLeaves { + authority: ctx.accounts.cpi_authority.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.to_account_info()), + log_wrapper: ctx.accounts.log_wrapper.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + nullifier_queue: ctx.accounts.nullifier_queue.to_account_info(), + fee_payer: Some(ctx.accounts.authority.to_account_info()), + }; + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + account_compression::cpi::nullify_leaves( + cpi_ctx, + vec![change_log_index], + vec![queue_index], + vec![leaf_index], + vec![proof], + ) +} + #[allow(clippy::too_many_arguments)] pub fn process_nullify_2( ctx: &Context, @@ -135,3 +169,152 @@ pub fn process_nullify_2( Ok(()) } + +/// Determines proof count from leaf_indices sentinel values. +/// Returns Err(InvalidProofEncoding) if fewer than 2 leaves are specified. 
+pub fn count_from_leaf_indices(leaf_indices: &[u32; 4]) -> Result { + if leaf_indices[0] == u32::MAX || leaf_indices[1] == u32::MAX { + return err!(RegistryError::InvalidProofEncoding); + } + Ok(if leaf_indices[2] == u32::MAX { + 2 + } else if leaf_indices[3] == u32::MAX { + 3 + } else { + 4 + }) +} + +#[allow(clippy::too_many_arguments)] +pub fn process_nullify_dedup( + ctx: &Context, + count: usize, + change_log_index: u16, + queue_indices: [u16; 4], + leaf_indices: [u32; 4], + proof_2_shared: u16, + proof_3_source: u32, + proof_4_source: u32, + shared_top_node: [u8; 32], + nodes: Vec<[u8; 32]>, +) -> Result<()> { + let bump = ctx.bumps.cpi_authority; + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + + // Reconstruct proofs from dedup encoding. + let mut cursor: usize = 0; + + // proof_1: levels 0..14 from nodes[0..15] + if nodes.len() < 15 { + return err!(RegistryError::InvalidProofEncoding); + } + let mut proof_1 = [[0u8; 32]; 16]; + proof_1[..15].copy_from_slice(&nodes[cursor..cursor + 15]); + proof_1[15] = shared_top_node; + cursor += 15; + + // proof_2: bitvec proof_2_shared, bit i=1 means reuse proof_1[i], bit=0 means take next node + let mut proof_2 = [[0u8; 32]; 16]; + for i in 0..15 { + if (proof_2_shared >> i) & 1 == 1 { + proof_2[i] = proof_1[i]; + } else { + if cursor >= nodes.len() { + return err!(RegistryError::InvalidProofEncoding); + } + proof_2[i] = nodes[cursor]; + cursor += 1; + } + } + proof_2[15] = shared_top_node; + + // Issue CPIs for proof_1 and proof_2 immediately to free stack space + // before reconstructing proof_3/proof_4. 
+ let change_log_index_u64 = change_log_index as u64; + nullify_single_leaf_cpi( + ctx, + signer_seeds, + change_log_index_u64, + queue_indices[0], + leaf_indices[0] as u64, + proof_1.to_vec(), + )?; + nullify_single_leaf_cpi( + ctx, + signer_seeds, + change_log_index_u64, + queue_indices[1], + leaf_indices[1] as u64, + proof_2.to_vec(), + )?; + + // proof_3: 2 bits per level from proof_3_source + if count >= 3 { + let mut proof_3 = [[0u8; 32]; 16]; + for i in 0..15 { + let src = (proof_3_source >> (i * 2)) & 0b11; + match src { + 0b00 => proof_3[i] = proof_1[i], + 0b01 => proof_3[i] = proof_2[i], + 0b10 => { + if cursor >= nodes.len() { + return err!(RegistryError::InvalidProofEncoding); + } + proof_3[i] = nodes[cursor]; + cursor += 1; + } + _ => return err!(RegistryError::InvalidProofEncoding), + } + } + proof_3[15] = shared_top_node; + + nullify_single_leaf_cpi( + ctx, + signer_seeds, + change_log_index_u64, + queue_indices[2], + leaf_indices[2] as u64, + proof_3.to_vec(), + )?; + + // proof_4: 2 bits per level from proof_4_source + if count == 4 { + let mut proof_4 = [[0u8; 32]; 16]; + for i in 0..15 { + let src = (proof_4_source >> (i * 2)) & 0b11; + match src { + 0b00 => proof_4[i] = proof_1[i], + 0b01 => proof_4[i] = proof_2[i], + 0b10 => proof_4[i] = proof_3[i], + 0b11 => { + if cursor >= nodes.len() { + return err!(RegistryError::InvalidProofEncoding); + } + proof_4[i] = nodes[cursor]; + cursor += 1; + } + _ => unreachable!(), + } + } + proof_4[15] = shared_top_node; + + nullify_single_leaf_cpi( + ctx, + signer_seeds, + change_log_index_u64, + queue_indices[3], + leaf_indices[3] as u64, + proof_4.to_vec(), + )?; + } + } + + // Validate all nodes consumed + if cursor != nodes.len() { + return err!(RegistryError::InvalidProofEncoding); + } + + Ok(()) +} diff --git a/programs/registry/src/account_compression_cpi/sdk.rs b/programs/registry/src/account_compression_cpi/sdk.rs index 96fa63416b..7ea33e99db 100644 --- 
a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -118,10 +118,9 @@ pub fn create_nullify_2_instruction( } } -/// Returns the known accounts for populating an address lookup table -/// for nullify_2 v0 transactions. These are the accounts that don't change -/// between nullify_2 calls on the same tree. -pub fn nullify_2_lookup_table_accounts( +/// Returns the base accounts for populating an address lookup table +/// for nullify v0 transactions. +fn nullify_lookup_table_accounts_base( merkle_tree: Pubkey, nullifier_queue: Pubkey, forester_pda: Option, @@ -143,6 +142,171 @@ pub fn nullify_2_lookup_table_accounts( accounts } +/// Returns the known accounts for populating an address lookup table +/// for nullify_2 v0 transactions. +pub fn nullify_2_lookup_table_accounts( + merkle_tree: Pubkey, + nullifier_queue: Pubkey, + forester_pda: Option, +) -> Vec { + nullify_lookup_table_accounts_base(merkle_tree, nullifier_queue, forester_pda) +} + +/// Max number of 32-byte nodes in the dedup encoding vec. +/// Verified by tx size test (forester/tests/test_nullify_dedup_tx_size.rs). +/// With ALT, SetComputeUnitLimit ix, and worst-case nodes, the tx is 1230 bytes (2 byte margin). 
+pub const NULLIFY_DEDUP_MAX_NODES: usize = 28; + +#[derive(Clone, Debug, PartialEq)] +pub struct CreateNullifyDedupInstructionInputs { + pub authority: Pubkey, + pub nullifier_queue: Pubkey, + pub merkle_tree: Pubkey, + pub change_log_index: u16, + pub queue_indices: [u16; 4], + pub leaf_indices: [u32; 4], + pub proof_2_shared: u16, + pub proof_3_source: u32, + pub proof_4_source: u32, + pub shared_top_node: [u8; 32], + pub nodes: Vec<[u8; 32]>, + pub derivation: Pubkey, + pub is_metadata_forester: bool, +} + +pub fn create_nullify_dedup_instruction( + inputs: CreateNullifyDedupInstructionInputs, + epoch: u64, +) -> Instruction { + let register_program_pda = get_registered_program_pda(&crate::ID); + let registered_forester_pda = if inputs.is_metadata_forester { + None + } else { + Some(get_forester_epoch_pda_from_authority(&inputs.derivation, epoch).0) + }; + let (cpi_authority, _bump) = get_cpi_authority_pda(); + let instruction_data = crate::instruction::NullifyDedup { + change_log_index: inputs.change_log_index, + queue_indices: inputs.queue_indices, + leaf_indices: inputs.leaf_indices, + proof_2_shared: inputs.proof_2_shared, + proof_3_source: inputs.proof_3_source, + proof_4_source: inputs.proof_4_source, + shared_top_node: inputs.shared_top_node, + nodes: inputs.nodes, + }; + + let accounts = crate::accounts::NullifyLeaves { + authority: inputs.authority, + registered_forester_pda, + registered_program_pda: register_program_pda, + nullifier_queue: inputs.nullifier_queue, + merkle_tree: inputs.merkle_tree, + log_wrapper: NOOP_PUBKEY.into(), + cpi_authority, + account_compression_program: account_compression::ID, + }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} + +/// Compresses 2-4 full 16-node Merkle proofs into the dedup encoding. 
+/// Returns `(proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes)`, +/// or `None` if compression is impossible (different top nodes, too many unique nodes, or +/// fewer than 2 or more than 4 proofs). +pub fn compress_proofs( + proofs: &[&[[u8; 32]; 16]], +) -> Option<(u16, u32, u32, [u8; 32], Vec<[u8; 32]>)> { + if proofs.len() < 2 || proofs.len() > 4 { + return None; + } + + // All proofs must share the same node at index 15 + let shared_top_node = proofs[0][15]; + for p in &proofs[1..] { + if p[15] != shared_top_node { + return None; + } + } + + let mut nodes: Vec<[u8; 32]> = Vec::new(); + + // proof_1: levels 0..14 + for i in 0..15 { + nodes.push(proofs[0][i]); + } + + // proof_2: bitvec + let mut proof_2_shared: u16 = 0; + for i in 0..15 { + if proofs[1][i] == proofs[0][i] { + proof_2_shared |= 1 << i; + } else { + nodes.push(proofs[1][i]); + } + } + + // proof_3 + let mut proof_3_source: u32 = 0; + if proofs.len() >= 3 { + for i in 0..15 { + if proofs[2][i] == proofs[0][i] { + // 00 = proof_1 + } else if proofs[2][i] == proofs[1][i] { + proof_3_source |= 0b01 << (i * 2); + } else { + proof_3_source |= 0b10 << (i * 2); + nodes.push(proofs[2][i]); + } + } + } + + // proof_4 + let mut proof_4_source: u32 = 0; + if proofs.len() >= 4 { + for i in 0..15 { + if proofs[3][i] == proofs[0][i] { + // 00 = proof_1 + } else if proofs[3][i] == proofs[1][i] { + proof_4_source |= 0b01 << (i * 2); + } else if proofs[3][i] == proofs[2][i] { + proof_4_source |= 0b10 << (i * 2); + } else { + proof_4_source |= 0b11 << (i * 2); + nodes.push(proofs[3][i]); + } + } + } + + if nodes.len() > NULLIFY_DEDUP_MAX_NODES { + return None; + } + + Some(( + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + )) +} + +/// Returns the known accounts for populating an address lookup table +/// for nullify_dedup v0 transactions. Includes ComputeBudget program ID +/// since nullify_dedup transactions also include a SetComputeUnitLimit instruction. 
+pub fn nullify_dedup_lookup_table_accounts( + merkle_tree: Pubkey, + nullifier_queue: Pubkey, + forester_pda: Option, +) -> Vec { + let mut accounts = nullify_lookup_table_accounts_base(merkle_tree, nullifier_queue, forester_pda); + accounts.push(solana_sdk::compute_budget::ID); + accounts +} + #[derive(Clone, Debug, PartialEq)] pub struct CreateMigrateStateInstructionInputs { pub authority: Pubkey, @@ -729,4 +893,329 @@ mod tests { ); } } + + #[test] + fn test_nullify_dedup_instruction_data_size() { + // Worst case: max_nodes unique nodes + let instruction_data = crate::instruction::NullifyDedup { + change_log_index: 0, + queue_indices: [0; 4], + leaf_indices: [0; 4], + proof_2_shared: 0, + proof_3_source: 0, + proof_4_source: 0, + shared_top_node: [0u8; 32], + nodes: vec![[0u8; 32]; NULLIFY_DEDUP_MAX_NODES], + }; + let data = instruction_data.data(); + // 1 disc + 2 changelog + 8 queue_indices + 16 leaf_indices + 2 proof_2_shared + // + 4 proof_3_source + 4 proof_4_source + 32 shared_top_node + // + 4 vec_prefix + N*32 nodes + let expected = 1 + 2 + 8 + 16 + 2 + 4 + 4 + 32 + 4 + NULLIFY_DEDUP_MAX_NODES * 32; + assert_eq!( + data.len(), + expected, + "nullify_dedup instruction data must be exactly {} bytes, got {}", + expected, + data.len() + ); + } + + #[test] + fn test_nullify_dedup_instruction_accounts() { + let authority = Pubkey::new_unique(); + let inputs = CreateNullifyDedupInstructionInputs { + authority, + nullifier_queue: Pubkey::new_unique(), + merkle_tree: Pubkey::new_unique(), + change_log_index: 0, + queue_indices: [0, 1, 2, 3], + leaf_indices: [0, 1, 2, 3], + proof_2_shared: 0, + proof_3_source: 0, + proof_4_source: 0, + shared_top_node: [0u8; 32], + nodes: vec![[0u8; 32]; 15], + derivation: authority, + is_metadata_forester: false, + }; + let ix = create_nullify_dedup_instruction(inputs, 0); + assert_eq!(ix.accounts.len(), 8, "expected 8 accounts"); + } + + #[test] + fn test_nullify_dedup_discriminator_no_collision() { + let disc = 
crate::instruction::NullifyDedup::DISCRIMINATOR; + assert_eq!(disc.len(), 1, "nullify_dedup discriminator must be 1 byte"); + assert_eq!(disc, &[79], "nullify_dedup discriminator must be [79]"); + + // Verify no collision with nullify_2's discriminator [78] + let nullify_2_disc = crate::instruction::Nullify2::DISCRIMINATOR; + assert_ne!( + disc, nullify_2_disc, + "nullify_dedup [79] must not collide with nullify_2 [78]" + ); + + let existing: &[(&str, &[u8])] = &[ + ("InitializeProtocolConfig", crate::instruction::InitializeProtocolConfig::DISCRIMINATOR), + ("UpdateProtocolConfig", crate::instruction::UpdateProtocolConfig::DISCRIMINATOR), + ("RegisterSystemProgram", crate::instruction::RegisterSystemProgram::DISCRIMINATOR), + ("DeregisterSystemProgram", crate::instruction::DeregisterSystemProgram::DISCRIMINATOR), + ("RegisterForester", crate::instruction::RegisterForester::DISCRIMINATOR), + ("UpdateForesterPda", crate::instruction::UpdateForesterPda::DISCRIMINATOR), + ("UpdateForesterPdaWeight", crate::instruction::UpdateForesterPdaWeight::DISCRIMINATOR), + ("RegisterForesterEpoch", crate::instruction::RegisterForesterEpoch::DISCRIMINATOR), + ("FinalizeRegistration", crate::instruction::FinalizeRegistration::DISCRIMINATOR), + ("ReportWork", crate::instruction::ReportWork::DISCRIMINATOR), + ("InitializeAddressMerkleTree", crate::instruction::InitializeAddressMerkleTree::DISCRIMINATOR), + ("InitializeStateMerkleTree", crate::instruction::InitializeStateMerkleTree::DISCRIMINATOR), + ("Nullify", crate::instruction::Nullify::DISCRIMINATOR), + ("Nullify2", crate::instruction::Nullify2::DISCRIMINATOR), + ("UpdateAddressMerkleTree", crate::instruction::UpdateAddressMerkleTree::DISCRIMINATOR), + ("RolloverAddressMerkleTreeAndQueue", crate::instruction::RolloverAddressMerkleTreeAndQueue::DISCRIMINATOR), + ("RolloverStateMerkleTreeAndQueue", crate::instruction::RolloverStateMerkleTreeAndQueue::DISCRIMINATOR), + ("InitializeBatchedStateMerkleTree", 
crate::instruction::InitializeBatchedStateMerkleTree::DISCRIMINATOR), + ("BatchNullify", crate::instruction::BatchNullify::DISCRIMINATOR), + ("BatchAppend", crate::instruction::BatchAppend::DISCRIMINATOR), + ("InitializeBatchedAddressMerkleTree", crate::instruction::InitializeBatchedAddressMerkleTree::DISCRIMINATOR), + ("BatchUpdateAddressTree", crate::instruction::BatchUpdateAddressTree::DISCRIMINATOR), + ("RolloverBatchedAddressMerkleTree", crate::instruction::RolloverBatchedAddressMerkleTree::DISCRIMINATOR), + ("RolloverBatchedStateMerkleTree", crate::instruction::RolloverBatchedStateMerkleTree::DISCRIMINATOR), + ("MigrateState", crate::instruction::MigrateState::DISCRIMINATOR), + ("CreateConfigCounter", crate::instruction::CreateConfigCounter::DISCRIMINATOR), + ("CreateCompressibleConfig", crate::instruction::CreateCompressibleConfig::DISCRIMINATOR), + ("UpdateCompressibleConfig", crate::instruction::UpdateCompressibleConfig::DISCRIMINATOR), + ("PauseCompressibleConfig", crate::instruction::PauseCompressibleConfig::DISCRIMINATOR), + ("UnpauseCompressibleConfig", crate::instruction::UnpauseCompressibleConfig::DISCRIMINATOR), + ("DeprecateCompressibleConfig", crate::instruction::DeprecateCompressibleConfig::DISCRIMINATOR), + ("WithdrawFundingPool", crate::instruction::WithdrawFundingPool::DISCRIMINATOR), + ("Claim", crate::instruction::Claim::DISCRIMINATOR), + ("CompressAndClose", crate::instruction::CompressAndClose::DISCRIMINATOR), + ]; + + for (name, existing_disc) in existing { + assert!( + !existing_disc.starts_with(disc), + "nullify_dedup 1-byte discriminator {:?} collides with {name} discriminator prefix {:?}", + disc, + &existing_disc[..disc.len().min(existing_disc.len())] + ); + } + } + + #[test] + fn test_compress_proofs_round_trip() { + // Create 4 proofs with sharing patterns that fit within MAX_NODES (28). 
+ // Budget: 15 (proof_1) + 5 (proof_2 unique) + 5 (proof_3 unique) + 3 (proof_4 unique) = 28 + let shared_top = [0xCC; 32]; + let mut proof_1 = [[0u8; 32]; 16]; + let mut proof_2 = [[0u8; 32]; 16]; + let mut proof_3 = [[0u8; 32]; 16]; + let mut proof_4 = [[0u8; 32]; 16]; + + for i in 0..15 { + proof_1[i] = [i as u8 + 1; 32]; + } + proof_1[15] = shared_top; + + // proof_2: 10 shared with proof_1, 5 unique (levels 0-4) + for i in 0..15 { + if i < 5 { + proof_2[i] = [i as u8 + 100; 32]; // unique + } else { + proof_2[i] = proof_1[i]; // shared + } + } + proof_2[15] = shared_top; + + // proof_3: 5 from proof_1, 5 new (levels 5-9), 5 from proof_2 + for i in 0..15 { + if i < 5 { + proof_3[i] = proof_1[i]; // same as proof_1 + } else if i < 10 { + proof_3[i] = [i as u8 + 200; 32]; // new + } else { + proof_3[i] = proof_2[i]; // same as proof_2 (and proof_1) + } + } + proof_3[15] = shared_top; + + // proof_4: 4 from proof_1, 4 from proof_2, 4 from proof_3, 3 new + for i in 0..15 { + if i < 4 { + proof_4[i] = proof_1[i]; // from proof_1 + } else if i < 8 { + proof_4[i] = proof_2[i]; // from proof_2 + } else if i < 12 { + proof_4[i] = proof_3[i]; // from proof_3 + } else { + proof_4[i] = [(i as u8).wrapping_add(250); 32]; // new + } + } + proof_4[15] = shared_top; + + let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2, &proof_3, &proof_4]; + let result = compress_proofs(&proofs); + assert!(result.is_some(), "compress_proofs should succeed"); + let (p2_shared, p3_source, p4_source, top, nodes) = result.unwrap(); + + // Simulate on-chain reconstruction + let mut cursor = 0usize; + + // Reconstruct proof_1 + let mut r_proof_1 = [[0u8; 32]; 16]; + r_proof_1[..15].copy_from_slice(&nodes[cursor..cursor + 15]); + r_proof_1[15] = top; + cursor += 15; + assert_eq!(r_proof_1, proof_1); + + // Reconstruct proof_2 + let mut r_proof_2 = [[0u8; 32]; 16]; + for i in 0..15 { + if (p2_shared >> i) & 1 == 1 { + r_proof_2[i] = r_proof_1[i]; + } else { + r_proof_2[i] = nodes[cursor]; + 
cursor += 1; + } + } + r_proof_2[15] = top; + assert_eq!(r_proof_2, proof_2); + + // Reconstruct proof_3 + let mut r_proof_3 = [[0u8; 32]; 16]; + for i in 0..15 { + let src = (p3_source >> (i * 2)) & 0b11; + match src { + 0b00 => r_proof_3[i] = r_proof_1[i], + 0b01 => r_proof_3[i] = r_proof_2[i], + 0b10 => { + r_proof_3[i] = nodes[cursor]; + cursor += 1; + } + _ => panic!("unexpected source 0b11 for proof_3"), + } + } + r_proof_3[15] = top; + assert_eq!(r_proof_3, proof_3); + + // Reconstruct proof_4 + let mut r_proof_4 = [[0u8; 32]; 16]; + for i in 0..15 { + let src = (p4_source >> (i * 2)) & 0b11; + match src { + 0b00 => r_proof_4[i] = r_proof_1[i], + 0b01 => r_proof_4[i] = r_proof_2[i], + 0b10 => r_proof_4[i] = r_proof_3[i], + 0b11 => { + r_proof_4[i] = nodes[cursor]; + cursor += 1; + } + _ => unreachable!(), + } + } + r_proof_4[15] = top; + assert_eq!(r_proof_4, proof_4); + + assert_eq!(cursor, nodes.len(), "all nodes should be consumed"); + } + + #[test] + fn test_compress_proofs_returns_none_when_too_many_nodes() { + // All 4 proofs with completely unique nodes at every level = 15 + 15 + 15 + 15 = 60 nodes + let shared_top = [0xCC; 32]; + let make_proof = |base: u8| -> [[u8; 32]; 16] { + let mut p = [[0u8; 32]; 16]; + for i in 0..15 { + p[i] = [base.wrapping_add(i as u8); 32]; + } + p[15] = shared_top; + p + }; + let p1 = make_proof(1); + let p2 = make_proof(50); + let p3 = make_proof(100); + let p4 = make_proof(150); + + let proofs: Vec<&[[u8; 32]; 16]> = vec![&p1, &p2, &p3, &p4]; + let result = compress_proofs(&proofs); + assert!( + result.is_none(), + "should return None when no sharing leads to > MAX_NODES" + ); + } + + #[test] + fn test_compress_proofs_2_proofs() { + let shared_top = [0xCC; 32]; + let mut proof_1 = [[0u8; 32]; 16]; + let mut proof_2 = [[0u8; 32]; 16]; + for i in 0..15 { + proof_1[i] = [i as u8 + 1; 32]; + // Share half the nodes + if i % 2 == 0 { + proof_2[i] = proof_1[i]; + } else { + proof_2[i] = [i as u8 + 100; 32]; + } + } + 
proof_1[15] = shared_top; + proof_2[15] = shared_top; + + let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2]; + let result = compress_proofs(&proofs); + assert!(result.is_some(), "2 proofs should compress"); + let (p2_shared, p3_source, p4_source, top, nodes) = result.unwrap(); + + // proof_3_source and proof_4_source should be 0 (unused) + assert_eq!(p3_source, 0); + assert_eq!(p4_source, 0); + assert_eq!(top, shared_top); + + // Verify proof_2_shared bitvec + for i in 0..15 { + if i % 2 == 0 { + assert_eq!((p2_shared >> i) & 1, 1, "level {} should be shared", i); + } else { + assert_eq!((p2_shared >> i) & 1, 0, "level {} should not be shared", i); + } + } + + // 15 for proof_1 + 7 unique for proof_2 (odd indices 1,3,5,7,9,11,13) + assert_eq!(nodes.len(), 15 + 7); + } + + #[test] + fn test_compress_proofs_3_proofs() { + let shared_top = [0xCC; 32]; + let mut proof_1 = [[0u8; 32]; 16]; + let mut proof_2 = [[0u8; 32]; 16]; + let mut proof_3 = [[0u8; 32]; 16]; + for i in 0..15 { + proof_1[i] = [i as u8 + 1; 32]; + // proof_2 shares some levels with proof_1 to stay within MAX_NODES + if i % 2 == 0 { + proof_2[i] = proof_1[i]; // shared + } else { + proof_2[i] = [i as u8 + 50; 32]; + } + // proof_3 alternates between proof_1 and proof_2 + if i % 3 == 0 { + proof_3[i] = proof_1[i]; + } else if i % 3 == 1 { + proof_3[i] = proof_2[i]; + } else { + proof_3[i] = [i as u8 + 100; 32]; // new + } + } + proof_1[15] = shared_top; + proof_2[15] = shared_top; + proof_3[15] = shared_top; + + let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2, &proof_3]; + let result = compress_proofs(&proofs); + assert!(result.is_some(), "3 proofs should compress"); + let (_, _, p4_source, _, _) = result.unwrap(); + assert_eq!(p4_source, 0, "proof_4_source should be 0 for 3 proofs"); + } } diff --git a/programs/registry/src/errors.rs b/programs/registry/src/errors.rs index 7c445d2ca3..ce2c5699b0 100644 --- a/programs/registry/src/errors.rs +++ b/programs/registry/src/errors.rs @@ 
-42,4 +42,6 @@ pub enum RegistryError { BorrowAccountDataFailed, #[msg("Failed to serialize instruction data")] SerializationFailed, + #[msg("Invalid proof dedup encoding")] + InvalidProofEncoding, } diff --git a/programs/registry/src/lib.rs b/programs/registry/src/lib.rs index 38ddc738a8..11452fadbe 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -462,6 +462,48 @@ pub mod light_registry { ) } + /// Nullifies 2-4 leaves in a single instruction via sequential CPIs. + /// Uses proof deduplication: nearby leaves share Merkle proof nodes at + /// common ancestor levels. The encoding stores each unique node once and + /// uses bitvecs/2-bit source fields to reconstruct all proofs on-chain. + /// All leaves must share the same subtree at level 15 (shared_top_node). + #[allow(clippy::too_many_arguments)] + #[instruction(discriminator = [79])] + pub fn nullify_dedup<'info>( + ctx: Context<'_, '_, '_, 'info, NullifyLeaves<'info>>, + change_log_index: u16, + queue_indices: [u16; 4], + leaf_indices: [u32; 4], + proof_2_shared: u16, + proof_3_source: u32, + proof_4_source: u32, + shared_top_node: [u8; 32], + nodes: Vec<[u8; 32]>, + ) -> Result<()> { + let metadata = ctx.accounts.merkle_tree.load()?.metadata; + let count = account_compression_cpi::nullify::count_from_leaf_indices(&leaf_indices)?; + check_forester( + &metadata, + ctx.accounts.authority.key(), + ctx.accounts.nullifier_queue.key(), + &mut ctx.accounts.registered_forester_pda, + count as u64 * DEFAULT_WORK_V1, + )?; + + process_nullify_dedup( + &ctx, + count, + change_log_index, + queue_indices, + leaf_indices, + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + ) + } + #[allow(clippy::too_many_arguments)] pub fn update_address_merkle_tree( ctx: Context, From 3365adcbb2ad4c7192b1b7d2b5b685c75fd010e8 Mon Sep 17 00:00:00 2001 From: ananas Date: Tue, 17 Mar 2026 22:50:21 +0000 Subject: [PATCH 03/16] feat: forester dedup integration with min_queue_items 
threshold, versioned transactions, and tx size fix - Add min_queue_items config (CLI --min-queue-items, default 5000) to delay V1 state nullification processing until enough items accumulate for optimal dedup grouping - Integrate nullify_dedup into forester: group_state_items_for_dedup greedy algorithm forms groups of 4, 3, 2 with shared proof compression (70% savings observed) - Support versioned transactions with address lookup tables for dedup instructions - Reduce NULLIFY_DEDUP_MAX_NODES from 28 to 27 to fit within 1232-byte tx limit when both SetComputeUnitLimit and SetComputeUnitPrice are included - Add CompressedProofs struct replacing tuple return from compress_proofs - Remove nullify_2 instruction (superseded by nullify_dedup) - Add slot advancement in e2e test for surfpool offline mode - Add dedup grouping log assertion in e2e test --- forester/justfile | 4 + forester/src/cli.rs | 8 + forester/src/config.rs | 6 + forester/src/epoch_manager.rs | 114 ++++- forester/src/processor/v1/config.rs | 4 + forester/src/processor/v1/helpers.rs | 480 +++++++++++++++++- forester/src/processor/v1/send_transaction.rs | 82 +-- forester/src/processor/v1/tx_builder.rs | 20 +- forester/src/smart_transaction.rs | 45 +- forester/tests/e2e_test.rs | 98 +++- forester/tests/legacy/test_utils.rs | 2 + forester/tests/priority_fee_test.rs | 1 + forester/tests/test_nullify_2_tx_size.rs | 108 ---- forester/tests/test_nullify_dedup_tx_size.rs | 17 +- forester/tests/test_utils.rs | 1 + .../registry-test/tests/nullify_2.rs | 232 --------- .../registry-test/tests/nullify_dedup.rs | 113 ++--- .../src/account_compression_cpi/nullify.rs | 78 +-- .../src/account_compression_cpi/sdk.rs | 411 +++++++-------- programs/registry/src/lib.rs | 42 -- .../tests/integration_tests.rs | 2 +- 21 files changed, 1025 insertions(+), 843 deletions(-) delete mode 100644 forester/tests/test_nullify_2_tx_size.rs delete mode 100644 program-tests/registry-test/tests/nullify_2.rs diff --git a/forester/justfile 
b/forester/justfile index ad7798ecae..95b5c938b3 100644 --- a/forester/justfile +++ b/forester/justfile @@ -20,6 +20,10 @@ build-test-deps: test: build-test-deps cargo test --package forester e2e_test -- --nocapture +# Run e2e test without rebuilding SBF programs +local: + cargo test --package forester e2e_test -- --nocapture + # Builds csdk-anchor-full-derived-test program for compressible tests build-compressible-test-deps: cargo build-sbf --manifest-path ../sdk-tests/csdk-anchor-full-derived-test/Cargo.toml diff --git a/forester/src/cli.rs b/forester/src/cli.rs index 467b311604..3a22a633c7 100644 --- a/forester/src/cli.rs +++ b/forester/src/cli.rs @@ -287,6 +287,14 @@ pub struct StartArgs { )] pub lookup_table_address: Option, + #[arg( + long, + env = "MIN_QUEUE_ITEMS", + default_value = "5000", + help = "Minimum queue items before processing V1 state nullifications. Delays processing to allow dedup grouping. Only applies when lookup_table_address is set." + )] + pub min_queue_items: Option, + #[arg( long, env = "API_SERVER_PORT", diff --git a/forester/src/config.rs b/forester/src/config.rs index 550828b7c3..6043c3151a 100644 --- a/forester/src/config.rs +++ b/forester/src/config.rs @@ -30,6 +30,9 @@ pub struct ForesterConfig { pub compressible_config: Option, /// Address lookup table for versioned transactions. If None, legacy transactions are used. pub lookup_table_address: Option, + /// Minimum queue items before processing V1 state nullifications. + /// Delays processing to allow dedup grouping. Only applies when lookup_table_address is set. 
+ pub min_queue_items: Option, } #[derive(Debug, Clone)] @@ -421,6 +424,7 @@ impl ForesterConfig { }) }) .transpose()?, + min_queue_items: args.min_queue_items, }) } @@ -475,6 +479,7 @@ impl ForesterConfig { state_tree_data: vec![], compressible_config: None, lookup_table_address: None, + min_queue_items: None, }) } } @@ -495,6 +500,7 @@ impl Clone for ForesterConfig { state_tree_data: self.state_tree_data.clone(), compressible_config: self.compressible_config.clone(), lookup_table_address: self.lookup_table_address, + min_queue_items: self.min_queue_items, } } } diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index f52efa1b13..d4e069ca63 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -275,7 +275,7 @@ pub struct EpochManager { mint_tracker: Option>, /// Cached zkp_batch_size per tree to filter queue updates below threshold zkp_batch_sizes: Arc>, - address_lookup_tables: Arc>, + address_lookup_tables: Arc>>, heartbeat: Arc, run_id: Arc, /// Per-epoch registration trackers to coordinate re-finalization when new foresters register mid-epoch @@ -328,7 +328,7 @@ impl EpochManager { compressible_tracker: Option>, pda_tracker: Option>, mint_tracker: Option>, - address_lookup_tables: Arc>, + address_lookup_tables: Arc>>, heartbeat: Arc, run_id: String, ) -> Result { @@ -1096,6 +1096,21 @@ impl EpochManager { debug!("Recovered registration info for epoch {}", epoch); update_epoch_registered(epoch); + // Extend ALT with new forester epoch PDA if ALT is configured + let forester_epoch_pda_pubkey = + get_forester_epoch_pda_from_authority(&self.config.derivation_pubkey, epoch).0; + if let Err(e) = self + .extend_alt_with_forester_pda(forester_epoch_pda_pubkey) + .await + { + warn!( + event = "extend_alt_failed", + epoch, + error = ?e, + "Failed to extend ALT with forester PDA, continuing with static account" + ); + } + // Wait for the active phase registration_info = match 
self.wait_for_active_phase(®istration_info).await? { Some(info) => info, @@ -1380,6 +1395,51 @@ impl EpochManager { } } + async fn extend_alt_with_forester_pda(&self, forester_epoch_pda: Pubkey) -> anyhow::Result<()> { + let alt_address = match self.config.lookup_table_address { + Some(addr) => addr, + None => return Ok(()), + }; + + // Check if the PDA is already in the ALT + { + let alt = self.address_lookup_tables.read().await; + if alt + .iter() + .any(|t| t.addresses.contains(&forester_epoch_pda)) + { + return Ok(()); + } + } + + let extend_ix = light_client::rpc::lut::instruction::extend_lookup_table( + alt_address, + self.config.payer_keypair.pubkey(), + Some(self.config.payer_keypair.pubkey()), + vec![forester_epoch_pda], + ); + let payer_pubkey = self.config.payer_keypair.pubkey(); + let mut rpc = self.rpc_pool.get_connection().await?; + rpc.create_and_send_transaction(&[extend_ix], &payer_pubkey, &[&self.config.payer_keypair]) + .await + .map_err(|e| anyhow::anyhow!("Failed to extend ALT: {e}"))?; + + // Reload the ALT from on-chain + let updated_lut = load_lookup_table_async(&*rpc, alt_address).await?; + info!( + event = "alt_extended", + lookup_table = %alt_address, + new_address = %forester_epoch_pda, + address_count = updated_lut.addresses.len(), + "Extended ALT with forester epoch PDA" + ); + + let mut alt = self.address_lookup_tables.write().await; + *alt = vec![updated_lut]; + + Ok(()) + } + async fn recover_registration_info_internal( &self, epoch: u64, @@ -1514,13 +1574,14 @@ impl EpochManager { }; let payer = self.config.payer_keypair.pubkey(); let signers = [&self.config.payer_keypair]; + let alt_guard = self.address_lookup_tables.read().await; send_smart_transaction( &mut *rpc, SendSmartTransactionConfig { instructions: vec![ix], payer: &payer, signers: &signers, - address_lookup_tables: &self.address_lookup_tables, + address_lookup_tables: &alt_guard, compute_budget: ComputeBudgetConfig { compute_unit_price: priority_fee, compute_unit_limit: 
Some(self.config.transaction_config.cu_limit), @@ -1925,13 +1986,14 @@ impl EpochManager { }; let payer = self.config.payer_keypair.pubkey(); let signers = [&self.config.payer_keypair]; + let alt_guard = self.address_lookup_tables.read().await; match send_smart_transaction( &mut *rpc, SendSmartTransactionConfig { instructions: vec![ix], payer: &payer, signers: &signers, - address_lookup_tables: &self.address_lookup_tables, + address_lookup_tables: &alt_guard, compute_budget: ComputeBudgetConfig { compute_unit_price: priority_fee, compute_unit_limit: Some(self.config.transaction_config.cu_limit), @@ -2985,7 +3047,7 @@ impl EpochManager { return Ok(0); }; - let batched_tx_config = SendBatchedTransactionsConfig { + let mut batched_tx_config = SendBatchedTransactionsConfig { num_batches: 1, build_transaction_batch_config: BuildTransactionBatchConfig { batch_size: self.config.transaction_config.legacy_ixs_per_tx as u64, @@ -3005,12 +3067,18 @@ impl EpochManager { ), confirmation_max_attempts: self.config.transaction_config.confirmation_max_attempts as usize, + min_queue_items: None, // set below after reading ALT }; + let alt_snapshot = self.address_lookup_tables.read().await.clone(); + if !alt_snapshot.is_empty() { + batched_tx_config.min_queue_items = self.config.min_queue_items; + } let transaction_builder = Arc::new(EpochManagerTransactions::new( self.rpc_pool.clone(), epoch_info.epoch, self.tx_cache.clone(), + alt_snapshot, )); let num_sent = send_batched_transactions( @@ -3056,6 +3124,7 @@ impl EpochManager { input_queue_hint: Option, output_queue_hint: Option, eligibility_end: Option, + address_lookup_tables: Arc>, ) -> BatchContext { let default_prover_url = "http://127.0.0.1:3001".to_string(); let eligibility_end = eligibility_end.unwrap_or(0); @@ -3105,7 +3174,7 @@ impl EpochManager { output_queue_hint, num_proof_workers: self.config.transaction_config.max_concurrent_batches, forester_eligibility_end_slot: Arc::new(AtomicU64::new(eligibility_end)), - 
address_lookup_tables: self.address_lookup_tables.clone(), + address_lookup_tables, transaction_policy: self.transaction_policy(), max_batches_per_tree: self.config.transaction_config.max_batches_per_tree, } @@ -3211,7 +3280,15 @@ impl EpochManager { } // No existing processor - create new one - let batch_context = self.build_batch_context(epoch_info, tree_accounts, None, None, None); + let alt_guard = self.address_lookup_tables.read().await; + let batch_context = self.build_batch_context( + epoch_info, + tree_accounts, + None, + None, + None, + Arc::new(alt_guard.clone()), + ); let processor = Arc::new(Mutex::new( QueueProcessor::new(batch_context, StateTreeStrategy).await?, )); @@ -3265,7 +3342,15 @@ impl EpochManager { } // No existing processor - create new one - let batch_context = self.build_batch_context(epoch_info, tree_accounts, None, None, None); + let alt_guard = self.address_lookup_tables.read().await; + let batch_context = self.build_batch_context( + epoch_info, + tree_accounts, + None, + None, + None, + Arc::new(alt_guard.clone()), + ); let processor = Arc::new(Mutex::new( QueueProcessor::new(batch_context, AddressTreeStrategy).await?, )); @@ -3842,13 +3927,14 @@ impl EpochManager { let instruction_count = instructions.len(); let payer = self.config.payer_keypair.pubkey(); let signers = [&self.config.payer_keypair]; + let alt_guard = self.address_lookup_tables.read().await; match send_smart_transaction( &mut *rpc, SendSmartTransactionConfig { instructions, payer: &payer, signers: &signers, - address_lookup_tables: &self.address_lookup_tables, + address_lookup_tables: &alt_guard, compute_budget: ComputeBudgetConfig { compute_unit_price: priority_fee, compute_unit_limit: Some(self.config.transaction_config.cu_limit), @@ -4017,13 +4103,14 @@ impl EpochManager { .await?; let payer = self.config.payer_keypair.pubkey(); let signers = [&self.config.payer_keypair]; + let alt_guard = self.address_lookup_tables.read().await; match send_smart_transaction( &mut 
rpc, SendSmartTransactionConfig { instructions: vec![ix], payer: &payer, signers: &signers, - address_lookup_tables: &self.address_lookup_tables, + address_lookup_tables: &alt_guard, compute_budget: ComputeBudgetConfig { compute_unit_price: priority_fee, compute_unit_limit: Some(self.config.transaction_config.cu_limit), @@ -4468,19 +4555,19 @@ pub async fn run_service( address_count = lut.addresses.len(), "Loaded lookup table" ); - Arc::new(vec![lut]) + Arc::new(tokio::sync::RwLock::new(vec![lut])) } Err(e) => { debug!( "Lookup table {} not available: {}. Using legacy transactions.", lut_address, e ); - Arc::new(Vec::new()) + Arc::new(tokio::sync::RwLock::new(Vec::new())) } } } else { debug!("No lookup table address configured. Using legacy transactions."); - Arc::new(Vec::new()) + Arc::new(tokio::sync::RwLock::new(Vec::new())) } }; @@ -4650,6 +4737,7 @@ mod tests { state_tree_data: vec![], compressible_config: None, lookup_table_address: None, + min_queue_items: None, } } diff --git a/forester/src/processor/v1/config.rs b/forester/src/processor/v1/config.rs index f2ee05f353..8af88b6e25 100644 --- a/forester/src/processor/v1/config.rs +++ b/forester/src/processor/v1/config.rs @@ -19,6 +19,10 @@ pub struct SendBatchedTransactionsConfig { pub light_slot_length: u64, pub confirmation_poll_interval: std::time::Duration, pub confirmation_max_attempts: usize, + /// Minimum number of queue items required before processing begins. + /// Only applies to StateV1 trees. When `None`, processing starts immediately. + /// When the timeout deadline is near, this threshold is ignored to prevent starvation. 
+ pub min_queue_items: Option, } #[derive(Debug, Clone, Copy)] diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs index d980b02e32..a673d44be6 100644 --- a/forester/src/processor/v1/helpers.rs +++ b/forester/src/processor/v1/helpers.rs @@ -8,15 +8,20 @@ use account_compression::{ }, }; use forester_utils::{rpc_pool::SolanaRpcPool, utils::wait_for_indexer}; -use light_client::{indexer::Indexer, rpc::Rpc}; +use light_client::{ + indexer::{Indexer, MerkleProof}, + rpc::Rpc, +}; use light_compressed_account::TreeType; use light_registry::account_compression_cpi::sdk::{ - create_nullify_instruction, create_update_address_merkle_tree_instruction, - CreateNullifyInstructionInputs, UpdateAddressMerkleTreeInstructionInputs, + compress_proofs, create_nullify_dedup_instruction, create_nullify_instruction, + create_update_address_merkle_tree_instruction, CompressedProofs, + CreateNullifyDedupInstructionInputs, CreateNullifyInstructionInputs, + UpdateAddressMerkleTreeInstructionInputs, }; use solana_program::instruction::Instruction; use tokio::time::Instant; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use crate::{ logging::should_emit_rate_limited_warning, @@ -39,6 +44,7 @@ pub async fn fetch_proofs_and_create_instructions( pool: Arc>, epoch: u64, work_items: &[WorkItem], + use_dedup: bool, ) -> crate::Result<(Vec, Vec)> { let mut proofs = Vec::new(); let mut instructions = vec![]; @@ -372,25 +378,457 @@ pub async fn fetch_proofs_and_create_instructions( )); } - for (item, proof) in state_items.iter().zip(state_proofs.into_iter()) { - proofs.push(MerkleProofType::StateProof(proof.clone())); - - let instruction = create_nullify_instruction( - CreateNullifyInstructionInputs { - nullifier_queue: item.tree_account.queue, - merkle_tree: item.tree_account.merkle_tree, - change_log_indices: vec![proof.root_seq % STATE_MERKLE_TREE_CHANGELOG], - leaves_queue_indices: vec![item.queue_item_data.index as u16], - indices: 
vec![proof.leaf_index], - proofs: vec![proof.proof.clone()], - authority, - derivation, - is_metadata_forester: false, - }, - epoch, + let mut items_with_proofs: Vec<(&WorkItem, MerkleProof)> = state_items + .iter() + .zip(state_proofs.into_iter()) + .map(|(item, proof)| (*item, proof)) + .collect(); + + if use_dedup && items_with_proofs.len() >= 2 { + let groups = group_state_items_for_dedup(&mut items_with_proofs); + + // Push proofs in sorted order (after grouping may have sorted) + for (_, proof) in items_with_proofs.iter() { + proofs.push(MerkleProofType::StateProof(proof.clone())); + } + + let mut count_1 = 0usize; + let mut count_2 = 0usize; + let mut count_3 = 0usize; + let mut count_4 = 0usize; + for g in &groups { + match g.len() { + 1 => count_1 += 1, + 2 => count_2 += 1, + 3 => count_3 += 1, + 4 => count_4 += 1, + _ => {} + } + } + let total_leaves = items_with_proofs.len(); + let total_instructions = groups.len(); + let dedup_savings_pct = if total_leaves > 0 { + ((total_leaves - total_instructions) as f64 / total_leaves as f64 * 100.0) as u32 + } else { + 0 + }; + info!( + event = "v1_nullify_dedup_grouping", + total_leaves, + groups_of_4 = count_4, + groups_of_3 = count_3, + groups_of_2 = count_2, + singletons = count_1, + total_instructions, + dedup_savings_pct, + "State nullify dedup grouping complete" ); - instructions.push(instruction); + + for group_indices in groups { + if group_indices.len() == 1 { + let (item, proof) = &items_with_proofs[group_indices[0]]; + instructions.push(build_nullify_instruction( + item, proof, authority, derivation, epoch, + )); + } else { + let group_proofs: Vec<[[u8; 32]; 16]> = group_indices + .iter() + .map(|&idx| { + let proof = &items_with_proofs[idx].1.proof; + let arr: [[u8; 32]; 16] = proof.as_slice().try_into().map_err(|_| { + anyhow::anyhow!("proof has {} nodes, expected 16", proof.len()) + })?; + Ok(arr) + }) + .collect::>>()?; + let proof_refs: Vec<&[[u8; 32]; 16]> = group_proofs.iter().collect(); + let 
CompressedProofs { + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + } = compress_proofs(&proof_refs).ok_or_else(|| { + anyhow::anyhow!( + "compress_proofs failed for group that passed try_compress_group" + ) + })?; + + let first_item = &items_with_proofs[group_indices[0]]; + let change_log_index = (first_item.1.root_seq % STATE_MERKLE_TREE_CHANGELOG) as u16; + + let mut queue_indices = [0u16; 4]; + let mut leaf_indices = [u32::MAX; 4]; + for (slot, &idx) in group_indices.iter().enumerate() { + let (item, proof) = &items_with_proofs[idx]; + queue_indices[slot] = item.queue_item_data.index as u16; + leaf_indices[slot] = proof.leaf_index as u32; + } + + let node_count = nodes.len(); + let instruction = create_nullify_dedup_instruction( + CreateNullifyDedupInstructionInputs { + authority, + nullifier_queue: first_item.0.tree_account.queue, + merkle_tree: first_item.0.tree_account.merkle_tree, + change_log_index, + queue_indices, + leaf_indices, + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + derivation, + is_metadata_forester: false, + }, + epoch, + ); + debug!( + event = "v1_nullify_dedup_instruction", + group_size = group_indices.len(), + node_count, + ix_data_bytes = instruction.data.len(), + "Created nullify_dedup instruction" + ); + instructions.push(instruction); + } + } + } else { + for (_, proof) in items_with_proofs.iter() { + proofs.push(MerkleProofType::StateProof(proof.clone())); + } + for (item, proof) in items_with_proofs.iter() { + instructions.push(build_nullify_instruction( + item, proof, authority, derivation, epoch, + )); + } } Ok((proofs, instructions)) } + +fn build_nullify_instruction( + item: &WorkItem, + proof: &MerkleProof, + authority: Pubkey, + derivation: Pubkey, + epoch: u64, +) -> Instruction { + create_nullify_instruction( + CreateNullifyInstructionInputs { + nullifier_queue: item.tree_account.queue, + merkle_tree: item.tree_account.merkle_tree, + change_log_indices: 
vec![proof.root_seq % STATE_MERKLE_TREE_CHANGELOG], + leaves_queue_indices: vec![item.queue_item_data.index as u16], + indices: vec![proof.leaf_index], + proofs: vec![proof.proof.clone()], + authority, + derivation, + is_metadata_forester: false, + }, + epoch, + ) +} + +/// Groups sorted (WorkItem, MerkleProof) pairs for dedup nullification. +/// Returns a vec of groups: each group is a vec of indices into `items_with_proofs` +/// that can be packed into a single nullify_dedup instruction (2-4 items), +/// or a singleton for regular nullify. +fn group_state_items_for_dedup( + items_with_proofs: &mut [(&WorkItem, MerkleProof)], +) -> Vec> { + items_with_proofs.sort_by_key(|(_, proof)| proof.leaf_index); + + let n = items_with_proofs.len(); + let mut groups = Vec::new(); + let mut i = 0; + + while i < n { + if i + 4 <= n && try_compress_group(items_with_proofs, i, 4).is_some() { + groups.push((i..i + 4).collect()); + i += 4; + } else if i + 3 <= n && try_compress_group(items_with_proofs, i, 3).is_some() { + groups.push((i..i + 3).collect()); + i += 3; + } else if i + 2 <= n && try_compress_group(items_with_proofs, i, 2).is_some() { + groups.push((i..i + 2).collect()); + i += 2; + } else { + groups.push(vec![i]); + i += 1; + } + } + + groups +} + +/// Attempt to compress a group of proofs starting at `start` with `count` items. +/// Returns the compression result if successful. 
+fn try_compress_group( + items_with_proofs: &[(&WorkItem, MerkleProof)], + start: usize, + count: usize, +) -> Option { + let proof_arrays: Vec<[[u8; 32]; 16]> = (start..start + count) + .map(|idx| items_with_proofs[idx].1.proof.as_slice().try_into().ok()) + .collect::>>()?; + let refs: Vec<&[[u8; 32]; 16]> = proof_arrays.iter().collect(); + compress_proofs(&refs) +} + +#[cfg(test)] +mod tests { + use forester_utils::forester_epoch::TreeAccounts; + use light_compressed_account::TreeType; + use solana_sdk::pubkey::Pubkey; + + use super::*; + use crate::queue_helpers::QueueItemData; + + fn make_work_item() -> WorkItem { + WorkItem { + tree_account: TreeAccounts { + merkle_tree: Pubkey::new_unique(), + queue: Pubkey::new_unique(), + tree_type: TreeType::StateV1, + is_rolledover: false, + owner: Pubkey::new_unique(), + }, + queue_item_data: QueueItemData { + hash: [0u8; 32], + index: 0, + }, + } + } + + /// Create a 16-node proof where all proofs share the same top node (index 15) + /// but lower nodes differ unless leaves are in the same subtree. + fn make_proof(leaf_index: u64, shared_top: [u8; 32]) -> MerkleProof { + let mut proof = [[0u8; 32]; 16]; + // Set unique values per leaf for levels 0..15 + for (level, slot) in proof.iter_mut().enumerate().take(15) { + let mut node = [0u8; 32]; + node[0..8].copy_from_slice(&leaf_index.to_le_bytes()); + node[8] = level as u8; + *slot = node; + } + // All proofs share the same top node + proof[15] = shared_top; + MerkleProof { + hash: [0u8; 32], + leaf_index, + merkle_tree: Pubkey::new_unique(), + proof: proof.to_vec(), + root_seq: 100, + root: [0u8; 32], + } + } + + /// Create proofs that share sibling nodes so compress_proofs succeeds. + /// Adjacent leaves (leaf_index differing only in low bits) share many proof nodes. 
+ fn make_compressible_proofs(leaf_indices: &[u64]) -> Vec { + let shared_top = [0xFFu8; 32]; + let base_proof = { + let mut p = [[0u8; 32]; 16]; + for (level, slot) in p.iter_mut().enumerate().take(15) { + let mut node = [0u8; 32]; + node[0] = level as u8; + node[1] = 0xAA; + *slot = node; + } + p[15] = shared_top; + p + }; + + leaf_indices + .iter() + .map(|&li| { + // All proofs share the same nodes (maximally compressible). + // Only the leaf_index differs. + MerkleProof { + hash: [0u8; 32], + leaf_index: li, + merkle_tree: Pubkey::new_unique(), + proof: base_proof.to_vec(), + root_seq: 100, + root: [0u8; 32], + } + }) + .collect() + } + + /// Describes expected grouping result for assertion. + #[derive(Debug, PartialEq)] + struct GroupingResult { + group_sizes: Vec, + } + + impl GroupingResult { + fn from_groups(groups: &[Vec]) -> Self { + Self { + group_sizes: groups.iter().map(|g| g.len()).collect(), + } + } + } + + #[test] + fn test_group_dedup_empty() { + let mut items: Vec<(&WorkItem, MerkleProof)> = vec![]; + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![] + }, + "Empty input should produce empty grouping" + ); + } + + #[test] + fn test_group_dedup_single_item() { + let work_item = make_work_item(); + let proof = make_proof(0, [0xFFu8; 32]); + let mut items: Vec<(&WorkItem, MerkleProof)> = vec![(&work_item, proof)]; + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![1] + }, + "Single item should produce one singleton group" + ); + } + + #[test] + fn test_group_dedup_2_compressible() { + let work_items: Vec = (0..2).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + 
GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![2] + }, + "2 compressible leaves should form 1 group of 2" + ); + } + + #[test] + fn test_group_dedup_3_compressible() { + let work_items: Vec = (0..3).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![3] + }, + "3 compressible leaves should form 1 group of 3" + ); + } + + #[test] + fn test_group_dedup_4_compressible() { + let work_items: Vec = (0..4).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2, 3]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![4] + }, + "4 compressible leaves should form 1 group of 4" + ); + } + + #[test] + fn test_group_dedup_5_compressible_makes_4_plus_1() { + let work_items: Vec = (0..5).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2, 3, 4]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![4, 1] + }, + "5 compressible leaves should form group of 4 + singleton" + ); + } + + #[test] + fn test_group_dedup_6_compressible_makes_4_plus_2() { + let work_items: Vec = (0..6).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2, 3, 4, 5]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + 
GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![4, 2] + }, + "6 compressible leaves should form group of 4 + group of 2" + ); + } + + #[test] + fn test_group_dedup_incompressible_becomes_singletons() { + let shared_top = [0xFFu8; 32]; + let work_items: Vec = (0..3).map(|_| make_work_item()).collect(); + // Each proof has unique nodes per leaf, so compress_proofs fails when + // total unique nodes exceed NULLIFY_DEDUP_MAX_NODES (28). + // proof_1 contributes 15 nodes; proof_2 has 15 unique => 30 total > 28. + let proofs: Vec = (0..3).map(|i| make_proof(i * 1000, shared_top)).collect(); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + // 15 (proof1) + 15 (proof2 unique) = 30 > 28 max, so pairs fail. + // All 3 become singletons. + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![1, 1, 1] + }, + "Incompressible proofs (30 nodes > 28 max) should all become singletons" + ); + } + + #[test] + fn test_group_dedup_sorts_by_leaf_index() { + let work_items: Vec = (0..4).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[100, 3, 50, 1]); + let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + + let sorted_leaf_indices: Vec = + items.iter().map(|(_, proof)| proof.leaf_index).collect(); + assert_eq!( + sorted_leaf_indices, + vec![1, 3, 50, 100], + "Items should be sorted by leaf_index after grouping" + ); + assert_eq!( + GroupingResult::from_groups(&groups), + GroupingResult { + group_sizes: vec![4] + }, + "All compressible, should form 1 group of 4" + ); + } + + #[test] + fn test_group_dedup_indices_reference_sorted_positions() { + let work_items: Vec = (0..4).map(|_| make_work_item()).collect(); + let proofs = make_compressible_proofs(&[0, 1, 2, 3]); + let mut items: Vec<(&WorkItem, 
MerkleProof)> = work_items.iter().zip(proofs).collect(); + let groups = group_state_items_for_dedup(&mut items); + assert_eq!( + groups, + vec![vec![0, 1, 2, 3]], + "Group indices should reference positions in the sorted items array" + ); + } +} diff --git a/forester/src/processor/v1/send_transaction.rs b/forester/src/processor/v1/send_transaction.rs index b5282bc47a..a4a8ec1664 100644 --- a/forester/src/processor/v1/send_transaction.rs +++ b/forester/src/processor/v1/send_transaction.rs @@ -16,7 +16,6 @@ use solana_sdk::{ hash::Hash, pubkey::Pubkey, signature::{Keypair, Signature, Signer}, - transaction::Transaction, }; use tokio::time::Instant; use tracing::{debug, error, info, trace, warn}; @@ -78,23 +77,38 @@ pub async fn send_batched_transactions data, - None => { - trace!(tree.id = %tree_accounts.merkle_tree, queue.id = %tree_accounts.queue, "Preparation returned no data, 0 transactions sent."); + const THRESHOLD_POLL_INTERVAL: Duration = Duration::from_millis(500); + let timeout_deadline = function_start_time + config.retry_config.timeout; + + let data = loop { + if Instant::now() >= timeout_deadline { + trace!(tree.id = %tree_accounts.merkle_tree, "Timeout deadline reached while waiting for threshold, 0 transactions sent."); return Ok(0); } + + match prepare_batch_prerequisites( + &payer.pubkey(), + derivation, + &pool, + config, + tree_accounts, + &*transaction_builder, + function_start_time, + config.min_queue_items, + ) + .await + .map_err(ForesterError::from)? 
+ { + Some(data) => break data, + None => { + if config.min_queue_items.is_some() { + tokio::time::sleep(THRESHOLD_POLL_INTERVAL).await; + continue; + } + trace!(tree.id = %tree_accounts.merkle_tree, queue.id = %tree_accounts.queue, "Preparation returned no data, 0 transactions sent."); + return Ok(0); + } + } }; let max_concurrent_sends = config @@ -154,7 +168,7 @@ pub async fn send_batched_transactions( payer_pubkey: &Pubkey, derivation: &Pubkey, @@ -232,6 +242,7 @@ async fn prepare_batch_prerequisites( tree_accounts: TreeAccounts, transaction_builder: &T, start_time: Instant, + min_queue_items: Option, ) -> Result> { let tree_id_str = tree_accounts.merkle_tree.to_string(); @@ -266,7 +277,19 @@ async fn prepare_batch_prerequisites( if queue_item_data.is_empty() { trace!(tree = %tree_id_str, "Queue is empty, no transactions to send."); - return Ok(None); // Return None to indicate no work + return Ok(None); + } + + if let Some(min) = min_queue_items { + if tree_accounts.tree_type == TreeType::StateV1 && queue_item_data.len() < min { + trace!( + tree = %tree_id_str, + queue_len = queue_item_data.len(), + min_queue_items = min, + "Queue below min_queue_items threshold, skipping" + ); + return Ok(None); + } } let (recent_blockhash, last_valid_block_height, priority_fee) = { @@ -343,8 +366,7 @@ fn compute_effective_max_concurrent_sends( } async fn execute_transaction_chunk_sending( - transactions: Vec, - last_valid_block_height: u64, + transactions: Vec, context: &ChunkSendContext, ) -> std::result::Result<(), ForesterError> { if transactions.is_empty() { @@ -358,7 +380,7 @@ async fn execute_transaction_chunk_sending( let timeout_deadline = context.timeout_deadline; let max_concurrent_sends = context.max_concurrent_sends; let confirmation = context.confirmation; - let transaction_send_futures = transactions.into_iter().map(|tx| { + let transaction_send_futures = transactions.into_iter().map(|prepared_transaction| { let pool_clone = Arc::clone(&pool); let 
cancel_signal_clone = Arc::clone(&cancel_signal); let num_sent_transactions_clone = Arc::clone(&num_sent_transactions); @@ -368,7 +390,9 @@ async fn execute_transaction_chunk_sending( return TransactionSendResult::Cancelled; // Or Timeout } - let tx_signature = tx.signatures.first().copied().unwrap_or_default(); + let tx_signature = prepared_transaction + .signature() + .unwrap_or_default(); let tx_signature_str = tx_signature.to_string(); match pool_clone.get_connection().await { @@ -379,8 +403,6 @@ async fn execute_transaction_chunk_sending( } let send_time = Instant::now(); - let prepared_transaction = - PreparedTransaction::legacy(tx, last_valid_block_height); match prepared_transaction .send(&mut *rpc, Some(confirmation), Some(timeout_deadline)) .await diff --git a/forester/src/processor/v1/tx_builder.rs b/forester/src/processor/v1/tx_builder.rs index 463cc0b2bf..9fb2dbc2a6 100644 --- a/forester/src/processor/v1/tx_builder.rs +++ b/forester/src/processor/v1/tx_builder.rs @@ -6,8 +6,8 @@ use forester_utils::rpc_pool::SolanaRpcPool; use light_client::rpc::Rpc; use solana_program::hash::Hash; use solana_sdk::{ + address_lookup_table::AddressLookupTableAccount, signature::{Keypair, Signer}, - transaction::Transaction, }; use tokio::sync::Mutex; use tracing::{trace, warn}; @@ -18,7 +18,9 @@ use crate::{ tx_cache::ProcessedHashCache, v1::{config::BuildTransactionBatchConfig, helpers::fetch_proofs_and_create_instructions}, }, - smart_transaction::{create_smart_transaction, CreateSmartTransactionConfig}, + smart_transaction::{ + create_smart_transaction, CreateSmartTransactionConfig, PreparedTransaction, + }, Result, }; @@ -35,7 +37,7 @@ pub trait TransactionBuilder: Send + Sync { priority_fee: Option, work_items: &[WorkItem], config: BuildTransactionBatchConfig, - ) -> Result<(Vec, u64)>; + ) -> Result<(Vec, u64)>; } pub struct EpochManagerTransactions { @@ -43,6 +45,7 @@ pub struct EpochManagerTransactions { pub epoch: u64, pub phantom: std::marker::PhantomData, pub 
processed_hash_cache: Arc>, + pub address_lookup_tables: Vec, } impl EpochManagerTransactions { @@ -50,12 +53,14 @@ impl EpochManagerTransactions { pool: Arc>, epoch: u64, cache: Arc>, + address_lookup_tables: Vec, ) -> Self { Self { pool, epoch, phantom: std::marker::PhantomData, processed_hash_cache: cache, + address_lookup_tables, } } } @@ -75,7 +80,7 @@ impl TransactionBuilder for EpochManagerTransactions { priority_fee: Option, work_items: &[WorkItem], config: BuildTransactionBatchConfig, - ) -> Result<(Vec, u64)> { + ) -> Result<(Vec, u64)> { let mut cache = self.processed_hash_cache.lock().await; let work_items: Vec<&WorkItem> = work_items @@ -115,6 +120,7 @@ impl TransactionBuilder for EpochManagerTransactions { .map(|&item| item.clone()) .collect::>(); + let use_dedup = !self.address_lookup_tables.is_empty(); let mut transactions = vec![]; let all_instructions = match fetch_proofs_and_create_instructions( payer.pubkey(), @@ -122,6 +128,7 @@ impl TransactionBuilder for EpochManagerTransactions { self.pool.clone(), self.epoch, work_items.as_slice(), + use_dedup, ) .await { @@ -145,16 +152,17 @@ impl TransactionBuilder for EpochManagerTransactions { let batch_size = config.batch_size.max(1) as usize; for instruction_chunk in all_instructions.chunks(batch_size) { - let (transaction, _) = create_smart_transaction(CreateSmartTransactionConfig { + let prepared = create_smart_transaction(CreateSmartTransactionConfig { payer: payer.insecure_clone(), instructions: instruction_chunk.to_vec(), recent_blockhash: *recent_blockhash, compute_unit_price: priority_fee, compute_unit_limit: config.compute_unit_limit, last_valid_block_height, + address_lookup_tables: self.address_lookup_tables.clone(), }) .await?; - transactions.push(transaction); + transactions.push(prepared); } if !transactions.is_empty() { diff --git a/forester/src/smart_transaction.rs b/forester/src/smart_transaction.rs index 38df9f70be..fac792df06 100644 --- a/forester/src/smart_transaction.rs +++ 
b/forester/src/smart_transaction.rs @@ -66,6 +66,7 @@ pub struct CreateSmartTransactionConfig { pub compute_unit_limit: Option, pub instructions: Vec, pub last_valid_block_height: u64, + pub address_lookup_tables: Vec, } pub struct SendSmartTransactionConfig<'a> { @@ -201,10 +202,10 @@ fn with_compute_budget_instructions( /// whether it's a legacy or versioned smart transaction. The transaction's send configuration can also be changed, if provided /// /// # Returns -/// An optimized `Transaction` and the `last_valid_block_height` +/// A `PreparedTransaction` (legacy or versioned) and the `last_valid_block_height` pub async fn create_smart_transaction( config: CreateSmartTransactionConfig, -) -> Result<(Transaction, u64), RpcError> { +) -> Result { let payer_pubkey: Pubkey = config.payer.pubkey(); let final_instructions = with_compute_budget_instructions( config.instructions, @@ -214,10 +215,28 @@ pub async fn create_smart_transaction( }, ); - let mut tx = Transaction::new_with_payer(&final_instructions, Some(&payer_pubkey)); - tx.sign(&[&config.payer], config.recent_blockhash); - - Ok((tx, config.last_valid_block_height)) + if config.address_lookup_tables.is_empty() { + let mut tx = Transaction::new_with_payer(&final_instructions, Some(&payer_pubkey)); + tx.sign(&[&config.payer], config.recent_blockhash); + Ok(PreparedTransaction::legacy( + tx, + config.last_valid_block_height, + )) + } else { + let message = v0::Message::try_compile( + &payer_pubkey, + &final_instructions, + &config.address_lookup_tables, + config.recent_blockhash, + ) + .map_err(|e| RpcError::CustomError(format!("Failed to compile v0 message: {}", e)))?; + let tx = VersionedTransaction::try_new(VersionedMessage::V0(message), &[&config.payer]) + .map_err(|e| RpcError::SigningError(e.to_string()))?; + Ok(PreparedTransaction::versioned( + tx, + config.last_valid_block_height, + )) + } } pub async fn send_transaction_with_policy( @@ -251,7 +270,7 @@ pub async fn send_transaction_with_policy( .await } 
-pub(crate) struct PreparedTransaction { +pub struct PreparedTransaction { transaction: PreparedTransactionKind, last_valid_block_height: u64, } @@ -269,7 +288,17 @@ impl PreparedTransaction { } } - fn signature(&self) -> Option { + pub(crate) fn versioned( + transaction: VersionedTransaction, + last_valid_block_height: u64, + ) -> Self { + Self { + transaction: PreparedTransactionKind::Versioned(transaction), + last_valid_block_height, + } + } + + pub(crate) fn signature(&self) -> Option { match &self.transaction { PreparedTransactionKind::Legacy(transaction) => transaction.signatures.first().copied(), PreparedTransactionKind::Versioned(transaction) => { diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index 1727ed108b..3958dece65 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -38,6 +38,7 @@ use light_compressed_token::process_transfer::{ use light_hasher::Poseidon; use light_program_test::accounts::test_accounts::TestAccounts; use light_prover_client::prover::spawn_prover; +use light_registry::account_compression_cpi::sdk::nullify_dedup_lookup_table_accounts; use light_test_utils::{ actions::{create_compressible_token_account, CreateCompressibleTokenAccountInputs}, conversions::sdk_to_program_token_data, @@ -189,13 +190,40 @@ fn is_v2_address_test_enabled() -> bool { env::var("TEST_V2_ADDRESS").unwrap_or_else(|_| "true".to_string()) == "true" } +/// Creates an on-chain Address Lookup Table populated with the accounts +/// needed for nullify_dedup instructions. Returns the ALT address. 
+async fn create_nullify_dedup_alt( + rpc: &mut R, + payer: &Keypair, + merkle_tree: Pubkey, + nullifier_queue: Pubkey, + forester_pda: Option, +) -> Pubkey { + use light_client::rpc::lut::instruction::{create_lookup_table, extend_lookup_table}; + + let slot = rpc.get_slot().await.unwrap(); + let (create_ix, alt_address) = create_lookup_table(payer.pubkey(), payer.pubkey(), slot); + rpc.create_and_send_transaction(&[create_ix], &payer.pubkey(), &[payer]) + .await + .unwrap(); + + let addresses = nullify_dedup_lookup_table_accounts(merkle_tree, nullifier_queue, forester_pda); + let extend_ix = + extend_lookup_table(alt_address, payer.pubkey(), Some(payer.pubkey()), addresses); + rpc.create_and_send_transaction(&[extend_ix], &payer.pubkey(), &[payer]) + .await + .unwrap(); + + alt_address +} + #[tokio::test(flavor = "multi_thread", worker_threads = 16)] #[serial] async fn e2e_test() { let state_tree_params = InitStateTreeAccountsInstructionData::test_default(); let env = TestAccounts::get_local_test_validator_accounts(); println!("env {:?}", env); - let config = ForesterConfig { + let mut config = ForesterConfig { external_services: ExternalServicesConfig { rpc_url: get_rpc_url(), ws_rpc_url: Some(get_ws_rpc_url()), @@ -259,6 +287,7 @@ async fn e2e_test() { max_concurrent_batches: 10, pda_programs: vec![], }), + min_queue_items: None, }; let test_mode = TestMode::from_env(); @@ -296,6 +325,21 @@ async fn e2e_test() { .await; } + // Create ALT for nullify_dedup if V1 state test is enabled + if is_v1_state_test_enabled() { + let alt_addr = create_nullify_dedup_alt( + &mut rpc, + &env.protocol.forester, + env.v1_state_trees[0].merkle_tree, + env.v1_state_trees[0].nullifier_queue, + None, + ) + .await; + println!("Created nullify_dedup ALT: {}", alt_addr); + config.lookup_table_address = Some(alt_addr); + config.min_queue_items = Some(10); + } + // Get initial state for V1 state tree if enabled let pre_state_v1_root = if is_v1_state_test_enabled() { let (_, _, root) = 
get_initial_merkle_tree_state( @@ -490,6 +534,31 @@ async fn e2e_test() { ) .await; + // Spawn a slot advancement task so the forester doesn't get stuck waiting + // for epoch registration windows (surfpool offline mode doesn't auto-advance slots). + let slot_advance_rpc_url = config.external_services.rpc_url.clone(); + let slot_advance_handle = tokio::spawn(async move { + let advance_rpc = LightClient::new(LightClientConfig { + url: slot_advance_rpc_url, + commitment_config: None, + photon_url: None, + fetch_active_tree: false, + }) + .await + .unwrap(); + loop { + let current_slot = match advance_rpc.get_slot().await { + Ok(s) => s, + Err(_) => break, + }; + let target = current_slot + 50; + if advance_rpc.warp_to_slot(target).await.is_err() { + break; + } + tokio::time::sleep(Duration::from_secs(2)).await; + } + }); + wait_for_work_report( &mut work_report_receiver, &state_tree_params, @@ -497,6 +566,8 @@ async fn e2e_test() { ) .await; + slot_advance_handle.abort(); + // Verify root changes based on enabled tests if is_v1_state_test_enabled() { if let Some(pre_root) = pre_state_v1_root { @@ -569,6 +640,31 @@ async fn e2e_test() { ); println!("Compressible account (subscriber) successfully closed"); + // Verify dedup grouping logs when ALT is configured + if is_v1_state_test_enabled() { + let log_dir = std::path::Path::new("logs"); + if log_dir.exists() { + let latest_log = std::fs::read_dir(log_dir) + .unwrap() + .filter_map(|e| e.ok()) + .filter(|e| e.file_name().to_string_lossy().starts_with("forester.")) + .max_by_key(|e| e.metadata().unwrap().modified().unwrap()); + if let Some(log_entry) = latest_log { + let content = std::fs::read_to_string(log_entry.path()).unwrap(); + let has_dedup = content.contains("v1_nullify_dedup_grouping"); + assert!( + has_dedup, + "Expected v1_nullify_dedup_grouping logs when ALT is configured" + ); + println!("Verified: dedup grouping events found in forester logs"); + } else { + println!("Warning: no forester log files found 
in logs/"); + } + } else { + println!("Warning: logs/ directory not found"); + } + } + // Shutdown all services // Bootstrap may have already completed, so ignore send errors let _ = shutdown_bootstrap_sender.send(()); diff --git a/forester/tests/legacy/test_utils.rs b/forester/tests/legacy/test_utils.rs index d535665d71..4a7cb1c543 100644 --- a/forester/tests/legacy/test_utils.rs +++ b/forester/tests/legacy/test_utils.rs @@ -122,6 +122,8 @@ pub fn forester_config() -> ForesterConfig { address_tree_data: vec![], state_tree_data: vec![], compressible_config: None, + lookup_table_address: None, + min_queue_items: None, } } diff --git a/forester/tests/priority_fee_test.rs b/forester/tests/priority_fee_test.rs index 976898455a..cc88e78e1a 100644 --- a/forester/tests/priority_fee_test.rs +++ b/forester/tests/priority_fee_test.rs @@ -92,6 +92,7 @@ async fn test_priority_fee_request() { tree_ids: vec![], enable_compressible: true, lookup_table_address: None, + min_queue_items: None, api_server_port: 8080, group_authority: None, light_pda_programs: vec![], diff --git a/forester/tests/test_nullify_2_tx_size.rs b/forester/tests/test_nullify_2_tx_size.rs deleted file mode 100644 index e3c1e5f84a..0000000000 --- a/forester/tests/test_nullify_2_tx_size.rs +++ /dev/null @@ -1,108 +0,0 @@ -use light_registry::{ - account_compression_cpi::sdk::{ - create_nullify_2_instruction, nullify_2_lookup_table_accounts, - CreateNullify2InstructionInputs, - }, - utils::get_forester_epoch_pda_from_authority, -}; -use solana_sdk::{ - hash::Hash, - message::{v0, AddressLookupTableAccount, VersionedMessage}, - pubkey::Pubkey, - signature::Keypair, - signer::Signer, - transaction::VersionedTransaction, -}; - -/// Validates that a nullify_2 v0 transaction with ALT fits within the -/// 1232-byte Solana transaction size limit. -/// -/// This is a pure serialization check -- no validator needed. -/// If the serialized size is <= 1232 bytes, it will work on any validator. 
-#[test] -fn test_nullify_2_v0_transaction_size() { - let authority = Keypair::new(); - let merkle_tree = Pubkey::new_unique(); - let nullifier_queue = Pubkey::new_unique(); - let epoch = 0u64; - let forester_pda = get_forester_epoch_pda_from_authority(&authority.pubkey(), epoch).0; - - let ix = create_nullify_2_instruction( - CreateNullify2InstructionInputs { - authority: authority.pubkey(), - nullifier_queue, - merkle_tree, - change_log_index: 1400, - queue_index_0: 100, - queue_index_1: 200, - leaf_index_0: 67_000_000, - leaf_index_1: 67_000_001, - proof_0: [[0xAA; 32]; 15], - proof_1: [[0xBB; 32]; 15], - shared_proof_node: [0xCC; 32], - derivation: authority.pubkey(), - is_metadata_forester: false, - }, - 0, - ); - - // Build synthetic ALT with the known accounts - let alt_accounts = - nullify_2_lookup_table_accounts(merkle_tree, nullifier_queue, Some(forester_pda)); - let alt_address = Pubkey::new_unique(); - let alt = AddressLookupTableAccount { - key: alt_address, - addresses: alt_accounts, - }; - - // Compile v0 message - let blockhash = Hash::default(); - let message = v0::Message::try_compile(&authority.pubkey(), &[ix], &[alt], blockhash) - .expect("Failed to compile v0 message"); - - // Create signed transaction - let versioned_message = VersionedMessage::V0(message); - let tx = VersionedTransaction::try_new(versioned_message, &[&authority]) - .expect("Failed to create versioned transaction"); - - let serialized = tx.message.serialize(); - // Full tx = compact-u16 sig count (1) + signatures (64 * n) + serialized message - let tx_size = 1 + tx.signatures.len() * 64 + serialized.len(); - - println!( - "nullify_2 v0 transaction size: {} bytes (limit: 1232)", - tx_size - ); - println!(" instruction data: {} bytes", 1007); - println!(" margin: {} bytes", 1232_i64 - tx_size as i64); - - // Breakdown - println!("\nTransaction breakdown:"); - println!(" signatures: {}", tx.signatures.len() * 64 + 1); - let static_keys = tx.message.static_account_keys(); - 
println!(" static account keys: {}", static_keys.len()); - for (i, key) in static_keys.iter().enumerate() { - let label = if *key == authority.pubkey() { - "authority (signer)" - } else if *key == light_registry::ID { - "registry program" - } else { - "unknown" - }; - println!(" [{}] {} ({})", i, key, label); - } - if let VersionedMessage::V0(m) = &tx.message { - println!(" address table lookups: {}", m.address_table_lookups.len()); - for alt_lookup in &m.address_table_lookups { - println!(" writable indices: {:?}", alt_lookup.writable_indexes); - println!(" readonly indices: {:?}", alt_lookup.readonly_indexes); - } - }; - - assert!( - tx_size <= 1232, - "nullify_2 v0 transaction is {} bytes, exceeds 1232 byte limit by {} bytes", - tx_size, - tx_size - 1232 - ); -} diff --git a/forester/tests/test_nullify_dedup_tx_size.rs b/forester/tests/test_nullify_dedup_tx_size.rs index 60168bdcb5..0b1e578bfc 100644 --- a/forester/tests/test_nullify_dedup_tx_size.rs +++ b/forester/tests/test_nullify_dedup_tx_size.rs @@ -47,8 +47,9 @@ fn test_nullify_dedup_v0_transaction_size() { 0, ); - // SetComputeUnitLimit instruction - let compute_ix = ComputeBudgetInstruction::set_compute_unit_limit(600_000); + // Compute budget instructions (both are added by the forester's smart_transaction logic) + let compute_limit_ix = ComputeBudgetInstruction::set_compute_unit_limit(600_000); + let compute_price_ix = ComputeBudgetInstruction::set_compute_unit_price(1); // Build synthetic ALT with the known accounts (includes ComputeBudget program ID) let alt_accounts = @@ -59,11 +60,15 @@ fn test_nullify_dedup_v0_transaction_size() { addresses: alt_accounts, }; - // Compile v0 message with both instructions + // Compile v0 message with all instructions let blockhash = Hash::default(); - let message = - v0::Message::try_compile(&authority.pubkey(), &[compute_ix, nullify_ix], &[alt], blockhash) - .expect("Failed to compile v0 message"); + let message = v0::Message::try_compile( + &authority.pubkey(), + 
&[compute_price_ix, compute_limit_ix, nullify_ix], + &[alt], + blockhash, + ) + .expect("Failed to compile v0 message"); // Create signed transaction let versioned_message = VersionedMessage::V0(message); diff --git a/forester/tests/test_utils.rs b/forester/tests/test_utils.rs index 4225503a19..4eafd33da5 100644 --- a/forester/tests/test_utils.rs +++ b/forester/tests/test_utils.rs @@ -136,6 +136,7 @@ pub fn forester_config() -> ForesterConfig { state_tree_data: vec![], compressible_config: None, lookup_table_address: None, + min_queue_items: None, } } diff --git a/program-tests/registry-test/tests/nullify_2.rs b/program-tests/registry-test/tests/nullify_2.rs deleted file mode 100644 index 4e919ab0a0..0000000000 --- a/program-tests/registry-test/tests/nullify_2.rs +++ /dev/null @@ -1,232 +0,0 @@ -use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; -use forester_utils::account_zero_copy::{get_concurrent_merkle_tree, get_hash_set}; -use light_client::rpc::Rpc; -use light_compressed_account::TreeType; -use light_hasher::Poseidon; -use light_program_test::{program_test::LightProgramTest, ProgramTestConfig}; -use light_registry::account_compression_cpi::sdk::{ - create_nullify_2_instruction, CreateNullify2InstructionInputs, -}; -use light_test_utils::e2e_test_env::init_program_test_env; -use serial_test::serial; -use solana_sdk::signature::{Keypair, Signer}; - -/// Tests that nullify_2 correctly nullifies two leaves in a single instruction -/// using two sequential CPIs to account_compression::nullify_leaves. -/// Uses LiteSVM (light-program-test) for fast logic testing. -/// Note: LiteSVM allows 10KB transactions, so this does NOT validate tx size. 
-#[serial] -#[tokio::test] -async fn test_nullify_2() { - let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) - .await - .unwrap(); - rpc.indexer = None; - - let env = rpc.test_accounts.clone(); - - let forester_keypair = Keypair::new(); - rpc.airdrop_lamports(&forester_keypair.pubkey(), 2_000_000_000) - .await - .unwrap(); - - let merkle_tree_keypair = Keypair::new(); - let nullifier_queue_keypair = Keypair::new(); - let cpi_context_keypair = Keypair::new(); - - let (mut state_tree_bundle, mut rpc) = { - let mut e2e_env = init_program_test_env(rpc, &env, 50).await; - e2e_env.indexer.state_merkle_trees.clear(); - e2e_env.keypair_action_config.fee_assert = false; - - // Create V1 state merkle tree with custom forester - e2e_env - .indexer - .add_state_merkle_tree( - &mut e2e_env.rpc, - &merkle_tree_keypair, - &nullifier_queue_keypair, - &cpi_context_keypair, - None, - Some(forester_keypair.pubkey()), - TreeType::StateV1, - ) - .await; - - // Create 2 compressed accounts by compressing + transferring twice. - // Each transfer nullifies the input, putting it in the nullifier queue. 
- e2e_env - .compress_sol_deterministic(&forester_keypair, 1_000_000, None) - .await; - e2e_env - .transfer_sol_deterministic( - &forester_keypair, - &Keypair::new().pubkey(), - None, - ) - .await - .unwrap(); - - e2e_env - .compress_sol_deterministic(&forester_keypair, 1_000_000, None) - .await; - e2e_env - .transfer_sol_deterministic( - &forester_keypair, - &Keypair::new().pubkey(), - None, - ) - .await - .unwrap(); - - ( - e2e_env.indexer.state_merkle_trees[0].clone(), - e2e_env.rpc, - ) - }; - - // Read on-chain state - let nullifier_queue = unsafe { - get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) - .await - .unwrap() - }; - - let onchain_tree = get_concurrent_merkle_tree::( - &mut rpc, - state_tree_bundle.accounts.merkle_tree, - ) - .await - .unwrap(); - - let pre_root = onchain_tree.root(); - let change_log_index = onchain_tree.changelog_index(); - - // Collect 2 unmarked items from the queue - let mut items_to_nullify = Vec::new(); - for i in 0..nullifier_queue.get_capacity() { - let bucket = nullifier_queue.get_bucket(i).unwrap(); - if let Some(bucket) = bucket { - if bucket.sequence_number.is_none() { - items_to_nullify.push((i, bucket.value_bytes())); - } - } - } - assert!( - items_to_nullify.len() >= 2, - "Need at least 2 items in nullifier queue, got {}", - items_to_nullify.len() - ); - - let (queue_idx_0, leaf_0) = items_to_nullify[0]; - let (queue_idx_1, leaf_1) = items_to_nullify[1]; - - let leaf_index_0 = state_tree_bundle - .merkle_tree - .get_leaf_index(&leaf_0) - .unwrap(); - let leaf_index_1 = state_tree_bundle - .merkle_tree - .get_leaf_index(&leaf_1) - .unwrap(); - - let proof_0: Vec<[u8; 32]> = state_tree_bundle - .merkle_tree - .get_proof_of_leaf(leaf_index_0, false) - .unwrap(); - let proof_1: Vec<[u8; 32]> = state_tree_bundle - .merkle_tree - .get_proof_of_leaf(leaf_index_1, false) - .unwrap(); - - // Split proofs: first 15 nodes are unique per leaf, node at index 15 is shared. 
- // Both leaves are in the same 2^16 subtree so they share the proof node at level 15. - let proof_0_arr: [[u8; 32]; 15] = proof_0[..15].try_into().unwrap(); - let proof_1_arr: [[u8; 32]; 15] = proof_1[..15].try_into().unwrap(); - let shared_proof_node: [u8; 32] = proof_0[15]; - // Verify the shared node is the same in both proofs. - assert_eq!( - proof_0[15], proof_1[15], - "Level 15 proof node must be shared between both leaves in the same subtree" - ); - - // Build nullify_2 instruction - let ix = create_nullify_2_instruction( - CreateNullify2InstructionInputs { - authority: forester_keypair.pubkey(), - nullifier_queue: state_tree_bundle.accounts.nullifier_queue, - merkle_tree: state_tree_bundle.accounts.merkle_tree, - change_log_index: change_log_index as u16, - queue_index_0: queue_idx_0 as u16, - queue_index_1: queue_idx_1 as u16, - leaf_index_0: leaf_index_0 as u32, - leaf_index_1: leaf_index_1 as u32, - proof_0: proof_0_arr, - proof_1: proof_1_arr, - shared_proof_node, - derivation: forester_keypair.pubkey(), - is_metadata_forester: true, - }, - 0, - ); - - // Send transaction - rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair]) - .await - .unwrap(); - - // Verify: both queue items should be marked - let nullifier_queue_post = unsafe { - get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue) - .await - .unwrap() - }; - - let bucket_0 = nullifier_queue_post - .get_bucket(queue_idx_0) - .unwrap() - .unwrap(); - assert!( - bucket_0.sequence_number.is_some(), - "First queue item should be marked after nullify_2" - ); - - let bucket_1 = nullifier_queue_post - .get_bucket(queue_idx_1) - .unwrap() - .unwrap(); - assert!( - bucket_1.sequence_number.is_some(), - "Second queue item should be marked after nullify_2" - ); - - // Verify: tree root changed - let onchain_tree_post = - get_concurrent_merkle_tree::( - &mut rpc, - state_tree_bundle.accounts.merkle_tree, - ) - .await - .unwrap(); - assert_ne!( - pre_root, - 
onchain_tree_post.root(), - "Root should have changed after nullify_2" - ); - - // Locally update the merkle tree and verify roots match - state_tree_bundle - .merkle_tree - .update(&[0u8; 32], leaf_index_0) - .unwrap(); - state_tree_bundle - .merkle_tree - .update(&[0u8; 32], leaf_index_1) - .unwrap(); - - assert_eq!( - onchain_tree_post.root(), - state_tree_bundle.merkle_tree.root(), - "On-chain root should match local tree after nullifying both leaves" - ); -} diff --git a/program-tests/registry-test/tests/nullify_dedup.rs b/program-tests/registry-test/tests/nullify_dedup.rs index b2e19b3477..37b651ea2c 100644 --- a/program-tests/registry-test/tests/nullify_dedup.rs +++ b/program-tests/registry-test/tests/nullify_dedup.rs @@ -5,7 +5,8 @@ use light_compressed_account::TreeType; use light_hasher::Poseidon; use light_program_test::{program_test::LightProgramTest, ProgramTestConfig}; use light_registry::account_compression_cpi::sdk::{ - compress_proofs, create_nullify_dedup_instruction, CreateNullifyDedupInstructionInputs, + compress_proofs, create_nullify_dedup_instruction, CompressedProofs, + CreateNullifyDedupInstructionInputs, }; use light_test_utils::e2e_test_env::init_program_test_env; use serial_test::serial; @@ -52,19 +53,12 @@ async fn test_nullify_dedup_4_leaves() { .compress_sol_deterministic(&forester_keypair, 1_000_000, None) .await; e2e_env - .transfer_sol_deterministic( - &forester_keypair, - &Keypair::new().pubkey(), - None, - ) + .transfer_sol_deterministic(&forester_keypair, &Keypair::new().pubkey(), None) .await .unwrap(); } - ( - e2e_env.indexer.state_merkle_trees[0].clone(), - e2e_env.rpc, - ) + (e2e_env.indexer.state_merkle_trees[0].clone(), e2e_env.rpc) }; // Read on-chain state @@ -121,8 +115,13 @@ async fn test_nullify_dedup_4_leaves() { } let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); - let (proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes) = - compress_proofs(&proof_refs).expect("compress_proofs 
should succeed for 4 leaves"); + let CompressedProofs { + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 4 leaves"); let queue_indices: [u16; 4] = [ items_to_nullify[0].0 as u16, @@ -179,13 +178,12 @@ async fn test_nullify_dedup_4_leaves() { } // Verify root changed - let onchain_tree_post = - get_concurrent_merkle_tree::( - &mut rpc, - state_tree_bundle.accounts.merkle_tree, - ) - .await - .unwrap(); + let onchain_tree_post = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); assert_ne!( pre_root, onchain_tree_post.root(), @@ -247,19 +245,12 @@ async fn test_nullify_dedup_3_leaves() { .compress_sol_deterministic(&forester_keypair, 1_000_000, None) .await; e2e_env - .transfer_sol_deterministic( - &forester_keypair, - &Keypair::new().pubkey(), - None, - ) + .transfer_sol_deterministic(&forester_keypair, &Keypair::new().pubkey(), None) .await .unwrap(); } - ( - e2e_env.indexer.state_merkle_trees[0].clone(), - e2e_env.rpc, - ) + (e2e_env.indexer.state_merkle_trees[0].clone(), e2e_env.rpc) }; let nullifier_queue = unsafe { @@ -299,8 +290,13 @@ async fn test_nullify_dedup_3_leaves() { } let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); - let (proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes) = - compress_proofs(&proof_refs).expect("compress_proofs should succeed for 3 leaves"); + let CompressedProofs { + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 3 leaves"); let ix = create_nullify_dedup_instruction( CreateNullifyDedupInstructionInputs { @@ -360,13 +356,12 @@ async fn test_nullify_dedup_3_leaves() { .update(&[0u8; 32], li) .unwrap(); } - let onchain_tree_post = - get_concurrent_merkle_tree::( - &mut rpc, - state_tree_bundle.accounts.merkle_tree, - ) - 
.await - .unwrap(); + let onchain_tree_post = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); assert_eq!( onchain_tree_post.root(), state_tree_bundle.merkle_tree.root(), @@ -414,19 +409,12 @@ async fn test_nullify_dedup_2_leaves() { .compress_sol_deterministic(&forester_keypair, 1_000_000, None) .await; e2e_env - .transfer_sol_deterministic( - &forester_keypair, - &Keypair::new().pubkey(), - None, - ) + .transfer_sol_deterministic(&forester_keypair, &Keypair::new().pubkey(), None) .await .unwrap(); } - ( - e2e_env.indexer.state_merkle_trees[0].clone(), - e2e_env.rpc, - ) + (e2e_env.indexer.state_merkle_trees[0].clone(), e2e_env.rpc) }; let nullifier_queue = unsafe { @@ -466,8 +454,13 @@ async fn test_nullify_dedup_2_leaves() { } let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); - let (proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes) = - compress_proofs(&proof_refs).expect("compress_proofs should succeed for 2 leaves"); + let CompressedProofs { + proof_2_shared, + proof_3_source, + proof_4_source, + shared_top_node, + nodes, + } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 2 leaves"); let ix = create_nullify_dedup_instruction( CreateNullifyDedupInstructionInputs { @@ -527,13 +520,12 @@ async fn test_nullify_dedup_2_leaves() { .update(&[0u8; 32], li) .unwrap(); } - let onchain_tree_post = - get_concurrent_merkle_tree::( - &mut rpc, - state_tree_bundle.accounts.merkle_tree, - ) - .await - .unwrap(); + let onchain_tree_post = get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); assert_eq!( onchain_tree_post.root(), state_tree_bundle.merkle_tree.root(), @@ -580,18 +572,11 @@ async fn test_nullify_dedup_1_leaf_fails() { .compress_sol_deterministic(&forester_keypair, 1_000_000, None) .await; e2e_env - .transfer_sol_deterministic( - &forester_keypair, - &Keypair::new().pubkey(), - None, - ) 
+ .transfer_sol_deterministic(&forester_keypair, &Keypair::new().pubkey(), None) .await .unwrap(); - ( - e2e_env.indexer.state_merkle_trees[0].clone(), - e2e_env.rpc, - ) + (e2e_env.indexer.state_merkle_trees[0].clone(), e2e_env.rpc) }; let nullifier_queue = unsafe { diff --git a/programs/registry/src/account_compression_cpi/nullify.rs b/programs/registry/src/account_compression_cpi/nullify.rs index 91d021ef81..b25a6255ad 100644 --- a/programs/registry/src/account_compression_cpi/nullify.rs +++ b/programs/registry/src/account_compression_cpi/nullify.rs @@ -3,9 +3,7 @@ use account_compression::{ }; use anchor_lang::prelude::*; -use crate::errors::RegistryError; - -use crate::epoch::register_epoch::ForesterEpochPda; +use crate::{epoch::register_epoch::ForesterEpochPda, errors::RegistryError}; #[derive(Accounts)] pub struct NullifyLeaves<'info> { @@ -96,80 +94,6 @@ fn nullify_single_leaf_cpi( ) } -#[allow(clippy::too_many_arguments)] -pub fn process_nullify_2( - ctx: &Context, - change_log_index: u16, - queue_index_0: u16, - queue_index_1: u16, - leaf_index_0: u32, - leaf_index_1: u32, - proof_0: [[u8; 32]; 15], - proof_1: [[u8; 32]; 15], - shared_proof_node: [u8; 32], -) -> Result<()> { - let bump = ctx.bumps.cpi_authority; - let bump = &[bump]; - let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; - let signer_seeds = &[&seeds[..]]; - - // Reconstruct full 16-node proofs by appending the shared node (level 15). 
- let mut full_proof_0: Vec<[u8; 32]> = proof_0.to_vec(); - full_proof_0.push(shared_proof_node); - let mut full_proof_1: Vec<[u8; 32]> = proof_1.to_vec(); - full_proof_1.push(shared_proof_node); - - // First CPI: nullify leaf 0 - { - let accounts = account_compression::cpi::accounts::NullifyLeaves { - authority: ctx.accounts.cpi_authority.to_account_info(), - registered_program_pda: Some(ctx.accounts.registered_program_pda.to_account_info()), - log_wrapper: ctx.accounts.log_wrapper.to_account_info(), - merkle_tree: ctx.accounts.merkle_tree.to_account_info(), - nullifier_queue: ctx.accounts.nullifier_queue.to_account_info(), - fee_payer: Some(ctx.accounts.authority.to_account_info()), - }; - let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.account_compression_program.to_account_info(), - accounts, - signer_seeds, - ); - account_compression::cpi::nullify_leaves( - cpi_ctx, - vec![change_log_index as u64], - vec![queue_index_0], - vec![leaf_index_0 as u64], - vec![full_proof_0], - )?; - } - - // Second CPI: nullify leaf 1 (same change_log_index -- proof is patched via changelog replay) - { - let accounts = account_compression::cpi::accounts::NullifyLeaves { - authority: ctx.accounts.cpi_authority.to_account_info(), - registered_program_pda: Some(ctx.accounts.registered_program_pda.to_account_info()), - log_wrapper: ctx.accounts.log_wrapper.to_account_info(), - merkle_tree: ctx.accounts.merkle_tree.to_account_info(), - nullifier_queue: ctx.accounts.nullifier_queue.to_account_info(), - fee_payer: Some(ctx.accounts.authority.to_account_info()), - }; - let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.account_compression_program.to_account_info(), - accounts, - signer_seeds, - ); - account_compression::cpi::nullify_leaves( - cpi_ctx, - vec![change_log_index as u64], - vec![queue_index_1], - vec![leaf_index_1 as u64], - vec![full_proof_1], - )?; - } - - Ok(()) -} - /// Determines proof count from leaf_indices sentinel values. 
/// Returns Err(InvalidProofEncoding) if fewer than 2 leaves are specified. pub fn count_from_leaf_indices(leaf_indices: &[u32; 4]) -> Result { diff --git a/programs/registry/src/account_compression_cpi/sdk.rs b/programs/registry/src/account_compression_cpi/sdk.rs index 7ea33e99db..9bac86593d 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -62,62 +62,6 @@ pub fn create_nullify_instruction( } } -#[derive(Clone, Debug, PartialEq)] -pub struct CreateNullify2InstructionInputs { - pub authority: Pubkey, - pub nullifier_queue: Pubkey, - pub merkle_tree: Pubkey, - pub change_log_index: u16, - pub queue_index_0: u16, - pub queue_index_1: u16, - pub leaf_index_0: u32, - pub leaf_index_1: u32, - pub proof_0: [[u8; 32]; 15], - pub proof_1: [[u8; 32]; 15], - pub shared_proof_node: [u8; 32], - pub derivation: Pubkey, - pub is_metadata_forester: bool, -} - -pub fn create_nullify_2_instruction( - inputs: CreateNullify2InstructionInputs, - epoch: u64, -) -> Instruction { - let register_program_pda = get_registered_program_pda(&crate::ID); - let registered_forester_pda = if inputs.is_metadata_forester { - None - } else { - Some(get_forester_epoch_pda_from_authority(&inputs.derivation, epoch).0) - }; - let (cpi_authority, _bump) = get_cpi_authority_pda(); - let instruction_data = crate::instruction::Nullify2 { - change_log_index: inputs.change_log_index, - queue_index_0: inputs.queue_index_0, - queue_index_1: inputs.queue_index_1, - leaf_index_0: inputs.leaf_index_0, - leaf_index_1: inputs.leaf_index_1, - proof_0: inputs.proof_0, - proof_1: inputs.proof_1, - shared_proof_node: inputs.shared_proof_node, - }; - - let accounts = crate::accounts::NullifyLeaves { - authority: inputs.authority, - registered_forester_pda, - registered_program_pda: register_program_pda, - nullifier_queue: inputs.nullifier_queue, - merkle_tree: inputs.merkle_tree, - log_wrapper: NOOP_PUBKEY.into(), - cpi_authority, - 
account_compression_program: account_compression::ID, - }; - Instruction { - program_id: crate::ID, - accounts: accounts.to_account_metas(Some(true)), - data: instruction_data.data(), - } -} - /// Returns the base accounts for populating an address lookup table /// for nullify v0 transactions. fn nullify_lookup_table_accounts_base( @@ -142,20 +86,11 @@ fn nullify_lookup_table_accounts_base( accounts } -/// Returns the known accounts for populating an address lookup table -/// for nullify_2 v0 transactions. -pub fn nullify_2_lookup_table_accounts( - merkle_tree: Pubkey, - nullifier_queue: Pubkey, - forester_pda: Option, -) -> Vec { - nullify_lookup_table_accounts_base(merkle_tree, nullifier_queue, forester_pda) -} - /// Max number of 32-byte nodes in the dedup encoding vec. /// Verified by tx size test (forester/tests/test_nullify_dedup_tx_size.rs). -/// With ALT, SetComputeUnitLimit ix, and worst-case nodes, the tx is 1230 bytes (2 byte margin). -pub const NULLIFY_DEDUP_MAX_NODES: usize = 28; +/// With ALT, SetComputeUnitLimit + SetComputeUnitPrice ixs, and worst-case nodes, +/// the tx fits within the 1232 byte limit. +pub const NULLIFY_DEDUP_MAX_NODES: usize = 27; #[derive(Clone, Debug, PartialEq)] pub struct CreateNullifyDedupInstructionInputs { @@ -213,13 +148,20 @@ pub fn create_nullify_dedup_instruction( } } +/// Result of compressing 2-4 Merkle proofs into the dedup encoding. +pub struct CompressedProofs { + pub proof_2_shared: u16, + pub proof_3_source: u32, + pub proof_4_source: u32, + pub shared_top_node: [u8; 32], + pub nodes: Vec<[u8; 32]>, +} + /// Compresses 2-4 full 16-node Merkle proofs into the dedup encoding. -/// Returns `(proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes)`, +/// Returns the compressed proof data, /// or `None` if compression is impossible (different top nodes, too many unique nodes, or /// fewer than 2 or more than 4 proofs). 
-pub fn compress_proofs( - proofs: &[&[[u8; 32]; 16]], -) -> Option<(u16, u32, u32, [u8; 32], Vec<[u8; 32]>)> { +pub fn compress_proofs(proofs: &[&[[u8; 32]; 16]]) -> Option { if proofs.len() < 2 || proofs.len() > 4 { return None; } @@ -285,13 +227,13 @@ pub fn compress_proofs( return None; } - Some(( + Some(CompressedProofs { proof_2_shared, proof_3_source, proof_4_source, shared_top_node, nodes, - )) + }) } /// Returns the known accounts for populating an address lookup table @@ -302,7 +244,8 @@ pub fn nullify_dedup_lookup_table_accounts( nullifier_queue: Pubkey, forester_pda: Option, ) -> Vec { - let mut accounts = nullify_lookup_table_accounts_base(merkle_tree, nullifier_queue, forester_pda); + let mut accounts = + nullify_lookup_table_accounts_base(merkle_tree, nullifier_queue, forester_pda); accounts.push(solana_sdk::compute_budget::ID); accounts } @@ -793,106 +736,9 @@ pub fn create_rollover_batch_address_tree_instruction( #[cfg(test)] mod tests { - use super::*; use anchor_lang::Discriminator; - #[test] - fn test_nullify_2_instruction_data_size() { - let instruction_data = crate::instruction::Nullify2 { - change_log_index: 0, - queue_index_0: 0, - queue_index_1: 0, - leaf_index_0: 0, - leaf_index_1: 0, - proof_0: [[0u8; 32]; 15], - proof_1: [[0u8; 32]; 15], - shared_proof_node: [0u8; 32], - }; - let data = instruction_data.data(); - assert_eq!( - data.len(), - 1007, - "nullify_2 instruction data must be exactly 1007 bytes \ - (1 disc + 2 changelog + 2+2 queue + 4+4 leaf + 480+480 proofs + 32 shared), got {}", - data.len() - ); - } - - #[test] - fn test_nullify_2_instruction_accounts() { - let authority = Pubkey::new_unique(); - let inputs = CreateNullify2InstructionInputs { - authority, - nullifier_queue: Pubkey::new_unique(), - merkle_tree: Pubkey::new_unique(), - change_log_index: 0, - queue_index_0: 0, - queue_index_1: 1, - leaf_index_0: 0, - leaf_index_1: 1, - proof_0: [[0u8; 32]; 15], - proof_1: [[0u8; 32]; 15], - shared_proof_node: [0u8; 32], - 
derivation: authority, - is_metadata_forester: false, - }; - let ix = create_nullify_2_instruction(inputs, 0); - assert_eq!(ix.data.len(), 1007); - // 8 accounts: forester_pda, authority, cpi_authority, registered_program_pda, - // account_compression_program, log_wrapper, merkle_tree, nullifier_queue - assert_eq!(ix.accounts.len(), 8, "expected 8 accounts"); - } - - #[test] - fn test_nullify_2_discriminator_no_collision() { - let disc = crate::instruction::Nullify2::DISCRIMINATOR; - assert_eq!(disc.len(), 1, "nullify_2 discriminator must be 1 byte"); - - let existing: &[(&str, &[u8])] = &[ - ("InitializeProtocolConfig", crate::instruction::InitializeProtocolConfig::DISCRIMINATOR), - ("UpdateProtocolConfig", crate::instruction::UpdateProtocolConfig::DISCRIMINATOR), - ("RegisterSystemProgram", crate::instruction::RegisterSystemProgram::DISCRIMINATOR), - ("DeregisterSystemProgram", crate::instruction::DeregisterSystemProgram::DISCRIMINATOR), - ("RegisterForester", crate::instruction::RegisterForester::DISCRIMINATOR), - ("UpdateForesterPda", crate::instruction::UpdateForesterPda::DISCRIMINATOR), - ("UpdateForesterPdaWeight", crate::instruction::UpdateForesterPdaWeight::DISCRIMINATOR), - ("RegisterForesterEpoch", crate::instruction::RegisterForesterEpoch::DISCRIMINATOR), - ("FinalizeRegistration", crate::instruction::FinalizeRegistration::DISCRIMINATOR), - ("ReportWork", crate::instruction::ReportWork::DISCRIMINATOR), - ("InitializeAddressMerkleTree", crate::instruction::InitializeAddressMerkleTree::DISCRIMINATOR), - ("InitializeStateMerkleTree", crate::instruction::InitializeStateMerkleTree::DISCRIMINATOR), - ("Nullify", crate::instruction::Nullify::DISCRIMINATOR), - ("UpdateAddressMerkleTree", crate::instruction::UpdateAddressMerkleTree::DISCRIMINATOR), - ("RolloverAddressMerkleTreeAndQueue", crate::instruction::RolloverAddressMerkleTreeAndQueue::DISCRIMINATOR), - ("RolloverStateMerkleTreeAndQueue", crate::instruction::RolloverStateMerkleTreeAndQueue::DISCRIMINATOR), 
- ("InitializeBatchedStateMerkleTree", crate::instruction::InitializeBatchedStateMerkleTree::DISCRIMINATOR), - ("BatchNullify", crate::instruction::BatchNullify::DISCRIMINATOR), - ("BatchAppend", crate::instruction::BatchAppend::DISCRIMINATOR), - ("InitializeBatchedAddressMerkleTree", crate::instruction::InitializeBatchedAddressMerkleTree::DISCRIMINATOR), - ("BatchUpdateAddressTree", crate::instruction::BatchUpdateAddressTree::DISCRIMINATOR), - ("RolloverBatchedAddressMerkleTree", crate::instruction::RolloverBatchedAddressMerkleTree::DISCRIMINATOR), - ("RolloverBatchedStateMerkleTree", crate::instruction::RolloverBatchedStateMerkleTree::DISCRIMINATOR), - ("MigrateState", crate::instruction::MigrateState::DISCRIMINATOR), - ("CreateConfigCounter", crate::instruction::CreateConfigCounter::DISCRIMINATOR), - ("CreateCompressibleConfig", crate::instruction::CreateCompressibleConfig::DISCRIMINATOR), - ("UpdateCompressibleConfig", crate::instruction::UpdateCompressibleConfig::DISCRIMINATOR), - ("PauseCompressibleConfig", crate::instruction::PauseCompressibleConfig::DISCRIMINATOR), - ("UnpauseCompressibleConfig", crate::instruction::UnpauseCompressibleConfig::DISCRIMINATOR), - ("DeprecateCompressibleConfig", crate::instruction::DeprecateCompressibleConfig::DISCRIMINATOR), - ("WithdrawFundingPool", crate::instruction::WithdrawFundingPool::DISCRIMINATOR), - ("Claim", crate::instruction::Claim::DISCRIMINATOR), - ("CompressAndClose", crate::instruction::CompressAndClose::DISCRIMINATOR), - ]; - - for (name, existing_disc) in existing { - assert!( - !existing_disc.starts_with(disc), - "nullify_2 1-byte discriminator {:?} collides with {name} discriminator prefix {:?}", - disc, - &existing_disc[..disc.len().min(existing_disc.len())] - ); - } - } + use super::*; #[test] fn test_nullify_dedup_instruction_data_size() { @@ -949,48 +795,130 @@ mod tests { assert_eq!(disc.len(), 1, "nullify_dedup discriminator must be 1 byte"); assert_eq!(disc, &[79], "nullify_dedup discriminator must 
be [79]"); - // Verify no collision with nullify_2's discriminator [78] - let nullify_2_disc = crate::instruction::Nullify2::DISCRIMINATOR; - assert_ne!( - disc, nullify_2_disc, - "nullify_dedup [79] must not collide with nullify_2 [78]" - ); - let existing: &[(&str, &[u8])] = &[ - ("InitializeProtocolConfig", crate::instruction::InitializeProtocolConfig::DISCRIMINATOR), - ("UpdateProtocolConfig", crate::instruction::UpdateProtocolConfig::DISCRIMINATOR), - ("RegisterSystemProgram", crate::instruction::RegisterSystemProgram::DISCRIMINATOR), - ("DeregisterSystemProgram", crate::instruction::DeregisterSystemProgram::DISCRIMINATOR), - ("RegisterForester", crate::instruction::RegisterForester::DISCRIMINATOR), - ("UpdateForesterPda", crate::instruction::UpdateForesterPda::DISCRIMINATOR), - ("UpdateForesterPdaWeight", crate::instruction::UpdateForesterPdaWeight::DISCRIMINATOR), - ("RegisterForesterEpoch", crate::instruction::RegisterForesterEpoch::DISCRIMINATOR), - ("FinalizeRegistration", crate::instruction::FinalizeRegistration::DISCRIMINATOR), + ( + "InitializeProtocolConfig", + crate::instruction::InitializeProtocolConfig::DISCRIMINATOR, + ), + ( + "UpdateProtocolConfig", + crate::instruction::UpdateProtocolConfig::DISCRIMINATOR, + ), + ( + "RegisterSystemProgram", + crate::instruction::RegisterSystemProgram::DISCRIMINATOR, + ), + ( + "DeregisterSystemProgram", + crate::instruction::DeregisterSystemProgram::DISCRIMINATOR, + ), + ( + "RegisterForester", + crate::instruction::RegisterForester::DISCRIMINATOR, + ), + ( + "UpdateForesterPda", + crate::instruction::UpdateForesterPda::DISCRIMINATOR, + ), + ( + "UpdateForesterPdaWeight", + crate::instruction::UpdateForesterPdaWeight::DISCRIMINATOR, + ), + ( + "RegisterForesterEpoch", + crate::instruction::RegisterForesterEpoch::DISCRIMINATOR, + ), + ( + "FinalizeRegistration", + crate::instruction::FinalizeRegistration::DISCRIMINATOR, + ), ("ReportWork", crate::instruction::ReportWork::DISCRIMINATOR), - 
("InitializeAddressMerkleTree", crate::instruction::InitializeAddressMerkleTree::DISCRIMINATOR), - ("InitializeStateMerkleTree", crate::instruction::InitializeStateMerkleTree::DISCRIMINATOR), + ( + "InitializeAddressMerkleTree", + crate::instruction::InitializeAddressMerkleTree::DISCRIMINATOR, + ), + ( + "InitializeStateMerkleTree", + crate::instruction::InitializeStateMerkleTree::DISCRIMINATOR, + ), ("Nullify", crate::instruction::Nullify::DISCRIMINATOR), - ("Nullify2", crate::instruction::Nullify2::DISCRIMINATOR), - ("UpdateAddressMerkleTree", crate::instruction::UpdateAddressMerkleTree::DISCRIMINATOR), - ("RolloverAddressMerkleTreeAndQueue", crate::instruction::RolloverAddressMerkleTreeAndQueue::DISCRIMINATOR), - ("RolloverStateMerkleTreeAndQueue", crate::instruction::RolloverStateMerkleTreeAndQueue::DISCRIMINATOR), - ("InitializeBatchedStateMerkleTree", crate::instruction::InitializeBatchedStateMerkleTree::DISCRIMINATOR), - ("BatchNullify", crate::instruction::BatchNullify::DISCRIMINATOR), - ("BatchAppend", crate::instruction::BatchAppend::DISCRIMINATOR), - ("InitializeBatchedAddressMerkleTree", crate::instruction::InitializeBatchedAddressMerkleTree::DISCRIMINATOR), - ("BatchUpdateAddressTree", crate::instruction::BatchUpdateAddressTree::DISCRIMINATOR), - ("RolloverBatchedAddressMerkleTree", crate::instruction::RolloverBatchedAddressMerkleTree::DISCRIMINATOR), - ("RolloverBatchedStateMerkleTree", crate::instruction::RolloverBatchedStateMerkleTree::DISCRIMINATOR), - ("MigrateState", crate::instruction::MigrateState::DISCRIMINATOR), - ("CreateConfigCounter", crate::instruction::CreateConfigCounter::DISCRIMINATOR), - ("CreateCompressibleConfig", crate::instruction::CreateCompressibleConfig::DISCRIMINATOR), - ("UpdateCompressibleConfig", crate::instruction::UpdateCompressibleConfig::DISCRIMINATOR), - ("PauseCompressibleConfig", crate::instruction::PauseCompressibleConfig::DISCRIMINATOR), - ("UnpauseCompressibleConfig", 
crate::instruction::UnpauseCompressibleConfig::DISCRIMINATOR), - ("DeprecateCompressibleConfig", crate::instruction::DeprecateCompressibleConfig::DISCRIMINATOR), - ("WithdrawFundingPool", crate::instruction::WithdrawFundingPool::DISCRIMINATOR), + ( + "UpdateAddressMerkleTree", + crate::instruction::UpdateAddressMerkleTree::DISCRIMINATOR, + ), + ( + "RolloverAddressMerkleTreeAndQueue", + crate::instruction::RolloverAddressMerkleTreeAndQueue::DISCRIMINATOR, + ), + ( + "RolloverStateMerkleTreeAndQueue", + crate::instruction::RolloverStateMerkleTreeAndQueue::DISCRIMINATOR, + ), + ( + "InitializeBatchedStateMerkleTree", + crate::instruction::InitializeBatchedStateMerkleTree::DISCRIMINATOR, + ), + ( + "BatchNullify", + crate::instruction::BatchNullify::DISCRIMINATOR, + ), + ( + "BatchAppend", + crate::instruction::BatchAppend::DISCRIMINATOR, + ), + ( + "InitializeBatchedAddressMerkleTree", + crate::instruction::InitializeBatchedAddressMerkleTree::DISCRIMINATOR, + ), + ( + "BatchUpdateAddressTree", + crate::instruction::BatchUpdateAddressTree::DISCRIMINATOR, + ), + ( + "RolloverBatchedAddressMerkleTree", + crate::instruction::RolloverBatchedAddressMerkleTree::DISCRIMINATOR, + ), + ( + "RolloverBatchedStateMerkleTree", + crate::instruction::RolloverBatchedStateMerkleTree::DISCRIMINATOR, + ), + ( + "MigrateState", + crate::instruction::MigrateState::DISCRIMINATOR, + ), + ( + "CreateConfigCounter", + crate::instruction::CreateConfigCounter::DISCRIMINATOR, + ), + ( + "CreateCompressibleConfig", + crate::instruction::CreateCompressibleConfig::DISCRIMINATOR, + ), + ( + "UpdateCompressibleConfig", + crate::instruction::UpdateCompressibleConfig::DISCRIMINATOR, + ), + ( + "PauseCompressibleConfig", + crate::instruction::PauseCompressibleConfig::DISCRIMINATOR, + ), + ( + "UnpauseCompressibleConfig", + crate::instruction::UnpauseCompressibleConfig::DISCRIMINATOR, + ), + ( + "DeprecateCompressibleConfig", + crate::instruction::DeprecateCompressibleConfig::DISCRIMINATOR, + ), + ( + 
"WithdrawFundingPool", + crate::instruction::WithdrawFundingPool::DISCRIMINATOR, + ), ("Claim", crate::instruction::Claim::DISCRIMINATOR), - ("CompressAndClose", crate::instruction::CompressAndClose::DISCRIMINATOR), + ( + "CompressAndClose", + crate::instruction::CompressAndClose::DISCRIMINATOR, + ), ]; for (name, existing_disc) in existing { @@ -1005,51 +933,51 @@ mod tests { #[test] fn test_compress_proofs_round_trip() { - // Create 4 proofs with sharing patterns that fit within MAX_NODES (28). - // Budget: 15 (proof_1) + 5 (proof_2 unique) + 5 (proof_3 unique) + 3 (proof_4 unique) = 28 + // Create 4 proofs with sharing patterns that fit within MAX_NODES (27). + // Budget: 15 (proof_1) + 5 (proof_2 unique) + 5 (proof_3 unique) + 2 (proof_4 unique) = 27 let shared_top = [0xCC; 32]; let mut proof_1 = [[0u8; 32]; 16]; let mut proof_2 = [[0u8; 32]; 16]; let mut proof_3 = [[0u8; 32]; 16]; let mut proof_4 = [[0u8; 32]; 16]; - for i in 0..15 { - proof_1[i] = [i as u8 + 1; 32]; + for (i, slot) in proof_1.iter_mut().enumerate().take(15) { + *slot = [i as u8 + 1; 32]; } proof_1[15] = shared_top; // proof_2: 10 shared with proof_1, 5 unique (levels 0-4) - for i in 0..15 { + for (i, slot) in proof_2.iter_mut().enumerate().take(15) { if i < 5 { - proof_2[i] = [i as u8 + 100; 32]; // unique + *slot = [i as u8 + 100; 32]; // unique } else { - proof_2[i] = proof_1[i]; // shared + *slot = proof_1[i]; // shared } } proof_2[15] = shared_top; // proof_3: 5 from proof_1, 5 new (levels 5-9), 5 from proof_2 - for i in 0..15 { + for (i, slot) in proof_3.iter_mut().enumerate().take(15) { if i < 5 { - proof_3[i] = proof_1[i]; // same as proof_1 + *slot = proof_1[i]; // same as proof_1 } else if i < 10 { - proof_3[i] = [i as u8 + 200; 32]; // new + *slot = [i as u8 + 200; 32]; // new } else { - proof_3[i] = proof_2[i]; // same as proof_2 (and proof_1) + *slot = proof_2[i]; // same as proof_2 (and proof_1) } } proof_3[15] = shared_top; - // proof_4: 4 from proof_1, 4 from proof_2, 4 from 
proof_3, 3 new - for i in 0..15 { + // proof_4: 4 from proof_1, 4 from proof_2, 5 from proof_3, 2 new + for (i, slot) in proof_4.iter_mut().enumerate().take(15) { if i < 4 { - proof_4[i] = proof_1[i]; // from proof_1 + *slot = proof_1[i]; // from proof_1 } else if i < 8 { - proof_4[i] = proof_2[i]; // from proof_2 - } else if i < 12 { - proof_4[i] = proof_3[i]; // from proof_3 + *slot = proof_2[i]; // from proof_2 + } else if i < 13 { + *slot = proof_3[i]; // from proof_3 } else { - proof_4[i] = [(i as u8).wrapping_add(250); 32]; // new + *slot = [(i as u8).wrapping_add(250); 32]; // new } } proof_4[15] = shared_top; @@ -1057,7 +985,13 @@ mod tests { let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2, &proof_3, &proof_4]; let result = compress_proofs(&proofs); assert!(result.is_some(), "compress_proofs should succeed"); - let (p2_shared, p3_source, p4_source, top, nodes) = result.unwrap(); + let CompressedProofs { + proof_2_shared: p2_shared, + proof_3_source: p3_source, + proof_4_source: p4_source, + shared_top_node: top, + nodes, + } = result.unwrap(); // Simulate on-chain reconstruction let mut cursor = 0usize; @@ -1126,8 +1060,8 @@ mod tests { let shared_top = [0xCC; 32]; let make_proof = |base: u8| -> [[u8; 32]; 16] { let mut p = [[0u8; 32]; 16]; - for i in 0..15 { - p[i] = [base.wrapping_add(i as u8); 32]; + for (i, slot) in p.iter_mut().enumerate().take(15) { + *slot = [base.wrapping_add(i as u8); 32]; } p[15] = shared_top; p @@ -1165,7 +1099,13 @@ mod tests { let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2]; let result = compress_proofs(&proofs); assert!(result.is_some(), "2 proofs should compress"); - let (p2_shared, p3_source, p4_source, top, nodes) = result.unwrap(); + let CompressedProofs { + proof_2_shared: p2_shared, + proof_3_source: p3_source, + proof_4_source: p4_source, + shared_top_node: top, + nodes, + } = result.unwrap(); // proof_3_source and proof_4_source should be 0 (unused) assert_eq!(p3_source, 0); @@ -1215,7 +1155,10 
@@ mod tests { let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2, &proof_3]; let result = compress_proofs(&proofs); assert!(result.is_some(), "3 proofs should compress"); - let (_, _, p4_source, _, _) = result.unwrap(); + let CompressedProofs { + proof_4_source: p4_source, + .. + } = result.unwrap(); assert_eq!(p4_source, 0, "proof_4_source should be 0 for 3 proofs"); } } diff --git a/programs/registry/src/lib.rs b/programs/registry/src/lib.rs index 11452fadbe..6040b69933 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -420,48 +420,6 @@ pub mod light_registry { ) } - /// Nullifies two leaves in a single instruction via two sequential CPIs. - /// Uses a 1-byte custom discriminator + shared proof node to fit within - /// the 1232-byte transaction limit when combined with an address lookup table. - /// The two leaves must share the same subtree at level 15 (highest proof - /// level below canopy 10), so the forester pairs leaves whose indices - /// agree on bits 16-25. - /// Bump is derived on-chain via ctx.bumps.cpi_authority. - #[allow(clippy::too_many_arguments)] - #[instruction(discriminator = [78])] - pub fn nullify_2<'info>( - ctx: Context<'_, '_, '_, 'info, NullifyLeaves<'info>>, - change_log_index: u16, - queue_index_0: u16, - queue_index_1: u16, - leaf_index_0: u32, - leaf_index_1: u32, - proof_0: [[u8; 32]; 15], - proof_1: [[u8; 32]; 15], - shared_proof_node: [u8; 32], - ) -> Result<()> { - let metadata = ctx.accounts.merkle_tree.load()?.metadata; - check_forester( - &metadata, - ctx.accounts.authority.key(), - ctx.accounts.nullifier_queue.key(), - &mut ctx.accounts.registered_forester_pda, - 2 * DEFAULT_WORK_V1, - )?; - - process_nullify_2( - &ctx, - change_log_index, - queue_index_0, - queue_index_1, - leaf_index_0, - leaf_index_1, - proof_0, - proof_1, - shared_proof_node, - ) - } - /// Nullifies 2-4 leaves in a single instruction via sequential CPIs. 
/// Uses proof deduplication: nearby leaves share Merkle proof nodes at /// common ancestor levels. The encoding stores each unique node once and diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs index 9b40b900e5..2c3e82972a 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs @@ -3863,7 +3863,7 @@ async fn test_d9_edge_many_literals() { #[tokio::test] async fn test_d9_edge_mixed() { use csdk_anchor_full_derived_test::d9_seeds::{ - edge_cases::{AB, SEED_123, _UNDERSCORE_CONST}, + edge_cases::{_UNDERSCORE_CONST, AB, SEED_123}, D9EdgeMixedParams, }; From 16b5985ae09247531c478ca43cee57981daf7b6e Mon Sep 17 00:00:00 2001 From: ananas Date: Wed, 18 Mar 2026 00:06:01 +0000 Subject: [PATCH 04/16] feat: disable v1 state multi-nullify when queue exceeds 10,000 items Adds queue_item_count to BuildTransactionBatchConfig and disables multi-nullify when the queue is too large, falling back to single nullify for more reliable throughput. Renames use_dedup to use_multi_nullify for consistency. 
--- forester/src/cli.rs | 8 + forester/src/config.rs | 6 + forester/src/epoch_manager.rs | 125 +++-------- forester/src/processor/v1/config.rs | 3 + forester/src/processor/v1/helpers.rs | 24 +-- forester/src/processor/v1/send_transaction.rs | 20 +- forester/src/processor/v1/tx_builder.rs | 10 +- forester/tests/e2e_test.rs | 22 +- forester/tests/legacy/priority_fee_test.rs | 2 + forester/tests/legacy/test_utils.rs | 1 + forester/tests/priority_fee_test.rs | 1 + ...=> test_nullify_state_v1_multi_tx_size.rs} | 39 ++-- forester/tests/test_utils.rs | 1 + ...ify_dedup.rs => nullify_state_v1_multi.rs} | 34 +-- .../src/account_compression_cpi/nullify.rs | 2 +- .../src/account_compression_cpi/sdk.rs | 200 ++---------------- programs/registry/src/lib.rs | 5 +- .../tests/integration_tests.rs | 2 +- 18 files changed, 160 insertions(+), 345 deletions(-) rename forester/tests/{test_nullify_dedup_tx_size.rs => test_nullify_state_v1_multi_tx_size.rs} (75%) rename program-tests/registry-test/tests/{nullify_dedup.rs => nullify_state_v1_multi.rs} (95%) diff --git a/forester/src/cli.rs b/forester/src/cli.rs index 3a22a633c7..838172a24c 100644 --- a/forester/src/cli.rs +++ b/forester/src/cli.rs @@ -295,6 +295,14 @@ pub struct StartArgs { )] pub min_queue_items: Option, + #[arg( + long, + env = "ENABLE_V1_MULTI_NULLIFY", + help = "Enable nullify_state_v1_multi instruction for batching 2-4 V1 state nullifications per instruction. Requires --lookup-table-address.", + default_value = "false" + )] + pub enable_v1_multi_nullify: bool, + #[arg( long, env = "API_SERVER_PORT", diff --git a/forester/src/config.rs b/forester/src/config.rs index 6043c3151a..7452283b1d 100644 --- a/forester/src/config.rs +++ b/forester/src/config.rs @@ -33,6 +33,9 @@ pub struct ForesterConfig { /// Minimum queue items before processing V1 state nullifications. /// Delays processing to allow dedup grouping. Only applies when lookup_table_address is set. 
pub min_queue_items: Option, + /// Enable nullify_state_v1_multi instruction for batching 2-4 V1 state nullifications. + /// Requires lookup_table_address to be set. + pub enable_v1_multi_nullify: bool, } #[derive(Debug, Clone)] @@ -425,6 +428,7 @@ impl ForesterConfig { }) .transpose()?, min_queue_items: args.min_queue_items, + enable_v1_multi_nullify: args.enable_v1_multi_nullify, }) } @@ -480,6 +484,7 @@ impl ForesterConfig { compressible_config: None, lookup_table_address: None, min_queue_items: None, + enable_v1_multi_nullify: false, }) } } @@ -501,6 +506,7 @@ impl Clone for ForesterConfig { compressible_config: self.compressible_config.clone(), lookup_table_address: self.lookup_table_address, min_queue_items: self.min_queue_items, + enable_v1_multi_nullify: self.enable_v1_multi_nullify, } } } diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index d4e069ca63..f04b12eb6f 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -275,7 +275,7 @@ pub struct EpochManager { mint_tracker: Option>, /// Cached zkp_batch_size per tree to filter queue updates below threshold zkp_batch_sizes: Arc>, - address_lookup_tables: Arc>>, + address_lookup_tables: Arc>, heartbeat: Arc, run_id: Arc, /// Per-epoch registration trackers to coordinate re-finalization when new foresters register mid-epoch @@ -328,7 +328,7 @@ impl EpochManager { compressible_tracker: Option>, pda_tracker: Option>, mint_tracker: Option>, - address_lookup_tables: Arc>>, + address_lookup_tables: Arc>, heartbeat: Arc, run_id: String, ) -> Result { @@ -1096,21 +1096,6 @@ impl EpochManager { debug!("Recovered registration info for epoch {}", epoch); update_epoch_registered(epoch); - // Extend ALT with new forester epoch PDA if ALT is configured - let forester_epoch_pda_pubkey = - get_forester_epoch_pda_from_authority(&self.config.derivation_pubkey, epoch).0; - if let Err(e) = self - .extend_alt_with_forester_pda(forester_epoch_pda_pubkey) - .await - { - warn!( 
- event = "extend_alt_failed", - epoch, - error = ?e, - "Failed to extend ALT with forester PDA, continuing with static account" - ); - } - // Wait for the active phase registration_info = match self.wait_for_active_phase(®istration_info).await? { Some(info) => info, @@ -1395,51 +1380,6 @@ impl EpochManager { } } - async fn extend_alt_with_forester_pda(&self, forester_epoch_pda: Pubkey) -> anyhow::Result<()> { - let alt_address = match self.config.lookup_table_address { - Some(addr) => addr, - None => return Ok(()), - }; - - // Check if the PDA is already in the ALT - { - let alt = self.address_lookup_tables.read().await; - if alt - .iter() - .any(|t| t.addresses.contains(&forester_epoch_pda)) - { - return Ok(()); - } - } - - let extend_ix = light_client::rpc::lut::instruction::extend_lookup_table( - alt_address, - self.config.payer_keypair.pubkey(), - Some(self.config.payer_keypair.pubkey()), - vec![forester_epoch_pda], - ); - let payer_pubkey = self.config.payer_keypair.pubkey(); - let mut rpc = self.rpc_pool.get_connection().await?; - rpc.create_and_send_transaction(&[extend_ix], &payer_pubkey, &[&self.config.payer_keypair]) - .await - .map_err(|e| anyhow::anyhow!("Failed to extend ALT: {e}"))?; - - // Reload the ALT from on-chain - let updated_lut = load_lookup_table_async(&*rpc, alt_address).await?; - info!( - event = "alt_extended", - lookup_table = %alt_address, - new_address = %forester_epoch_pda, - address_count = updated_lut.addresses.len(), - "Extended ALT with forester epoch PDA" - ); - - let mut alt = self.address_lookup_tables.write().await; - *alt = vec![updated_lut]; - - Ok(()) - } - async fn recover_registration_info_internal( &self, epoch: u64, @@ -1574,14 +1514,13 @@ impl EpochManager { }; let payer = self.config.payer_keypair.pubkey(); let signers = [&self.config.payer_keypair]; - let alt_guard = self.address_lookup_tables.read().await; send_smart_transaction( &mut *rpc, SendSmartTransactionConfig { instructions: vec![ix], payer: &payer, 
signers: &signers, - address_lookup_tables: &alt_guard, + address_lookup_tables: &self.address_lookup_tables, compute_budget: ComputeBudgetConfig { compute_unit_price: priority_fee, compute_unit_limit: Some(self.config.transaction_config.cu_limit), @@ -1986,14 +1925,13 @@ impl EpochManager { }; let payer = self.config.payer_keypair.pubkey(); let signers = [&self.config.payer_keypair]; - let alt_guard = self.address_lookup_tables.read().await; match send_smart_transaction( &mut *rpc, SendSmartTransactionConfig { instructions: vec![ix], payer: &payer, signers: &signers, - address_lookup_tables: &alt_guard, + address_lookup_tables: &self.address_lookup_tables, compute_budget: ComputeBudgetConfig { compute_unit_price: priority_fee, compute_unit_limit: Some(self.config.transaction_config.cu_limit), @@ -3055,6 +2993,7 @@ impl EpochManager { compute_unit_limit: Some(self.config.transaction_config.cu_limit), enable_priority_fees: self.config.transaction_config.enable_priority_fees, max_concurrent_sends: Some(self.config.transaction_config.max_concurrent_sends), + queue_item_count: 0, }, queue_config: self.config.queue_config, retry_config: RetryConfig { @@ -3070,8 +3009,8 @@ impl EpochManager { min_queue_items: None, // set below after reading ALT }; - let alt_snapshot = self.address_lookup_tables.read().await.clone(); - if !alt_snapshot.is_empty() { + let alt_snapshot = (*self.address_lookup_tables).clone(); + if self.config.enable_v1_multi_nullify && !alt_snapshot.is_empty() { batched_tx_config.min_queue_items = self.config.min_queue_items; } let transaction_builder = Arc::new(EpochManagerTransactions::new( @@ -3079,6 +3018,7 @@ impl EpochManager { epoch_info.epoch, self.tx_cache.clone(), alt_snapshot, + self.config.enable_v1_multi_nullify, )); let num_sent = send_batched_transactions( @@ -3280,14 +3220,13 @@ impl EpochManager { } // No existing processor - create new one - let alt_guard = self.address_lookup_tables.read().await; let batch_context = 
self.build_batch_context( epoch_info, tree_accounts, None, None, None, - Arc::new(alt_guard.clone()), + self.address_lookup_tables.clone(), ); let processor = Arc::new(Mutex::new( QueueProcessor::new(batch_context, StateTreeStrategy).await?, @@ -3342,14 +3281,13 @@ impl EpochManager { } // No existing processor - create new one - let alt_guard = self.address_lookup_tables.read().await; let batch_context = self.build_batch_context( epoch_info, tree_accounts, None, None, None, - Arc::new(alt_guard.clone()), + self.address_lookup_tables.clone(), ); let processor = Arc::new(Mutex::new( QueueProcessor::new(batch_context, AddressTreeStrategy).await?, @@ -3927,14 +3865,13 @@ impl EpochManager { let instruction_count = instructions.len(); let payer = self.config.payer_keypair.pubkey(); let signers = [&self.config.payer_keypair]; - let alt_guard = self.address_lookup_tables.read().await; match send_smart_transaction( &mut *rpc, SendSmartTransactionConfig { instructions, payer: &payer, signers: &signers, - address_lookup_tables: &alt_guard, + address_lookup_tables: &self.address_lookup_tables, compute_budget: ComputeBudgetConfig { compute_unit_price: priority_fee, compute_unit_limit: Some(self.config.transaction_config.cu_limit), @@ -4103,14 +4040,13 @@ impl EpochManager { .await?; let payer = self.config.payer_keypair.pubkey(); let signers = [&self.config.payer_keypair]; - let alt_guard = self.address_lookup_tables.read().await; match send_smart_transaction( &mut rpc, SendSmartTransactionConfig { instructions: vec![ix], payer: &payer, signers: &signers, - address_lookup_tables: &alt_guard, + address_lookup_tables: &self.address_lookup_tables, compute_budget: ComputeBudgetConfig { compute_unit_price: priority_fee, compute_unit_limit: Some(self.config.transaction_config.cu_limit), @@ -4546,28 +4482,28 @@ pub async fn run_service( let address_lookup_tables = { if let Some(lut_address) = config.lookup_table_address { let rpc = rpc_pool.get_connection().await?; - match 
load_lookup_table_async(&*rpc, lut_address).await { - Ok(lut) => { - info!( - event = "lookup_table_loaded", + let lut = load_lookup_table_async(&*rpc, lut_address).await + .map_err(|e| { + error!( + event = "lookup_table_load_failed", run_id = %run_id_for_logs, lookup_table = %lut_address, - address_count = lut.addresses.len(), - "Loaded lookup table" + error = %e, + "Failed to load lookup table" ); - Arc::new(tokio::sync::RwLock::new(vec![lut])) - } - Err(e) => { - debug!( - "Lookup table {} not available: {}. Using legacy transactions.", - lut_address, e - ); - Arc::new(tokio::sync::RwLock::new(Vec::new())) - } - } + e + })?; + info!( + event = "lookup_table_loaded", + run_id = %run_id_for_logs, + lookup_table = %lut_address, + address_count = lut.addresses.len(), + "Loaded lookup table" + ); + Arc::new(vec![lut]) } else { - debug!("No lookup table address configured. Using legacy transactions."); - Arc::new(tokio::sync::RwLock::new(Vec::new())) + debug!("No lookup table address configured. Using v1 state single nullify transactions."); + Arc::new(Vec::new()) } }; @@ -4738,6 +4674,7 @@ mod tests { compressible_config: None, lookup_table_address: None, min_queue_items: None, + enable_v1_multi_nullify: false, } } diff --git a/forester/src/processor/v1/config.rs b/forester/src/processor/v1/config.rs index 8af88b6e25..c06e378669 100644 --- a/forester/src/processor/v1/config.rs +++ b/forester/src/processor/v1/config.rs @@ -32,4 +32,7 @@ pub struct BuildTransactionBatchConfig { pub compute_unit_limit: Option, pub enable_priority_fees: bool, pub max_concurrent_sends: Option, + /// Number of items in the queue at the time of batch preparation. + /// Used to disable multi-nullify when queue is very large (>10,000 items). 
+ pub queue_item_count: usize, } diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs index a673d44be6..61ae2fcb55 100644 --- a/forester/src/processor/v1/helpers.rs +++ b/forester/src/processor/v1/helpers.rs @@ -14,9 +14,9 @@ use light_client::{ }; use light_compressed_account::TreeType; use light_registry::account_compression_cpi::sdk::{ - compress_proofs, create_nullify_dedup_instruction, create_nullify_instruction, + compress_proofs, create_nullify_instruction, create_nullify_state_v1_multi_instruction, create_update_address_merkle_tree_instruction, CompressedProofs, - CreateNullifyDedupInstructionInputs, CreateNullifyInstructionInputs, + CreateNullifyInstructionInputs, CreateNullifyStateV1MultiInstructionInputs, UpdateAddressMerkleTreeInstructionInputs, }; use solana_program::instruction::Instruction; @@ -44,7 +44,7 @@ pub async fn fetch_proofs_and_create_instructions( pool: Arc>, epoch: u64, work_items: &[WorkItem], - use_dedup: bool, + use_multi_nullify: bool, ) -> crate::Result<(Vec, Vec)> { let mut proofs = Vec::new(); let mut instructions = vec![]; @@ -384,7 +384,7 @@ pub async fn fetch_proofs_and_create_instructions( .map(|(item, proof)| (*item, proof)) .collect(); - if use_dedup && items_with_proofs.len() >= 2 { + if use_multi_nullify && items_with_proofs.len() >= 2 { let groups = group_state_items_for_dedup(&mut items_with_proofs); // Push proofs in sorted order (after grouping may have sorted) @@ -413,7 +413,7 @@ pub async fn fetch_proofs_and_create_instructions( 0 }; info!( - event = "v1_nullify_dedup_grouping", + event = "v1_nullify_state_v1_multi_grouping", total_leaves, groups_of_4 = count_4, groups_of_3 = count_3, @@ -466,8 +466,8 @@ pub async fn fetch_proofs_and_create_instructions( } let node_count = nodes.len(); - let instruction = create_nullify_dedup_instruction( - CreateNullifyDedupInstructionInputs { + let instruction = create_nullify_state_v1_multi_instruction( + 
CreateNullifyStateV1MultiInstructionInputs { authority, nullifier_queue: first_item.0.tree_account.queue, merkle_tree: first_item.0.tree_account.merkle_tree, @@ -485,11 +485,11 @@ pub async fn fetch_proofs_and_create_instructions( epoch, ); debug!( - event = "v1_nullify_dedup_instruction", + event = "v1_nullify_state_v1_multi_instruction", group_size = group_indices.len(), node_count, ix_data_bytes = instruction.data.len(), - "Created nullify_dedup instruction" + "Created nullify_state_v1_multi instruction" ); instructions.push(instruction); } @@ -533,7 +533,7 @@ fn build_nullify_instruction( /// Groups sorted (WorkItem, MerkleProof) pairs for dedup nullification. /// Returns a vec of groups: each group is a vec of indices into `items_with_proofs` -/// that can be packed into a single nullify_dedup instruction (2-4 items), +/// that can be packed into a single nullify_state_v1_multi instruction (2-4 items), /// or a singleton for regular nullify. fn group_state_items_for_dedup( items_with_proofs: &mut [(&WorkItem, MerkleProof)], @@ -780,8 +780,8 @@ mod tests { let shared_top = [0xFFu8; 32]; let work_items: Vec = (0..3).map(|_| make_work_item()).collect(); // Each proof has unique nodes per leaf, so compress_proofs fails when - // total unique nodes exceed NULLIFY_DEDUP_MAX_NODES (28). - // proof_1 contributes 15 nodes; proof_2 has 15 unique => 30 total > 28. + // total unique nodes exceed NULLIFY_STATE_V1_MULTI_MAX_NODES (26). + // proof_1 contributes 15 nodes; proof_2 has 15 unique => 30 total > 26. 
let proofs: Vec = (0..3).map(|i| make_proof(i * 1000, shared_top)).collect(); let mut items: Vec<(&WorkItem, MerkleProof)> = work_items.iter().zip(proofs).collect(); let groups = group_state_items_for_dedup(&mut items); diff --git a/forester/src/processor/v1/send_transaction.rs b/forester/src/processor/v1/send_transaction.rs index a4a8ec1664..fb1f078c26 100644 --- a/forester/src/processor/v1/send_transaction.rs +++ b/forester/src/processor/v1/send_transaction.rs @@ -111,11 +111,19 @@ pub async fn send_batched_transactions MULTI_NULLIFY_MAX_QUEUE_SIZE { + warn!( + tree = %tree_accounts.merkle_tree, + "v1 state multi-nullify disabled: queue_item_count {} exceeds threshold {}", + data.work_items.len(), MULTI_NULLIFY_MAX_QUEUE_SIZE + ); + } + + let max_concurrent_sends = build_config.max_concurrent_sends.unwrap_or(1).max(1); let effective_max_concurrent_sends = compute_effective_max_concurrent_sends(config, max_concurrent_sends, data.work_items.len()); @@ -176,7 +184,7 @@ pub async fn send_batched_transactions { pub phantom: std::marker::PhantomData, pub processed_hash_cache: Arc>, pub address_lookup_tables: Vec, + pub enable_v1_multi_nullify: bool, } impl EpochManagerTransactions { @@ -54,6 +55,7 @@ impl EpochManagerTransactions { epoch: u64, cache: Arc>, address_lookup_tables: Vec, + enable_v1_multi_nullify: bool, ) -> Self { Self { pool, @@ -61,6 +63,7 @@ impl EpochManagerTransactions { phantom: std::marker::PhantomData, processed_hash_cache: cache, address_lookup_tables, + enable_v1_multi_nullify, } } } @@ -120,7 +123,10 @@ impl TransactionBuilder for EpochManagerTransactions { .map(|&item| item.clone()) .collect::>(); - let use_dedup = !self.address_lookup_tables.is_empty(); + const MULTI_NULLIFY_MAX_QUEUE_SIZE: usize = 10_000; + let use_multi_nullify = self.enable_v1_multi_nullify + && !self.address_lookup_tables.is_empty() + && config.queue_item_count <= MULTI_NULLIFY_MAX_QUEUE_SIZE; let mut transactions = vec![]; let all_instructions = match 
fetch_proofs_and_create_instructions( payer.pubkey(), @@ -128,7 +134,7 @@ impl TransactionBuilder for EpochManagerTransactions { self.pool.clone(), self.epoch, work_items.as_slice(), - use_dedup, + use_multi_nullify, ) .await { diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index 3958dece65..cc17436872 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -38,7 +38,7 @@ use light_compressed_token::process_transfer::{ use light_hasher::Poseidon; use light_program_test::accounts::test_accounts::TestAccounts; use light_prover_client::prover::spawn_prover; -use light_registry::account_compression_cpi::sdk::nullify_dedup_lookup_table_accounts; +use light_registry::account_compression_cpi::sdk::nullify_state_v1_multi_lookup_table_accounts; use light_test_utils::{ actions::{create_compressible_token_account, CreateCompressibleTokenAccountInputs}, conversions::sdk_to_program_token_data, @@ -191,13 +191,12 @@ fn is_v2_address_test_enabled() -> bool { } /// Creates an on-chain Address Lookup Table populated with the accounts -/// needed for nullify_dedup instructions. Returns the ALT address. -async fn create_nullify_dedup_alt( +/// needed for nullify_state_v1_multi instructions. Returns the ALT address. 
+async fn create_nullify_state_v1_multi_alt( rpc: &mut R, payer: &Keypair, merkle_tree: Pubkey, nullifier_queue: Pubkey, - forester_pda: Option, ) -> Pubkey { use light_client::rpc::lut::instruction::{create_lookup_table, extend_lookup_table}; @@ -207,7 +206,7 @@ async fn create_nullify_dedup_alt( .await .unwrap(); - let addresses = nullify_dedup_lookup_table_accounts(merkle_tree, nullifier_queue, forester_pda); + let addresses = nullify_state_v1_multi_lookup_table_accounts(merkle_tree, nullifier_queue); let extend_ix = extend_lookup_table(alt_address, payer.pubkey(), Some(payer.pubkey()), addresses); rpc.create_and_send_transaction(&[extend_ix], &payer.pubkey(), &[payer]) @@ -288,6 +287,7 @@ async fn e2e_test() { pda_programs: vec![], }), min_queue_items: None, + enable_v1_multi_nullify: false, }; let test_mode = TestMode::from_env(); @@ -325,19 +325,19 @@ async fn e2e_test() { .await; } - // Create ALT for nullify_dedup if V1 state test is enabled + // Create ALT for nullify_state_v1_multi if V1 state test is enabled if is_v1_state_test_enabled() { - let alt_addr = create_nullify_dedup_alt( + let alt_addr = create_nullify_state_v1_multi_alt( &mut rpc, &env.protocol.forester, env.v1_state_trees[0].merkle_tree, env.v1_state_trees[0].nullifier_queue, - None, ) .await; - println!("Created nullify_dedup ALT: {}", alt_addr); + println!("Created nullify_state_v1_multi ALT: {}", alt_addr); config.lookup_table_address = Some(alt_addr); config.min_queue_items = Some(10); + config.enable_v1_multi_nullify = true; } // Get initial state for V1 state tree if enabled @@ -651,10 +651,10 @@ async fn e2e_test() { .max_by_key(|e| e.metadata().unwrap().modified().unwrap()); if let Some(log_entry) = latest_log { let content = std::fs::read_to_string(log_entry.path()).unwrap(); - let has_dedup = content.contains("v1_nullify_dedup_grouping"); + let has_dedup = content.contains("v1_nullify_state_v1_multi_grouping"); assert!( has_dedup, - "Expected v1_nullify_dedup_grouping logs when ALT 
is configured" + "Expected v1_nullify_state_v1_multi_grouping logs when ALT is configured" ); println!("Verified: dedup grouping events found in forester logs"); } else { diff --git a/forester/tests/legacy/priority_fee_test.rs b/forester/tests/legacy/priority_fee_test.rs index 46f5b2afe9..37f2ed010a 100644 --- a/forester/tests/legacy/priority_fee_test.rs +++ b/forester/tests/legacy/priority_fee_test.rs @@ -87,6 +87,8 @@ async fn test_priority_fee_request() { tree_ids: vec![], enable_compressible: false, lookup_table_address: None, + min_queue_items: None, + enable_v1_multi_nullify: false, api_server_port: 8080, api_server_public_bind: false, group_authority: None, diff --git a/forester/tests/legacy/test_utils.rs b/forester/tests/legacy/test_utils.rs index 4a7cb1c543..4642bd3af7 100644 --- a/forester/tests/legacy/test_utils.rs +++ b/forester/tests/legacy/test_utils.rs @@ -124,6 +124,7 @@ pub fn forester_config() -> ForesterConfig { compressible_config: None, lookup_table_address: None, min_queue_items: None, + enable_v1_multi_nullify: false, } } diff --git a/forester/tests/priority_fee_test.rs b/forester/tests/priority_fee_test.rs index cc88e78e1a..bb8149b94b 100644 --- a/forester/tests/priority_fee_test.rs +++ b/forester/tests/priority_fee_test.rs @@ -93,6 +93,7 @@ async fn test_priority_fee_request() { enable_compressible: true, lookup_table_address: None, min_queue_items: None, + enable_v1_multi_nullify: false, api_server_port: 8080, group_authority: None, light_pda_programs: vec![], diff --git a/forester/tests/test_nullify_dedup_tx_size.rs b/forester/tests/test_nullify_state_v1_multi_tx_size.rs similarity index 75% rename from forester/tests/test_nullify_dedup_tx_size.rs rename to forester/tests/test_nullify_state_v1_multi_tx_size.rs index 0b1e578bfc..269bbdb82a 100644 --- a/forester/tests/test_nullify_dedup_tx_size.rs +++ b/forester/tests/test_nullify_state_v1_multi_tx_size.rs @@ -1,9 +1,6 @@ -use light_registry::{ - account_compression_cpi::sdk::{ - 
create_nullify_dedup_instruction, nullify_dedup_lookup_table_accounts, - CreateNullifyDedupInstructionInputs, NULLIFY_DEDUP_MAX_NODES, - }, - utils::get_forester_epoch_pda_from_authority, +use light_registry::account_compression_cpi::sdk::{ + create_nullify_state_v1_multi_instruction, nullify_state_v1_multi_lookup_table_accounts, + CreateNullifyStateV1MultiInstructionInputs, NULLIFY_STATE_V1_MULTI_MAX_NODES, }; use solana_sdk::{ compute_budget::ComputeBudgetInstruction, @@ -15,21 +12,19 @@ use solana_sdk::{ transaction::VersionedTransaction, }; -/// Validates that a nullify_dedup v0 transaction with ALT and SetComputeUnitLimit +/// Validates that a nullify_state_v1_multi v0 transaction with ALT and SetComputeUnitLimit /// fits within the 1232-byte Solana transaction size limit. /// /// This is a pure serialization check -- no validator needed. #[test] -fn test_nullify_dedup_v0_transaction_size() { +fn test_nullify_state_v1_multi_v0_transaction_size() { let authority = Keypair::new(); let merkle_tree = Pubkey::new_unique(); let nullifier_queue = Pubkey::new_unique(); - let epoch = 0u64; - let forester_pda = get_forester_epoch_pda_from_authority(&authority.pubkey(), epoch).0; // Worst case: MAX_NODES unique nodes - let nullify_ix = create_nullify_dedup_instruction( - CreateNullifyDedupInstructionInputs { + let nullify_ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { authority: authority.pubkey(), nullifier_queue, merkle_tree, @@ -40,7 +35,7 @@ fn test_nullify_dedup_v0_transaction_size() { proof_3_source: 0, proof_4_source: 0, shared_top_node: [0xCC; 32], - nodes: vec![[0xAA; 32]; NULLIFY_DEDUP_MAX_NODES], + nodes: vec![[0xAA; 32]; NULLIFY_STATE_V1_MULTI_MAX_NODES], derivation: authority.pubkey(), is_metadata_forester: false, }, @@ -52,8 +47,7 @@ fn test_nullify_dedup_v0_transaction_size() { let compute_price_ix = ComputeBudgetInstruction::set_compute_unit_price(1); // Build synthetic ALT with the known accounts (includes 
ComputeBudget program ID) - let alt_accounts = - nullify_dedup_lookup_table_accounts(merkle_tree, nullifier_queue, Some(forester_pda)); + let alt_accounts = nullify_state_v1_multi_lookup_table_accounts(merkle_tree, nullifier_queue); let alt_address = Pubkey::new_unique(); let alt = AddressLookupTableAccount { key: alt_address, @@ -79,17 +73,20 @@ fn test_nullify_dedup_v0_transaction_size() { // Full tx = compact-u16 sig count (1) + signatures (64 * n) + serialized message let tx_size = 1 + tx.signatures.len() * 64 + serialized.len(); - let ix_data_size = 1 + 2 + 8 + 16 + 2 + 4 + 4 + 32 + 4 + NULLIFY_DEDUP_MAX_NODES * 32; + let ix_data_size = 8 + 2 + 8 + 16 + 2 + 4 + 4 + 32 + 4 + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32; println!( - "nullify_dedup v0 transaction size: {} bytes (limit: 1232)", + "nullify_state_v1_multi v0 transaction size: {} bytes (limit: 1232)", tx_size ); - println!(" nullify_dedup instruction data: {} bytes", ix_data_size); + println!( + " nullify_state_v1_multi instruction data: {} bytes", + ix_data_size + ); println!( " max_nodes: {} ({} bytes payload)", - NULLIFY_DEDUP_MAX_NODES, - NULLIFY_DEDUP_MAX_NODES * 32 + NULLIFY_STATE_V1_MULTI_MAX_NODES, + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32 ); println!(" margin: {} bytes", 1232_i64 - tx_size as i64); @@ -120,7 +117,7 @@ fn test_nullify_dedup_v0_transaction_size() { assert!( tx_size <= 1232, - "nullify_dedup v0 transaction is {} bytes, exceeds 1232 byte limit by {} bytes", + "nullify_state_v1_multi v0 transaction is {} bytes, exceeds 1232 byte limit by {} bytes", tx_size, tx_size - 1232 ); diff --git a/forester/tests/test_utils.rs b/forester/tests/test_utils.rs index 4eafd33da5..358c25c6fe 100644 --- a/forester/tests/test_utils.rs +++ b/forester/tests/test_utils.rs @@ -137,6 +137,7 @@ pub fn forester_config() -> ForesterConfig { compressible_config: None, lookup_table_address: None, min_queue_items: None, + enable_v1_multi_nullify: false, } } diff --git 
a/program-tests/registry-test/tests/nullify_dedup.rs b/program-tests/registry-test/tests/nullify_state_v1_multi.rs similarity index 95% rename from program-tests/registry-test/tests/nullify_dedup.rs rename to program-tests/registry-test/tests/nullify_state_v1_multi.rs index 37b651ea2c..160074357c 100644 --- a/program-tests/registry-test/tests/nullify_dedup.rs +++ b/program-tests/registry-test/tests/nullify_state_v1_multi.rs @@ -5,8 +5,8 @@ use light_compressed_account::TreeType; use light_hasher::Poseidon; use light_program_test::{program_test::LightProgramTest, ProgramTestConfig}; use light_registry::account_compression_cpi::sdk::{ - compress_proofs, create_nullify_dedup_instruction, CompressedProofs, - CreateNullifyDedupInstructionInputs, + compress_proofs, create_nullify_state_v1_multi_instruction, CompressedProofs, + CreateNullifyStateV1MultiInstructionInputs, }; use light_test_utils::e2e_test_env::init_program_test_env; use serial_test::serial; @@ -14,7 +14,7 @@ use solana_sdk::signature::{Keypair, Signer}; #[serial] #[tokio::test] -async fn test_nullify_dedup_4_leaves() { +async fn test_nullify_state_v1_multi_4_leaves() { let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) .await .unwrap(); @@ -136,8 +136,8 @@ async fn test_nullify_dedup_4_leaves() { leaf_indices[3] as u32, ]; - let ix = create_nullify_dedup_instruction( - CreateNullifyDedupInstructionInputs { + let ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { authority: forester_keypair.pubkey(), nullifier_queue: state_tree_bundle.accounts.nullifier_queue, merkle_tree: state_tree_bundle.accounts.merkle_tree, @@ -172,7 +172,7 @@ async fn test_nullify_dedup_4_leaves() { .unwrap(); assert!( bucket.sequence_number.is_some(), - "Queue item {} should be marked after nullify_dedup", + "Queue item {} should be marked after nullify_state_v1_multi", idx ); } @@ -187,7 +187,7 @@ async fn test_nullify_dedup_4_leaves() { assert_ne!( 
pre_root, onchain_tree_post.root(), - "Root should have changed after nullify_dedup" + "Root should have changed after nullify_state_v1_multi" ); // Locally update and verify root match @@ -206,7 +206,7 @@ async fn test_nullify_dedup_4_leaves() { #[serial] #[tokio::test] -async fn test_nullify_dedup_3_leaves() { +async fn test_nullify_state_v1_multi_3_leaves() { let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) .await .unwrap(); @@ -298,8 +298,8 @@ async fn test_nullify_dedup_3_leaves() { nodes, } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 3 leaves"); - let ix = create_nullify_dedup_instruction( - CreateNullifyDedupInstructionInputs { + let ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { authority: forester_keypair.pubkey(), nullifier_queue: state_tree_bundle.accounts.nullifier_queue, merkle_tree: state_tree_bundle.accounts.merkle_tree, @@ -370,7 +370,7 @@ async fn test_nullify_dedup_3_leaves() { #[serial] #[tokio::test] -async fn test_nullify_dedup_2_leaves() { +async fn test_nullify_state_v1_multi_2_leaves() { let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) .await .unwrap(); @@ -462,8 +462,8 @@ async fn test_nullify_dedup_2_leaves() { nodes, } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 2 leaves"); - let ix = create_nullify_dedup_instruction( - CreateNullifyDedupInstructionInputs { + let ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { authority: forester_keypair.pubkey(), nullifier_queue: state_tree_bundle.accounts.nullifier_queue, merkle_tree: state_tree_bundle.accounts.merkle_tree, @@ -534,7 +534,7 @@ async fn test_nullify_dedup_2_leaves() { #[serial] #[tokio::test] -async fn test_nullify_dedup_1_leaf_fails() { +async fn test_nullify_state_v1_multi_1_leaf_fails() { let mut rpc = 
LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) .await .unwrap(); @@ -616,8 +616,8 @@ async fn test_nullify_dedup_1_leaf_fails() { let nodes: Vec<[u8; 32]> = proof_arr[..15].to_vec(); let shared_top_node = proof_arr[15]; - let ix = create_nullify_dedup_instruction( - CreateNullifyDedupInstructionInputs { + let ix = create_nullify_state_v1_multi_instruction( + CreateNullifyStateV1MultiInstructionInputs { authority: forester_keypair.pubkey(), nullifier_queue: state_tree_bundle.accounts.nullifier_queue, merkle_tree: state_tree_bundle.accounts.merkle_tree, @@ -641,6 +641,6 @@ async fn test_nullify_dedup_1_leaf_fails() { assert!( result.is_err(), - "nullify_dedup with 1 leaf should fail with InvalidProofEncoding" + "nullify_state_v1_multi with 1 leaf should fail with InvalidProofEncoding" ); } diff --git a/programs/registry/src/account_compression_cpi/nullify.rs b/programs/registry/src/account_compression_cpi/nullify.rs index b25a6255ad..c783c70f19 100644 --- a/programs/registry/src/account_compression_cpi/nullify.rs +++ b/programs/registry/src/account_compression_cpi/nullify.rs @@ -110,7 +110,7 @@ pub fn count_from_leaf_indices(leaf_indices: &[u32; 4]) -> Result { } #[allow(clippy::too_many_arguments)] -pub fn process_nullify_dedup( +pub fn process_nullify_state_v1_multi( ctx: &Context, count: usize, change_log_index: u16, diff --git a/programs/registry/src/account_compression_cpi/sdk.rs b/programs/registry/src/account_compression_cpi/sdk.rs index 9bac86593d..96d46f6880 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -64,14 +64,10 @@ pub fn create_nullify_instruction( /// Returns the base accounts for populating an address lookup table /// for nullify v0 transactions. 
-fn nullify_lookup_table_accounts_base( - merkle_tree: Pubkey, - nullifier_queue: Pubkey, - forester_pda: Option, -) -> Vec { +fn nullify_lookup_table_accounts_base(merkle_tree: Pubkey, nullifier_queue: Pubkey) -> Vec { let (cpi_authority, _) = get_cpi_authority_pda(); let registered_program_pda = get_registered_program_pda(&crate::ID); - let mut accounts = vec![ + vec![ cpi_authority, registered_program_pda, account_compression::ID, @@ -79,21 +75,17 @@ fn nullify_lookup_table_accounts_base( merkle_tree, nullifier_queue, crate::ID, - ]; - if let Some(pda) = forester_pda { - accounts.push(pda); - } - accounts + ] } /// Max number of 32-byte nodes in the dedup encoding vec. -/// Verified by tx size test (forester/tests/test_nullify_dedup_tx_size.rs). +/// Verified by tx size test (forester/tests/test_nullify_state_v1_multi_tx_size.rs). /// With ALT, SetComputeUnitLimit + SetComputeUnitPrice ixs, and worst-case nodes, /// the tx fits within the 1232 byte limit. -pub const NULLIFY_DEDUP_MAX_NODES: usize = 27; +pub const NULLIFY_STATE_V1_MULTI_MAX_NODES: usize = 26; #[derive(Clone, Debug, PartialEq)] -pub struct CreateNullifyDedupInstructionInputs { +pub struct CreateNullifyStateV1MultiInstructionInputs { pub authority: Pubkey, pub nullifier_queue: Pubkey, pub merkle_tree: Pubkey, @@ -109,8 +101,8 @@ pub struct CreateNullifyDedupInstructionInputs { pub is_metadata_forester: bool, } -pub fn create_nullify_dedup_instruction( - inputs: CreateNullifyDedupInstructionInputs, +pub fn create_nullify_state_v1_multi_instruction( + inputs: CreateNullifyStateV1MultiInstructionInputs, epoch: u64, ) -> Instruction { let register_program_pda = get_registered_program_pda(&crate::ID); @@ -120,7 +112,7 @@ pub fn create_nullify_dedup_instruction( Some(get_forester_epoch_pda_from_authority(&inputs.derivation, epoch).0) }; let (cpi_authority, _bump) = get_cpi_authority_pda(); - let instruction_data = crate::instruction::NullifyDedup { + let instruction_data = 
crate::instruction::NullifyStateV1Multi { change_log_index: inputs.change_log_index, queue_indices: inputs.queue_indices, leaf_indices: inputs.leaf_indices, @@ -223,7 +215,7 @@ pub fn compress_proofs(proofs: &[&[[u8; 32]; 16]]) -> Option { } } - if nodes.len() > NULLIFY_DEDUP_MAX_NODES { + if nodes.len() > NULLIFY_STATE_V1_MULTI_MAX_NODES { return None; } @@ -237,15 +229,13 @@ pub fn compress_proofs(proofs: &[&[[u8; 32]; 16]]) -> Option { } /// Returns the known accounts for populating an address lookup table -/// for nullify_dedup v0 transactions. Includes ComputeBudget program ID -/// since nullify_dedup transactions also include a SetComputeUnitLimit instruction. -pub fn nullify_dedup_lookup_table_accounts( +/// for nullify_state_v1_multi v0 transactions. Includes ComputeBudget program ID +/// since nullify_state_v1_multi transactions also include a SetComputeUnitLimit instruction. +pub fn nullify_state_v1_multi_lookup_table_accounts( merkle_tree: Pubkey, nullifier_queue: Pubkey, - forester_pda: Option, ) -> Vec { - let mut accounts = - nullify_lookup_table_accounts_base(merkle_tree, nullifier_queue, forester_pda); + let mut accounts = nullify_lookup_table_accounts_base(merkle_tree, nullifier_queue); accounts.push(solana_sdk::compute_budget::ID); accounts } @@ -736,14 +726,12 @@ pub fn create_rollover_batch_address_tree_instruction( #[cfg(test)] mod tests { - use anchor_lang::Discriminator; - use super::*; #[test] - fn test_nullify_dedup_instruction_data_size() { + fn test_nullify_state_v1_multi_instruction_data_size() { // Worst case: max_nodes unique nodes - let instruction_data = crate::instruction::NullifyDedup { + let instruction_data = crate::instruction::NullifyStateV1Multi { change_log_index: 0, queue_indices: [0; 4], leaf_indices: [0; 4], @@ -751,26 +739,26 @@ mod tests { proof_3_source: 0, proof_4_source: 0, shared_top_node: [0u8; 32], - nodes: vec![[0u8; 32]; NULLIFY_DEDUP_MAX_NODES], + nodes: vec![[0u8; 32]; NULLIFY_STATE_V1_MULTI_MAX_NODES], }; let 
data = instruction_data.data(); - // 1 disc + 2 changelog + 8 queue_indices + 16 leaf_indices + 2 proof_2_shared + // 8 disc + 2 changelog + 8 queue_indices + 16 leaf_indices + 2 proof_2_shared // + 4 proof_3_source + 4 proof_4_source + 32 shared_top_node // + 4 vec_prefix + N*32 nodes - let expected = 1 + 2 + 8 + 16 + 2 + 4 + 4 + 32 + 4 + NULLIFY_DEDUP_MAX_NODES * 32; + let expected = 8 + 2 + 8 + 16 + 2 + 4 + 4 + 32 + 4 + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32; assert_eq!( data.len(), expected, - "nullify_dedup instruction data must be exactly {} bytes, got {}", + "nullify_state_v1_multi instruction data must be exactly {} bytes, got {}", expected, data.len() ); } #[test] - fn test_nullify_dedup_instruction_accounts() { + fn test_nullify_state_v1_multi_instruction_accounts() { let authority = Pubkey::new_unique(); - let inputs = CreateNullifyDedupInstructionInputs { + let inputs = CreateNullifyStateV1MultiInstructionInputs { authority, nullifier_queue: Pubkey::new_unique(), merkle_tree: Pubkey::new_unique(), @@ -785,152 +773,10 @@ mod tests { derivation: authority, is_metadata_forester: false, }; - let ix = create_nullify_dedup_instruction(inputs, 0); + let ix = create_nullify_state_v1_multi_instruction(inputs, 0); assert_eq!(ix.accounts.len(), 8, "expected 8 accounts"); } - #[test] - fn test_nullify_dedup_discriminator_no_collision() { - let disc = crate::instruction::NullifyDedup::DISCRIMINATOR; - assert_eq!(disc.len(), 1, "nullify_dedup discriminator must be 1 byte"); - assert_eq!(disc, &[79], "nullify_dedup discriminator must be [79]"); - - let existing: &[(&str, &[u8])] = &[ - ( - "InitializeProtocolConfig", - crate::instruction::InitializeProtocolConfig::DISCRIMINATOR, - ), - ( - "UpdateProtocolConfig", - crate::instruction::UpdateProtocolConfig::DISCRIMINATOR, - ), - ( - "RegisterSystemProgram", - crate::instruction::RegisterSystemProgram::DISCRIMINATOR, - ), - ( - "DeregisterSystemProgram", - crate::instruction::DeregisterSystemProgram::DISCRIMINATOR, - ), 
- ( - "RegisterForester", - crate::instruction::RegisterForester::DISCRIMINATOR, - ), - ( - "UpdateForesterPda", - crate::instruction::UpdateForesterPda::DISCRIMINATOR, - ), - ( - "UpdateForesterPdaWeight", - crate::instruction::UpdateForesterPdaWeight::DISCRIMINATOR, - ), - ( - "RegisterForesterEpoch", - crate::instruction::RegisterForesterEpoch::DISCRIMINATOR, - ), - ( - "FinalizeRegistration", - crate::instruction::FinalizeRegistration::DISCRIMINATOR, - ), - ("ReportWork", crate::instruction::ReportWork::DISCRIMINATOR), - ( - "InitializeAddressMerkleTree", - crate::instruction::InitializeAddressMerkleTree::DISCRIMINATOR, - ), - ( - "InitializeStateMerkleTree", - crate::instruction::InitializeStateMerkleTree::DISCRIMINATOR, - ), - ("Nullify", crate::instruction::Nullify::DISCRIMINATOR), - ( - "UpdateAddressMerkleTree", - crate::instruction::UpdateAddressMerkleTree::DISCRIMINATOR, - ), - ( - "RolloverAddressMerkleTreeAndQueue", - crate::instruction::RolloverAddressMerkleTreeAndQueue::DISCRIMINATOR, - ), - ( - "RolloverStateMerkleTreeAndQueue", - crate::instruction::RolloverStateMerkleTreeAndQueue::DISCRIMINATOR, - ), - ( - "InitializeBatchedStateMerkleTree", - crate::instruction::InitializeBatchedStateMerkleTree::DISCRIMINATOR, - ), - ( - "BatchNullify", - crate::instruction::BatchNullify::DISCRIMINATOR, - ), - ( - "BatchAppend", - crate::instruction::BatchAppend::DISCRIMINATOR, - ), - ( - "InitializeBatchedAddressMerkleTree", - crate::instruction::InitializeBatchedAddressMerkleTree::DISCRIMINATOR, - ), - ( - "BatchUpdateAddressTree", - crate::instruction::BatchUpdateAddressTree::DISCRIMINATOR, - ), - ( - "RolloverBatchedAddressMerkleTree", - crate::instruction::RolloverBatchedAddressMerkleTree::DISCRIMINATOR, - ), - ( - "RolloverBatchedStateMerkleTree", - crate::instruction::RolloverBatchedStateMerkleTree::DISCRIMINATOR, - ), - ( - "MigrateState", - crate::instruction::MigrateState::DISCRIMINATOR, - ), - ( - "CreateConfigCounter", - 
crate::instruction::CreateConfigCounter::DISCRIMINATOR, - ), - ( - "CreateCompressibleConfig", - crate::instruction::CreateCompressibleConfig::DISCRIMINATOR, - ), - ( - "UpdateCompressibleConfig", - crate::instruction::UpdateCompressibleConfig::DISCRIMINATOR, - ), - ( - "PauseCompressibleConfig", - crate::instruction::PauseCompressibleConfig::DISCRIMINATOR, - ), - ( - "UnpauseCompressibleConfig", - crate::instruction::UnpauseCompressibleConfig::DISCRIMINATOR, - ), - ( - "DeprecateCompressibleConfig", - crate::instruction::DeprecateCompressibleConfig::DISCRIMINATOR, - ), - ( - "WithdrawFundingPool", - crate::instruction::WithdrawFundingPool::DISCRIMINATOR, - ), - ("Claim", crate::instruction::Claim::DISCRIMINATOR), - ( - "CompressAndClose", - crate::instruction::CompressAndClose::DISCRIMINATOR, - ), - ]; - - for (name, existing_disc) in existing { - assert!( - !existing_disc.starts_with(disc), - "nullify_dedup 1-byte discriminator {:?} collides with {name} discriminator prefix {:?}", - disc, - &existing_disc[..disc.len().min(existing_disc.len())] - ); - } - } - #[test] fn test_compress_proofs_round_trip() { // Create 4 proofs with sharing patterns that fit within MAX_NODES (27). diff --git a/programs/registry/src/lib.rs b/programs/registry/src/lib.rs index 6040b69933..8aabba0af8 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -426,8 +426,7 @@ pub mod light_registry { /// uses bitvecs/2-bit source fields to reconstruct all proofs on-chain. /// All leaves must share the same subtree at level 15 (shared_top_node). 
#[allow(clippy::too_many_arguments)] - #[instruction(discriminator = [79])] - pub fn nullify_dedup<'info>( + pub fn nullify_state_v1_multi<'info>( ctx: Context<'_, '_, '_, 'info, NullifyLeaves<'info>>, change_log_index: u16, queue_indices: [u16; 4], @@ -448,7 +447,7 @@ pub mod light_registry { count as u64 * DEFAULT_WORK_V1, )?; - process_nullify_dedup( + process_nullify_state_v1_multi( &ctx, count, change_log_index, diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs index 2c3e82972a..9b40b900e5 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs @@ -3863,7 +3863,7 @@ async fn test_d9_edge_many_literals() { #[tokio::test] async fn test_d9_edge_mixed() { use csdk_anchor_full_derived_test::d9_seeds::{ - edge_cases::{_UNDERSCORE_CONST, AB, SEED_123}, + edge_cases::{AB, SEED_123, _UNDERSCORE_CONST}, D9EdgeMixedParams, }; From 513749e1c197fe91e42e0ae974f9f75ca4c0667b Mon Sep 17 00:00:00 2001 From: ananas Date: Wed, 18 Mar 2026 00:11:23 +0000 Subject: [PATCH 05/16] fix: pin time <0.3.46 for Solana platform-tools Cargo 1.84 compatibility time 0.3.46+ pulls time-core 0.1.8 which uses edition2024, unsupported by the Cargo 1.84 bundled with Solana platform-tools. 
---
 Cargo.lock | 12 ++++++------
 Cargo.toml |  3 +++
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index dfc954f985..a62e12ec1e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10933,9 +10933,9 @@ dependencies = [
 
 [[package]]
 name = "time"
-version = "0.3.37"
+version = "0.3.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
+checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd"
 dependencies = [
  "deranged",
  "itoa",
@@ -10948,15 +10948,15 @@ dependencies = [
 
 [[package]]
 name = "time-core"
-version = "0.1.2"
+version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
+checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca"
 
 [[package]]
 name = "time-macros"
-version = "0.2.19"
+version = "0.2.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
+checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd"
 dependencies = [
  "num-conv",
  "time-core",
diff --git a/Cargo.toml b/Cargo.toml
index 54f0c4e1ef..110bcc81b2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,6 +120,9 @@ version = "0.1.0"
 edition = "2021"
 
 [workspace.dependencies]
+# Pin time <0.3.46 -- 0.3.46+ pulls time-core 0.1.8 which uses edition2024,
+# incompatible with
Solana platform-tools Cargo 1.84. +time = ">=0.3, <0.3.46" solana-banks-client = { version = "2.3" } solana-banks-interface = { version = "2.3" } solana-program = "2.3" From bc36a498adccdd8eafcedc3de2447de43a238365 Mon Sep 17 00:00:00 2001 From: ananas Date: Wed, 18 Mar 2026 00:25:02 +0000 Subject: [PATCH 06/16] fix: reject non-trailing sentinels in count_from_leaf_indices Harden leaf_indices validation to reject malformed layouts like [a, b, MAX, c] where sentinels appear in non-trailing positions. --- .../src/account_compression_cpi/nullify.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/programs/registry/src/account_compression_cpi/nullify.rs b/programs/registry/src/account_compression_cpi/nullify.rs index c783c70f19..07d22bcf9b 100644 --- a/programs/registry/src/account_compression_cpi/nullify.rs +++ b/programs/registry/src/account_compression_cpi/nullify.rs @@ -97,16 +97,12 @@ fn nullify_single_leaf_cpi( /// Determines proof count from leaf_indices sentinel values. /// Returns Err(InvalidProofEncoding) if fewer than 2 leaves are specified. 
pub fn count_from_leaf_indices(leaf_indices: &[u32; 4]) -> Result<usize> {
-    if leaf_indices[0] == u32::MAX || leaf_indices[1] == u32::MAX {
-        return err!(RegistryError::InvalidProofEncoding);
+    match *leaf_indices {
+        [a, b, u32::MAX, u32::MAX] if a != u32::MAX && b != u32::MAX => Ok(2),
+        [a, b, c, u32::MAX] if a != u32::MAX && b != u32::MAX && c != u32::MAX => Ok(3),
+        [a, b, c, d] if a != u32::MAX && b != u32::MAX && c != u32::MAX && d != u32::MAX => Ok(4),
+        _ => err!(RegistryError::InvalidProofEncoding),
     }
-    Ok(if leaf_indices[2] == u32::MAX {
-        2
-    } else if leaf_indices[3] == u32::MAX {
-        3
-    } else {
-        4
-    })
 }
 
 #[allow(clippy::too_many_arguments)]

From 8efe3bc05001479f649f1b08b058a71335705ee5 Mon Sep 17 00:00:00 2001
From: ananas
Date: Wed, 18 Mar 2026 01:30:49 +0000
Subject: [PATCH 07/16] refactor: simplify nullify_state_v1_multi proof dedup
 encoding

Replace the complex multi-scheme encoding (1-bit bitvec for proof_2,
2-bit source selectors for proof_3/proof_4, separate shared_top_node)
with a uniform pool-based approach:

- Deduplicated node pool built level-by-level across all proofs
- Each proof (including proof_1) selects 16 nodes from the pool
  via a u32 bitvec using the bitvec crate
- Removes proof_2_shared, proof_3_source, proof_4_source, shared_top_node
- Adds proof_bitvecs: [u32; 4]
- Bumps NULLIFY_STATE_V1_MULTI_MAX_NODES from 26 to 27 (10-byte margin)
- Hardens count_from_leaf_indices to reject non-trailing sentinels
---
 CLAUDE.md                                          |   5 +
 Cargo.lock                                         |   1 +
 forester/src/processor/v1/helpers.rs               |  10 +-
 .../test_nullify_state_v1_multi_tx_size.rs         |   7 +-
 .../tests/nullify_state_v1_multi.rs                |  46 +--
 programs/registry/Cargo.toml                       |   1 +
 .../src/account_compression_cpi/nullify.rs         | 143 ++------
 .../src/account_compression_cpi/sdk.rs             | 336 ++++++------------
 programs/registry/src/lib.rs                       |  17 +-
 9 files changed, 169 insertions(+), 397 deletions(-)

diff --git a/CLAUDE.md b/CLAUDE.md
index 3bb51a2410..1be55e7fce 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -177,6 +177,11
@@ go test -run TestLightweight -timeout 15m
 End-to-end tests for the off-chain tree maintenance service.
 
 ```bash
+# Using just (preferred, from forester/ directory):
+just -f forester/justfile local # Run e2e test without rebuilding SBF programs
+just -f forester/justfile test # Build SBF test deps first, then run e2e test
+
+# Or directly:
 TEST_MODE=local cargo test --package forester e2e_test -- --nocapture
 ```
 
diff --git a/Cargo.lock b/Cargo.lock
index a62e12ec1e..070b02db66 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4199,6 +4199,7 @@ dependencies = [
  "account-compression",
  "aligned-sized",
  "anchor-lang",
+ "bitvec",
  "borsh 0.10.4",
  "light-account-checks",
  "light-batched-merkle-tree",
diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs
index 61ae2fcb55..6d48aed9cf 100644
--- a/forester/src/processor/v1/helpers.rs
+++ b/forester/src/processor/v1/helpers.rs
@@ -443,10 +443,7 @@ pub async fn fetch_proofs_and_create_instructions(
         .collect::<Result<Vec<_>>>()?;
     let proof_refs: Vec<&[[u8; 32]; 16]> = group_proofs.iter().collect();
     let CompressedProofs {
-        proof_2_shared,
-        proof_3_source,
-        proof_4_source,
-        shared_top_node,
+        proof_bitvecs,
         nodes,
     } = compress_proofs(&proof_refs).ok_or_else(|| {
         anyhow::anyhow!(
@@ -474,10 +471,7 @@ pub async fn fetch_proofs_and_create_instructions(
         change_log_index,
         queue_indices,
         leaf_indices,
-        proof_2_shared,
-        proof_3_source,
-        proof_4_source,
-        shared_top_node,
+        proof_bitvecs,
         nodes,
         derivation,
         is_metadata_forester: false,
diff --git a/forester/tests/test_nullify_state_v1_multi_tx_size.rs b/forester/tests/test_nullify_state_v1_multi_tx_size.rs
index 269bbdb82a..68a8713afe 100644
--- a/forester/tests/test_nullify_state_v1_multi_tx_size.rs
+++ b/forester/tests/test_nullify_state_v1_multi_tx_size.rs
@@ -31,10 +31,7 @@ fn test_nullify_state_v1_multi_v0_transaction_size() {
         change_log_index: 1400,
         queue_indices: [100, 200, 300, 400],
         leaf_indices: [67_000_000, 67_000_001, 67_000_002, 67_000_003],
-
proof_2_shared: 0, - proof_3_source: 0, - proof_4_source: 0, - shared_top_node: [0xCC; 32], + proof_bitvecs: [0; 4], nodes: vec![[0xAA; 32]; NULLIFY_STATE_V1_MULTI_MAX_NODES], derivation: authority.pubkey(), is_metadata_forester: false, @@ -73,7 +70,7 @@ fn test_nullify_state_v1_multi_v0_transaction_size() { // Full tx = compact-u16 sig count (1) + signatures (64 * n) + serialized message let tx_size = 1 + tx.signatures.len() * 64 + serialized.len(); - let ix_data_size = 8 + 2 + 8 + 16 + 2 + 4 + 4 + 32 + 4 + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32; + let ix_data_size = 8 + 2 + 8 + 16 + 16 + 4 + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32; println!( "nullify_state_v1_multi v0 transaction size: {} bytes (limit: 1232)", diff --git a/program-tests/registry-test/tests/nullify_state_v1_multi.rs b/program-tests/registry-test/tests/nullify_state_v1_multi.rs index 160074357c..eea55b4b7e 100644 --- a/program-tests/registry-test/tests/nullify_state_v1_multi.rs +++ b/program-tests/registry-test/tests/nullify_state_v1_multi.rs @@ -106,20 +106,9 @@ async fn test_nullify_state_v1_multi_4_leaves() { proofs.push(proof_arr); } - // Verify shared top node - for i in 1..4 { - assert_eq!( - proofs[0][15], proofs[i][15], - "Level 15 proof node must be shared between all leaves" - ); - } - let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); let CompressedProofs { - proof_2_shared, - proof_3_source, - proof_4_source, - shared_top_node, + proof_bitvecs, nodes, } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 4 leaves"); @@ -144,10 +133,7 @@ async fn test_nullify_state_v1_multi_4_leaves() { change_log_index: change_log_index as u16, queue_indices, leaf_indices: leaf_indices_arr, - proof_2_shared, - proof_3_source, - proof_4_source, - shared_top_node, + proof_bitvecs, nodes, derivation: forester_keypair.pubkey(), is_metadata_forester: true, @@ -291,10 +277,7 @@ async fn test_nullify_state_v1_multi_3_leaves() { let proof_refs: Vec<&[[u8; 32]; 16]> = 
proofs.iter().collect(); let CompressedProofs { - proof_2_shared, - proof_3_source, - proof_4_source, - shared_top_node, + proof_bitvecs, nodes, } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 3 leaves"); @@ -316,10 +299,7 @@ async fn test_nullify_state_v1_multi_3_leaves() { leaf_indices[2] as u32, u32::MAX, ], - proof_2_shared, - proof_3_source, - proof_4_source, - shared_top_node, + proof_bitvecs, nodes, derivation: forester_keypair.pubkey(), is_metadata_forester: true, @@ -455,10 +435,7 @@ async fn test_nullify_state_v1_multi_2_leaves() { let proof_refs: Vec<&[[u8; 32]; 16]> = proofs.iter().collect(); let CompressedProofs { - proof_2_shared, - proof_3_source, - proof_4_source, - shared_top_node, + proof_bitvecs, nodes, } = compress_proofs(&proof_refs).expect("compress_proofs should succeed for 2 leaves"); @@ -480,10 +457,7 @@ async fn test_nullify_state_v1_multi_2_leaves() { u32::MAX, u32::MAX, ], - proof_2_shared, - proof_3_source, - proof_4_source, - shared_top_node, + proof_bitvecs, nodes, derivation: forester_keypair.pubkey(), is_metadata_forester: true, @@ -613,8 +587,7 @@ async fn test_nullify_state_v1_multi_1_leaf_fails() { .unwrap(); let proof_arr: [[u8; 32]; 16] = proof.try_into().unwrap(); - let nodes: Vec<[u8; 32]> = proof_arr[..15].to_vec(); - let shared_top_node = proof_arr[15]; + let nodes: Vec<[u8; 32]> = proof_arr.to_vec(); let ix = create_nullify_state_v1_multi_instruction( CreateNullifyStateV1MultiInstructionInputs { @@ -624,10 +597,7 @@ async fn test_nullify_state_v1_multi_1_leaf_fails() { change_log_index: change_log_index as u16, queue_indices: [items_to_nullify[0].0 as u16, 0, 0, 0], leaf_indices: [leaf_index as u32, u32::MAX, u32::MAX, u32::MAX], - proof_2_shared: 0, - proof_3_source: 0, - proof_4_source: 0, - shared_top_node, + proof_bitvecs: [0; 4], nodes, derivation: forester_keypair.pubkey(), is_metadata_forester: true, diff --git a/programs/registry/Cargo.toml b/programs/registry/Cargo.toml index 
0423906a08..c173733030 100644 --- a/programs/registry/Cargo.toml +++ b/programs/registry/Cargo.toml @@ -31,6 +31,7 @@ light-system-program-anchor = { workspace = true, features = ["cpi"] } light-account-checks = { workspace = true, features = ["solana", "std", "msg"] } light-program-profiler = { workspace = true } light-macros = { workspace = true } +bitvec = { workspace = true } borsh = { workspace = true } solana-account-info = { workspace = true } solana-instruction = { workspace = true } diff --git a/programs/registry/src/account_compression_cpi/nullify.rs b/programs/registry/src/account_compression_cpi/nullify.rs index 07d22bcf9b..89a16c4594 100644 --- a/programs/registry/src/account_compression_cpi/nullify.rs +++ b/programs/registry/src/account_compression_cpi/nullify.rs @@ -2,6 +2,7 @@ use account_compression::{ program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED, StateMerkleTreeAccount, }; use anchor_lang::prelude::*; +use bitvec::prelude::*; use crate::{epoch::register_epoch::ForesterEpochPda, errors::RegistryError}; @@ -105,135 +106,57 @@ pub fn count_from_leaf_indices(leaf_indices: &[u32; 4]) -> Result { } } -#[allow(clippy::too_many_arguments)] +/// Reconstructs a 16-node Merkle proof by selecting nodes from a +/// deduplicated pool. The bitvec selects which pool nodes belong to +/// this proof (exactly 16 bits must be set). 
+fn reconstruct_proof(nodes: &[[u8; 32]], bits: u32) -> Result<[[u8; 32]; 16]> {
+    let bv = bits.view_bits::<Lsb0>();
+    let mut proof = [[0u8; 32]; 16];
+    let mut proof_idx = 0;
+    for i in 0..nodes.len() {
+        if bv[i] {
+            if proof_idx >= 16 {
+                return err!(RegistryError::InvalidProofEncoding);
+            }
+            proof[proof_idx] = nodes[i];
+            proof_idx += 1;
+        }
+    }
+    if proof_idx != 16 {
+        return err!(RegistryError::InvalidProofEncoding);
+    }
+    Ok(proof)
+}
+
 pub fn process_nullify_state_v1_multi(
     ctx: &Context<NullifyLeaves>,
     count: usize,
     change_log_index: u16,
     queue_indices: [u16; 4],
     leaf_indices: [u32; 4],
-    proof_2_shared: u16,
-    proof_3_source: u32,
-    proof_4_source: u32,
-    shared_top_node: [u8; 32],
+    proof_bitvecs: [u32; 4],
     nodes: Vec<[u8; 32]>,
 ) -> Result<()> {
+    if nodes.len() > 32 {
+        return err!(RegistryError::InvalidProofEncoding);
+    }
+
     let bump = ctx.bumps.cpi_authority;
     let bump = &[bump];
     let seeds = [CPI_AUTHORITY_PDA_SEED, bump];
     let signer_seeds = &[&seeds[..]];
-
-    // Reconstruct proofs from dedup encoding.
-    let mut cursor: usize = 0;
-
-    // proof_1: levels 0..14 from nodes[0..15]
-    if nodes.len() < 15 {
-        return err!(RegistryError::InvalidProofEncoding);
-    }
-    let mut proof_1 = [[0u8; 32]; 16];
-    proof_1[..15].copy_from_slice(&nodes[cursor..cursor + 15]);
-    proof_1[15] = shared_top_node;
-    cursor += 15;
-
-    // proof_2: bitvec proof_2_shared, bit i=1 means reuse proof_1[i], bit=0 means take next node
-    let mut proof_2 = [[0u8; 32]; 16];
-    for i in 0..15 {
-        if (proof_2_shared >> i) & 1 == 1 {
-            proof_2[i] = proof_1[i];
-        } else {
-            if cursor >= nodes.len() {
-                return err!(RegistryError::InvalidProofEncoding);
-            }
-            proof_2[i] = nodes[cursor];
-            cursor += 1;
-        }
-    }
-    proof_2[15] = shared_top_node;
-
-    // Issue CPIs for proof_1 and proof_2 immediately to free stack space
-    // before reconstructing proof_3/proof_4.
let change_log_index_u64 = change_log_index as u64; - nullify_single_leaf_cpi( - ctx, - signer_seeds, - change_log_index_u64, - queue_indices[0], - leaf_indices[0] as u64, - proof_1.to_vec(), - )?; - nullify_single_leaf_cpi( - ctx, - signer_seeds, - change_log_index_u64, - queue_indices[1], - leaf_indices[1] as u64, - proof_2.to_vec(), - )?; - - // proof_3: 2 bits per level from proof_3_source - if count >= 3 { - let mut proof_3 = [[0u8; 32]; 16]; - for i in 0..15 { - let src = (proof_3_source >> (i * 2)) & 0b11; - match src { - 0b00 => proof_3[i] = proof_1[i], - 0b01 => proof_3[i] = proof_2[i], - 0b10 => { - if cursor >= nodes.len() { - return err!(RegistryError::InvalidProofEncoding); - } - proof_3[i] = nodes[cursor]; - cursor += 1; - } - _ => return err!(RegistryError::InvalidProofEncoding), - } - } - proof_3[15] = shared_top_node; + for i in 0..count { + let proof = reconstruct_proof(&nodes, proof_bitvecs[i])?; nullify_single_leaf_cpi( ctx, signer_seeds, change_log_index_u64, - queue_indices[2], - leaf_indices[2] as u64, - proof_3.to_vec(), + queue_indices[i], + leaf_indices[i] as u64, + proof.to_vec(), )?; - - // proof_4: 2 bits per level from proof_4_source - if count == 4 { - let mut proof_4 = [[0u8; 32]; 16]; - for i in 0..15 { - let src = (proof_4_source >> (i * 2)) & 0b11; - match src { - 0b00 => proof_4[i] = proof_1[i], - 0b01 => proof_4[i] = proof_2[i], - 0b10 => proof_4[i] = proof_3[i], - 0b11 => { - if cursor >= nodes.len() { - return err!(RegistryError::InvalidProofEncoding); - } - proof_4[i] = nodes[cursor]; - cursor += 1; - } - _ => unreachable!(), - } - } - proof_4[15] = shared_top_node; - - nullify_single_leaf_cpi( - ctx, - signer_seeds, - change_log_index_u64, - queue_indices[3], - leaf_indices[3] as u64, - proof_4.to_vec(), - )?; - } - } - - // Validate all nodes consumed - if cursor != nodes.len() { - return err!(RegistryError::InvalidProofEncoding); } Ok(()) diff --git a/programs/registry/src/account_compression_cpi/sdk.rs 
b/programs/registry/src/account_compression_cpi/sdk.rs index 96d46f6880..8f9bb8445d 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -82,7 +82,7 @@ fn nullify_lookup_table_accounts_base(merkle_tree: Pubkey, nullifier_queue: Pubk /// Verified by tx size test (forester/tests/test_nullify_state_v1_multi_tx_size.rs). /// With ALT, SetComputeUnitLimit + SetComputeUnitPrice ixs, and worst-case nodes, /// the tx fits within the 1232 byte limit. -pub const NULLIFY_STATE_V1_MULTI_MAX_NODES: usize = 26; +pub const NULLIFY_STATE_V1_MULTI_MAX_NODES: usize = 27; #[derive(Clone, Debug, PartialEq)] pub struct CreateNullifyStateV1MultiInstructionInputs { @@ -92,10 +92,7 @@ pub struct CreateNullifyStateV1MultiInstructionInputs { pub change_log_index: u16, pub queue_indices: [u16; 4], pub leaf_indices: [u32; 4], - pub proof_2_shared: u16, - pub proof_3_source: u32, - pub proof_4_source: u32, - pub shared_top_node: [u8; 32], + pub proof_bitvecs: [u32; 4], pub nodes: Vec<[u8; 32]>, pub derivation: Pubkey, pub is_metadata_forester: bool, @@ -116,10 +113,7 @@ pub fn create_nullify_state_v1_multi_instruction( change_log_index: inputs.change_log_index, queue_indices: inputs.queue_indices, leaf_indices: inputs.leaf_indices, - proof_2_shared: inputs.proof_2_shared, - proof_3_source: inputs.proof_3_source, - proof_4_source: inputs.proof_4_source, - shared_top_node: inputs.shared_top_node, + proof_bitvecs: inputs.proof_bitvecs, nodes: inputs.nodes, }; @@ -140,90 +134,57 @@ pub fn create_nullify_state_v1_multi_instruction( } } -/// Result of compressing 2-4 Merkle proofs into the dedup encoding. +/// Result of compressing 2-4 Merkle proofs into a deduplicated node pool. pub struct CompressedProofs { - pub proof_2_shared: u16, - pub proof_3_source: u32, - pub proof_4_source: u32, - pub shared_top_node: [u8; 32], + /// Bitvecs for proofs 2-4, each selecting 16 nodes from the pool. + /// proof_1 is always nodes[0..16]. 
+    pub proof_bitvecs: [u32; 4],
     pub nodes: Vec<[u8; 32]>,
 }
 
-/// Compresses 2-4 full 16-node Merkle proofs into the dedup encoding.
-/// Returns the compressed proof data,
-/// or `None` if compression is impossible (different top nodes, too many unique nodes, or
-/// fewer than 2 or more than 4 proofs).
+/// Compresses 2-4 full 16-node Merkle proofs into a deduplicated node pool.
+/// The pool is built level-by-level so that iterating set bits in ascending
+/// order produces nodes in proof-level order.
+/// Proof 1 is always nodes[0..16]. Proofs 2-4 each have a bitvec selecting
+/// which pool nodes form that proof.
+/// Returns `None` if fewer than 2, more than 4 proofs, or too many unique nodes.
 pub fn compress_proofs(proofs: &[&[[u8; 32]; 16]]) -> Option<CompressedProofs> {
+    use bitvec::prelude::*;
+
     if proofs.len() < 2 || proofs.len() > 4 {
         return None;
     }
 
-    // All proofs must share the same node at index 15
-    let shared_top_node = proofs[0][15];
-    for p in &proofs[1..] {
-        if p[15] != shared_top_node {
-            return None;
-        }
-    }
-
+    // Build level-ordered deduplicated pool. For each level, add unique
+    // nodes across all proofs. Ascending pool index == ascending level.
let mut nodes: Vec<[u8; 32]> = Vec::new();
+    let mut pool_indices = [[0usize; 16]; 4];
 
-    // proof_1: levels 0..14
-    for i in 0..15 {
-        nodes.push(proofs[0][i]);
-    }
-
-    // proof_2: bitvec
-    let mut proof_2_shared: u16 = 0;
-    for i in 0..15 {
-        if proofs[1][i] == proofs[0][i] {
-            proof_2_shared |= 1 << i;
-        } else {
-            nodes.push(proofs[1][i]);
-        }
-    }
-
-    // proof_3
-    let mut proof_3_source: u32 = 0;
-    if proofs.len() >= 3 {
-        for i in 0..15 {
-            if proofs[2][i] == proofs[0][i] {
-                // 00 = proof_1
-            } else if proofs[2][i] == proofs[1][i] {
-                proof_3_source |= 0b01 << (i * 2);
+    for level in 0..16 {
+        for (proof_idx, proof) in proofs.iter().enumerate() {
+            if let Some(idx) = nodes.iter().position(|n| *n == proof[level]) {
+                pool_indices[proof_idx][level] = idx;
             } else {
-                proof_3_source |= 0b10 << (i * 2);
-                nodes.push(proofs[2][i]);
+                pool_indices[proof_idx][level] = nodes.len();
+                nodes.push(proof[level]);
             }
         }
     }
 
-    // proof_4
-    let mut proof_4_source: u32 = 0;
-    if proofs.len() >= 4 {
-        for i in 0..15 {
-            if proofs[3][i] == proofs[0][i] {
-                // 00 = proof_1
-            } else if proofs[3][i] == proofs[1][i] {
-                proof_4_source |= 0b01 << (i * 2);
-            } else if proofs[3][i] == proofs[2][i] {
-                proof_4_source |= 0b10 << (i * 2);
-            } else {
-                proof_4_source |= 0b11 << (i * 2);
-                nodes.push(proofs[3][i]);
-            }
-        }
+    if nodes.len() > NULLIFY_STATE_V1_MULTI_MAX_NODES || nodes.len() > 32 {
+        return None;
     }
 
-    if nodes.len() > NULLIFY_STATE_V1_MULTI_MAX_NODES {
-        return None;
+    let mut proof_bitvecs = [0u32; 4];
+    for (proof_idx, _) in proofs.iter().enumerate() {
+        let bv = proof_bitvecs[proof_idx].view_bits_mut::<Lsb0>();
+        for level in 0..16 {
+            bv.set(pool_indices[proof_idx][level], true);
+        }
     }
 
     Some(CompressedProofs {
-        proof_2_shared,
-        proof_3_source,
-        proof_4_source,
-        shared_top_node,
+        proof_bitvecs,
         nodes,
     })
 }
@@ -726,26 +687,38 @@ pub fn create_rollover_batch_address_tree_instruction(
 
 #[cfg(test)]
 mod tests {
+    use bitvec::prelude::*;
+
     use super::*;
 
+    /// Simulates on-chain
reconstruction for testing round-trips.
+    fn reconstruct_proof(nodes: &[[u8; 32]], bits: u32) -> [[u8; 32]; 16] {
+        let bv = bits.view_bits::<Lsb0>();
+        let mut proof = [[0u8; 32]; 16];
+        let mut proof_idx = 0;
+        for (i, node) in nodes.iter().enumerate() {
+            if bv[i] {
+                proof[proof_idx] = *node;
+                proof_idx += 1;
+            }
+        }
+        assert_eq!(proof_idx, 16, "bitvec must select exactly 16 nodes");
+        proof
+    }
+
     #[test]
     fn test_nullify_state_v1_multi_instruction_data_size() {
-        // Worst case: max_nodes unique nodes
         let instruction_data = crate::instruction::NullifyStateV1Multi {
             change_log_index: 0,
             queue_indices: [0; 4],
             leaf_indices: [0; 4],
-            proof_2_shared: 0,
-            proof_3_source: 0,
-            proof_4_source: 0,
-            shared_top_node: [0u8; 32],
+            proof_bitvecs: [0; 4],
             nodes: vec![[0u8; 32]; NULLIFY_STATE_V1_MULTI_MAX_NODES],
         };
 
         let data = instruction_data.data();
-        // 8 disc + 2 changelog + 8 queue_indices + 16 leaf_indices + 2 proof_2_shared
-        // + 4 proof_3_source + 4 proof_4_source + 32 shared_top_node
+        // 8 disc + 2 changelog + 8 queue_indices + 16 leaf_indices + 16 proof_bitvecs
         // + 4 vec_prefix + N*32 nodes
-        let expected = 8 + 2 + 8 + 16 + 2 + 4 + 4 + 32 + 4 + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32;
+        let expected = 8 + 2 + 8 + 16 + 16 + 4 + NULLIFY_STATE_V1_MULTI_MAX_NODES * 32;
         assert_eq!(
             data.len(),
             expected,
@@ -765,11 +738,8 @@ mod tests {
             change_log_index: 0,
             queue_indices: [0, 1, 2, 3],
             leaf_indices: [0, 1, 2, 3],
-            proof_2_shared: 0,
-            proof_3_source: 0,
-            proof_4_source: 0,
-            shared_top_node: [0u8; 32],
-            nodes: vec![[0u8; 32]; 15],
+            proof_bitvecs: [0; 4],
+            nodes: vec![[0u8; 32]; 16],
             derivation: authority,
             is_metadata_forester: false,
         };
@@ -779,137 +749,67 @@ mod tests {
 
     #[test]
     fn test_compress_proofs_round_trip() {
- // Budget: 15 (proof_1) + 5 (proof_2 unique) + 5 (proof_3 unique) + 2 (proof_4 unique) = 27 - let shared_top = [0xCC; 32]; let mut proof_1 = [[0u8; 32]; 16]; let mut proof_2 = [[0u8; 32]; 16]; let mut proof_3 = [[0u8; 32]; 16]; let mut proof_4 = [[0u8; 32]; 16]; - for (i, slot) in proof_1.iter_mut().enumerate().take(15) { - *slot = [i as u8 + 1; 32]; + for (i, elem) in proof_1.iter_mut().enumerate() { + *elem = [i as u8 + 1; 32]; } - proof_1[15] = shared_top; - // proof_2: 10 shared with proof_1, 5 unique (levels 0-4) - for (i, slot) in proof_2.iter_mut().enumerate().take(15) { - if i < 5 { - *slot = [i as u8 + 100; 32]; // unique + // proof_2: differs at levels 0-3, shares 4-15 (total: 16 + 4 = 20) + for i in 0..16 { + if i < 4 { + proof_2[i] = [i as u8 + 100; 32]; } else { - *slot = proof_1[i]; // shared + proof_2[i] = proof_1[i]; } } - proof_2[15] = shared_top; - - // proof_3: 5 from proof_1, 5 new (levels 5-9), 5 from proof_2 - for (i, slot) in proof_3.iter_mut().enumerate().take(15) { - if i < 5 { - *slot = proof_1[i]; // same as proof_1 - } else if i < 10 { - *slot = [i as u8 + 200; 32]; // new + + // proof_3: differs at levels 0-2, shares 3-15 (total: 20 + 3 = 23) + for i in 0..16 { + if i < 3 { + proof_3[i] = [i as u8 + 200; 32]; } else { - *slot = proof_2[i]; // same as proof_2 (and proof_1) + proof_3[i] = proof_1[i]; } } - proof_3[15] = shared_top; - // proof_4: 4 from proof_1, 4 from proof_2, 5 from proof_3, 2 new - for (i, slot) in proof_4.iter_mut().enumerate().take(15) { - if i < 4 { - *slot = proof_1[i]; // from proof_1 - } else if i < 8 { - *slot = proof_2[i]; // from proof_2 - } else if i < 13 { - *slot = proof_3[i]; // from proof_3 + // proof_4: differs at levels 0-1, shares 2-15 (total: 23 + 2 = 25) + for i in 0..16 { + if i < 2 { + proof_4[i] = [(i as u8).wrapping_add(250); 32]; } else { - *slot = [(i as u8).wrapping_add(250); 32]; // new + proof_4[i] = proof_1[i]; } } - proof_4[15] = shared_top; let proofs: Vec<&[[u8; 32]; 16]> = 
vec![&proof_1, &proof_2, &proof_3, &proof_4]; let result = compress_proofs(&proofs); assert!(result.is_some(), "compress_proofs should succeed"); - let CompressedProofs { - proof_2_shared: p2_shared, - proof_3_source: p3_source, - proof_4_source: p4_source, - shared_top_node: top, - nodes, - } = result.unwrap(); - - // Simulate on-chain reconstruction - let mut cursor = 0usize; - - // Reconstruct proof_1 - let mut r_proof_1 = [[0u8; 32]; 16]; - r_proof_1[..15].copy_from_slice(&nodes[cursor..cursor + 15]); - r_proof_1[15] = top; - cursor += 15; + let compressed = result.unwrap(); + + let r_proof_1 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[0]); assert_eq!(r_proof_1, proof_1); - // Reconstruct proof_2 - let mut r_proof_2 = [[0u8; 32]; 16]; - for i in 0..15 { - if (p2_shared >> i) & 1 == 1 { - r_proof_2[i] = r_proof_1[i]; - } else { - r_proof_2[i] = nodes[cursor]; - cursor += 1; - } - } - r_proof_2[15] = top; + let r_proof_2 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[1]); assert_eq!(r_proof_2, proof_2); - // Reconstruct proof_3 - let mut r_proof_3 = [[0u8; 32]; 16]; - for i in 0..15 { - let src = (p3_source >> (i * 2)) & 0b11; - match src { - 0b00 => r_proof_3[i] = r_proof_1[i], - 0b01 => r_proof_3[i] = r_proof_2[i], - 0b10 => { - r_proof_3[i] = nodes[cursor]; - cursor += 1; - } - _ => panic!("unexpected source 0b11 for proof_3"), - } - } - r_proof_3[15] = top; + let r_proof_3 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[2]); assert_eq!(r_proof_3, proof_3); - // Reconstruct proof_4 - let mut r_proof_4 = [[0u8; 32]; 16]; - for i in 0..15 { - let src = (p4_source >> (i * 2)) & 0b11; - match src { - 0b00 => r_proof_4[i] = r_proof_1[i], - 0b01 => r_proof_4[i] = r_proof_2[i], - 0b10 => r_proof_4[i] = r_proof_3[i], - 0b11 => { - r_proof_4[i] = nodes[cursor]; - cursor += 1; - } - _ => unreachable!(), - } - } - r_proof_4[15] = top; + let r_proof_4 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[3]); 
assert_eq!(r_proof_4, proof_4); - - assert_eq!(cursor, nodes.len(), "all nodes should be consumed"); } #[test] fn test_compress_proofs_returns_none_when_too_many_nodes() { - // All 4 proofs with completely unique nodes at every level = 15 + 15 + 15 + 15 = 60 nodes - let shared_top = [0xCC; 32]; let make_proof = |base: u8| -> [[u8; 32]; 16] { let mut p = [[0u8; 32]; 16]; - for (i, slot) in p.iter_mut().enumerate().take(15) { + for (i, slot) in p.iter_mut().enumerate() { *slot = [base.wrapping_add(i as u8); 32]; } - p[15] = shared_top; p }; let p1 = make_proof(1); @@ -927,84 +827,72 @@ mod tests { #[test] fn test_compress_proofs_2_proofs() { - let shared_top = [0xCC; 32]; let mut proof_1 = [[0u8; 32]; 16]; let mut proof_2 = [[0u8; 32]; 16]; - for i in 0..15 { + for i in 0..16 { proof_1[i] = [i as u8 + 1; 32]; - // Share half the nodes if i % 2 == 0 { proof_2[i] = proof_1[i]; } else { proof_2[i] = [i as u8 + 100; 32]; } } - proof_1[15] = shared_top; - proof_2[15] = shared_top; let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2]; let result = compress_proofs(&proofs); assert!(result.is_some(), "2 proofs should compress"); - let CompressedProofs { - proof_2_shared: p2_shared, - proof_3_source: p3_source, - proof_4_source: p4_source, - shared_top_node: top, - nodes, - } = result.unwrap(); - - // proof_3_source and proof_4_source should be 0 (unused) - assert_eq!(p3_source, 0); - assert_eq!(p4_source, 0); - assert_eq!(top, shared_top); - - // Verify proof_2_shared bitvec - for i in 0..15 { - if i % 2 == 0 { - assert_eq!((p2_shared >> i) & 1, 1, "level {} should be shared", i); - } else { - assert_eq!((p2_shared >> i) & 1, 0, "level {} should not be shared", i); - } - } + let compressed = result.unwrap(); - // 15 for proof_1 + 7 unique for proof_2 (odd indices 1,3,5,7,9,11,13) - assert_eq!(nodes.len(), 15 + 7); + // Unused bitvecs should be 0 + assert_eq!(compressed.proof_bitvecs[2], 0); + assert_eq!(compressed.proof_bitvecs[3], 0); + + // 16 for proof_1 + 8 unique 
for proof_2 (odd indices) + assert_eq!(compressed.nodes.len(), 16 + 8); + + // Round-trip + let r_proof_1 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[0]); + assert_eq!(r_proof_1, proof_1); + + let r_proof_2 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[1]); + assert_eq!(r_proof_2, proof_2); } #[test] fn test_compress_proofs_3_proofs() { - let shared_top = [0xCC; 32]; let mut proof_1 = [[0u8; 32]; 16]; let mut proof_2 = [[0u8; 32]; 16]; let mut proof_3 = [[0u8; 32]; 16]; - for i in 0..15 { + for i in 0..16 { proof_1[i] = [i as u8 + 1; 32]; - // proof_2 shares some levels with proof_1 to stay within MAX_NODES if i % 2 == 0 { - proof_2[i] = proof_1[i]; // shared + proof_2[i] = proof_1[i]; } else { proof_2[i] = [i as u8 + 50; 32]; } - // proof_3 alternates between proof_1 and proof_2 if i % 3 == 0 { proof_3[i] = proof_1[i]; - } else if i % 3 == 1 { - proof_3[i] = proof_2[i]; } else { - proof_3[i] = [i as u8 + 100; 32]; // new + proof_3[i] = proof_2[i]; } } - proof_1[15] = shared_top; - proof_2[15] = shared_top; - proof_3[15] = shared_top; let proofs: Vec<&[[u8; 32]; 16]> = vec![&proof_1, &proof_2, &proof_3]; let result = compress_proofs(&proofs); assert!(result.is_some(), "3 proofs should compress"); - let CompressedProofs { - proof_4_source: p4_source, - .. 
- } = result.unwrap(); - assert_eq!(p4_source, 0, "proof_4_source should be 0 for 3 proofs"); + let compressed = result.unwrap(); + assert_eq!( + compressed.proof_bitvecs[3], 0, + "proof_4 bitvec should be 0 for 3 proofs" + ); + + let r_proof_1 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[0]); + assert_eq!(r_proof_1, proof_1); + + let r_proof_2 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[1]); + assert_eq!(r_proof_2, proof_2); + + let r_proof_3 = reconstruct_proof(&compressed.nodes, compressed.proof_bitvecs[2]); + assert_eq!(r_proof_3, proof_3); } } diff --git a/programs/registry/src/lib.rs b/programs/registry/src/lib.rs index 8aabba0af8..b862870de6 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -422,19 +422,15 @@ pub mod light_registry { /// Nullifies 2-4 leaves in a single instruction via sequential CPIs. /// Uses proof deduplication: nearby leaves share Merkle proof nodes at - /// common ancestor levels. The encoding stores each unique node once and - /// uses bitvecs/2-bit source fields to reconstruct all proofs on-chain. - /// All leaves must share the same subtree at level 15 (shared_top_node). - #[allow(clippy::too_many_arguments)] + /// common ancestor levels. The `nodes` vec is a deduplicated pool of + /// unique nodes, and each proof's bitvec selects which 16 nodes from + /// the pool form that proof. 
pub fn nullify_state_v1_multi<'info>( ctx: Context<'_, '_, '_, 'info, NullifyLeaves<'info>>, change_log_index: u16, queue_indices: [u16; 4], leaf_indices: [u32; 4], - proof_2_shared: u16, - proof_3_source: u32, - proof_4_source: u32, - shared_top_node: [u8; 32], + proof_bitvecs: [u32; 4], nodes: Vec<[u8; 32]>, ) -> Result<()> { let metadata = ctx.accounts.merkle_tree.load()?.metadata; @@ -453,10 +449,7 @@ pub mod light_registry { change_log_index, queue_indices, leaf_indices, - proof_2_shared, - proof_3_source, - proof_4_source, - shared_top_node, + proof_bitvecs, nodes, ) } From d9087d2352096d9d323083140b07b210c2873b5d Mon Sep 17 00:00:00 2001 From: ananas Date: Wed, 18 Mar 2026 02:04:48 +0000 Subject: [PATCH 08/16] feat: unified forester ALT covering all tree types Replace v1-specific ALT with a single unified ALT that includes accounts for all forester operations (v1 state, v1 address, v2 state, v2 address). Solana's v0::Message::try_compile automatically selects relevant entries per instruction, so unused entries cost nothing. - Add ForesterLookupTableParams and forester_lookup_table_accounts() - Create ALT unconditionally in e2e test for all operations - Clamp v1 batch_size to 1 when ALT is present (tx size limit) - Fix min_queue_items doc comment to match actual behavior --- forester/src/processor/v1/config.rs | 2 +- forester/src/processor/v1/tx_builder.rs | 6 +- forester/tests/e2e_test.rs | 47 +++++++++------ .../src/account_compression_cpi/sdk.rs | 58 ++++++++++++++++--- 4 files changed, 86 insertions(+), 27 deletions(-) diff --git a/forester/src/processor/v1/config.rs b/forester/src/processor/v1/config.rs index c06e378669..fb562ed6e2 100644 --- a/forester/src/processor/v1/config.rs +++ b/forester/src/processor/v1/config.rs @@ -21,7 +21,7 @@ pub struct SendBatchedTransactionsConfig { pub confirmation_max_attempts: usize, /// Minimum number of queue items required before processing begins. /// Only applies to StateV1 trees. 
When `None`, processing starts immediately. - /// When the timeout deadline is near, this threshold is ignored to prevent starvation. + /// When the timeout deadline is reached, returns 0 (re-scheduled next light slot). pub min_queue_items: Option, } diff --git a/forester/src/processor/v1/tx_builder.rs b/forester/src/processor/v1/tx_builder.rs index 938e7931dc..15e1968515 100644 --- a/forester/src/processor/v1/tx_builder.rs +++ b/forester/src/processor/v1/tx_builder.rs @@ -155,7 +155,11 @@ impl TransactionBuilder for EpochManagerTransactions { } }; - let batch_size = config.batch_size.max(1) as usize; + let batch_size = if !self.address_lookup_tables.is_empty() { + 1 + } else { + config.batch_size.max(1) as usize + }; for instruction_chunk in all_instructions.chunks(batch_size) { let prepared = create_smart_transaction(CreateSmartTransactionConfig { diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index cc17436872..e9d5f6bff5 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -38,7 +38,9 @@ use light_compressed_token::process_transfer::{ use light_hasher::Poseidon; use light_program_test::accounts::test_accounts::TestAccounts; use light_prover_client::prover::spawn_prover; -use light_registry::account_compression_cpi::sdk::nullify_state_v1_multi_lookup_table_accounts; +use light_registry::account_compression_cpi::sdk::{ + forester_lookup_table_accounts, ForesterLookupTableParams, +}; use light_test_utils::{ actions::{create_compressible_token_account, CreateCompressibleTokenAccountInputs}, conversions::sdk_to_program_token_data, @@ -192,12 +194,7 @@ fn is_v2_address_test_enabled() -> bool { /// Creates an on-chain Address Lookup Table populated with the accounts /// needed for nullify_state_v1_multi instructions. Returns the ALT address. 
-async fn create_nullify_state_v1_multi_alt( - rpc: &mut R, - payer: &Keypair, - merkle_tree: Pubkey, - nullifier_queue: Pubkey, -) -> Pubkey { +async fn create_forester_alt(rpc: &mut R, payer: &Keypair, env: &TestAccounts) -> Pubkey { use light_client::rpc::lut::instruction::{create_lookup_table, extend_lookup_table}; let slot = rpc.get_slot().await.unwrap(); @@ -206,7 +203,25 @@ async fn create_nullify_state_v1_multi_alt( .await .unwrap(); - let addresses = nullify_state_v1_multi_lookup_table_accounts(merkle_tree, nullifier_queue); + let params = ForesterLookupTableParams { + v1_state_trees: env + .v1_state_trees + .iter() + .map(|t| (t.merkle_tree, t.nullifier_queue)) + .collect(), + v1_address_trees: env + .v1_address_trees + .iter() + .map(|t| (t.merkle_tree, t.queue)) + .collect(), + v2_state_trees: env + .v2_state_trees + .iter() + .map(|t| (t.merkle_tree, t.output_queue)) + .collect(), + v2_address_trees: env.v2_address_trees.clone(), + }; + let addresses = forester_lookup_table_accounts(¶ms); let extend_ix = extend_lookup_table(alt_address, payer.pubkey(), Some(payer.pubkey()), addresses); rpc.create_and_send_transaction(&[extend_ix], &payer.pubkey(), &[payer]) @@ -325,17 +340,13 @@ async fn e2e_test() { .await; } - // Create ALT for nullify_state_v1_multi if V1 state test is enabled + // Create unified ALT for all forester operations. + // v0::Message::try_compile selects relevant entries per instruction automatically. 
+ let alt_addr = create_forester_alt(&mut rpc, &env.protocol.forester, &env).await; + println!("Created forester ALT: {}", alt_addr); + config.lookup_table_address = Some(alt_addr); + if is_v1_state_test_enabled() { - let alt_addr = create_nullify_state_v1_multi_alt( - &mut rpc, - &env.protocol.forester, - env.v1_state_trees[0].merkle_tree, - env.v1_state_trees[0].nullifier_queue, - ) - .await; - println!("Created nullify_state_v1_multi ALT: {}", alt_addr); - config.lookup_table_address = Some(alt_addr); config.min_queue_items = Some(10); config.enable_v1_multi_nullify = true; } diff --git a/programs/registry/src/account_compression_cpi/sdk.rs b/programs/registry/src/account_compression_cpi/sdk.rs index 8f9bb8445d..bc29789c0b 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -62,9 +62,8 @@ pub fn create_nullify_instruction( } } -/// Returns the base accounts for populating an address lookup table -/// for nullify v0 transactions. -fn nullify_lookup_table_accounts_base(merkle_tree: Pubkey, nullifier_queue: Pubkey) -> Vec { +/// Returns the common accounts shared by all forester lookup tables. 
+fn common_lookup_table_accounts() -> Vec { let (cpi_authority, _) = get_cpi_authority_pda(); let registered_program_pda = get_registered_program_pda(&crate::ID); vec![ @@ -72,9 +71,8 @@ fn nullify_lookup_table_accounts_base(merkle_tree: Pubkey, nullifier_queue: Pubk registered_program_pda, account_compression::ID, Pubkey::new_from_array(NOOP_PUBKEY), - merkle_tree, - nullifier_queue, crate::ID, + solana_sdk::compute_budget::ID, ] } @@ -196,11 +194,57 @@ pub fn nullify_state_v1_multi_lookup_table_accounts( merkle_tree: Pubkey, nullifier_queue: Pubkey, ) -> Vec { - let mut accounts = nullify_lookup_table_accounts_base(merkle_tree, nullifier_queue); - accounts.push(solana_sdk::compute_budget::ID); + let mut accounts = common_lookup_table_accounts(); + accounts.push(merkle_tree); + accounts.push(nullifier_queue); accounts } +/// Parameters for creating a unified forester address lookup table +/// that covers all tree types. +pub struct ForesterLookupTableParams { + /// (merkle_tree, nullifier_queue) + pub v1_state_trees: Vec<(Pubkey, Pubkey)>, + /// (merkle_tree, queue) + pub v1_address_trees: Vec<(Pubkey, Pubkey)>, + /// (merkle_tree, output_queue) + pub v2_state_trees: Vec<(Pubkey, Pubkey)>, + /// merkle_tree (== queue for v2 address trees) + pub v2_address_trees: Vec, +} + +/// Returns a deduplicated list of accounts for a unified forester ALT +/// that covers all tree types. `v0::Message::try_compile` automatically +/// selects which ALT entries to reference per instruction, so unused +/// entries cost nothing. 
+pub fn forester_lookup_table_accounts(params: &ForesterLookupTableParams) -> Vec { + let mut accounts = common_lookup_table_accounts(); + + for (merkle_tree, nullifier_queue) in ¶ms.v1_state_trees { + push_if_absent(&mut accounts, *merkle_tree); + push_if_absent(&mut accounts, *nullifier_queue); + } + for (merkle_tree, queue) in ¶ms.v1_address_trees { + push_if_absent(&mut accounts, *merkle_tree); + push_if_absent(&mut accounts, *queue); + } + for (merkle_tree, output_queue) in ¶ms.v2_state_trees { + push_if_absent(&mut accounts, *merkle_tree); + push_if_absent(&mut accounts, *output_queue); + } + for merkle_tree in ¶ms.v2_address_trees { + push_if_absent(&mut accounts, *merkle_tree); + } + + accounts +} + +fn push_if_absent(accounts: &mut Vec, key: Pubkey) { + if !accounts.contains(&key) { + accounts.push(key); + } +} + #[derive(Clone, Debug, PartialEq)] pub struct CreateMigrateStateInstructionInputs { pub authority: Pubkey, From f60eda826f508a0b0fbcb622ecf89c8200b57780 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Wed, 18 Mar 2026 14:29:12 +0000 Subject: [PATCH 09/16] feat: add get_queue_leaf_indices - Implemented `make_get_queue_leaf_indices_body` function to construct the request body for the `getQueueLeafIndices` API. - Added API call for `getQueueLeafIndices` in the photon API module. - Introduced `get_queue_leaf_indices` method in the `TestIndexer` struct to handle the API call. - Updated `LightProgramTest` to include the `get_queue_leaf_indices` method, delegating to the underlying indexer. 
--- external/photon | 2 +- forester/src/epoch_manager.rs | 2 + forester/src/processor/v1/config.rs | 4 + forester/src/processor/v1/helpers.rs | 42 +- forester/src/processor/v1/send_transaction.rs | 62 +- forester/src/processor/v1/tx_builder.rs | 21 +- forester/src/queue_helpers.rs | 4 +- forester/src/smart_transaction.rs | 23 +- forester/tests/e2e_test.rs | 25 - sdk-libs/client/src/indexer/indexer_trait.rs | 12 +- sdk-libs/client/src/indexer/mod.rs | 4 +- sdk-libs/client/src/indexer/photon_indexer.rs | 56 + sdk-libs/client/src/indexer/types/mod.rs | 3 +- sdk-libs/client/src/indexer/types/queue.rs | 8 + sdk-libs/client/src/rpc/indexer.rs | 19 +- sdk-libs/photon-api/src/codegen.rs | 3628 ++++++++++++----- sdk-libs/photon-api/src/lib.rs | 17 + .../program-test/src/indexer/test_indexer.rs | 14 +- .../program-test/src/program_test/indexer.rs | 19 +- 19 files changed, 2825 insertions(+), 1140 deletions(-) diff --git a/external/photon b/external/photon index 7a649f9c45..8a0bbce6a9 160000 --- a/external/photon +++ b/external/photon @@ -1 +1 @@ -Subproject commit 7a649f9c45a138ef47b090445163abe84775145c +Subproject commit 8a0bbce6a9250e2cc41e50d10efa9256a180db58 diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index f04b12eb6f..46d930df20 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -4774,6 +4774,7 @@ mod tests { queue_item_data: QueueItemData { hash: [0u8; 32], index: 0, + leaf_index: None, }, }; @@ -4796,6 +4797,7 @@ mod tests { queue_item_data: QueueItemData { hash: [0u8; 32], index: 0, + leaf_index: None, }, }; diff --git a/forester/src/processor/v1/config.rs b/forester/src/processor/v1/config.rs index fb562ed6e2..d472383985 100644 --- a/forester/src/processor/v1/config.rs +++ b/forester/src/processor/v1/config.rs @@ -2,6 +2,10 @@ use light_client::rpc::RetryConfig; use crate::config::QueueConfig; +/// Maximum queue size for which multi-nullify grouping is enabled. 
+/// Above this threshold, fall back to single-nullify for more reliable throughput. +pub const MULTI_NULLIFY_MAX_QUEUE_SIZE: usize = 10_000; + #[derive(Debug, Clone, Copy)] pub struct CapConfig { pub rec_fee_microlamports_per_cu: u64, diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs index 6d48aed9cf..f25b66ee77 100644 --- a/forester/src/processor/v1/helpers.rs +++ b/forester/src/processor/v1/helpers.rs @@ -37,6 +37,14 @@ use crate::{ errors::ForesterError, }; +/// A labeled instruction for logging purposes. +#[derive(Clone)] +pub struct LabeledInstruction { + pub instruction: Instruction, + /// Label for logging, e.g. "StateV1Nullify" or "StateV1MultiNullify(3)" + pub label: String, +} + /// Work items should be of only one type and tree pub async fn fetch_proofs_and_create_instructions( authority: Pubkey, @@ -45,7 +53,7 @@ pub async fn fetch_proofs_and_create_instructions( epoch: u64, work_items: &[WorkItem], use_multi_nullify: bool, -) -> crate::Result<(Vec, Vec)> { +) -> crate::Result<(Vec, Vec)> { let mut proofs = Vec::new(); let mut instructions = vec![]; @@ -366,7 +374,10 @@ pub async fn fetch_proofs_and_create_instructions( }, epoch, ); - instructions.push(instruction); + instructions.push(LabeledInstruction { + instruction, + label: "AddressV1Update".to_string(), + }); } // Process state proofs and create instructions @@ -427,9 +438,12 @@ pub async fn fetch_proofs_and_create_instructions( for group_indices in groups { if group_indices.len() == 1 { let (item, proof) = &items_with_proofs[group_indices[0]]; - instructions.push(build_nullify_instruction( - item, proof, authority, derivation, epoch, - )); + instructions.push(LabeledInstruction { + instruction: build_nullify_instruction( + item, proof, authority, derivation, epoch, + ), + label: "StateV1Nullify".to_string(), + }); } else { let group_proofs: Vec<[[u8; 32]; 16]> = group_indices .iter() @@ -478,14 +492,18 @@ pub async fn fetch_proofs_and_create_instructions( 
}, epoch, ); + let group_size = group_indices.len(); debug!( event = "v1_nullify_state_v1_multi_instruction", - group_size = group_indices.len(), + group_size, node_count, ix_data_bytes = instruction.data.len(), "Created nullify_state_v1_multi instruction" ); - instructions.push(instruction); + instructions.push(LabeledInstruction { + instruction, + label: format!("StateV1MultiNullify({})", group_size), + }); } } } else { @@ -493,9 +511,12 @@ pub async fn fetch_proofs_and_create_instructions( proofs.push(MerkleProofType::StateProof(proof.clone())); } for (item, proof) in items_with_proofs.iter() { - instructions.push(build_nullify_instruction( - item, proof, authority, derivation, epoch, - )); + instructions.push(LabeledInstruction { + instruction: build_nullify_instruction( + item, proof, authority, derivation, epoch, + ), + label: "StateV1Nullify".to_string(), + }); } } @@ -592,6 +613,7 @@ mod tests { queue_item_data: QueueItemData { hash: [0u8; 32], index: 0, + leaf_index: None, }, } } diff --git a/forester/src/processor/v1/send_transaction.rs b/forester/src/processor/v1/send_transaction.rs index fb1f078c26..87a273c630 100644 --- a/forester/src/processor/v1/send_transaction.rs +++ b/forester/src/processor/v1/send_transaction.rs @@ -26,7 +26,10 @@ use crate::{ errors::ForesterError, metrics::increment_transactions_failed, priority_fee::PriorityFeeConfig, - processor::v1::{config::SendBatchedTransactionsConfig, tx_builder::TransactionBuilder}, + processor::v1::{ + config::{SendBatchedTransactionsConfig, MULTI_NULLIFY_MAX_QUEUE_SIZE}, + tx_builder::TransactionBuilder, + }, queue_helpers::fetch_queue_item_data, smart_transaction::{ConfirmationConfig, PreparedTransaction, SmartTransactionError}, Result, @@ -77,44 +80,29 @@ pub async fn send_batched_transactions= timeout_deadline { - trace!(tree.id = %tree_accounts.merkle_tree, "Timeout deadline reached while waiting for threshold, 0 transactions sent."); + let data = match prepare_batch_prerequisites( + 
&payer.pubkey(), + derivation, + &pool, + config, + tree_accounts, + &*transaction_builder, + function_start_time, + config.min_queue_items, + ) + .await + .map_err(ForesterError::from)? + { + Some(data) => data, + None => { + trace!(tree.id = %tree_accounts.merkle_tree, queue.id = %tree_accounts.queue, "Preparation returned no data, 0 transactions sent."); return Ok(0); } - - match prepare_batch_prerequisites( - &payer.pubkey(), - derivation, - &pool, - config, - tree_accounts, - &*transaction_builder, - function_start_time, - config.min_queue_items, - ) - .await - .map_err(ForesterError::from)? - { - Some(data) => break data, - None => { - if config.min_queue_items.is_some() { - tokio::time::sleep(THRESHOLD_POLL_INTERVAL).await; - continue; - } - trace!(tree.id = %tree_accounts.merkle_tree, queue.id = %tree_accounts.queue, "Preparation returned no data, 0 transactions sent."); - return Ok(0); - } - } }; let mut build_config = config.build_transaction_batch_config; build_config.queue_item_count = data.work_items.len(); - const MULTI_NULLIFY_MAX_QUEUE_SIZE: usize = 10_000; if data.work_items.len() > MULTI_NULLIFY_MAX_QUEUE_SIZE { warn!( tree = %tree_accounts.merkle_tree, @@ -392,6 +380,7 @@ async fn execute_transaction_chunk_sending( let pool_clone = Arc::clone(&pool); let cancel_signal_clone = Arc::clone(&cancel_signal); let num_sent_transactions_clone = Arc::clone(&num_sent_transactions); + let tx_label = prepared_transaction.label().to_string(); async move { if cancel_signal_clone.load(Ordering::SeqCst) || Instant::now() >= timeout_deadline { @@ -418,10 +407,11 @@ async fn execute_transaction_chunk_sending( Ok(signature) => { if !cancel_signal_clone.load(Ordering::SeqCst) { num_sent_transactions_clone.fetch_add(1, Ordering::SeqCst); - trace!( - tx.signature = %signature, - elapsed = ?send_time.elapsed(), - "Transaction sent and confirmed successfully" + info!( + "tx sent: {} type={} e2e={}ms", + signature, + tx_label, + send_time.elapsed().as_millis(), ); 
TransactionSendResult::Success(signature) } else { diff --git a/forester/src/processor/v1/tx_builder.rs b/forester/src/processor/v1/tx_builder.rs index 15e1968515..74438bb7a7 100644 --- a/forester/src/processor/v1/tx_builder.rs +++ b/forester/src/processor/v1/tx_builder.rs @@ -7,6 +7,7 @@ use light_client::rpc::Rpc; use solana_program::hash::Hash; use solana_sdk::{ address_lookup_table::AddressLookupTableAccount, + instruction::Instruction, signature::{Keypair, Signer}, }; use tokio::sync::Mutex; @@ -16,7 +17,10 @@ use crate::{ epoch_manager::WorkItem, processor::{ tx_cache::ProcessedHashCache, - v1::{config::BuildTransactionBatchConfig, helpers::fetch_proofs_and_create_instructions}, + v1::{ + config::{BuildTransactionBatchConfig, MULTI_NULLIFY_MAX_QUEUE_SIZE}, + helpers::fetch_proofs_and_create_instructions, + }, }, smart_transaction::{ create_smart_transaction, CreateSmartTransactionConfig, PreparedTransaction, @@ -123,7 +127,6 @@ impl TransactionBuilder for EpochManagerTransactions { .map(|&item| item.clone()) .collect::>(); - const MULTI_NULLIFY_MAX_QUEUE_SIZE: usize = 10_000; let use_multi_nullify = self.enable_v1_multi_nullify && !self.address_lookup_tables.is_empty() && config.queue_item_count <= MULTI_NULLIFY_MAX_QUEUE_SIZE; @@ -161,17 +164,25 @@ impl TransactionBuilder for EpochManagerTransactions { config.batch_size.max(1) as usize }; - for instruction_chunk in all_instructions.chunks(batch_size) { + for labeled_chunk in all_instructions.chunks(batch_size) { + let label = labeled_chunk + .iter() + .map(|li| li.label.as_str()) + .collect::>() + .join("+"); + let instructions: Vec = + labeled_chunk.iter().map(|li| li.instruction.clone()).collect(); let prepared = create_smart_transaction(CreateSmartTransactionConfig { payer: payer.insecure_clone(), - instructions: instruction_chunk.to_vec(), + instructions, recent_blockhash: *recent_blockhash, compute_unit_price: priority_fee, compute_unit_limit: config.compute_unit_limit, last_valid_block_height, 
address_lookup_tables: self.address_lookup_tables.clone(), }) - .await?; + .await? + .with_label(label); transactions.push(prepared); } diff --git a/forester/src/queue_helpers.rs b/forester/src/queue_helpers.rs index f4a7dac704..4eaa7c421d 100644 --- a/forester/src/queue_helpers.rs +++ b/forester/src/queue_helpers.rs @@ -164,6 +164,8 @@ pub struct QueueLengthAndCapacity { pub struct QueueItemData { pub hash: [u8; 32], pub index: usize, + /// Leaf index in the Merkle tree. Available when fetched from indexer. + pub leaf_index: Option, } /// Result of fetching V1 queue data, including items and capacity. @@ -230,7 +232,7 @@ pub async fn fetch_queue_item_data( .filter(|(index, _, is_pending)| { *index >= start_index as usize && *index < end_index && *is_pending }) - .map(|(index, hash, _)| QueueItemData { hash, index }) + .map(|(index, hash, _)| QueueItemData { hash, index, leaf_index: None }) .collect(); tracing::debug!( diff --git a/forester/src/smart_transaction.rs b/forester/src/smart_transaction.rs index fac792df06..53bd7069f0 100644 --- a/forester/src/smart_transaction.rs +++ b/forester/src/smart_transaction.rs @@ -273,6 +273,8 @@ pub async fn send_transaction_with_policy( pub struct PreparedTransaction { transaction: PreparedTransactionKind, last_valid_block_height: u64, + /// Optional label for logging (e.g. 
"StateV1MultiNullify(4)") + label: Option, } enum PreparedTransactionKind { @@ -285,6 +287,7 @@ impl PreparedTransaction { Self { transaction: PreparedTransactionKind::Legacy(transaction), last_valid_block_height, + label: None, } } @@ -295,9 +298,19 @@ impl PreparedTransaction { Self { transaction: PreparedTransactionKind::Versioned(transaction), last_valid_block_height, + label: None, } } + pub(crate) fn with_label(mut self, label: String) -> Self { + self.label = Some(label); + self + } + + pub(crate) fn label(&self) -> &str { + self.label.as_deref().unwrap_or("V1Nullify") + } + pub(crate) fn signature(&self) -> Option { match &self.transaction { PreparedTransactionKind::Legacy(transaction) => transaction.signatures.first().copied(), @@ -389,10 +402,7 @@ async fn prepare_transaction( transaction .try_sign(signers, blockhash) .map_err(|e| RpcError::SigningError(e.to_string()))?; - Ok(PreparedTransaction { - transaction: PreparedTransactionKind::Legacy(transaction), - last_valid_block_height, - }) + Ok(PreparedTransaction::legacy(transaction, last_valid_block_height)) } else { let message = v0::Message::try_compile(payer, &final_instructions, address_lookup_tables, blockhash) @@ -401,10 +411,7 @@ async fn prepare_transaction( })?; let transaction = VersionedTransaction::try_new(VersionedMessage::V0(message), signers) .map_err(|e| RpcError::SigningError(e.to_string()))?; - Ok(PreparedTransaction { - transaction: PreparedTransactionKind::Versioned(transaction), - last_valid_block_height, - }) + Ok(PreparedTransaction::versioned(transaction, last_valid_block_height)) } } diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index e9d5f6bff5..295c4eeb82 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -651,31 +651,6 @@ async fn e2e_test() { ); println!("Compressible account (subscriber) successfully closed"); - // Verify dedup grouping logs when ALT is configured - if is_v1_state_test_enabled() { - let log_dir = 
std::path::Path::new("logs"); - if log_dir.exists() { - let latest_log = std::fs::read_dir(log_dir) - .unwrap() - .filter_map(|e| e.ok()) - .filter(|e| e.file_name().to_string_lossy().starts_with("forester.")) - .max_by_key(|e| e.metadata().unwrap().modified().unwrap()); - if let Some(log_entry) = latest_log { - let content = std::fs::read_to_string(log_entry.path()).unwrap(); - let has_dedup = content.contains("v1_nullify_state_v1_multi_grouping"); - assert!( - has_dedup, - "Expected v1_nullify_state_v1_multi_grouping logs when ALT is configured" - ); - println!("Verified: dedup grouping events found in forester logs"); - } else { - println!("Warning: no forester log files found in logs/"); - } - } else { - println!("Warning: logs/ directory not found"); - } - } - // Shutdown all services // Bootstrap may have already completed, so ignore send errors let _ = shutdown_bootstrap_sender.send(()); diff --git a/sdk-libs/client/src/indexer/indexer_trait.rs b/sdk-libs/client/src/indexer/indexer_trait.rs index b051ab3c1d..8def890fd5 100644 --- a/sdk-libs/client/src/indexer/indexer_trait.rs +++ b/sdk-libs/client/src/indexer/indexer_trait.rs @@ -11,7 +11,7 @@ use super::{ GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, IndexerError, IndexerRpcConfig, MerkleProof, NewAddressProofWithContext, PaginatedOptions, QueueElementsV2Options, RetryConfig, }; -use crate::indexer::QueueElementsResult; +use crate::indexer::{QueueElementsResult, QueueLeafIndex}; // TODO: remove all references in input types. #[async_trait] pub trait Indexer: std::marker::Send + std::marker::Sync { @@ -181,6 +181,16 @@ pub trait Indexer: std::marker::Send + std::marker::Sync { config: Option, ) -> Result, IndexerError>; + /// Returns lightweight (hash, queue_index, leaf_index) tuples for nullifier queue items. + /// Used by the forester to sort queue items before grouping for multi-nullify. 
+ async fn get_queue_leaf_indices( + &self, + merkle_tree_pubkey: [u8; 32], + limit: u16, + start_index: Option, + config: Option, + ) -> Result>, IndexerError>; + /// Returns information about all queues in the system. /// Includes tree pubkey, queue pubkey, queue type, and queue size for each queue. async fn get_queue_info( diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs index cc3167459c..999f76895a 100644 --- a/sdk-libs/client/src/indexer/mod.rs +++ b/sdk-libs/client/src/indexer/mod.rs @@ -18,8 +18,8 @@ pub use types::{ AddressQueueData, AddressWithTree, ColdContext, ColdData, CompressedAccount, CompressedTokenAccount, Hash, InputQueueData, InterfaceTreeInfo, MerkleProof, MerkleProofWithContext, NewAddressProofWithContext, NextTreeInfo, OutputQueueData, - OwnerBalance, ProofOfLeaf, QueueElementsResult, QueueInfo, QueueInfoResult, RootIndex, - SignatureWithMetadata, SolanaAccountData, StateMerkleTreeAccounts, StateQueueData, + OwnerBalance, ProofOfLeaf, QueueElementsResult, QueueInfo, QueueInfoResult, QueueLeafIndex, + RootIndex, SignatureWithMetadata, SolanaAccountData, StateMerkleTreeAccounts, StateQueueData, TokenAccountInterface, TokenBalance, TreeInfo, ValidityProofWithContext, }; mod options; diff --git a/sdk-libs/client/src/indexer/photon_indexer.rs b/sdk-libs/client/src/indexer/photon_indexer.rs index 26d16ae235..bca2902ae6 100644 --- a/sdk-libs/client/src/indexer/photon_indexer.rs +++ b/sdk-libs/client/src/indexer/photon_indexer.rs @@ -1701,6 +1701,62 @@ impl Indexer for PhotonIndexer { .await } + async fn get_queue_leaf_indices( + &self, + merkle_tree_pubkey: [u8; 32], + limit: u16, + start_index: Option, + config: Option, + ) -> Result>, IndexerError> { + let config = config.unwrap_or_default(); + self.retry(config.retry_config, || async { + let tree_hash = + photon_api::types::Hash(bs58::encode(&merkle_tree_pubkey).into_string()); + + let params = photon_api::types::PostGetQueueLeafIndicesBodyParams { + tree: 
tree_hash, + limit, + start_index, + }; + let request = + photon_api::apis::default_api::make_get_queue_leaf_indices_body(params); + + let result = photon_api::apis::default_api::get_queue_leaf_indices_post( + &self.configuration, + request, + ) + .await?; + + Self::check_api_error("get_queue_leaf_indices", result.error)?; + let api_response = + Self::extract_result("get_queue_leaf_indices", result.result)?; + + if api_response.context.slot < config.slot { + return Err(IndexerError::IndexerNotSyncedToSlot); + } + + let items = api_response + .value + .into_iter() + .map(|item| { + Ok(super::QueueLeafIndex { + hash: super::base58::decode_base58_to_fixed_array(&item.hash.0)?, + queue_index: item.queue_index, + leaf_index: item.leaf_index, + }) + }) + .collect::, IndexerError>>()?; + + Ok(Response { + context: super::response::Context { + slot: api_response.context.slot, + }, + value: Items { items }, + }) + }) + .await + } + async fn get_subtrees( &self, _merkle_tree_pubkey: [u8; 32], diff --git a/sdk-libs/client/src/indexer/types/mod.rs b/sdk-libs/client/src/indexer/types/mod.rs index f91504c8e3..1b5f34c590 100644 --- a/sdk-libs/client/src/indexer/types/mod.rs +++ b/sdk-libs/client/src/indexer/types/mod.rs @@ -16,7 +16,8 @@ pub use proof::{ NewAddressProofWithContext, RootIndex, ValidityProofWithContext, }; pub use queue::{ - AddressQueueData, InputQueueData, OutputQueueData, QueueElementsResult, StateQueueData, + AddressQueueData, InputQueueData, OutputQueueData, QueueElementsResult, QueueLeafIndex, + StateQueueData, }; pub use signature::SignatureWithMetadata; pub use token::{CompressedTokenAccount, OwnerBalance, TokenBalance}; diff --git a/sdk-libs/client/src/indexer/types/queue.rs b/sdk-libs/client/src/indexer/types/queue.rs index 40e7cc0f6e..f9a863cecb 100644 --- a/sdk-libs/client/src/indexer/types/queue.rs +++ b/sdk-libs/client/src/indexer/types/queue.rs @@ -138,6 +138,14 @@ impl AddressQueueData { } } +/// Lightweight queue leaf index entry (hash, 
queue_index, leaf_index) +#[derive(Debug, Clone, PartialEq, Default)] +pub struct QueueLeafIndex { + pub hash: [u8; 32], + pub queue_index: u64, + pub leaf_index: u64, +} + /// V2 Queue Elements Result with deduplicated node data #[derive(Debug, Clone, PartialEq, Default)] pub struct QueueElementsResult { diff --git a/sdk-libs/client/src/rpc/indexer.rs b/sdk-libs/client/src/rpc/indexer.rs index 55c6b069e0..08fbea4748 100644 --- a/sdk-libs/client/src/rpc/indexer.rs +++ b/sdk-libs/client/src/rpc/indexer.rs @@ -7,8 +7,8 @@ use crate::indexer::{ GetCompressedAccountsByOwnerConfig, GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext, OwnerBalance, PaginatedOptions, QueueElementsResult, - QueueElementsV2Options, QueueInfoResult, Response, RetryConfig, SignatureWithMetadata, - TokenBalance, ValidityProofWithContext, + QueueElementsV2Options, QueueInfoResult, QueueLeafIndex, Response, RetryConfig, + SignatureWithMetadata, TokenBalance, ValidityProofWithContext, }; #[async_trait] @@ -200,6 +200,21 @@ impl Indexer for LightClient { .await?) } + async fn get_queue_leaf_indices( + &self, + merkle_tree_pubkey: [u8; 32], + limit: u16, + start_index: Option, + config: Option, + ) -> Result>, IndexerError> { + Ok(self + .indexer + .as_ref() + .ok_or(IndexerError::NotInitialized)? + .get_queue_leaf_indices(merkle_tree_pubkey, limit, start_index, config) + .await?) + } + async fn get_queue_info( &self, config: Option, diff --git a/sdk-libs/photon-api/src/codegen.rs b/sdk-libs/photon-api/src/codegen.rs index 4dd88bda05..0936da0ea0 100644 --- a/sdk-libs/photon-api/src/codegen.rs +++ b/sdk-libs/photon-api/src/codegen.rs @@ -1550,6 +1550,96 @@ All endpoints return AccountV2.*/ Default::default() } } + /**Parameters for requesting input queue leaf indices. +Returns (hash, queue_index, leaf_index) for nullifier queue items.*/ + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "Parameters for requesting input queue leaf indices.\nReturns (hash, queue_index, leaf_index) for nullifier queue items.", + /// "type": "object", + /// "required": [ + /// "limit", + /// "tree" + /// ], + /// "properties": { + /// "limit": { + /// "type": "integer", + /// "format": "uint16", + /// "minimum": 0.0 + /// }, + /// "startIndex": { + /// "type": [ + /// "integer", + /// "null" + /// ], + /// "format": "uint64", + /// "minimum": 0.0 + /// }, + /// "tree": { + /// "$ref": "#/components/schemas/Hash" + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct GetQueueLeafIndicesRequest { + pub limit: u16, + #[serde( + rename = "startIndex", + default, + skip_serializing_if = "::std::option::Option::is_none" + )] + pub start_index: ::std::option::Option, + pub tree: Hash, + } + impl GetQueueLeafIndicesRequest { + pub fn builder() -> builder::GetQueueLeafIndicesRequest { + Default::default() + } + } + ///Response containing queue leaf indices + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "Response containing queue leaf indices", + /// "type": "object", + /// "required": [ + /// "context", + /// "value" + /// ], + /// "properties": { + /// "context": { + /// "$ref": "#/components/schemas/Context" + /// }, + /// "value": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/QueueLeafIndex" + /// } + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct GetQueueLeafIndicesResponse { + pub context: Context, + pub value: ::std::vec::Vec, + } + impl GetQueueLeafIndicesResponse { + pub fn builder() -> builder::GetQueueLeafIndicesResponse { + Default::default() + } + } ///A 32-byte hash represented as a base58 string. /// ///
JSON schema @@ -24678,7 +24768,7 @@ All endpoints return AccountV2.*/ Default::default() } } - ///`PostGetTransactionWithCompressionInfoBody` + ///`PostGetQueueLeafIndicesBody` /// ///
JSON schema /// @@ -24710,17 +24800,32 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getTransactionWithCompressionInfo" + /// "getQueueLeafIndices" /// ] /// }, /// "params": { + /// "description": "Parameters for requesting input queue leaf indices.\nReturns (hash, queue_index, leaf_index) for nullifier queue items.", /// "type": "object", /// "required": [ - /// "signature" + /// "limit", + /// "tree" /// ], /// "properties": { - /// "signature": { - /// "$ref": "#/components/schemas/SerializableSignature" + /// "limit": { + /// "type": "integer", + /// "format": "uint16", + /// "minimum": 0.0 + /// }, + /// "startIndex": { + /// "type": [ + /// "integer", + /// "null" + /// ], + /// "format": "uint64", + /// "minimum": 0.0 + /// }, + /// "tree": { + /// "$ref": "#/components/schemas/Hash" /// } /// }, /// "additionalProperties": false @@ -24730,17 +24835,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoBody { + pub struct PostGetQueueLeafIndicesBody { ///An ID to identify the request. - pub id: PostGetTransactionWithCompressionInfoBodyId, + pub id: PostGetQueueLeafIndicesBodyId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetTransactionWithCompressionInfoBodyJsonrpc, + pub jsonrpc: PostGetQueueLeafIndicesBodyJsonrpc, ///The name of the method to invoke. - pub method: PostGetTransactionWithCompressionInfoBodyMethod, - pub params: PostGetTransactionWithCompressionInfoBodyParams, + pub method: PostGetQueueLeafIndicesBodyMethod, + pub params: PostGetQueueLeafIndicesBodyParams, } - impl PostGetTransactionWithCompressionInfoBody { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoBody { + impl PostGetQueueLeafIndicesBody { + pub fn builder() -> builder::PostGetQueueLeafIndicesBody { Default::default() } } @@ -24770,18 +24875,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoBodyId { + pub enum PostGetQueueLeafIndicesBodyId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyId { + impl ::std::fmt::Display for PostGetQueueLeafIndicesBodyId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyId { + impl ::std::str::FromStr for PostGetQueueLeafIndicesBodyId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -24792,7 +24897,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetTransactionWithCompressionInfoBodyId { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesBodyId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -24801,7 
+24906,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoBodyId { + for PostGetQueueLeafIndicesBodyId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -24810,7 +24915,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoBodyId { + for PostGetQueueLeafIndicesBodyId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -24844,18 +24949,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoBodyJsonrpc { + pub enum PostGetQueueLeafIndicesBodyJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyJsonrpc { + impl ::std::fmt::Display for PostGetQueueLeafIndicesBodyJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyJsonrpc { + impl ::std::str::FromStr for PostGetQueueLeafIndicesBodyJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -24866,8 +24971,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoBodyJsonrpc { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -24876,7 +24980,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoBodyJsonrpc { + for PostGetQueueLeafIndicesBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -24885,7 +24989,7 @@ All endpoints return AccountV2.*/ } } impl 
::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoBodyJsonrpc { + for PostGetQueueLeafIndicesBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -24902,7 +25006,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getTransactionWithCompressionInfo" + /// "getQueueLeafIndices" /// ] ///} /// ``` @@ -24919,34 +25023,29 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoBodyMethod { - #[serde(rename = "getTransactionWithCompressionInfo")] - GetTransactionWithCompressionInfo, + pub enum PostGetQueueLeafIndicesBodyMethod { + #[serde(rename = "getQueueLeafIndices")] + GetQueueLeafIndices, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyMethod { + impl ::std::fmt::Display for PostGetQueueLeafIndicesBodyMethod { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { - Self::GetTransactionWithCompressionInfo => { - f.write_str("getTransactionWithCompressionInfo") - } + Self::GetQueueLeafIndices => f.write_str("getQueueLeafIndices"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyMethod { + impl ::std::str::FromStr for PostGetQueueLeafIndicesBodyMethod { type Err = self::error::ConversionError; fn from_str( value: &str, ) -> ::std::result::Result { match value { - "getTransactionWithCompressionInfo" => { - Ok(Self::GetTransactionWithCompressionInfo) - } + "getQueueLeafIndices" => Ok(Self::GetQueueLeafIndices), _ => Err("invalid value".into()), } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoBodyMethod { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -24955,7 +25054,7 @@ All endpoints return AccountV2.*/ } } impl 
::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoBodyMethod { + for PostGetQueueLeafIndicesBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -24964,7 +25063,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoBodyMethod { + for PostGetQueueLeafIndicesBodyMethod { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -24972,19 +25071,35 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetTransactionWithCompressionInfoBodyParams` + /**Parameters for requesting input queue leaf indices. +Returns (hash, queue_index, leaf_index) for nullifier queue items.*/ /// ///
JSON schema /// /// ```json ///{ + /// "description": "Parameters for requesting input queue leaf indices.\nReturns (hash, queue_index, leaf_index) for nullifier queue items.", /// "type": "object", /// "required": [ - /// "signature" + /// "limit", + /// "tree" /// ], /// "properties": { - /// "signature": { - /// "$ref": "#/components/schemas/SerializableSignature" + /// "limit": { + /// "type": "integer", + /// "format": "uint16", + /// "minimum": 0.0 + /// }, + /// "startIndex": { + /// "type": [ + /// "integer", + /// "null" + /// ], + /// "format": "uint64", + /// "minimum": 0.0 + /// }, + /// "tree": { + /// "$ref": "#/components/schemas/Hash" /// } /// }, /// "additionalProperties": false @@ -24993,15 +25108,22 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetTransactionWithCompressionInfoBodyParams { - pub signature: SerializableSignature, + pub struct PostGetQueueLeafIndicesBodyParams { + pub limit: u16, + #[serde( + rename = "startIndex", + default, + skip_serializing_if = "::std::option::Option::is_none" + )] + pub start_index: ::std::option::Option, + pub tree: Hash, } - impl PostGetTransactionWithCompressionInfoBodyParams { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoBodyParams { + impl PostGetQueueLeafIndicesBodyParams { + pub fn builder() -> builder::PostGetQueueLeafIndicesBodyParams { Default::default() } } - ///`PostGetTransactionWithCompressionInfoResponse` + ///`PostGetQueueLeafIndicesResponse` /// ///
JSON schema /// @@ -25039,62 +25161,46 @@ All endpoints return AccountV2.*/ /// ] /// }, /// "result": { - /// "description": "A Solana transaction with additional compression information", + /// "description": "Response containing queue leaf indices", /// "type": "object", + /// "required": [ + /// "context", + /// "value" + /// ], /// "properties": { - /// "compression_info": { - /// "type": "object", - /// "required": [ - /// "closedAccounts", - /// "openedAccounts" - /// ], - /// "properties": { - /// "closedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } - /// }, - /// "openedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } - /// } - /// }, - /// "additionalProperties": false + /// "context": { + /// "$ref": "#/components/schemas/Context" /// }, - /// "transaction": { - /// "description": "An encoded confirmed transaction with status meta", - /// "type": "object" + /// "value": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/QueueLeafIndex" + /// } /// } - /// } + /// }, + /// "additionalProperties": false /// } /// } ///} /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoResponse { + pub struct PostGetQueueLeafIndicesResponse { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub error: ::std::option::Option< - PostGetTransactionWithCompressionInfoResponseError, - >, + pub error: ::std::option::Option, ///An ID to identify the response. - pub id: PostGetTransactionWithCompressionInfoResponseId, + pub id: PostGetQueueLeafIndicesResponseId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetTransactionWithCompressionInfoResponseJsonrpc, + pub jsonrpc: PostGetQueueLeafIndicesResponseJsonrpc, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub result: ::std::option::Option< - PostGetTransactionWithCompressionInfoResponseResult, - >, + pub result: ::std::option::Option, } - impl PostGetTransactionWithCompressionInfoResponse { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponse { + impl PostGetQueueLeafIndicesResponse { + pub fn builder() -> builder::PostGetQueueLeafIndicesResponse { Default::default() } } - ///`PostGetTransactionWithCompressionInfoResponseError` + ///`PostGetQueueLeafIndicesResponseError` /// ///
JSON schema /// @@ -25113,13 +25219,13 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoResponseError { + pub struct PostGetQueueLeafIndicesResponseError { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub code: ::std::option::Option, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub message: ::std::option::Option<::std::string::String>, } - impl ::std::default::Default for PostGetTransactionWithCompressionInfoResponseError { + impl ::std::default::Default for PostGetQueueLeafIndicesResponseError { fn default() -> Self { Self { code: Default::default(), @@ -25127,8 +25233,8 @@ All endpoints return AccountV2.*/ } } } - impl PostGetTransactionWithCompressionInfoResponseError { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseError { + impl PostGetQueueLeafIndicesResponseError { + pub fn builder() -> builder::PostGetQueueLeafIndicesResponseError { Default::default() } } @@ -25158,18 +25264,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoResponseId { + pub enum PostGetQueueLeafIndicesResponseId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoResponseId { + impl ::std::fmt::Display for PostGetQueueLeafIndicesResponseId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoResponseId { + impl ::std::str::FromStr for PostGetQueueLeafIndicesResponseId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25180,8 +25286,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoResponseId { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesResponseId { type Error = 
self::error::ConversionError; fn try_from( value: &str, @@ -25190,7 +25295,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoResponseId { + for PostGetQueueLeafIndicesResponseId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25199,7 +25304,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoResponseId { + for PostGetQueueLeafIndicesResponseId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25233,18 +25338,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoResponseJsonrpc { + pub enum PostGetQueueLeafIndicesResponseJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoResponseJsonrpc { + impl ::std::fmt::Display for PostGetQueueLeafIndicesResponseJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoResponseJsonrpc { + impl ::std::str::FromStr for PostGetQueueLeafIndicesResponseJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25255,8 +25360,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoResponseJsonrpc { + impl ::std::convert::TryFrom<&str> for PostGetQueueLeafIndicesResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25265,7 +25369,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoResponseJsonrpc { + for PostGetQueueLeafIndicesResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: 
&::std::string::String, @@ -25274,7 +25378,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoResponseJsonrpc { + for PostGetQueueLeafIndicesResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25282,91 +25386,26 @@ All endpoints return AccountV2.*/ value.parse() } } - ///A Solana transaction with additional compression information - /// - ///
JSON schema - /// - /// ```json - ///{ - /// "description": "A Solana transaction with additional compression information", - /// "type": "object", - /// "properties": { - /// "compression_info": { - /// "type": "object", - /// "required": [ - /// "closedAccounts", - /// "openedAccounts" - /// ], - /// "properties": { - /// "closedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } - /// }, - /// "openedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } - /// } - /// }, - /// "additionalProperties": false - /// }, - /// "transaction": { - /// "description": "An encoded confirmed transaction with status meta", - /// "type": "object" - /// } - /// } - ///} - /// ``` - ///
- #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoResponseResult { - #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub compression_info: ::std::option::Option< - PostGetTransactionWithCompressionInfoResponseResultCompressionInfo, - >, - ///An encoded confirmed transaction with status meta - #[serde(default, skip_serializing_if = "::serde_json::Map::is_empty")] - pub transaction: ::serde_json::Map<::std::string::String, ::serde_json::Value>, - } - impl ::std::default::Default - for PostGetTransactionWithCompressionInfoResponseResult { - fn default() -> Self { - Self { - compression_info: Default::default(), - transaction: Default::default(), - } - } - } - impl PostGetTransactionWithCompressionInfoResponseResult { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseResult { - Default::default() - } - } - ///`PostGetTransactionWithCompressionInfoResponseResultCompressionInfo` + ///Response containing queue leaf indices /// ///
JSON schema /// /// ```json ///{ + /// "description": "Response containing queue leaf indices", /// "type": "object", /// "required": [ - /// "closedAccounts", - /// "openedAccounts" + /// "context", + /// "value" /// ], /// "properties": { - /// "closedAccounts": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" - /// } + /// "context": { + /// "$ref": "#/components/schemas/Context" /// }, - /// "openedAccounts": { + /// "value": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" + /// "$ref": "#/components/schemas/QueueLeafIndex" /// } /// } /// }, @@ -25376,18 +25415,16 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { - #[serde(rename = "closedAccounts")] - pub closed_accounts: ::std::vec::Vec, - #[serde(rename = "openedAccounts")] - pub opened_accounts: ::std::vec::Vec, + pub struct PostGetQueueLeafIndicesResponseResult { + pub context: Context, + pub value: ::std::vec::Vec, } - impl PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { + impl PostGetQueueLeafIndicesResponseResult { + pub fn builder() -> builder::PostGetQueueLeafIndicesResponseResult { Default::default() } } - ///`PostGetTransactionWithCompressionInfoV2Body` + ///`PostGetTransactionWithCompressionInfoBody` /// ///
JSON schema /// @@ -25419,7 +25456,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getTransactionWithCompressionInfoV2" + /// "getTransactionWithCompressionInfo" /// ] /// }, /// "params": { @@ -25439,17 +25476,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoV2Body { + pub struct PostGetTransactionWithCompressionInfoBody { ///An ID to identify the request. - pub id: PostGetTransactionWithCompressionInfoV2BodyId, + pub id: PostGetTransactionWithCompressionInfoBodyId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetTransactionWithCompressionInfoV2BodyJsonrpc, + pub jsonrpc: PostGetTransactionWithCompressionInfoBodyJsonrpc, ///The name of the method to invoke. - pub method: PostGetTransactionWithCompressionInfoV2BodyMethod, - pub params: PostGetTransactionWithCompressionInfoV2BodyParams, + pub method: PostGetTransactionWithCompressionInfoBodyMethod, + pub params: PostGetTransactionWithCompressionInfoBodyParams, } - impl PostGetTransactionWithCompressionInfoV2Body { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2Body { + impl PostGetTransactionWithCompressionInfoBody { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoBody { Default::default() } } @@ -25479,18 +25516,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2BodyId { + pub enum PostGetTransactionWithCompressionInfoBodyId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyId { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyId { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25501,8 +25538,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoV2BodyId 
{ + impl ::std::convert::TryFrom<&str> for PostGetTransactionWithCompressionInfoBodyId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25511,7 +25547,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyId { + for PostGetTransactionWithCompressionInfoBodyId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25520,7 +25556,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyId { + for PostGetTransactionWithCompressionInfoBodyId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25554,18 +25590,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + pub enum PostGetTransactionWithCompressionInfoBodyJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25577,7 +25613,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + for PostGetTransactionWithCompressionInfoBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25586,7 +25622,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + 
for PostGetTransactionWithCompressionInfoBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25595,7 +25631,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { + for PostGetTransactionWithCompressionInfoBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25612,7 +25648,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getTransactionWithCompressionInfoV2" + /// "getTransactionWithCompressionInfo" /// ] ///} /// ``` @@ -25629,34 +25665,34 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2BodyMethod { - #[serde(rename = "getTransactionWithCompressionInfoV2")] - GetTransactionWithCompressionInfoV2, + pub enum PostGetTransactionWithCompressionInfoBodyMethod { + #[serde(rename = "getTransactionWithCompressionInfo")] + GetTransactionWithCompressionInfo, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyMethod { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoBodyMethod { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { - Self::GetTransactionWithCompressionInfoV2 => { - f.write_str("getTransactionWithCompressionInfoV2") + Self::GetTransactionWithCompressionInfo => { + f.write_str("getTransactionWithCompressionInfo") } } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyMethod { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoBodyMethod { type Err = self::error::ConversionError; fn from_str( value: &str, ) -> ::std::result::Result { match value { - "getTransactionWithCompressionInfoV2" => { - Ok(Self::GetTransactionWithCompressionInfoV2) + "getTransactionWithCompressionInfo" => { + 
Ok(Self::GetTransactionWithCompressionInfo) } _ => Err("invalid value".into()), } } } impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoV2BodyMethod { + for PostGetTransactionWithCompressionInfoBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25665,7 +25701,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyMethod { + for PostGetTransactionWithCompressionInfoBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25674,7 +25710,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2BodyMethod { + for PostGetTransactionWithCompressionInfoBodyMethod { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25682,7 +25718,7 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetTransactionWithCompressionInfoV2BodyParams` + ///`PostGetTransactionWithCompressionInfoBodyParams` /// ///
JSON schema /// @@ -25703,15 +25739,15 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetTransactionWithCompressionInfoV2BodyParams { + pub struct PostGetTransactionWithCompressionInfoBodyParams { pub signature: SerializableSignature, } - impl PostGetTransactionWithCompressionInfoV2BodyParams { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2BodyParams { + impl PostGetTransactionWithCompressionInfoBodyParams { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoBodyParams { Default::default() } } - ///`PostGetTransactionWithCompressionInfoV2Response` + ///`PostGetTransactionWithCompressionInfoResponse` /// ///
JSON schema /// @@ -25762,13 +25798,13 @@ All endpoints return AccountV2.*/ /// "closedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// }, /// "openedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// } /// }, @@ -25785,26 +25821,26 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoV2Response { + pub struct PostGetTransactionWithCompressionInfoResponse { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub error: ::std::option::Option< - PostGetTransactionWithCompressionInfoV2ResponseError, + PostGetTransactionWithCompressionInfoResponseError, >, ///An ID to identify the response. - pub id: PostGetTransactionWithCompressionInfoV2ResponseId, + pub id: PostGetTransactionWithCompressionInfoResponseId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetTransactionWithCompressionInfoV2ResponseJsonrpc, + pub jsonrpc: PostGetTransactionWithCompressionInfoResponseJsonrpc, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub result: ::std::option::Option< - PostGetTransactionWithCompressionInfoV2ResponseResult, + PostGetTransactionWithCompressionInfoResponseResult, >, } - impl PostGetTransactionWithCompressionInfoV2Response { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2Response { + impl PostGetTransactionWithCompressionInfoResponse { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponse { Default::default() } } - ///`PostGetTransactionWithCompressionInfoV2ResponseError` + ///`PostGetTransactionWithCompressionInfoResponseError` /// ///
JSON schema /// @@ -25823,14 +25859,13 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoV2ResponseError { + pub struct PostGetTransactionWithCompressionInfoResponseError { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub code: ::std::option::Option, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub message: ::std::option::Option<::std::string::String>, } - impl ::std::default::Default - for PostGetTransactionWithCompressionInfoV2ResponseError { + impl ::std::default::Default for PostGetTransactionWithCompressionInfoResponseError { fn default() -> Self { Self { code: Default::default(), @@ -25838,8 +25873,8 @@ All endpoints return AccountV2.*/ } } } - impl PostGetTransactionWithCompressionInfoV2ResponseError { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseError { + impl PostGetTransactionWithCompressionInfoResponseError { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseError { Default::default() } } @@ -25869,18 +25904,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2ResponseId { + pub enum PostGetTransactionWithCompressionInfoResponseId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2ResponseId { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoResponseId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2ResponseId { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoResponseId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25892,7 +25927,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&str> - for 
PostGetTransactionWithCompressionInfoV2ResponseId { + for PostGetTransactionWithCompressionInfoResponseId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25901,7 +25936,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2ResponseId { + for PostGetTransactionWithCompressionInfoResponseId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25910,7 +25945,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2ResponseId { + for PostGetTransactionWithCompressionInfoResponseId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -25944,18 +25979,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + pub enum PostGetTransactionWithCompressionInfoResponseJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoResponseJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoResponseJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -25967,7 +26002,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&str> - for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + for PostGetTransactionWithCompressionInfoResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -25976,7 +26011,7 @@ All endpoints return AccountV2.*/ } } impl 
::std::convert::TryFrom<&::std::string::String> - for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + for PostGetTransactionWithCompressionInfoResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -25985,7 +26020,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { + for PostGetTransactionWithCompressionInfoResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26012,13 +26047,13 @@ All endpoints return AccountV2.*/ /// "closedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// }, /// "openedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// } /// }, @@ -26033,17 +26068,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetTransactionWithCompressionInfoV2ResponseResult { + pub struct PostGetTransactionWithCompressionInfoResponseResult { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub compression_info: ::std::option::Option< - PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo, + PostGetTransactionWithCompressionInfoResponseResultCompressionInfo, >, ///An encoded confirmed transaction with status meta #[serde(default, skip_serializing_if = "::serde_json::Map::is_empty")] pub transaction: ::serde_json::Map<::std::string::String, ::serde_json::Value>, } impl ::std::default::Default - for PostGetTransactionWithCompressionInfoV2ResponseResult { + for PostGetTransactionWithCompressionInfoResponseResult { fn default() -> Self { Self { compression_info: Default::default(), @@ -26051,12 +26086,12 @@ All endpoints return AccountV2.*/ } } } - impl PostGetTransactionWithCompressionInfoV2ResponseResult { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseResult { + impl PostGetTransactionWithCompressionInfoResponseResult { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseResult { Default::default() } } - ///`PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo` + ///`PostGetTransactionWithCompressionInfoResponseResultCompressionInfo` /// ///
JSON schema /// @@ -26071,13 +26106,13 @@ All endpoints return AccountV2.*/ /// "closedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// }, /// "openedAccounts": { /// "type": "array", /// "items": { - /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// "$ref": "#/components/schemas/AccountWithOptionalTokenData" /// } /// } /// }, @@ -26087,18 +26122,18 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { + pub struct PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { #[serde(rename = "closedAccounts")] - pub closed_accounts: ::std::vec::Vec, + pub closed_accounts: ::std::vec::Vec, #[serde(rename = "openedAccounts")] - pub opened_accounts: ::std::vec::Vec, + pub opened_accounts: ::std::vec::Vec, } - impl PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { - pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { + impl PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoResponseResultCompressionInfo { Default::default() } } - ///`PostGetValidityProofBody` + ///`PostGetTransactionWithCompressionInfoV2Body` /// ///
JSON schema /// @@ -26130,23 +26165,17 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getValidityProof" + /// "getTransactionWithCompressionInfoV2" /// ] /// }, /// "params": { /// "type": "object", + /// "required": [ + /// "signature" + /// ], /// "properties": { - /// "hashes": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/Hash" - /// } - /// }, - /// "newAddressesWithTrees": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AddressWithTree" - /// } + /// "signature": { + /// "$ref": "#/components/schemas/SerializableSignature" /// } /// }, /// "additionalProperties": false @@ -26156,17 +26185,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetValidityProofBody { + pub struct PostGetTransactionWithCompressionInfoV2Body { ///An ID to identify the request. - pub id: PostGetValidityProofBodyId, + pub id: PostGetTransactionWithCompressionInfoV2BodyId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetValidityProofBodyJsonrpc, + pub jsonrpc: PostGetTransactionWithCompressionInfoV2BodyJsonrpc, ///The name of the method to invoke. - pub method: PostGetValidityProofBodyMethod, - pub params: PostGetValidityProofBodyParams, + pub method: PostGetTransactionWithCompressionInfoV2BodyMethod, + pub params: PostGetTransactionWithCompressionInfoV2BodyParams, } - impl PostGetValidityProofBody { - pub fn builder() -> builder::PostGetValidityProofBody { + impl PostGetTransactionWithCompressionInfoV2Body { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2Body { Default::default() } } @@ -26196,18 +26225,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofBodyId { + pub enum PostGetTransactionWithCompressionInfoV2BodyId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetValidityProofBodyId { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetValidityProofBodyId { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26218,7 +26247,8 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyId { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2BodyId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26226,7 +26256,8 @@ All 
endpoints return AccountV2.*/ value.parse() } } - impl ::std::convert::TryFrom<&::std::string::String> for PostGetValidityProofBodyId { + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetTransactionWithCompressionInfoV2BodyId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26234,7 +26265,8 @@ All endpoints return AccountV2.*/ value.parse() } } - impl ::std::convert::TryFrom<::std::string::String> for PostGetValidityProofBodyId { + impl ::std::convert::TryFrom<::std::string::String> + for PostGetTransactionWithCompressionInfoV2BodyId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26268,18 +26300,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofBodyJsonrpc { + pub enum PostGetTransactionWithCompressionInfoV2BodyJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetValidityProofBodyJsonrpc { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetValidityProofBodyJsonrpc { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26290,7 +26322,8 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyJsonrpc { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26299,7 +26332,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofBodyJsonrpc { + for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: 
&::std::string::String, @@ -26308,7 +26341,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofBodyJsonrpc { + for PostGetTransactionWithCompressionInfoV2BodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26325,7 +26358,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getValidityProof" + /// "getTransactionWithCompressionInfoV2" /// ] ///} /// ``` @@ -26342,29 +26375,34 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofBodyMethod { - #[serde(rename = "getValidityProof")] - GetValidityProof, + pub enum PostGetTransactionWithCompressionInfoV2BodyMethod { + #[serde(rename = "getTransactionWithCompressionInfoV2")] + GetTransactionWithCompressionInfoV2, } - impl ::std::fmt::Display for PostGetValidityProofBodyMethod { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2BodyMethod { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { - Self::GetValidityProof => f.write_str("getValidityProof"), + Self::GetTransactionWithCompressionInfoV2 => { + f.write_str("getTransactionWithCompressionInfoV2") + } } } } - impl ::std::str::FromStr for PostGetValidityProofBodyMethod { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2BodyMethod { type Err = self::error::ConversionError; fn from_str( value: &str, ) -> ::std::result::Result { match value { - "getValidityProof" => Ok(Self::GetValidityProof), + "getTransactionWithCompressionInfoV2" => { + Ok(Self::GetTransactionWithCompressionInfoV2) + } _ => Err("invalid value".into()), } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyMethod { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2BodyMethod { type Error = self::error::ConversionError; fn try_from( value: 
&str, @@ -26373,7 +26411,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofBodyMethod { + for PostGetTransactionWithCompressionInfoV2BodyMethod { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26382,7 +26420,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofBodyMethod { + for PostGetTransactionWithCompressionInfoV2BodyMethod { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26390,25 +26428,19 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetValidityProofBodyParams` + ///`PostGetTransactionWithCompressionInfoV2BodyParams` /// ///
JSON schema /// /// ```json ///{ /// "type": "object", + /// "required": [ + /// "signature" + /// ], /// "properties": { - /// "hashes": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/Hash" - /// } - /// }, - /// "newAddressesWithTrees": { - /// "type": "array", - /// "items": { - /// "$ref": "#/components/schemas/AddressWithTree" - /// } + /// "signature": { + /// "$ref": "#/components/schemas/SerializableSignature" /// } /// }, /// "additionalProperties": false @@ -26417,30 +26449,15 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetValidityProofBodyParams { - #[serde(default, skip_serializing_if = "::std::vec::Vec::is_empty")] - pub hashes: ::std::vec::Vec, - #[serde( - rename = "newAddressesWithTrees", - default, - skip_serializing_if = "::std::vec::Vec::is_empty" - )] - pub new_addresses_with_trees: ::std::vec::Vec, - } - impl ::std::default::Default for PostGetValidityProofBodyParams { - fn default() -> Self { - Self { - hashes: Default::default(), - new_addresses_with_trees: Default::default(), - } - } + pub struct PostGetTransactionWithCompressionInfoV2BodyParams { + pub signature: SerializableSignature, } - impl PostGetValidityProofBodyParams { - pub fn builder() -> builder::PostGetValidityProofBodyParams { + impl PostGetTransactionWithCompressionInfoV2BodyParams { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2BodyParams { Default::default() } } - ///`PostGetValidityProofResponse` + ///`PostGetTransactionWithCompressionInfoV2Response` /// ///
JSON schema /// @@ -26478,42 +26495,62 @@ All endpoints return AccountV2.*/ /// ] /// }, /// "result": { + /// "description": "A Solana transaction with additional compression information", /// "type": "object", - /// "required": [ - /// "context", - /// "value" - /// ], /// "properties": { - /// "context": { - /// "$ref": "#/components/schemas/Context" + /// "compression_info": { + /// "type": "object", + /// "required": [ + /// "closedAccounts", + /// "openedAccounts" + /// ], + /// "properties": { + /// "closedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// } + /// }, + /// "openedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// } + /// } + /// }, + /// "additionalProperties": false /// }, - /// "value": { - /// "$ref": "#/components/schemas/CompressedProofWithContext" + /// "transaction": { + /// "description": "An encoded confirmed transaction with status meta", + /// "type": "object" /// } - /// }, - /// "additionalProperties": false + /// } /// } /// } ///} /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetValidityProofResponse { + pub struct PostGetTransactionWithCompressionInfoV2Response { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub error: ::std::option::Option, + pub error: ::std::option::Option< + PostGetTransactionWithCompressionInfoV2ResponseError, + >, ///An ID to identify the response. - pub id: PostGetValidityProofResponseId, + pub id: PostGetTransactionWithCompressionInfoV2ResponseId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetValidityProofResponseJsonrpc, + pub jsonrpc: PostGetTransactionWithCompressionInfoV2ResponseJsonrpc, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] - pub result: ::std::option::Option, + pub result: ::std::option::Option< + PostGetTransactionWithCompressionInfoV2ResponseResult, + >, } - impl PostGetValidityProofResponse { - pub fn builder() -> builder::PostGetValidityProofResponse { + impl PostGetTransactionWithCompressionInfoV2Response { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2Response { Default::default() } } - ///`PostGetValidityProofResponseError` + ///`PostGetTransactionWithCompressionInfoV2ResponseError` /// ///
JSON schema /// @@ -26532,13 +26569,14 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetValidityProofResponseError { + pub struct PostGetTransactionWithCompressionInfoV2ResponseError { #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub code: ::std::option::Option, #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] pub message: ::std::option::Option<::std::string::String>, } - impl ::std::default::Default for PostGetValidityProofResponseError { + impl ::std::default::Default + for PostGetTransactionWithCompressionInfoV2ResponseError { fn default() -> Self { Self { code: Default::default(), @@ -26546,8 +26584,8 @@ All endpoints return AccountV2.*/ } } } - impl PostGetValidityProofResponseError { - pub fn builder() -> builder::PostGetValidityProofResponseError { + impl PostGetTransactionWithCompressionInfoV2ResponseError { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseError { Default::default() } } @@ -26577,18 +26615,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofResponseId { + pub enum PostGetTransactionWithCompressionInfoV2ResponseId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetValidityProofResponseId { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2ResponseId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetValidityProofResponseId { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2ResponseId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26599,7 +26637,8 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofResponseId { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2ResponseId { type Error = 
self::error::ConversionError; fn try_from( value: &str, @@ -26608,7 +26647,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofResponseId { + for PostGetTransactionWithCompressionInfoV2ResponseId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26617,7 +26656,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofResponseId { + for PostGetTransactionWithCompressionInfoV2ResponseId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26651,18 +26690,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofResponseJsonrpc { + pub enum PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetValidityProofResponseJsonrpc { + impl ::std::fmt::Display for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetValidityProofResponseJsonrpc { + impl ::std::str::FromStr for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26673,7 +26712,8 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofResponseJsonrpc { + impl ::std::convert::TryFrom<&str> + for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26682,7 +26722,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofResponseJsonrpc { + for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: 
&::std::string::String, @@ -26691,7 +26731,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofResponseJsonrpc { + for PostGetTransactionWithCompressionInfoV2ResponseJsonrpc { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26699,7 +26739,70 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetValidityProofResponseResult` + ///A Solana transaction with additional compression information + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "A Solana transaction with additional compression information", + /// "type": "object", + /// "properties": { + /// "compression_info": { + /// "type": "object", + /// "required": [ + /// "closedAccounts", + /// "openedAccounts" + /// ], + /// "properties": { + /// "closedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// } + /// }, + /// "openedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// } + /// } + /// }, + /// "additionalProperties": false + /// }, + /// "transaction": { + /// "description": "An encoded confirmed transaction with status meta", + /// "type": "object" + /// } + /// } + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + pub struct PostGetTransactionWithCompressionInfoV2ResponseResult { + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub compression_info: ::std::option::Option< + PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo, + >, + ///An encoded confirmed transaction with status meta + #[serde(default, skip_serializing_if = "::serde_json::Map::is_empty")] + pub transaction: ::serde_json::Map<::std::string::String, ::serde_json::Value>, + } + impl ::std::default::Default + for PostGetTransactionWithCompressionInfoV2ResponseResult { + fn default() -> Self { + Self { + compression_info: Default::default(), + transaction: Default::default(), + } + } + } + impl PostGetTransactionWithCompressionInfoV2ResponseResult { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseResult { + Default::default() + } + } + ///`PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo` /// ///
JSON schema /// @@ -26707,15 +26810,21 @@ All endpoints return AccountV2.*/ ///{ /// "type": "object", /// "required": [ - /// "context", - /// "value" + /// "closedAccounts", + /// "openedAccounts" /// ], /// "properties": { - /// "context": { - /// "$ref": "#/components/schemas/Context" + /// "closedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/ClosedAccountWithOptionalTokenDataV2" + /// } /// }, - /// "value": { - /// "$ref": "#/components/schemas/CompressedProofWithContext" + /// "openedAccounts": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AccountWithOptionalTokenDataV2" + /// } /// } /// }, /// "additionalProperties": false @@ -26724,16 +26833,18 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetValidityProofResponseResult { - pub context: Context, - pub value: CompressedProofWithContext, + pub struct PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { + #[serde(rename = "closedAccounts")] + pub closed_accounts: ::std::vec::Vec, + #[serde(rename = "openedAccounts")] + pub opened_accounts: ::std::vec::Vec, } - impl PostGetValidityProofResponseResult { - pub fn builder() -> builder::PostGetValidityProofResponseResult { + impl PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { + pub fn builder() -> builder::PostGetTransactionWithCompressionInfoV2ResponseResultCompressionInfo { Default::default() } } - ///`PostGetValidityProofV2Body` + ///`PostGetValidityProofBody` /// ///
JSON schema /// @@ -26765,7 +26876,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getValidityProofV2" + /// "getValidityProof" /// ] /// }, /// "params": { @@ -26791,17 +26902,17 @@ All endpoints return AccountV2.*/ /// ``` ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] - pub struct PostGetValidityProofV2Body { + pub struct PostGetValidityProofBody { ///An ID to identify the request. - pub id: PostGetValidityProofV2BodyId, + pub id: PostGetValidityProofBodyId, ///The version of the JSON-RPC protocol. - pub jsonrpc: PostGetValidityProofV2BodyJsonrpc, + pub jsonrpc: PostGetValidityProofBodyJsonrpc, ///The name of the method to invoke. - pub method: PostGetValidityProofV2BodyMethod, - pub params: PostGetValidityProofV2BodyParams, + pub method: PostGetValidityProofBodyMethod, + pub params: PostGetValidityProofBodyParams, } - impl PostGetValidityProofV2Body { - pub fn builder() -> builder::PostGetValidityProofV2Body { + impl PostGetValidityProofBody { + pub fn builder() -> builder::PostGetValidityProofBody { Default::default() } } @@ -26831,18 +26942,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofV2BodyId { + pub enum PostGetValidityProofBodyId { #[serde(rename = "test-account")] TestAccount, } - impl ::std::fmt::Display for PostGetValidityProofV2BodyId { + impl ::std::fmt::Display for PostGetValidityProofBodyId { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::TestAccount => f.write_str("test-account"), } } } - impl ::std::str::FromStr for PostGetValidityProofV2BodyId { + impl ::std::str::FromStr for PostGetValidityProofBodyId { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26853,7 +26964,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyId { + impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyId { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26861,8 +26972,7 @@ All endpoints return AccountV2.*/ value.parse() } } - impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofV2BodyId { + impl 
::std::convert::TryFrom<&::std::string::String> for PostGetValidityProofBodyId { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26870,8 +26980,7 @@ All endpoints return AccountV2.*/ value.parse() } } - impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofV2BodyId { + impl ::std::convert::TryFrom<::std::string::String> for PostGetValidityProofBodyId { type Error = self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -26905,18 +27014,18 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofV2BodyJsonrpc { + pub enum PostGetValidityProofBodyJsonrpc { #[serde(rename = "2.0")] X20, } - impl ::std::fmt::Display for PostGetValidityProofV2BodyJsonrpc { + impl ::std::fmt::Display for PostGetValidityProofBodyJsonrpc { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { Self::X20 => f.write_str("2.0"), } } } - impl ::std::str::FromStr for PostGetValidityProofV2BodyJsonrpc { + impl ::std::str::FromStr for PostGetValidityProofBodyJsonrpc { type Err = self::error::ConversionError; fn from_str( value: &str, @@ -26927,7 +27036,7 @@ All endpoints return AccountV2.*/ } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyJsonrpc { + impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -26936,7 +27045,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofV2BodyJsonrpc { + for PostGetValidityProofBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -26945,7 +27054,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofV2BodyJsonrpc { + for PostGetValidityProofBodyJsonrpc { type Error = self::error::ConversionError; fn try_from( 
value: ::std::string::String, @@ -26962,7 +27071,7 @@ All endpoints return AccountV2.*/ /// "description": "The name of the method to invoke.", /// "type": "string", /// "enum": [ - /// "getValidityProofV2" + /// "getValidityProof" /// ] ///} /// ``` @@ -26979,29 +27088,29 @@ All endpoints return AccountV2.*/ PartialEq, PartialOrd )] - pub enum PostGetValidityProofV2BodyMethod { - #[serde(rename = "getValidityProofV2")] - GetValidityProofV2, + pub enum PostGetValidityProofBodyMethod { + #[serde(rename = "getValidityProof")] + GetValidityProof, } - impl ::std::fmt::Display for PostGetValidityProofV2BodyMethod { + impl ::std::fmt::Display for PostGetValidityProofBodyMethod { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match *self { - Self::GetValidityProofV2 => f.write_str("getValidityProofV2"), + Self::GetValidityProof => f.write_str("getValidityProof"), } } } - impl ::std::str::FromStr for PostGetValidityProofV2BodyMethod { + impl ::std::str::FromStr for PostGetValidityProofBodyMethod { type Err = self::error::ConversionError; fn from_str( value: &str, ) -> ::std::result::Result { match value { - "getValidityProofV2" => Ok(Self::GetValidityProofV2), + "getValidityProof" => Ok(Self::GetValidityProof), _ => Err("invalid value".into()), } } } - impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyMethod { + impl ::std::convert::TryFrom<&str> for PostGetValidityProofBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &str, @@ -27010,7 +27119,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<&::std::string::String> - for PostGetValidityProofV2BodyMethod { + for PostGetValidityProofBodyMethod { type Error = self::error::ConversionError; fn try_from( value: &::std::string::String, @@ -27019,7 +27128,7 @@ All endpoints return AccountV2.*/ } } impl ::std::convert::TryFrom<::std::string::String> - for PostGetValidityProofV2BodyMethod { + for PostGetValidityProofBodyMethod { type Error = 
self::error::ConversionError; fn try_from( value: ::std::string::String, @@ -27027,7 +27136,7 @@ All endpoints return AccountV2.*/ value.parse() } } - ///`PostGetValidityProofV2BodyParams` + ///`PostGetValidityProofBodyParams` /// ///
JSON schema /// @@ -27054,7 +27163,7 @@ All endpoints return AccountV2.*/ ///
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] - pub struct PostGetValidityProofV2BodyParams { + pub struct PostGetValidityProofBodyParams { #[serde(default, skip_serializing_if = "::std::vec::Vec::is_empty")] pub hashes: ::std::vec::Vec, #[serde( @@ -27064,7 +27173,7 @@ All endpoints return AccountV2.*/ )] pub new_addresses_with_trees: ::std::vec::Vec, } - impl ::std::default::Default for PostGetValidityProofV2BodyParams { + impl ::std::default::Default for PostGetValidityProofBodyParams { fn default() -> Self { Self { hashes: Default::default(), @@ -27072,12 +27181,649 @@ All endpoints return AccountV2.*/ } } } - impl PostGetValidityProofV2BodyParams { - pub fn builder() -> builder::PostGetValidityProofV2BodyParams { + impl PostGetValidityProofBodyParams { + pub fn builder() -> builder::PostGetValidityProofBodyParams { Default::default() } } - ///`PostGetValidityProofV2Response` + ///`PostGetValidityProofResponse` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "required": [ + /// "id", + /// "jsonrpc" + /// ], + /// "properties": { + /// "error": { + /// "type": "object", + /// "properties": { + /// "code": { + /// "type": "integer" + /// }, + /// "message": { + /// "type": "string" + /// } + /// } + /// }, + /// "id": { + /// "description": "An ID to identify the response.", + /// "type": "string", + /// "enum": [ + /// "test-account" + /// ] + /// }, + /// "jsonrpc": { + /// "description": "The version of the JSON-RPC protocol.", + /// "type": "string", + /// "enum": [ + /// "2.0" + /// ] + /// }, + /// "result": { + /// "type": "object", + /// "required": [ + /// "context", + /// "value" + /// ], + /// "properties": { + /// "context": { + /// "$ref": "#/components/schemas/Context" + /// }, + /// "value": { + /// "$ref": "#/components/schemas/CompressedProofWithContext" + /// } + /// }, + /// "additionalProperties": false + /// } + /// } + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + pub struct PostGetValidityProofResponse { + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub error: ::std::option::Option, + ///An ID to identify the response. + pub id: PostGetValidityProofResponseId, + ///The version of the JSON-RPC protocol. + pub jsonrpc: PostGetValidityProofResponseJsonrpc, + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub result: ::std::option::Option, + } + impl PostGetValidityProofResponse { + pub fn builder() -> builder::PostGetValidityProofResponse { + Default::default() + } + } + ///`PostGetValidityProofResponseError` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "properties": { + /// "code": { + /// "type": "integer" + /// }, + /// "message": { + /// "type": "string" + /// } + /// } + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + pub struct PostGetValidityProofResponseError { + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub code: ::std::option::Option, + #[serde(default, skip_serializing_if = "::std::option::Option::is_none")] + pub message: ::std::option::Option<::std::string::String>, + } + impl ::std::default::Default for PostGetValidityProofResponseError { + fn default() -> Self { + Self { + code: Default::default(), + message: Default::default(), + } + } + } + impl PostGetValidityProofResponseError { + pub fn builder() -> builder::PostGetValidityProofResponseError { + Default::default() + } + } + ///An ID to identify the response. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "An ID to identify the response.", + /// "type": "string", + /// "enum": [ + /// "test-account" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofResponseId { + #[serde(rename = "test-account")] + TestAccount, + } + impl ::std::fmt::Display for PostGetValidityProofResponseId { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::TestAccount => f.write_str("test-account"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofResponseId { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "test-account" => Ok(Self::TestAccount), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofResponseId { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofResponseId { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofResponseId { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///The version of the JSON-RPC protocol. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "The version of the JSON-RPC protocol.", + /// "type": "string", + /// "enum": [ + /// "2.0" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofResponseJsonrpc { + #[serde(rename = "2.0")] + X20, + } + impl ::std::fmt::Display for PostGetValidityProofResponseJsonrpc { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::X20 => f.write_str("2.0"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofResponseJsonrpc { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "2.0" => Ok(Self::X20), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofResponseJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofResponseJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofResponseJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///`PostGetValidityProofResponseResult` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "required": [ + /// "context", + /// "value" + /// ], + /// "properties": { + /// "context": { + /// "$ref": "#/components/schemas/Context" + /// }, + /// "value": { + /// "$ref": "#/components/schemas/CompressedProofWithContext" + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct PostGetValidityProofResponseResult { + pub context: Context, + pub value: CompressedProofWithContext, + } + impl PostGetValidityProofResponseResult { + pub fn builder() -> builder::PostGetValidityProofResponseResult { + Default::default() + } + } + ///`PostGetValidityProofV2Body` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "required": [ + /// "id", + /// "jsonrpc", + /// "method", + /// "params" + /// ], + /// "properties": { + /// "id": { + /// "description": "An ID to identify the request.", + /// "type": "string", + /// "enum": [ + /// "test-account" + /// ] + /// }, + /// "jsonrpc": { + /// "description": "The version of the JSON-RPC protocol.", + /// "type": "string", + /// "enum": [ + /// "2.0" + /// ] + /// }, + /// "method": { + /// "description": "The name of the method to invoke.", + /// "type": "string", + /// "enum": [ + /// "getValidityProofV2" + /// ] + /// }, + /// "params": { + /// "type": "object", + /// "properties": { + /// "hashes": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/Hash" + /// } + /// }, + /// "newAddressesWithTrees": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AddressWithTree" + /// } + /// } + /// }, + /// "additionalProperties": false + /// } + /// } + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + pub struct PostGetValidityProofV2Body { + ///An ID to identify the request. + pub id: PostGetValidityProofV2BodyId, + ///The version of the JSON-RPC protocol. + pub jsonrpc: PostGetValidityProofV2BodyJsonrpc, + ///The name of the method to invoke. + pub method: PostGetValidityProofV2BodyMethod, + pub params: PostGetValidityProofV2BodyParams, + } + impl PostGetValidityProofV2Body { + pub fn builder() -> builder::PostGetValidityProofV2Body { + Default::default() + } + } + ///An ID to identify the request. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "An ID to identify the request.", + /// "type": "string", + /// "enum": [ + /// "test-account" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofV2BodyId { + #[serde(rename = "test-account")] + TestAccount, + } + impl ::std::fmt::Display for PostGetValidityProofV2BodyId { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::TestAccount => f.write_str("test-account"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofV2BodyId { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "test-account" => Ok(Self::TestAccount), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyId { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofV2BodyId { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofV2BodyId { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///The version of the JSON-RPC protocol. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "The version of the JSON-RPC protocol.", + /// "type": "string", + /// "enum": [ + /// "2.0" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofV2BodyJsonrpc { + #[serde(rename = "2.0")] + X20, + } + impl ::std::fmt::Display for PostGetValidityProofV2BodyJsonrpc { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::X20 => f.write_str("2.0"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofV2BodyJsonrpc { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "2.0" => Ok(Self::X20), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofV2BodyJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofV2BodyJsonrpc { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///The name of the method to invoke. + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "The name of the method to invoke.", + /// "type": "string", + /// "enum": [ + /// "getValidityProofV2" + /// ] + ///} + /// ``` + ///
+ #[derive( + ::serde::Deserialize, + ::serde::Serialize, + Clone, + Copy, + Debug, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd + )] + pub enum PostGetValidityProofV2BodyMethod { + #[serde(rename = "getValidityProofV2")] + GetValidityProofV2, + } + impl ::std::fmt::Display for PostGetValidityProofV2BodyMethod { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match *self { + Self::GetValidityProofV2 => f.write_str("getValidityProofV2"), + } + } + } + impl ::std::str::FromStr for PostGetValidityProofV2BodyMethod { + type Err = self::error::ConversionError; + fn from_str( + value: &str, + ) -> ::std::result::Result { + match value { + "getValidityProofV2" => Ok(Self::GetValidityProofV2), + _ => Err("invalid value".into()), + } + } + } + impl ::std::convert::TryFrom<&str> for PostGetValidityProofV2BodyMethod { + type Error = self::error::ConversionError; + fn try_from( + value: &str, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<&::std::string::String> + for PostGetValidityProofV2BodyMethod { + type Error = self::error::ConversionError; + fn try_from( + value: &::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + impl ::std::convert::TryFrom<::std::string::String> + for PostGetValidityProofV2BodyMethod { + type Error = self::error::ConversionError; + fn try_from( + value: ::std::string::String, + ) -> ::std::result::Result { + value.parse() + } + } + ///`PostGetValidityProofV2BodyParams` + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "type": "object", + /// "properties": { + /// "hashes": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/Hash" + /// } + /// }, + /// "newAddressesWithTrees": { + /// "type": "array", + /// "items": { + /// "$ref": "#/components/schemas/AddressWithTree" + /// } + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct PostGetValidityProofV2BodyParams { + #[serde(default, skip_serializing_if = "::std::vec::Vec::is_empty")] + pub hashes: ::std::vec::Vec, + #[serde( + rename = "newAddressesWithTrees", + default, + skip_serializing_if = "::std::vec::Vec::is_empty" + )] + pub new_addresses_with_trees: ::std::vec::Vec, + } + impl ::std::default::Default for PostGetValidityProofV2BodyParams { + fn default() -> Self { + Self { + hashes: Default::default(), + new_addresses_with_trees: Default::default(), + } + } + } + impl PostGetValidityProofV2BodyParams { + pub fn builder() -> builder::PostGetValidityProofV2BodyParams { + Default::default() + } + } + ///`PostGetValidityProofV2Response` /// ///
JSON schema /// @@ -27418,6 +28164,52 @@ All endpoints return AccountV2.*/ Default::default() } } + ///A lightweight queue leaf index entry + /// + ///
JSON schema + /// + /// ```json + ///{ + /// "description": "A lightweight queue leaf index entry", + /// "type": "object", + /// "required": [ + /// "hash", + /// "leafIndex", + /// "queueIndex" + /// ], + /// "properties": { + /// "hash": { + /// "$ref": "#/components/schemas/Hash" + /// }, + /// "leafIndex": { + /// "type": "integer", + /// "format": "uint64", + /// "minimum": 0.0 + /// }, + /// "queueIndex": { + /// "type": "integer", + /// "format": "uint64", + /// "minimum": 0.0 + /// } + /// }, + /// "additionalProperties": false + ///} + /// ``` + ///
+ #[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug)] + #[serde(deny_unknown_fields)] + pub struct QueueLeafIndex { + pub hash: Hash, + #[serde(rename = "leafIndex")] + pub leaf_index: u64, + #[serde(rename = "queueIndex")] + pub queue_index: u64, + } + impl QueueLeafIndex { + pub fn builder() -> builder::QueueLeafIndex { + Default::default() + } + } ///Parameters for requesting queue elements /// ///
JSON schema @@ -30890,6 +31682,148 @@ All endpoints return AccountV2.*/ } } #[derive(Clone, Debug)] + pub struct GetQueueLeafIndicesRequest { + limit: ::std::result::Result, + start_index: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + tree: ::std::result::Result, + } + impl ::std::default::Default for GetQueueLeafIndicesRequest { + fn default() -> Self { + Self { + limit: Err("no value supplied for limit".to_string()), + start_index: Ok(Default::default()), + tree: Err("no value supplied for tree".to_string()), + } + } + } + impl GetQueueLeafIndicesRequest { + pub fn limit(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.limit = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for limit: {e}") + }); + self + } + pub fn start_index(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::option::Option>, + T::Error: ::std::fmt::Display, + { + self.start_index = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for start_index: {e}") + }); + self + } + pub fn tree(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.tree = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for tree: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::GetQueueLeafIndicesRequest { + type Error = super::error::ConversionError; + fn try_from( + value: GetQueueLeafIndicesRequest, + ) -> ::std::result::Result { + Ok(Self { + limit: value.limit?, + start_index: value.start_index?, + tree: value.tree?, + }) + } + } + impl ::std::convert::From + for GetQueueLeafIndicesRequest { + fn from(value: super::GetQueueLeafIndicesRequest) -> Self { + Self { + limit: Ok(value.limit), + start_index: Ok(value.start_index), + tree: Ok(value.tree), + } + } + } + #[derive(Clone, Debug)] + pub struct GetQueueLeafIndicesResponse { + 
context: ::std::result::Result, + value: ::std::result::Result< + ::std::vec::Vec, + ::std::string::String, + >, + } + impl ::std::default::Default for GetQueueLeafIndicesResponse { + fn default() -> Self { + Self { + context: Err("no value supplied for context".to_string()), + value: Err("no value supplied for value".to_string()), + } + } + } + impl GetQueueLeafIndicesResponse { + pub fn context(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.context = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for context: {e}") + }); + self + } + pub fn value(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::vec::Vec>, + T::Error: ::std::fmt::Display, + { + self.value = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for value: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::GetQueueLeafIndicesResponse { + type Error = super::error::ConversionError; + fn try_from( + value: GetQueueLeafIndicesResponse, + ) -> ::std::result::Result { + Ok(Self { + context: value.context?, + value: value.value?, + }) + } + } + impl ::std::convert::From + for GetQueueLeafIndicesResponse { + fn from(value: super::GetQueueLeafIndicesResponse) -> Self { + Self { + context: Ok(value.context), + value: Ok(value.value), + } + } + } + #[derive(Clone, Debug)] pub struct InputQueueData { account_hashes: ::std::result::Result< ::std::vec::Vec, @@ -46275,6 +47209,424 @@ All endpoints return AccountV2.*/ } } #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesBody { + id: ::std::result::Result< + super::PostGetQueueLeafIndicesBodyId, + ::std::string::String, + >, + jsonrpc: ::std::result::Result< + super::PostGetQueueLeafIndicesBodyJsonrpc, + ::std::string::String, + >, + method: ::std::result::Result< + super::PostGetQueueLeafIndicesBodyMethod, + ::std::string::String, + >, + params: ::std::result::Result< + 
super::PostGetQueueLeafIndicesBodyParams, + ::std::string::String, + >, + } + impl ::std::default::Default for PostGetQueueLeafIndicesBody { + fn default() -> Self { + Self { + id: Err("no value supplied for id".to_string()), + jsonrpc: Err("no value supplied for jsonrpc".to_string()), + method: Err("no value supplied for method".to_string()), + params: Err("no value supplied for params".to_string()), + } + } + } + impl PostGetQueueLeafIndicesBody { + pub fn id(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.id = value + .try_into() + .map_err(|e| format!("error converting supplied value for id: {e}")); + self + } + pub fn jsonrpc(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.jsonrpc = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for jsonrpc: {e}") + }); + self + } + pub fn method(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.method = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for method: {e}") + }); + self + } + pub fn params(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.params = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for params: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesBody { + type Error = super::error::ConversionError; + fn try_from( + value: PostGetQueueLeafIndicesBody, + ) -> ::std::result::Result { + Ok(Self { + id: value.id?, + jsonrpc: value.jsonrpc?, + method: value.method?, + params: value.params?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesBody { + fn from(value: super::PostGetQueueLeafIndicesBody) -> Self { + Self { + id: Ok(value.id), + jsonrpc: Ok(value.jsonrpc), + method: Ok(value.method), + params: 
Ok(value.params), + } + } + } + #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesBodyParams { + limit: ::std::result::Result, + start_index: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + tree: ::std::result::Result, + } + impl ::std::default::Default for PostGetQueueLeafIndicesBodyParams { + fn default() -> Self { + Self { + limit: Err("no value supplied for limit".to_string()), + start_index: Ok(Default::default()), + tree: Err("no value supplied for tree".to_string()), + } + } + } + impl PostGetQueueLeafIndicesBodyParams { + pub fn limit(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.limit = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for limit: {e}") + }); + self + } + pub fn start_index(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::option::Option>, + T::Error: ::std::fmt::Display, + { + self.start_index = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for start_index: {e}") + }); + self + } + pub fn tree(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.tree = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for tree: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesBodyParams { + type Error = super::error::ConversionError; + fn try_from( + value: PostGetQueueLeafIndicesBodyParams, + ) -> ::std::result::Result { + Ok(Self { + limit: value.limit?, + start_index: value.start_index?, + tree: value.tree?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesBodyParams { + fn from(value: super::PostGetQueueLeafIndicesBodyParams) -> Self { + Self { + limit: Ok(value.limit), + start_index: Ok(value.start_index), + tree: Ok(value.tree), + } + } + } + #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesResponse { 
+ error: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + id: ::std::result::Result< + super::PostGetQueueLeafIndicesResponseId, + ::std::string::String, + >, + jsonrpc: ::std::result::Result< + super::PostGetQueueLeafIndicesResponseJsonrpc, + ::std::string::String, + >, + result: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + } + impl ::std::default::Default for PostGetQueueLeafIndicesResponse { + fn default() -> Self { + Self { + error: Ok(Default::default()), + id: Err("no value supplied for id".to_string()), + jsonrpc: Err("no value supplied for jsonrpc".to_string()), + result: Ok(Default::default()), + } + } + } + impl PostGetQueueLeafIndicesResponse { + pub fn error(mut self, value: T) -> Self + where + T: ::std::convert::TryInto< + ::std::option::Option, + >, + T::Error: ::std::fmt::Display, + { + self.error = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for error: {e}") + }); + self + } + pub fn id(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.id = value + .try_into() + .map_err(|e| format!("error converting supplied value for id: {e}")); + self + } + pub fn jsonrpc(mut self, value: T) -> Self + where + T: ::std::convert::TryInto< + super::PostGetQueueLeafIndicesResponseJsonrpc, + >, + T::Error: ::std::fmt::Display, + { + self.jsonrpc = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for jsonrpc: {e}") + }); + self + } + pub fn result(mut self, value: T) -> Self + where + T: ::std::convert::TryInto< + ::std::option::Option, + >, + T::Error: ::std::fmt::Display, + { + self.result = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for result: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesResponse { + type Error = super::error::ConversionError; + fn try_from( + value: 
PostGetQueueLeafIndicesResponse, + ) -> ::std::result::Result { + Ok(Self { + error: value.error?, + id: value.id?, + jsonrpc: value.jsonrpc?, + result: value.result?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesResponse { + fn from(value: super::PostGetQueueLeafIndicesResponse) -> Self { + Self { + error: Ok(value.error), + id: Ok(value.id), + jsonrpc: Ok(value.jsonrpc), + result: Ok(value.result), + } + } + } + #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesResponseError { + code: ::std::result::Result< + ::std::option::Option, + ::std::string::String, + >, + message: ::std::result::Result< + ::std::option::Option<::std::string::String>, + ::std::string::String, + >, + } + impl ::std::default::Default for PostGetQueueLeafIndicesResponseError { + fn default() -> Self { + Self { + code: Ok(Default::default()), + message: Ok(Default::default()), + } + } + } + impl PostGetQueueLeafIndicesResponseError { + pub fn code(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::option::Option>, + T::Error: ::std::fmt::Display, + { + self.code = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for code: {e}") + }); + self + } + pub fn message(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::option::Option<::std::string::String>>, + T::Error: ::std::fmt::Display, + { + self.message = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for message: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesResponseError { + type Error = super::error::ConversionError; + fn try_from( + value: PostGetQueueLeafIndicesResponseError, + ) -> ::std::result::Result { + Ok(Self { + code: value.code?, + message: value.message?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesResponseError { + fn from(value: super::PostGetQueueLeafIndicesResponseError) -> Self { + Self { + code: Ok(value.code), + 
message: Ok(value.message), + } + } + } + #[derive(Clone, Debug)] + pub struct PostGetQueueLeafIndicesResponseResult { + context: ::std::result::Result, + value: ::std::result::Result< + ::std::vec::Vec, + ::std::string::String, + >, + } + impl ::std::default::Default for PostGetQueueLeafIndicesResponseResult { + fn default() -> Self { + Self { + context: Err("no value supplied for context".to_string()), + value: Err("no value supplied for value".to_string()), + } + } + } + impl PostGetQueueLeafIndicesResponseResult { + pub fn context(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.context = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for context: {e}") + }); + self + } + pub fn value(mut self, value: T) -> Self + where + T: ::std::convert::TryInto<::std::vec::Vec>, + T::Error: ::std::fmt::Display, + { + self.value = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for value: {e}") + }); + self + } + } + impl ::std::convert::TryFrom + for super::PostGetQueueLeafIndicesResponseResult { + type Error = super::error::ConversionError; + fn try_from( + value: PostGetQueueLeafIndicesResponseResult, + ) -> ::std::result::Result { + Ok(Self { + context: value.context?, + value: value.value?, + }) + } + } + impl ::std::convert::From + for PostGetQueueLeafIndicesResponseResult { + fn from(value: super::PostGetQueueLeafIndicesResponseResult) -> Self { + Self { + context: Ok(value.context), + value: Ok(value.value), + } + } + } + #[derive(Clone, Debug)] pub struct PostGetTransactionWithCompressionInfoBody { id: ::std::result::Result< super::PostGetTransactionWithCompressionInfoBodyId, @@ -48206,6 +49558,80 @@ All endpoints return AccountV2.*/ } } #[derive(Clone, Debug)] + pub struct QueueLeafIndex { + hash: ::std::result::Result, + leaf_index: ::std::result::Result, + queue_index: ::std::result::Result, + } + impl ::std::default::Default for 
QueueLeafIndex { + fn default() -> Self { + Self { + hash: Err("no value supplied for hash".to_string()), + leaf_index: Err("no value supplied for leaf_index".to_string()), + queue_index: Err("no value supplied for queue_index".to_string()), + } + } + } + impl QueueLeafIndex { + pub fn hash(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.hash = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for hash: {e}") + }); + self + } + pub fn leaf_index(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.leaf_index = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for leaf_index: {e}") + }); + self + } + pub fn queue_index(mut self, value: T) -> Self + where + T: ::std::convert::TryInto, + T::Error: ::std::fmt::Display, + { + self.queue_index = value + .try_into() + .map_err(|e| { + format!("error converting supplied value for queue_index: {e}") + }); + self + } + } + impl ::std::convert::TryFrom for super::QueueLeafIndex { + type Error = super::error::ConversionError; + fn try_from( + value: QueueLeafIndex, + ) -> ::std::result::Result { + Ok(Self { + hash: value.hash?, + leaf_index: value.leaf_index?, + queue_index: value.queue_index?, + }) + } + } + impl ::std::convert::From for QueueLeafIndex { + fn from(value: super::QueueLeafIndex) -> Self { + Self { + hash: Ok(value.hash), + leaf_index: Ok(value.leaf_index), + queue_index: Ok(value.queue_index), + } + } + } + #[derive(Clone, Debug)] pub struct QueueRequest { limit: ::std::result::Result, start_index: ::std::result::Result< @@ -50080,6 +51506,17 @@ let response = client.post_get_queue_info() pub fn post_get_queue_info(&self) -> builder::PostGetQueueInfo<'_> { builder::PostGetQueueInfo::new(self) } + /**Sends a `POST` request to `/getQueueLeafIndices` + +```ignore +let response = client.post_get_queue_leaf_indices() + .body(body) + .send() 
+ .await; +```*/ + pub fn post_get_queue_leaf_indices(&self) -> builder::PostGetQueueLeafIndices<'_> { + builder::PostGetQueueLeafIndices::new(self) + } /**Sends a `POST` request to `/getTransactionWithCompressionInfo` ```ignore @@ -50447,15 +51884,221 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_account_proof`] + /**Builder for [`Client::post_get_compressed_account_proof`] + +[`Client::post_get_compressed_account_proof`]: super::Client::post_get_compressed_account_proof*/ + #[derive(Debug, Clone)] + pub struct PostGetCompressedAccountProof<'a> { + client: &'a super::Client, + body: Result, + } + impl<'a> PostGetCompressedAccountProof<'a> { + pub fn new(client: &'a super::Client) -> Self { + Self { + client: client, + body: Ok(::std::default::Default::default()), + } + } + pub fn body(mut self, value: V) -> Self + where + V: std::convert::TryInto, + >::Error: std::fmt::Display, + { + self.body = value + .try_into() + .map(From::from) + .map_err(|s| { + format!( + "conversion to `PostGetCompressedAccountProofBody` for body failed: {}", + s + ) + }); + self + } + pub fn body_map(mut self, f: F) -> Self + where + F: std::ops::FnOnce( + types::builder::PostGetCompressedAccountProofBody, + ) -> types::builder::PostGetCompressedAccountProofBody, + { + self.body = self.body.map(f); + self + } + ///Sends a `POST` request to `/getCompressedAccountProof` + pub async fn send( + self, + ) -> Result< + ResponseValue, + Error, + > { + let Self { client, body } = self; + let body = body + .and_then(|v| { + types::PostGetCompressedAccountProofBody::try_from(v) + .map_err(|e| e.to_string()) + }) + .map_err(Error::InvalidRequest)?; + let url = format!("{}/getCompressedAccountProof", client.baseurl,); + let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); + header_map + .append( + ::reqwest::header::HeaderName::from_static("api-version"), + ::reqwest::header::HeaderValue::from_static( + super::Client::api_version(), + ), + ); + 
#[allow(unused_mut)] + let mut request = client + .client + .post(url) + .header( + ::reqwest::header::ACCEPT, + ::reqwest::header::HeaderValue::from_static("application/json"), + ) + .json(&body) + .headers(header_map) + .build()?; + let info = OperationInfo { + operation_id: "post_get_compressed_account_proof", + }; + client.pre(&mut request, &info).await?; + let result = client.exec(request, &info).await; + client.post(&result, &info).await?; + let response = result?; + match response.status().as_u16() { + 200u16 => ResponseValue::from_response(response).await, + 429u16 => { + Err( + Error::ErrorResponse( + ResponseValue::from_response(response).await?, + ), + ) + } + 500u16 => { + Err( + Error::ErrorResponse( + ResponseValue::from_response(response).await?, + ), + ) + } + _ => Err(Error::UnexpectedResponse(response)), + } + } + } + /**Builder for [`Client::post_get_compressed_account_proof_v2`] + +[`Client::post_get_compressed_account_proof_v2`]: super::Client::post_get_compressed_account_proof_v2*/ + #[derive(Debug, Clone)] + pub struct PostGetCompressedAccountProofV2<'a> { + client: &'a super::Client, + body: Result, + } + impl<'a> PostGetCompressedAccountProofV2<'a> { + pub fn new(client: &'a super::Client) -> Self { + Self { + client: client, + body: Ok(::std::default::Default::default()), + } + } + pub fn body(mut self, value: V) -> Self + where + V: std::convert::TryInto, + >::Error: std::fmt::Display, + { + self.body = value + .try_into() + .map(From::from) + .map_err(|s| { + format!( + "conversion to `PostGetCompressedAccountProofV2Body` for body failed: {}", + s + ) + }); + self + } + pub fn body_map(mut self, f: F) -> Self + where + F: std::ops::FnOnce( + types::builder::PostGetCompressedAccountProofV2Body, + ) -> types::builder::PostGetCompressedAccountProofV2Body, + { + self.body = self.body.map(f); + self + } + ///Sends a `POST` request to `/getCompressedAccountProofV2` + pub async fn send( + self, + ) -> Result< + ResponseValue, + Error, + > { + 
let Self { client, body } = self; + let body = body + .and_then(|v| { + types::PostGetCompressedAccountProofV2Body::try_from(v) + .map_err(|e| e.to_string()) + }) + .map_err(Error::InvalidRequest)?; + let url = format!("{}/getCompressedAccountProofV2", client.baseurl,); + let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); + header_map + .append( + ::reqwest::header::HeaderName::from_static("api-version"), + ::reqwest::header::HeaderValue::from_static( + super::Client::api_version(), + ), + ); + #[allow(unused_mut)] + let mut request = client + .client + .post(url) + .header( + ::reqwest::header::ACCEPT, + ::reqwest::header::HeaderValue::from_static("application/json"), + ) + .json(&body) + .headers(header_map) + .build()?; + let info = OperationInfo { + operation_id: "post_get_compressed_account_proof_v2", + }; + client.pre(&mut request, &info).await?; + let result = client.exec(request, &info).await; + client.post(&result, &info).await?; + let response = result?; + match response.status().as_u16() { + 200u16 => ResponseValue::from_response(response).await, + 429u16 => { + Err( + Error::ErrorResponse( + ResponseValue::from_response(response).await?, + ), + ) + } + 500u16 => { + Err( + Error::ErrorResponse( + ResponseValue::from_response(response).await?, + ), + ) + } + _ => Err(Error::UnexpectedResponse(response)), + } + } + } + /**Builder for [`Client::post_get_compressed_account_v2`] -[`Client::post_get_compressed_account_proof`]: super::Client::post_get_compressed_account_proof*/ +[`Client::post_get_compressed_account_v2`]: super::Client::post_get_compressed_account_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedAccountProof<'a> { + pub struct PostGetCompressedAccountV2<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountProof<'a> { + impl<'a> PostGetCompressedAccountV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50464,9 +52107,9 @@ pub mod 
builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50474,7 +52117,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedAccountProofBody` for body failed: {}", + "conversion to `PostGetCompressedAccountV2Body` for body failed: {}", s ) }); @@ -50483,27 +52126,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountProofBody, - ) -> types::builder::PostGetCompressedAccountProofBody, + types::builder::PostGetCompressedAccountV2Body, + ) -> types::builder::PostGetCompressedAccountV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountProof` + ///Sends a `POST` request to `/getCompressedAccountV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountProofBody::try_from(v) + types::PostGetCompressedAccountV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountProof", client.baseurl,); + let url = format!("{}/getCompressedAccountV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50524,7 +52167,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_account_proof", + operation_id: "post_get_compressed_account_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50550,15 +52193,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_account_proof_v2`] + /**Builder for [`Client::post_get_compressed_accounts_by_owner`] -[`Client::post_get_compressed_account_proof_v2`]: 
super::Client::post_get_compressed_account_proof_v2*/ +[`Client::post_get_compressed_accounts_by_owner`]: super::Client::post_get_compressed_accounts_by_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedAccountProofV2<'a> { + pub struct PostGetCompressedAccountsByOwner<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountProofV2<'a> { + impl<'a> PostGetCompressedAccountsByOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50567,9 +52210,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50577,7 +52220,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedAccountProofV2Body` for body failed: {}", + "conversion to `PostGetCompressedAccountsByOwnerBody` for body failed: {}", s ) }); @@ -50586,27 +52229,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountProofV2Body, - ) -> types::builder::PostGetCompressedAccountProofV2Body, + types::builder::PostGetCompressedAccountsByOwnerBody, + ) -> types::builder::PostGetCompressedAccountsByOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountProofV2` + ///Sends a `POST` request to `/getCompressedAccountsByOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountProofV2Body::try_from(v) + types::PostGetCompressedAccountsByOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountProofV2", client.baseurl,); + let url = format!("{}/getCompressedAccountsByOwner", client.baseurl,); let mut header_map = 
::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50627,7 +52270,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_account_proof_v2", + operation_id: "post_get_compressed_accounts_by_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50653,15 +52296,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_account_v2`] + /**Builder for [`Client::post_get_compressed_accounts_by_owner_v2`] -[`Client::post_get_compressed_account_v2`]: super::Client::post_get_compressed_account_v2*/ +[`Client::post_get_compressed_accounts_by_owner_v2`]: super::Client::post_get_compressed_accounts_by_owner_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedAccountV2<'a> { + pub struct PostGetCompressedAccountsByOwnerV2<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountV2<'a> { + impl<'a> PostGetCompressedAccountsByOwnerV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50670,9 +52313,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50680,7 +52323,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedAccountV2Body` for body failed: {}", + "conversion to `PostGetCompressedAccountsByOwnerV2Body` for body failed: {}", s ) }); @@ -50689,27 +52332,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountV2Body, - ) -> types::builder::PostGetCompressedAccountV2Body, + types::builder::PostGetCompressedAccountsByOwnerV2Body, + ) -> types::builder::PostGetCompressedAccountsByOwnerV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountV2` + ///Sends a 
`POST` request to `/getCompressedAccountsByOwnerV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountV2Body::try_from(v) + types::PostGetCompressedAccountsByOwnerV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountV2", client.baseurl,); + let url = format!("{}/getCompressedAccountsByOwnerV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50730,7 +52373,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_account_v2", + operation_id: "post_get_compressed_accounts_by_owner_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50756,15 +52399,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_accounts_by_owner`] + /**Builder for [`Client::post_get_compressed_balance_by_owner`] -[`Client::post_get_compressed_accounts_by_owner`]: super::Client::post_get_compressed_accounts_by_owner*/ +[`Client::post_get_compressed_balance_by_owner`]: super::Client::post_get_compressed_balance_by_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedAccountsByOwner<'a> { + pub struct PostGetCompressedBalanceByOwner<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountsByOwner<'a> { + impl<'a> PostGetCompressedBalanceByOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50773,9 +52416,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50783,7 +52426,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to 
`PostGetCompressedAccountsByOwnerBody` for body failed: {}", + "conversion to `PostGetCompressedBalanceByOwnerBody` for body failed: {}", s ) }); @@ -50792,27 +52435,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountsByOwnerBody, - ) -> types::builder::PostGetCompressedAccountsByOwnerBody, + types::builder::PostGetCompressedBalanceByOwnerBody, + ) -> types::builder::PostGetCompressedBalanceByOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountsByOwner` + ///Sends a `POST` request to `/getCompressedBalanceByOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountsByOwnerBody::try_from(v) + types::PostGetCompressedBalanceByOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountsByOwner", client.baseurl,); + let url = format!("{}/getCompressedBalanceByOwner", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50833,7 +52476,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_accounts_by_owner", + operation_id: "post_get_compressed_balance_by_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50859,15 +52502,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_accounts_by_owner_v2`] + /**Builder for [`Client::post_get_compressed_mint_token_holders`] -[`Client::post_get_compressed_accounts_by_owner_v2`]: super::Client::post_get_compressed_accounts_by_owner_v2*/ +[`Client::post_get_compressed_mint_token_holders`]: super::Client::post_get_compressed_mint_token_holders*/ #[derive(Debug, Clone)] - pub struct 
PostGetCompressedAccountsByOwnerV2<'a> { + pub struct PostGetCompressedMintTokenHolders<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedAccountsByOwnerV2<'a> { + impl<'a> PostGetCompressedMintTokenHolders<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50876,9 +52519,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50886,7 +52529,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedAccountsByOwnerV2Body` for body failed: {}", + "conversion to `PostGetCompressedMintTokenHoldersBody` for body failed: {}", s ) }); @@ -50895,27 +52538,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedAccountsByOwnerV2Body, - ) -> types::builder::PostGetCompressedAccountsByOwnerV2Body, + types::builder::PostGetCompressedMintTokenHoldersBody, + ) -> types::builder::PostGetCompressedMintTokenHoldersBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedAccountsByOwnerV2` + ///Sends a `POST` request to `/getCompressedMintTokenHolders` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedAccountsByOwnerV2Body::try_from(v) + types::PostGetCompressedMintTokenHoldersBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedAccountsByOwnerV2", client.baseurl,); + let url = format!("{}/getCompressedMintTokenHolders", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -50936,7 +52579,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: 
"post_get_compressed_accounts_by_owner_v2", + operation_id: "post_get_compressed_mint_token_holders", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -50962,15 +52605,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_balance_by_owner`] + /**Builder for [`Client::post_get_compressed_token_account_balance`] -[`Client::post_get_compressed_balance_by_owner`]: super::Client::post_get_compressed_balance_by_owner*/ +[`Client::post_get_compressed_token_account_balance`]: super::Client::post_get_compressed_token_account_balance*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedBalanceByOwner<'a> { + pub struct PostGetCompressedTokenAccountBalance<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedBalanceByOwner<'a> { + impl<'a> PostGetCompressedTokenAccountBalance<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -50979,9 +52622,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -50989,7 +52632,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedBalanceByOwnerBody` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountBalanceBody` for body failed: {}", s ) }); @@ -50998,27 +52641,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedBalanceByOwnerBody, - ) -> types::builder::PostGetCompressedBalanceByOwnerBody, + types::builder::PostGetCompressedTokenAccountBalanceBody, + ) -> types::builder::PostGetCompressedTokenAccountBalanceBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedBalanceByOwner` + ///Sends a `POST` request to `/getCompressedTokenAccountBalance` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + 
ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedBalanceByOwnerBody::try_from(v) + types::PostGetCompressedTokenAccountBalanceBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedBalanceByOwner", client.baseurl,); + let url = format!("{}/getCompressedTokenAccountBalance", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51039,7 +52682,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_balance_by_owner", + operation_id: "post_get_compressed_token_account_balance", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51065,15 +52708,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_mint_token_holders`] + /**Builder for [`Client::post_get_compressed_token_accounts_by_delegate`] -[`Client::post_get_compressed_mint_token_holders`]: super::Client::post_get_compressed_mint_token_holders*/ +[`Client::post_get_compressed_token_accounts_by_delegate`]: super::Client::post_get_compressed_token_accounts_by_delegate*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedMintTokenHolders<'a> { + pub struct PostGetCompressedTokenAccountsByDelegate<'a> { client: &'a super::Client, - body: Result, + body: Result< + types::builder::PostGetCompressedTokenAccountsByDelegateBody, + String, + >, } - impl<'a> PostGetCompressedMintTokenHolders<'a> { + impl<'a> PostGetCompressedTokenAccountsByDelegate<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51082,9 +52728,11 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto< + types::PostGetCompressedTokenAccountsByDelegateBody, + >, >::Error: std::fmt::Display, { self.body = value @@ -51092,7 +52740,7 @@ 
pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedMintTokenHoldersBody` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountsByDelegateBody` for body failed: {}", s ) }); @@ -51101,27 +52749,29 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedMintTokenHoldersBody, - ) -> types::builder::PostGetCompressedMintTokenHoldersBody, + types::builder::PostGetCompressedTokenAccountsByDelegateBody, + ) -> types::builder::PostGetCompressedTokenAccountsByDelegateBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedMintTokenHolders` + ///Sends a `POST` request to `/getCompressedTokenAccountsByDelegate` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedMintTokenHoldersBody::try_from(v) + types::PostGetCompressedTokenAccountsByDelegateBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedMintTokenHolders", client.baseurl,); + let url = format!( + "{}/getCompressedTokenAccountsByDelegate", client.baseurl, + ); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51142,7 +52792,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_mint_token_holders", + operation_id: "post_get_compressed_token_accounts_by_delegate", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51168,15 +52818,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_account_balance`] + /**Builder for [`Client::post_get_compressed_token_accounts_by_delegate_v2`] -[`Client::post_get_compressed_token_account_balance`]: 
super::Client::post_get_compressed_token_account_balance*/ +[`Client::post_get_compressed_token_accounts_by_delegate_v2`]: super::Client::post_get_compressed_token_accounts_by_delegate_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountBalance<'a> { + pub struct PostGetCompressedTokenAccountsByDelegateV2<'a> { client: &'a super::Client, - body: Result, + body: Result< + types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, + String, + >, } - impl<'a> PostGetCompressedTokenAccountBalance<'a> { + impl<'a> PostGetCompressedTokenAccountsByDelegateV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51185,9 +52838,11 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto< + types::PostGetCompressedTokenAccountsByDelegateV2Body, + >, >::Error: std::fmt::Display, { self.body = value @@ -51195,7 +52850,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenAccountBalanceBody` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountsByDelegateV2Body` for body failed: {}", s ) }); @@ -51204,27 +52859,29 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountBalanceBody, - ) -> types::builder::PostGetCompressedTokenAccountBalanceBody, + types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, + ) -> types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountBalance` + ///Sends a `POST` request to `/getCompressedTokenAccountsByDelegateV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountBalanceBody::try_from(v) + 
types::PostGetCompressedTokenAccountsByDelegateV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenAccountBalance", client.baseurl,); + let url = format!( + "{}/getCompressedTokenAccountsByDelegateV2", client.baseurl, + ); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51245,7 +52902,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_account_balance", + operation_id: "post_get_compressed_token_accounts_by_delegate_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51271,18 +52928,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_accounts_by_delegate`] + /**Builder for [`Client::post_get_compressed_token_accounts_by_owner`] -[`Client::post_get_compressed_token_accounts_by_delegate`]: super::Client::post_get_compressed_token_accounts_by_delegate*/ +[`Client::post_get_compressed_token_accounts_by_owner`]: super::Client::post_get_compressed_token_accounts_by_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountsByDelegate<'a> { + pub struct PostGetCompressedTokenAccountsByOwner<'a> { client: &'a super::Client, - body: Result< - types::builder::PostGetCompressedTokenAccountsByDelegateBody, - String, - >, + body: Result, } - impl<'a> PostGetCompressedTokenAccountsByDelegate<'a> { + impl<'a> PostGetCompressedTokenAccountsByOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51291,11 +52945,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto< - types::PostGetCompressedTokenAccountsByDelegateBody, - >, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51303,7 +52955,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to 
`PostGetCompressedTokenAccountsByDelegateBody` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountsByOwnerBody` for body failed: {}", s ) }); @@ -51312,29 +52964,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountsByDelegateBody, - ) -> types::builder::PostGetCompressedTokenAccountsByDelegateBody, + types::builder::PostGetCompressedTokenAccountsByOwnerBody, + ) -> types::builder::PostGetCompressedTokenAccountsByOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountsByDelegate` + ///Sends a `POST` request to `/getCompressedTokenAccountsByOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountsByDelegateBody::try_from(v) + types::PostGetCompressedTokenAccountsByOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!( - "{}/getCompressedTokenAccountsByDelegate", client.baseurl, - ); + let url = format!("{}/getCompressedTokenAccountsByOwner", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51355,7 +53005,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_accounts_by_delegate", + operation_id: "post_get_compressed_token_accounts_by_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51381,18 +53031,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_accounts_by_delegate_v2`] + /**Builder for [`Client::post_get_compressed_token_accounts_by_owner_v2`] -[`Client::post_get_compressed_token_accounts_by_delegate_v2`]: super::Client::post_get_compressed_token_accounts_by_delegate_v2*/ 
+[`Client::post_get_compressed_token_accounts_by_owner_v2`]: super::Client::post_get_compressed_token_accounts_by_owner_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountsByDelegateV2<'a> { + pub struct PostGetCompressedTokenAccountsByOwnerV2<'a> { client: &'a super::Client, body: Result< - types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, + types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, String, >, } - impl<'a> PostGetCompressedTokenAccountsByDelegateV2<'a> { + impl<'a> PostGetCompressedTokenAccountsByOwnerV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51401,11 +53051,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto< - types::PostGetCompressedTokenAccountsByDelegateV2Body, - >, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51413,7 +53061,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenAccountsByDelegateV2Body` for body failed: {}", + "conversion to `PostGetCompressedTokenAccountsByOwnerV2Body` for body failed: {}", s ) }); @@ -51422,29 +53070,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, - ) -> types::builder::PostGetCompressedTokenAccountsByDelegateV2Body, + types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, + ) -> types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountsByDelegateV2` + ///Sends a `POST` request to `/getCompressedTokenAccountsByOwnerV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountsByDelegateV2Body::try_from(v) + 
types::PostGetCompressedTokenAccountsByOwnerV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!( - "{}/getCompressedTokenAccountsByDelegateV2", client.baseurl, - ); + let url = format!("{}/getCompressedTokenAccountsByOwnerV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51465,7 +53111,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_accounts_by_delegate_v2", + operation_id: "post_get_compressed_token_accounts_by_owner_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51491,15 +53137,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_accounts_by_owner`] + /**Builder for [`Client::post_get_compressed_token_balances_by_owner`] -[`Client::post_get_compressed_token_accounts_by_owner`]: super::Client::post_get_compressed_token_accounts_by_owner*/ +[`Client::post_get_compressed_token_balances_by_owner`]: super::Client::post_get_compressed_token_balances_by_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountsByOwner<'a> { + pub struct PostGetCompressedTokenBalancesByOwner<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedTokenAccountsByOwner<'a> { + impl<'a> PostGetCompressedTokenBalancesByOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51508,9 +53154,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51518,7 +53164,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenAccountsByOwnerBody` for body failed: {}", + "conversion to `PostGetCompressedTokenBalancesByOwnerBody` for body failed: {}", s ) }); @@ -51527,27 
+53173,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountsByOwnerBody, - ) -> types::builder::PostGetCompressedTokenAccountsByOwnerBody, + types::builder::PostGetCompressedTokenBalancesByOwnerBody, + ) -> types::builder::PostGetCompressedTokenBalancesByOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountsByOwner` + ///Sends a `POST` request to `/getCompressedTokenBalancesByOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountsByOwnerBody::try_from(v) + types::PostGetCompressedTokenBalancesByOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenAccountsByOwner", client.baseurl,); + let url = format!("{}/getCompressedTokenBalancesByOwner", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51568,7 +53214,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_accounts_by_owner", + operation_id: "post_get_compressed_token_balances_by_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51594,18 +53240,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_accounts_by_owner_v2`] + /**Builder for [`Client::post_get_compressed_token_balances_by_owner_v2`] -[`Client::post_get_compressed_token_accounts_by_owner_v2`]: super::Client::post_get_compressed_token_accounts_by_owner_v2*/ +[`Client::post_get_compressed_token_balances_by_owner_v2`]: super::Client::post_get_compressed_token_balances_by_owner_v2*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenAccountsByOwnerV2<'a> { + pub struct 
PostGetCompressedTokenBalancesByOwnerV2<'a> { client: &'a super::Client, body: Result< - types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, + types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, String, >, } - impl<'a> PostGetCompressedTokenAccountsByOwnerV2<'a> { + impl<'a> PostGetCompressedTokenBalancesByOwnerV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51614,9 +53260,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51624,7 +53270,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenAccountsByOwnerV2Body` for body failed: {}", + "conversion to `PostGetCompressedTokenBalancesByOwnerV2Body` for body failed: {}", s ) }); @@ -51633,27 +53279,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, - ) -> types::builder::PostGetCompressedTokenAccountsByOwnerV2Body, + types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, + ) -> types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenAccountsByOwnerV2` + ///Sends a `POST` request to `/getCompressedTokenBalancesByOwnerV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenAccountsByOwnerV2Body::try_from(v) + types::PostGetCompressedTokenBalancesByOwnerV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenAccountsByOwnerV2", client.baseurl,); + let url = format!("{}/getCompressedTokenBalancesByOwnerV2", client.baseurl,); let mut header_map = 
::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51674,7 +53320,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_accounts_by_owner_v2", + operation_id: "post_get_compressed_token_balances_by_owner_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51700,15 +53346,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_balances_by_owner`] + /**Builder for [`Client::post_get_compression_signatures_for_account`] -[`Client::post_get_compressed_token_balances_by_owner`]: super::Client::post_get_compressed_token_balances_by_owner*/ +[`Client::post_get_compression_signatures_for_account`]: super::Client::post_get_compression_signatures_for_account*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenBalancesByOwner<'a> { + pub struct PostGetCompressionSignaturesForAccount<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressedTokenBalancesByOwner<'a> { + impl<'a> PostGetCompressionSignaturesForAccount<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51717,9 +53363,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51727,7 +53373,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenBalancesByOwnerBody` for body failed: {}", + "conversion to `PostGetCompressionSignaturesForAccountBody` for body failed: {}", s ) }); @@ -51736,27 +53382,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenBalancesByOwnerBody, - ) -> types::builder::PostGetCompressedTokenBalancesByOwnerBody, + types::builder::PostGetCompressionSignaturesForAccountBody, + ) -> 
types::builder::PostGetCompressionSignaturesForAccountBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenBalancesByOwner` + ///Sends a `POST` request to `/getCompressionSignaturesForAccount` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenBalancesByOwnerBody::try_from(v) + types::PostGetCompressionSignaturesForAccountBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenBalancesByOwner", client.baseurl,); + let url = format!("{}/getCompressionSignaturesForAccount", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51777,7 +53423,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_balances_by_owner", + operation_id: "post_get_compression_signatures_for_account", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -51803,18 +53449,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compressed_token_balances_by_owner_v2`] + /**Builder for [`Client::post_get_compression_signatures_for_address`] -[`Client::post_get_compressed_token_balances_by_owner_v2`]: super::Client::post_get_compressed_token_balances_by_owner_v2*/ +[`Client::post_get_compression_signatures_for_address`]: super::Client::post_get_compression_signatures_for_address*/ #[derive(Debug, Clone)] - pub struct PostGetCompressedTokenBalancesByOwnerV2<'a> { + pub struct PostGetCompressionSignaturesForAddress<'a> { client: &'a super::Client, - body: Result< - types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, - String, - >, + body: Result, } - impl<'a> PostGetCompressedTokenBalancesByOwnerV2<'a> { + impl<'a> PostGetCompressionSignaturesForAddress<'a> { pub 
fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51823,9 +53466,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51833,7 +53476,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressedTokenBalancesByOwnerV2Body` for body failed: {}", + "conversion to `PostGetCompressionSignaturesForAddressBody` for body failed: {}", s ) }); @@ -51842,27 +53485,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, - ) -> types::builder::PostGetCompressedTokenBalancesByOwnerV2Body, + types::builder::PostGetCompressionSignaturesForAddressBody, + ) -> types::builder::PostGetCompressionSignaturesForAddressBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressedTokenBalancesByOwnerV2` + ///Sends a `POST` request to `/getCompressionSignaturesForAddress` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressedTokenBalancesByOwnerV2Body::try_from(v) + types::PostGetCompressionSignaturesForAddressBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressedTokenBalancesByOwnerV2", client.baseurl,); + let url = format!("{}/getCompressionSignaturesForAddress", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51883,7 +53526,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compressed_token_balances_by_owner_v2", + operation_id: "post_get_compression_signatures_for_address", }; client.pre(&mut request, &info).await?; let result = client.exec(request, 
&info).await; @@ -51909,15 +53552,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compression_signatures_for_account`] + /**Builder for [`Client::post_get_compression_signatures_for_owner`] -[`Client::post_get_compression_signatures_for_account`]: super::Client::post_get_compression_signatures_for_account*/ +[`Client::post_get_compression_signatures_for_owner`]: super::Client::post_get_compression_signatures_for_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressionSignaturesForAccount<'a> { + pub struct PostGetCompressionSignaturesForOwner<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressionSignaturesForAccount<'a> { + impl<'a> PostGetCompressionSignaturesForOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -51926,9 +53569,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -51936,7 +53579,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressionSignaturesForAccountBody` for body failed: {}", + "conversion to `PostGetCompressionSignaturesForOwnerBody` for body failed: {}", s ) }); @@ -51945,27 +53588,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressionSignaturesForAccountBody, - ) -> types::builder::PostGetCompressionSignaturesForAccountBody, + types::builder::PostGetCompressionSignaturesForOwnerBody, + ) -> types::builder::PostGetCompressionSignaturesForOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressionSignaturesForAccount` + ///Sends a `POST` request to `/getCompressionSignaturesForOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - 
types::PostGetCompressionSignaturesForAccountBody::try_from(v) + types::PostGetCompressionSignaturesForOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressionSignaturesForAccount", client.baseurl,); + let url = format!("{}/getCompressionSignaturesForOwner", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -51986,7 +53629,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compression_signatures_for_account", + operation_id: "post_get_compression_signatures_for_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52012,15 +53655,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compression_signatures_for_address`] + /**Builder for [`Client::post_get_compression_signatures_for_token_owner`] -[`Client::post_get_compression_signatures_for_address`]: super::Client::post_get_compression_signatures_for_address*/ +[`Client::post_get_compression_signatures_for_token_owner`]: super::Client::post_get_compression_signatures_for_token_owner*/ #[derive(Debug, Clone)] - pub struct PostGetCompressionSignaturesForAddress<'a> { + pub struct PostGetCompressionSignaturesForTokenOwner<'a> { client: &'a super::Client, - body: Result, + body: Result< + types::builder::PostGetCompressionSignaturesForTokenOwnerBody, + String, + >, } - impl<'a> PostGetCompressionSignaturesForAddress<'a> { + impl<'a> PostGetCompressionSignaturesForTokenOwner<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52029,9 +53675,11 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto< + types::PostGetCompressionSignaturesForTokenOwnerBody, + >, >::Error: std::fmt::Display, { self.body = value @@ -52039,7 +53687,7 @@ pub mod builder { .map(From::from) 
.map_err(|s| { format!( - "conversion to `PostGetCompressionSignaturesForAddressBody` for body failed: {}", + "conversion to `PostGetCompressionSignaturesForTokenOwnerBody` for body failed: {}", s ) }); @@ -52048,27 +53696,29 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressionSignaturesForAddressBody, - ) -> types::builder::PostGetCompressionSignaturesForAddressBody, + types::builder::PostGetCompressionSignaturesForTokenOwnerBody, + ) -> types::builder::PostGetCompressionSignaturesForTokenOwnerBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressionSignaturesForAddress` + ///Sends a `POST` request to `/getCompressionSignaturesForTokenOwner` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressionSignaturesForAddressBody::try_from(v) + types::PostGetCompressionSignaturesForTokenOwnerBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressionSignaturesForAddress", client.baseurl,); + let url = format!( + "{}/getCompressionSignaturesForTokenOwner", client.baseurl, + ); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52089,7 +53739,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compression_signatures_for_address", + operation_id: "post_get_compression_signatures_for_token_owner", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52115,15 +53765,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compression_signatures_for_owner`] + /**Builder for [`Client::post_get_indexer_health`] -[`Client::post_get_compression_signatures_for_owner`]: super::Client::post_get_compression_signatures_for_owner*/ 
+[`Client::post_get_indexer_health`]: super::Client::post_get_indexer_health*/ #[derive(Debug, Clone)] - pub struct PostGetCompressionSignaturesForOwner<'a> { + pub struct PostGetIndexerHealth<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetCompressionSignaturesForOwner<'a> { + impl<'a> PostGetIndexerHealth<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52132,9 +53782,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52142,8 +53792,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressionSignaturesForOwnerBody` for body failed: {}", - s + "conversion to `PostGetIndexerHealthBody` for body failed: {}", s ) }); self @@ -52151,27 +53800,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressionSignaturesForOwnerBody, - ) -> types::builder::PostGetCompressionSignaturesForOwnerBody, + types::builder::PostGetIndexerHealthBody, + ) -> types::builder::PostGetIndexerHealthBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressionSignaturesForOwner` + ///Sends a `POST` request to `/getIndexerHealth` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressionSignaturesForOwnerBody::try_from(v) + types::PostGetIndexerHealthBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getCompressionSignaturesForOwner", client.baseurl,); + let url = format!("{}/getIndexerHealth", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52192,7 +53841,7 @@ pub mod builder { .headers(header_map) .build()?; let 
info = OperationInfo { - operation_id: "post_get_compression_signatures_for_owner", + operation_id: "post_get_indexer_health", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52218,18 +53867,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_compression_signatures_for_token_owner`] + /**Builder for [`Client::post_get_indexer_slot`] -[`Client::post_get_compression_signatures_for_token_owner`]: super::Client::post_get_compression_signatures_for_token_owner*/ +[`Client::post_get_indexer_slot`]: super::Client::post_get_indexer_slot*/ #[derive(Debug, Clone)] - pub struct PostGetCompressionSignaturesForTokenOwner<'a> { + pub struct PostGetIndexerSlot<'a> { client: &'a super::Client, - body: Result< - types::builder::PostGetCompressionSignaturesForTokenOwnerBody, - String, - >, + body: Result, } - impl<'a> PostGetCompressionSignaturesForTokenOwner<'a> { + impl<'a> PostGetIndexerSlot<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52238,11 +53884,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto< - types::PostGetCompressionSignaturesForTokenOwnerBody, - >, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52250,8 +53894,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetCompressionSignaturesForTokenOwnerBody` for body failed: {}", - s + "conversion to `PostGetIndexerSlotBody` for body failed: {}", s ) }); self @@ -52259,29 +53902,26 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetCompressionSignaturesForTokenOwnerBody, - ) -> types::builder::PostGetCompressionSignaturesForTokenOwnerBody, + types::builder::PostGetIndexerSlotBody, + ) -> types::builder::PostGetIndexerSlotBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getCompressionSignaturesForTokenOwner` + ///Sends a 
`POST` request to `/getIndexerSlot` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetCompressionSignaturesForTokenOwnerBody::try_from(v) - .map_err(|e| e.to_string()) + types::PostGetIndexerSlotBody::try_from(v).map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!( - "{}/getCompressionSignaturesForTokenOwner", client.baseurl, - ); + let url = format!("{}/getIndexerSlot", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52302,7 +53942,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_compression_signatures_for_token_owner", + operation_id: "post_get_indexer_slot", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52328,15 +53968,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_indexer_health`] + /**Builder for [`Client::post_get_latest_compression_signatures`] -[`Client::post_get_indexer_health`]: super::Client::post_get_indexer_health*/ +[`Client::post_get_latest_compression_signatures`]: super::Client::post_get_latest_compression_signatures*/ #[derive(Debug, Clone)] - pub struct PostGetIndexerHealth<'a> { + pub struct PostGetLatestCompressionSignatures<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetIndexerHealth<'a> { + impl<'a> PostGetLatestCompressionSignatures<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52345,9 +53985,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52355,7 +53995,8 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetIndexerHealthBody` for body failed: {}", s + 
"conversion to `PostGetLatestCompressionSignaturesBody` for body failed: {}", + s ) }); self @@ -52363,27 +54004,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetIndexerHealthBody, - ) -> types::builder::PostGetIndexerHealthBody, + types::builder::PostGetLatestCompressionSignaturesBody, + ) -> types::builder::PostGetLatestCompressionSignaturesBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getIndexerHealth` + ///Sends a `POST` request to `/getLatestCompressionSignatures` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetIndexerHealthBody::try_from(v) + types::PostGetLatestCompressionSignaturesBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getIndexerHealth", client.baseurl,); + let url = format!("{}/getLatestCompressionSignatures", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52404,7 +54045,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_indexer_health", + operation_id: "post_get_latest_compression_signatures", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52430,15 +54071,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_indexer_slot`] + /**Builder for [`Client::post_get_latest_non_voting_signatures`] -[`Client::post_get_indexer_slot`]: super::Client::post_get_indexer_slot*/ +[`Client::post_get_latest_non_voting_signatures`]: super::Client::post_get_latest_non_voting_signatures*/ #[derive(Debug, Clone)] - pub struct PostGetIndexerSlot<'a> { + pub struct PostGetLatestNonVotingSignatures<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetIndexerSlot<'a> { + 
impl<'a> PostGetLatestNonVotingSignatures<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52447,9 +54088,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52457,7 +54098,8 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetIndexerSlotBody` for body failed: {}", s + "conversion to `PostGetLatestNonVotingSignaturesBody` for body failed: {}", + s ) }); self @@ -52465,26 +54107,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetIndexerSlotBody, - ) -> types::builder::PostGetIndexerSlotBody, + types::builder::PostGetLatestNonVotingSignaturesBody, + ) -> types::builder::PostGetLatestNonVotingSignaturesBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getIndexerSlot` + ///Sends a `POST` request to `/getLatestNonVotingSignatures` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetIndexerSlotBody::try_from(v).map_err(|e| e.to_string()) + types::PostGetLatestNonVotingSignaturesBody::try_from(v) + .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getIndexerSlot", client.baseurl,); + let url = format!("{}/getLatestNonVotingSignatures", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52505,7 +54148,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_indexer_slot", + operation_id: "post_get_latest_non_voting_signatures", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52531,15 +54174,15 @@ pub mod builder { } } } - /**Builder for 
[`Client::post_get_latest_compression_signatures`] + /**Builder for [`Client::post_get_multiple_account_interfaces`] -[`Client::post_get_latest_compression_signatures`]: super::Client::post_get_latest_compression_signatures*/ +[`Client::post_get_multiple_account_interfaces`]: super::Client::post_get_multiple_account_interfaces*/ #[derive(Debug, Clone)] - pub struct PostGetLatestCompressionSignatures<'a> { + pub struct PostGetMultipleAccountInterfaces<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetLatestCompressionSignatures<'a> { + impl<'a> PostGetMultipleAccountInterfaces<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52548,9 +54191,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52558,7 +54201,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetLatestCompressionSignaturesBody` for body failed: {}", + "conversion to `PostGetMultipleAccountInterfacesBody` for body failed: {}", s ) }); @@ -52567,27 +54210,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetLatestCompressionSignaturesBody, - ) -> types::builder::PostGetLatestCompressionSignaturesBody, + types::builder::PostGetMultipleAccountInterfacesBody, + ) -> types::builder::PostGetMultipleAccountInterfacesBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getLatestCompressionSignatures` + ///Sends a `POST` request to `/getMultipleAccountInterfaces` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetLatestCompressionSignaturesBody::try_from(v) + types::PostGetMultipleAccountInterfacesBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; 
- let url = format!("{}/getLatestCompressionSignatures", client.baseurl,); + let url = format!("{}/getMultipleAccountInterfaces", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52608,7 +54251,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_latest_compression_signatures", + operation_id: "post_get_multiple_account_interfaces", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52634,15 +54277,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_latest_non_voting_signatures`] + /**Builder for [`Client::post_get_multiple_compressed_account_proofs`] -[`Client::post_get_latest_non_voting_signatures`]: super::Client::post_get_latest_non_voting_signatures*/ +[`Client::post_get_multiple_compressed_account_proofs`]: super::Client::post_get_multiple_compressed_account_proofs*/ #[derive(Debug, Clone)] - pub struct PostGetLatestNonVotingSignatures<'a> { + pub struct PostGetMultipleCompressedAccountProofs<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetLatestNonVotingSignatures<'a> { + impl<'a> PostGetMultipleCompressedAccountProofs<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52651,9 +54294,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52661,7 +54304,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetLatestNonVotingSignaturesBody` for body failed: {}", + "conversion to `PostGetMultipleCompressedAccountProofsBody` for body failed: {}", s ) }); @@ -52670,27 +54313,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetLatestNonVotingSignaturesBody, - ) -> 
types::builder::PostGetLatestNonVotingSignaturesBody, + types::builder::PostGetMultipleCompressedAccountProofsBody, + ) -> types::builder::PostGetMultipleCompressedAccountProofsBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getLatestNonVotingSignatures` + ///Sends a `POST` request to `/getMultipleCompressedAccountProofs` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetLatestNonVotingSignaturesBody::try_from(v) + types::PostGetMultipleCompressedAccountProofsBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getLatestNonVotingSignatures", client.baseurl,); + let url = format!("{}/getMultipleCompressedAccountProofs", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52711,7 +54354,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_latest_non_voting_signatures", + operation_id: "post_get_multiple_compressed_account_proofs", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52737,15 +54380,18 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_account_interfaces`] + /**Builder for [`Client::post_get_multiple_compressed_account_proofs_v2`] -[`Client::post_get_multiple_account_interfaces`]: super::Client::post_get_multiple_account_interfaces*/ +[`Client::post_get_multiple_compressed_account_proofs_v2`]: super::Client::post_get_multiple_compressed_account_proofs_v2*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleAccountInterfaces<'a> { + pub struct PostGetMultipleCompressedAccountProofsV2<'a> { client: &'a super::Client, - body: Result, + body: Result< + types::builder::PostGetMultipleCompressedAccountProofsV2Body, + String, + >, } - impl<'a> 
PostGetMultipleAccountInterfaces<'a> { + impl<'a> PostGetMultipleCompressedAccountProofsV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52754,9 +54400,11 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto< + types::PostGetMultipleCompressedAccountProofsV2Body, + >, >::Error: std::fmt::Display, { self.body = value @@ -52764,7 +54412,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleAccountInterfacesBody` for body failed: {}", + "conversion to `PostGetMultipleCompressedAccountProofsV2Body` for body failed: {}", s ) }); @@ -52773,27 +54421,29 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleAccountInterfacesBody, - ) -> types::builder::PostGetMultipleAccountInterfacesBody, + types::builder::PostGetMultipleCompressedAccountProofsV2Body, + ) -> types::builder::PostGetMultipleCompressedAccountProofsV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleAccountInterfaces` + ///Sends a `POST` request to `/getMultipleCompressedAccountProofsV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleAccountInterfacesBody::try_from(v) + types::PostGetMultipleCompressedAccountProofsV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleAccountInterfaces", client.baseurl,); + let url = format!( + "{}/getMultipleCompressedAccountProofsV2", client.baseurl, + ); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52814,7 +54464,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_account_interfaces", + operation_id: 
"post_get_multiple_compressed_account_proofs_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52840,15 +54490,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_compressed_account_proofs`] + /**Builder for [`Client::post_get_multiple_compressed_accounts`] -[`Client::post_get_multiple_compressed_account_proofs`]: super::Client::post_get_multiple_compressed_account_proofs*/ +[`Client::post_get_multiple_compressed_accounts`]: super::Client::post_get_multiple_compressed_accounts*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleCompressedAccountProofs<'a> { + pub struct PostGetMultipleCompressedAccounts<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleCompressedAccountProofs<'a> { + impl<'a> PostGetMultipleCompressedAccounts<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52857,9 +54507,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52867,7 +54517,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleCompressedAccountProofsBody` for body failed: {}", + "conversion to `PostGetMultipleCompressedAccountsBody` for body failed: {}", s ) }); @@ -52876,27 +54526,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleCompressedAccountProofsBody, - ) -> types::builder::PostGetMultipleCompressedAccountProofsBody, + types::builder::PostGetMultipleCompressedAccountsBody, + ) -> types::builder::PostGetMultipleCompressedAccountsBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleCompressedAccountProofs` + ///Sends a `POST` request to `/getMultipleCompressedAccounts` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + 
Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleCompressedAccountProofsBody::try_from(v) + types::PostGetMultipleCompressedAccountsBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleCompressedAccountProofs", client.baseurl,); + let url = format!("{}/getMultipleCompressedAccounts", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -52917,7 +54567,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_compressed_account_proofs", + operation_id: "post_get_multiple_compressed_accounts", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -52943,18 +54593,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_compressed_account_proofs_v2`] + /**Builder for [`Client::post_get_multiple_compressed_accounts_v2`] -[`Client::post_get_multiple_compressed_account_proofs_v2`]: super::Client::post_get_multiple_compressed_account_proofs_v2*/ +[`Client::post_get_multiple_compressed_accounts_v2`]: super::Client::post_get_multiple_compressed_accounts_v2*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleCompressedAccountProofsV2<'a> { + pub struct PostGetMultipleCompressedAccountsV2<'a> { client: &'a super::Client, - body: Result< - types::builder::PostGetMultipleCompressedAccountProofsV2Body, - String, - >, + body: Result, } - impl<'a> PostGetMultipleCompressedAccountProofsV2<'a> { + impl<'a> PostGetMultipleCompressedAccountsV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -52963,11 +54610,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto< - types::PostGetMultipleCompressedAccountProofsV2Body, - >, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -52975,7 +54620,7 
@@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleCompressedAccountProofsV2Body` for body failed: {}", + "conversion to `PostGetMultipleCompressedAccountsV2Body` for body failed: {}", s ) }); @@ -52984,29 +54629,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleCompressedAccountProofsV2Body, - ) -> types::builder::PostGetMultipleCompressedAccountProofsV2Body, + types::builder::PostGetMultipleCompressedAccountsV2Body, + ) -> types::builder::PostGetMultipleCompressedAccountsV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleCompressedAccountProofsV2` + ///Sends a `POST` request to `/getMultipleCompressedAccountsV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleCompressedAccountProofsV2Body::try_from(v) + types::PostGetMultipleCompressedAccountsV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!( - "{}/getMultipleCompressedAccountProofsV2", client.baseurl, - ); + let url = format!("{}/getMultipleCompressedAccountsV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53027,7 +54670,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_compressed_account_proofs_v2", + operation_id: "post_get_multiple_compressed_accounts_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53053,15 +54696,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_compressed_accounts`] + /**Builder for [`Client::post_get_multiple_new_address_proofs`] -[`Client::post_get_multiple_compressed_accounts`]: super::Client::post_get_multiple_compressed_accounts*/ 
+[`Client::post_get_multiple_new_address_proofs`]: super::Client::post_get_multiple_new_address_proofs*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleCompressedAccounts<'a> { + pub struct PostGetMultipleNewAddressProofs<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleCompressedAccounts<'a> { + impl<'a> PostGetMultipleNewAddressProofs<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53070,9 +54713,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53080,7 +54723,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleCompressedAccountsBody` for body failed: {}", + "conversion to `PostGetMultipleNewAddressProofsBody` for body failed: {}", s ) }); @@ -53089,27 +54732,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleCompressedAccountsBody, - ) -> types::builder::PostGetMultipleCompressedAccountsBody, + types::builder::PostGetMultipleNewAddressProofsBody, + ) -> types::builder::PostGetMultipleNewAddressProofsBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleCompressedAccounts` + ///Sends a `POST` request to `/getMultipleNewAddressProofs` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleCompressedAccountsBody::try_from(v) + types::PostGetMultipleNewAddressProofsBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleCompressedAccounts", client.baseurl,); + let url = format!("{}/getMultipleNewAddressProofs", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( 
@@ -53130,7 +54773,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_compressed_accounts", + operation_id: "post_get_multiple_new_address_proofs", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53156,15 +54799,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_compressed_accounts_v2`] + /**Builder for [`Client::post_get_multiple_new_address_proofs_v2`] -[`Client::post_get_multiple_compressed_accounts_v2`]: super::Client::post_get_multiple_compressed_accounts_v2*/ +[`Client::post_get_multiple_new_address_proofs_v2`]: super::Client::post_get_multiple_new_address_proofs_v2*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleCompressedAccountsV2<'a> { + pub struct PostGetMultipleNewAddressProofsV2<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleCompressedAccountsV2<'a> { + impl<'a> PostGetMultipleNewAddressProofsV2<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53173,9 +54816,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53183,7 +54826,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleCompressedAccountsV2Body` for body failed: {}", + "conversion to `PostGetMultipleNewAddressProofsV2Body` for body failed: {}", s ) }); @@ -53192,27 +54835,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleCompressedAccountsV2Body, - ) -> types::builder::PostGetMultipleCompressedAccountsV2Body, + types::builder::PostGetMultipleNewAddressProofsV2Body, + ) -> types::builder::PostGetMultipleNewAddressProofsV2Body, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleCompressedAccountsV2` + 
///Sends a `POST` request to `/getMultipleNewAddressProofsV2` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleCompressedAccountsV2Body::try_from(v) + types::PostGetMultipleNewAddressProofsV2Body::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleCompressedAccountsV2", client.baseurl,); + let url = format!("{}/getMultipleNewAddressProofsV2", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53233,7 +54876,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_compressed_accounts_v2", + operation_id: "post_get_multiple_new_address_proofs_v2", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53259,15 +54902,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_new_address_proofs`] + /**Builder for [`Client::post_get_queue_elements`] -[`Client::post_get_multiple_new_address_proofs`]: super::Client::post_get_multiple_new_address_proofs*/ +[`Client::post_get_queue_elements`]: super::Client::post_get_queue_elements*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleNewAddressProofs<'a> { + pub struct PostGetQueueElements<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleNewAddressProofs<'a> { + impl<'a> PostGetQueueElements<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53276,9 +54919,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53286,8 +54929,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleNewAddressProofsBody` for body 
failed: {}", - s + "conversion to `PostGetQueueElementsBody` for body failed: {}", s ) }); self @@ -53295,27 +54937,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleNewAddressProofsBody, - ) -> types::builder::PostGetMultipleNewAddressProofsBody, + types::builder::PostGetQueueElementsBody, + ) -> types::builder::PostGetQueueElementsBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleNewAddressProofs` + ///Sends a `POST` request to `/getQueueElements` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleNewAddressProofsBody::try_from(v) + types::PostGetQueueElementsBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleNewAddressProofs", client.baseurl,); + let url = format!("{}/getQueueElements", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53336,7 +54978,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_new_address_proofs", + operation_id: "post_get_queue_elements", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53362,15 +55004,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_multiple_new_address_proofs_v2`] + /**Builder for [`Client::post_get_queue_info`] -[`Client::post_get_multiple_new_address_proofs_v2`]: super::Client::post_get_multiple_new_address_proofs_v2*/ +[`Client::post_get_queue_info`]: super::Client::post_get_queue_info*/ #[derive(Debug, Clone)] - pub struct PostGetMultipleNewAddressProofsV2<'a> { + pub struct PostGetQueueInfo<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetMultipleNewAddressProofsV2<'a> { + 
impl<'a> PostGetQueueInfo<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53379,9 +55021,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53389,8 +55031,7 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetMultipleNewAddressProofsV2Body` for body failed: {}", - s + "conversion to `PostGetQueueInfoBody` for body failed: {}", s ) }); self @@ -53398,27 +55039,26 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetMultipleNewAddressProofsV2Body, - ) -> types::builder::PostGetMultipleNewAddressProofsV2Body, + types::builder::PostGetQueueInfoBody, + ) -> types::builder::PostGetQueueInfoBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getMultipleNewAddressProofsV2` + ///Sends a `POST` request to `/getQueueInfo` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetMultipleNewAddressProofsV2Body::try_from(v) - .map_err(|e| e.to_string()) + types::PostGetQueueInfoBody::try_from(v).map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getMultipleNewAddressProofsV2", client.baseurl,); + let url = format!("{}/getQueueInfo", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53439,7 +55079,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_multiple_new_address_proofs_v2", + operation_id: "post_get_queue_info", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; @@ -53465,15 +55105,15 @@ pub mod builder { } } } - /**Builder for [`Client::post_get_queue_elements`] + /**Builder for 
[`Client::post_get_queue_leaf_indices`] -[`Client::post_get_queue_elements`]: super::Client::post_get_queue_elements*/ +[`Client::post_get_queue_leaf_indices`]: super::Client::post_get_queue_leaf_indices*/ #[derive(Debug, Clone)] - pub struct PostGetQueueElements<'a> { + pub struct PostGetQueueLeafIndices<'a> { client: &'a super::Client, - body: Result, + body: Result, } - impl<'a> PostGetQueueElements<'a> { + impl<'a> PostGetQueueLeafIndices<'a> { pub fn new(client: &'a super::Client) -> Self { Self { client: client, @@ -53482,9 +55122,9 @@ pub mod builder { } pub fn body(mut self, value: V) -> Self where - V: std::convert::TryInto, + V: std::convert::TryInto, >::Error: std::fmt::Display, { self.body = value @@ -53492,7 +55132,8 @@ pub mod builder { .map(From::from) .map_err(|s| { format!( - "conversion to `PostGetQueueElementsBody` for body failed: {}", s + "conversion to `PostGetQueueLeafIndicesBody` for body failed: {}", + s ) }); self @@ -53500,128 +55141,27 @@ pub mod builder { pub fn body_map(mut self, f: F) -> Self where F: std::ops::FnOnce( - types::builder::PostGetQueueElementsBody, - ) -> types::builder::PostGetQueueElementsBody, + types::builder::PostGetQueueLeafIndicesBody, + ) -> types::builder::PostGetQueueLeafIndicesBody, { self.body = self.body.map(f); self } - ///Sends a `POST` request to `/getQueueElements` + ///Sends a `POST` request to `/getQueueLeafIndices` pub async fn send( self, ) -> Result< - ResponseValue, - Error, + ResponseValue, + Error, > { let Self { client, body } = self; let body = body .and_then(|v| { - types::PostGetQueueElementsBody::try_from(v) + types::PostGetQueueLeafIndicesBody::try_from(v) .map_err(|e| e.to_string()) }) .map_err(Error::InvalidRequest)?; - let url = format!("{}/getQueueElements", client.baseurl,); - let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); - header_map - .append( - ::reqwest::header::HeaderName::from_static("api-version"), - ::reqwest::header::HeaderValue::from_static( - 
super::Client::api_version(), - ), - ); - #[allow(unused_mut)] - let mut request = client - .client - .post(url) - .header( - ::reqwest::header::ACCEPT, - ::reqwest::header::HeaderValue::from_static("application/json"), - ) - .json(&body) - .headers(header_map) - .build()?; - let info = OperationInfo { - operation_id: "post_get_queue_elements", - }; - client.pre(&mut request, &info).await?; - let result = client.exec(request, &info).await; - client.post(&result, &info).await?; - let response = result?; - match response.status().as_u16() { - 200u16 => ResponseValue::from_response(response).await, - 429u16 => { - Err( - Error::ErrorResponse( - ResponseValue::from_response(response).await?, - ), - ) - } - 500u16 => { - Err( - Error::ErrorResponse( - ResponseValue::from_response(response).await?, - ), - ) - } - _ => Err(Error::UnexpectedResponse(response)), - } - } - } - /**Builder for [`Client::post_get_queue_info`] - -[`Client::post_get_queue_info`]: super::Client::post_get_queue_info*/ - #[derive(Debug, Clone)] - pub struct PostGetQueueInfo<'a> { - client: &'a super::Client, - body: Result, - } - impl<'a> PostGetQueueInfo<'a> { - pub fn new(client: &'a super::Client) -> Self { - Self { - client: client, - body: Ok(::std::default::Default::default()), - } - } - pub fn body(mut self, value: V) -> Self - where - V: std::convert::TryInto, - >::Error: std::fmt::Display, - { - self.body = value - .try_into() - .map(From::from) - .map_err(|s| { - format!( - "conversion to `PostGetQueueInfoBody` for body failed: {}", s - ) - }); - self - } - pub fn body_map(mut self, f: F) -> Self - where - F: std::ops::FnOnce( - types::builder::PostGetQueueInfoBody, - ) -> types::builder::PostGetQueueInfoBody, - { - self.body = self.body.map(f); - self - } - ///Sends a `POST` request to `/getQueueInfo` - pub async fn send( - self, - ) -> Result< - ResponseValue, - Error, - > { - let Self { client, body } = self; - let body = body - .and_then(|v| { - 
types::PostGetQueueInfoBody::try_from(v).map_err(|e| e.to_string()) - }) - .map_err(Error::InvalidRequest)?; - let url = format!("{}/getQueueInfo", client.baseurl,); + let url = format!("{}/getQueueLeafIndices", client.baseurl,); let mut header_map = ::reqwest::header::HeaderMap::with_capacity(1usize); header_map .append( @@ -53642,7 +55182,7 @@ pub mod builder { .headers(header_map) .build()?; let info = OperationInfo { - operation_id: "post_get_queue_info", + operation_id: "post_get_queue_leaf_indices", }; client.pre(&mut request, &info).await?; let result = client.exec(request, &info).await; diff --git a/sdk-libs/photon-api/src/lib.rs b/sdk-libs/photon-api/src/lib.rs index 15f469d40f..480d524ae2 100644 --- a/sdk-libs/photon-api/src/lib.rs +++ b/sdk-libs/photon-api/src/lib.rs @@ -358,6 +358,17 @@ pub mod apis { } } + pub fn make_get_queue_leaf_indices_body( + params: types::PostGetQueueLeafIndicesBodyParams, + ) -> types::PostGetQueueLeafIndicesBody { + types::PostGetQueueLeafIndicesBody { + id: types::PostGetQueueLeafIndicesBodyId::TestAccount, + jsonrpc: types::PostGetQueueLeafIndicesBodyJsonrpc::X20, + method: types::PostGetQueueLeafIndicesBodyMethod::GetQueueLeafIndices, + params, + } + } + pub fn make_get_queue_info_body( params: types::PostGetQueueInfoBodyParams, ) -> types::PostGetQueueInfoBody { @@ -576,6 +587,12 @@ pub mod apis { types::PostGetQueueElementsBody, types::PostGetQueueElementsResponse ); + api_call!( + get_queue_leaf_indices_post, + "getQueueLeafIndices", + types::PostGetQueueLeafIndicesBody, + types::PostGetQueueLeafIndicesResponse + ); api_call!( get_queue_info_post, "getQueueInfo", diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs index 0b5b0583a3..5485c21afb 100644 --- a/sdk-libs/program-test/src/indexer/test_indexer.rs +++ b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -28,8 +28,8 @@ use light_client::{ GetCompressedAccountsByOwnerConfig, 
GetCompressedTokenAccountsByOwnerOrDelegateOptions, Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext, OwnerBalance, PaginatedOptions, QueueElementsResult, - QueueElementsV2Options, Response, RetryConfig, RootIndex, SignatureWithMetadata, - StateMerkleTreeAccounts, TokenBalance, ValidityProofWithContext, + QueueElementsV2Options, QueueLeafIndex, Response, RetryConfig, RootIndex, + SignatureWithMetadata, StateMerkleTreeAccounts, TokenBalance, ValidityProofWithContext, }, }; use light_compressed_account::{ @@ -896,6 +896,16 @@ impl Indexer for TestIndexer { } } + async fn get_queue_leaf_indices( + &self, + _merkle_tree_pubkey: [u8; 32], + _limit: u16, + _start_index: Option, + _config: Option, + ) -> Result>, IndexerError> { + unimplemented!("get_queue_leaf_indices") + } + async fn get_queue_info( &self, _config: Option, diff --git a/sdk-libs/program-test/src/program_test/indexer.rs b/sdk-libs/program-test/src/program_test/indexer.rs index a1a80113ce..d0bfda4c95 100644 --- a/sdk-libs/program-test/src/program_test/indexer.rs +++ b/sdk-libs/program-test/src/program_test/indexer.rs @@ -4,8 +4,8 @@ use light_client::indexer::{ GetCompressedAccountsByOwnerConfig, GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext, OwnerBalance, PaginatedOptions, QueueElementsResult, - QueueElementsV2Options, Response, RetryConfig, SignatureWithMetadata, TokenBalance, - ValidityProofWithContext, + QueueElementsV2Options, QueueLeafIndex, Response, RetryConfig, SignatureWithMetadata, + TokenBalance, ValidityProofWithContext, }; use solana_sdk::pubkey::Pubkey; @@ -196,6 +196,21 @@ impl Indexer for LightProgramTest { .await?) 
} + async fn get_queue_leaf_indices( + &self, + merkle_tree_pubkey: [u8; 32], + limit: u16, + start_index: Option, + config: Option, + ) -> Result>, IndexerError> { + Ok(self + .indexer + .as_ref() + .ok_or(IndexerError::NotInitialized)? + .get_queue_leaf_indices(merkle_tree_pubkey, limit, start_index, config) + .await?) + } + async fn get_queue_info( &self, config: Option, From 6a8a115f029d2eb7ca009d7f89619be673c1001b Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Wed, 18 Mar 2026 14:33:03 +0000 Subject: [PATCH 10/16] refactor: simplify batched transaction configuration in EpochManager --- forester/src/epoch_manager.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index 46d930df20..fef06b495c 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -2985,7 +2985,7 @@ impl EpochManager { return Ok(0); }; - let mut batched_tx_config = SendBatchedTransactionsConfig { + let batched_tx_config = SendBatchedTransactionsConfig { num_batches: 1, build_transaction_batch_config: BuildTransactionBatchConfig { batch_size: self.config.transaction_config.legacy_ixs_per_tx as u64, @@ -3006,13 +3006,10 @@ impl EpochManager { ), confirmation_max_attempts: self.config.transaction_config.confirmation_max_attempts as usize, - min_queue_items: None, // set below after reading ALT + min_queue_items: self.config.min_queue_items, }; let alt_snapshot = (*self.address_lookup_tables).clone(); - if self.config.enable_v1_multi_nullify && !alt_snapshot.is_empty() { - batched_tx_config.min_queue_items = self.config.min_queue_items; - } let transaction_builder = Arc::new(EpochManagerTransactions::new( self.rpc_pool.clone(), epoch_info.epoch, From bd865d576cf014478b62c4808aecbc2f67fa30e3 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Wed, 18 Mar 2026 14:51:27 +0000 Subject: [PATCH 11/16] format --- forester/src/processor/v1/helpers.rs | 4 +--- 
forester/src/processor/v1/tx_builder.rs | 6 ++++-- forester/src/queue_helpers.rs | 6 +++++- forester/src/smart_transaction.rs | 10 ++++++++-- sdk-libs/client/src/indexer/photon_indexer.rs | 6 ++---- 5 files changed, 20 insertions(+), 12 deletions(-) diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs index f25b66ee77..c6eae32eb4 100644 --- a/forester/src/processor/v1/helpers.rs +++ b/forester/src/processor/v1/helpers.rs @@ -512,9 +512,7 @@ pub async fn fetch_proofs_and_create_instructions( } for (item, proof) in items_with_proofs.iter() { instructions.push(LabeledInstruction { - instruction: build_nullify_instruction( - item, proof, authority, derivation, epoch, - ), + instruction: build_nullify_instruction(item, proof, authority, derivation, epoch), label: "StateV1Nullify".to_string(), }); } diff --git a/forester/src/processor/v1/tx_builder.rs b/forester/src/processor/v1/tx_builder.rs index 74438bb7a7..7b1b92ad35 100644 --- a/forester/src/processor/v1/tx_builder.rs +++ b/forester/src/processor/v1/tx_builder.rs @@ -170,8 +170,10 @@ impl TransactionBuilder for EpochManagerTransactions { .map(|li| li.label.as_str()) .collect::>() .join("+"); - let instructions: Vec = - labeled_chunk.iter().map(|li| li.instruction.clone()).collect(); + let instructions: Vec = labeled_chunk + .iter() + .map(|li| li.instruction.clone()) + .collect(); let prepared = create_smart_transaction(CreateSmartTransactionConfig { payer: payer.insecure_clone(), instructions, diff --git a/forester/src/queue_helpers.rs b/forester/src/queue_helpers.rs index 4eaa7c421d..d9adcda049 100644 --- a/forester/src/queue_helpers.rs +++ b/forester/src/queue_helpers.rs @@ -232,7 +232,11 @@ pub async fn fetch_queue_item_data( .filter(|(index, _, is_pending)| { *index >= start_index as usize && *index < end_index && *is_pending }) - .map(|(index, hash, _)| QueueItemData { hash, index, leaf_index: None }) + .map(|(index, hash, _)| QueueItemData { + hash, + index, + leaf_index: 
None, + }) .collect(); tracing::debug!( diff --git a/forester/src/smart_transaction.rs b/forester/src/smart_transaction.rs index 53bd7069f0..91149905eb 100644 --- a/forester/src/smart_transaction.rs +++ b/forester/src/smart_transaction.rs @@ -402,7 +402,10 @@ async fn prepare_transaction( transaction .try_sign(signers, blockhash) .map_err(|e| RpcError::SigningError(e.to_string()))?; - Ok(PreparedTransaction::legacy(transaction, last_valid_block_height)) + Ok(PreparedTransaction::legacy( + transaction, + last_valid_block_height, + )) } else { let message = v0::Message::try_compile(payer, &final_instructions, address_lookup_tables, blockhash) @@ -411,7 +414,10 @@ async fn prepare_transaction( })?; let transaction = VersionedTransaction::try_new(VersionedMessage::V0(message), signers) .map_err(|e| RpcError::SigningError(e.to_string()))?; - Ok(PreparedTransaction::versioned(transaction, last_valid_block_height)) + Ok(PreparedTransaction::versioned( + transaction, + last_valid_block_height, + )) } } diff --git a/sdk-libs/client/src/indexer/photon_indexer.rs b/sdk-libs/client/src/indexer/photon_indexer.rs index bca2902ae6..bd0a14fbb4 100644 --- a/sdk-libs/client/src/indexer/photon_indexer.rs +++ b/sdk-libs/client/src/indexer/photon_indexer.rs @@ -1718,8 +1718,7 @@ impl Indexer for PhotonIndexer { limit, start_index, }; - let request = - photon_api::apis::default_api::make_get_queue_leaf_indices_body(params); + let request = photon_api::apis::default_api::make_get_queue_leaf_indices_body(params); let result = photon_api::apis::default_api::get_queue_leaf_indices_post( &self.configuration, @@ -1728,8 +1727,7 @@ impl Indexer for PhotonIndexer { .await?; Self::check_api_error("get_queue_leaf_indices", result.error)?; - let api_response = - Self::extract_result("get_queue_leaf_indices", result.result)?; + let api_response = Self::extract_result("get_queue_leaf_indices", result.result)?; if api_response.context.slot < config.slot { return 
Err(IndexerError::IndexerNotSyncedToSlot); From 2a2d2da56089504c285f35daa30f9433b726855f Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Wed, 18 Mar 2026 21:42:04 +0000 Subject: [PATCH 12/16] feat: add presort option for batched transactions to improve deduplication --- forester/src/epoch_manager.rs | 10 +++- forester/src/processor/v1/config.rs | 4 ++ forester/src/processor/v1/helpers.rs | 20 ++++---- forester/src/processor/v1/send_transaction.rs | 50 ++++++++++++++++++- 4 files changed, 71 insertions(+), 13 deletions(-) diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index fef06b495c..2c361f2f27 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -3006,7 +3006,15 @@ impl EpochManager { ), confirmation_max_attempts: self.config.transaction_config.confirmation_max_attempts as usize, - min_queue_items: self.config.min_queue_items, + min_queue_items: if self.config.enable_v1_multi_nullify + && !self.address_lookup_tables.is_empty() + { + self.config.min_queue_items + } else { + None + }, + enable_presort: self.config.enable_v1_multi_nullify + && !self.address_lookup_tables.is_empty(), }; let alt_snapshot = (*self.address_lookup_tables).clone(); diff --git a/forester/src/processor/v1/config.rs b/forester/src/processor/v1/config.rs index d472383985..893d950601 100644 --- a/forester/src/processor/v1/config.rs +++ b/forester/src/processor/v1/config.rs @@ -27,6 +27,10 @@ pub struct SendBatchedTransactionsConfig { /// Only applies to StateV1 trees. When `None`, processing starts immediately. /// When the timeout deadline is reached, returns 0 (re-scheduled next light slot). pub min_queue_items: Option, + /// When true, fetch leaf indices from the indexer and sort work items by + /// leaf_index before chunking, so adjacent leaves land in the same batch + /// for better dedup grouping. 
+ pub enable_presort: bool, } #[derive(Debug, Clone, Copy)] diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs index c6eae32eb4..f1beb34b58 100644 --- a/forester/src/processor/v1/helpers.rs +++ b/forester/src/processor/v1/helpers.rs @@ -93,16 +93,6 @@ pub async fn fetch_proofs_and_create_instructions( None }; - let state_data = if !state_items.is_empty() { - let states: Vec<[u8; 32]> = state_items - .iter() - .map(|item| item.queue_item_data.hash) - .collect(); - Some(states) - } else { - None - }; - let rpc = pool.get_connection().await?; if let Err(e) = wait_for_indexer(&*rpc).await { if should_emit_rate_limited_warning("v1_wait_for_indexer", Duration::from_secs(30)) { @@ -238,6 +228,16 @@ pub async fn fetch_proofs_and_create_instructions( Vec::new() }; + let state_data = if !state_items.is_empty() { + let states: Vec<[u8; 32]> = state_items + .iter() + .map(|item| item.queue_item_data.hash) + .collect(); + Some(states) + } else { + None + }; + let state_proofs = if let Some(states) = state_data { let total_states = states.len(); info!( diff --git a/forester/src/processor/v1/send_transaction.rs b/forester/src/processor/v1/send_transaction.rs index 87a273c630..a0328c5877 100644 --- a/forester/src/processor/v1/send_transaction.rs +++ b/forester/src/processor/v1/send_transaction.rs @@ -9,7 +9,7 @@ use std::{ use forester_utils::{forester_epoch::TreeAccounts, rpc_pool::SolanaRpcPool}; use futures::StreamExt; -use light_client::rpc::Rpc; +use light_client::{indexer::Indexer, rpc::Rpc}; use light_compressed_account::TreeType; use light_registry::utils::get_forester_epoch_pda_from_authority; use solana_sdk::{ @@ -80,7 +80,7 @@ pub async fn send_batched_transactions { + let leaf_index_map: std::collections::HashMap<[u8; 32], u64> = response + .value + .items + .into_iter() + .map(|item| (item.hash, item.leaf_index)) + .collect(); + data.work_items.sort_by_key(|item| { + leaf_index_map + .get(&item.queue_item_data.hash) + .copied() + 
.unwrap_or(u64::MAX) + }); + info!( + tree = %tree_accounts.merkle_tree, + count = data.work_items.len(), + leaf_indices = leaf_index_map.len(), + "Pre-sorted work items by leaf_index for dedup grouping" + ); + } + Err(e) => { + warn!( + tree = %tree_accounts.merkle_tree, + error = %e, + "Failed to fetch queue leaf indices, proceeding without pre-sort" + ); + } + } + } + } + let mut build_config = config.build_transaction_batch_config; build_config.queue_item_count = data.work_items.len(); From b2400ae3108b6e26963706678ae208739a8afce1 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 20 Mar 2026 14:59:34 +0000 Subject: [PATCH 13/16] feat: update configuration for batching and nullification processes, including work item batch size and default values --- external/photon | 2 +- forester/src/cli.rs | 15 +- .../src/compressible/ctoken/compressor.rs | 39 ++- forester/src/compressible/ctoken/state.rs | 29 +++ forester/src/config.rs | 9 +- forester/src/epoch_manager.rs | 68 +++++- forester/src/processor/v1/config.rs | 9 +- forester/src/processor/v1/helpers.rs | 6 + forester/src/processor/v1/send_transaction.rs | 228 +++++++++--------- forester/src/processor/v1/tx_builder.rs | 22 +- 10 files changed, 266 insertions(+), 161 deletions(-) diff --git a/external/photon b/external/photon index 8a0bbce6a9..a52fd36570 160000 --- a/external/photon +++ b/external/photon @@ -1 +1 @@ -Subproject commit 8a0bbce6a9250e2cc41e50d10efa9256a180db58 +Subproject commit a52fd365706e235f538689c3afdf94c8371db80f diff --git a/forester/src/cli.rs b/forester/src/cli.rs index 838172a24c..b6c1769e82 100644 --- a/forester/src/cli.rs +++ b/forester/src/cli.rs @@ -102,8 +102,8 @@ pub struct StartArgs { #[arg( long, env = "MAX_CONCURRENT_SENDS", - default_value = "12", - help = "Maximum number of concurrent transaction sends per batch" + default_value = "50", + help = "Maximum number of concurrent transaction sends per batch. Defaults to 50 to match work-item-batch-size." 
)] pub max_concurrent_sends: usize, @@ -298,11 +298,18 @@ pub struct StartArgs { #[arg( long, env = "ENABLE_V1_MULTI_NULLIFY", - help = "Enable nullify_state_v1_multi instruction for batching 2-4 V1 state nullifications per instruction. Requires --lookup-table-address.", - default_value = "false" + help = "Enable nullify_state_v1_multi instruction for batching 2-4 V1 state nullifications per instruction. Requires --lookup-table-address. Enabled by default.", + default_value = "true" )] pub enable_v1_multi_nullify: bool, + #[arg( + long, + env = "WORK_ITEM_BATCH_SIZE", + help = "Number of queue items to process per batch cycle. Smaller values reduce blockhash expiry risk, larger values reduce per-batch overhead." + )] + pub work_item_batch_size: Option, + #[arg( long, env = "API_SERVER_PORT", diff --git a/forester/src/compressible/ctoken/compressor.rs b/forester/src/compressible/ctoken/compressor.rs index 80e5ca4382..0296ac1747 100644 --- a/forester/src/compressible/ctoken/compressor.rs +++ b/forester/src/compressible/ctoken/compressor.rs @@ -89,26 +89,45 @@ impl CTokenCompressor { let mut rpc = self.rpc_pool.get_connection().await?; - // Pre-check: filter out accounts that no longer exist on-chain + // Pre-check: filter out accounts that no longer exist on-chain or are no longer compressible let all_pubkeys: Vec = account_states.iter().map(|a| a.pubkey).collect(); let on_chain_accounts = rpc .get_multiple_accounts(&all_pubkeys) .await .map_err(|e| anyhow::anyhow!("Failed to pre-check accounts: {:?}", e))?; + let current_slot = rpc + .get_slot() + .await + .map_err(|e| anyhow::anyhow!("Failed to get current slot: {:?}", e))?; + let account_states: Vec<&CTokenAccountState> = account_states .iter() .zip(on_chain_accounts.iter()) .filter_map(|(state, on_chain)| { - if on_chain.is_some() { - Some(state) - } else { - debug!( - "CToken account {} no longer exists on-chain, removing from tracker", - state.pubkey - ); - self.tracker.remove(&state.pubkey); - None + let 
on_chain = on_chain.as_ref()?; + // Re-validate compressibility with fresh on-chain data + match super::state::revalidate_compressibility( + &on_chain.data, + on_chain.lamports, + current_slot, + ) { + Ok(true) => Some(state), + Ok(false) => { + debug!( + "CToken account {} is no longer compressible, skipping", + state.pubkey + ); + None + } + Err(e) => { + debug!( + "CToken account {} failed compressibility check: {}, removing", + state.pubkey, e + ); + self.tracker.remove(&state.pubkey); + None + } } }) .collect(); diff --git a/forester/src/compressible/ctoken/state.rs b/forester/src/compressible/ctoken/state.rs index 1c71e785f3..dfcf269140 100644 --- a/forester/src/compressible/ctoken/state.rs +++ b/forester/src/compressible/ctoken/state.rs @@ -13,6 +13,35 @@ use crate::{ Result, }; +/// Re-validate compressibility using fresh on-chain account data. +/// Returns true if the account is still compressible at the given slot. +pub fn revalidate_compressibility( + account_data: &[u8], + lamports: u64, + current_slot: u64, +) -> Result { + use light_token_interface::state::extensions::ExtensionStruct; + + let account = Token::try_from_slice(account_data) + .map_err(|e| anyhow::anyhow!("Failed to deserialize Token: {:?}", e))?; + + let compression_info = account + .extensions + .as_ref() + .and_then(|exts| { + exts.iter().find_map(|ext| match ext { + ExtensionStruct::Compressible(comp) => Some(&comp.info), + _ => None, + }) + }) + .ok_or_else(|| anyhow::anyhow!("Missing Compressible extension"))?; + + Ok(compression_info + .is_compressible(account_data.len() as u64, current_slot, lamports) + .map_err(|e| anyhow::anyhow!("is_compressible error: {:?}", e))? 
+ .is_some()) +} + fn calculate_compressible_slot(account: &Token, lamports: u64, account_size: usize) -> Result { use light_token_interface::state::extensions::ExtensionStruct; diff --git a/forester/src/config.rs b/forester/src/config.rs index 7452283b1d..ef16f3b5c3 100644 --- a/forester/src/config.rs +++ b/forester/src/config.rs @@ -36,6 +36,8 @@ pub struct ForesterConfig { /// Enable nullify_state_v1_multi instruction for batching 2-4 V1 state nullifications. /// Requires lookup_table_address to be set. pub enable_v1_multi_nullify: bool, + /// Number of queue items to process per batch cycle. Default: 50. + pub work_item_batch_size: usize, } #[derive(Debug, Clone)] @@ -122,7 +124,7 @@ impl Default for GeneralConfig { skip_v2_address_trees: false, tree_ids: vec![], sleep_after_processing_ms: 10_000, - sleep_when_idle_ms: 45_000, + sleep_when_idle_ms: 5_000, queue_polling_mode: QueuePollingMode::Indexer, group_authority: None, helius_rpc: false, @@ -347,7 +349,7 @@ impl ForesterConfig { valid.into_iter().map(|r| r.unwrap()).collect() }, sleep_after_processing_ms: 10_000, - sleep_when_idle_ms: 45_000, + sleep_when_idle_ms: 5_000, queue_polling_mode: args.queue_polling_mode, group_authority: args .group_authority @@ -429,6 +431,7 @@ impl ForesterConfig { .transpose()?, min_queue_items: args.min_queue_items, enable_v1_multi_nullify: args.enable_v1_multi_nullify, + work_item_batch_size: args.work_item_batch_size.unwrap_or(50), }) } @@ -485,6 +488,7 @@ impl ForesterConfig { lookup_table_address: None, min_queue_items: None, enable_v1_multi_nullify: false, + work_item_batch_size: 50, }) } } @@ -507,6 +511,7 @@ impl Clone for ForesterConfig { lookup_table_address: self.lookup_table_address, min_queue_items: self.min_queue_items, enable_v1_multi_nullify: self.enable_v1_multi_nullify, + work_item_batch_size: self.work_item_batch_size, } } } diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index 2c361f2f27..1c6764f7bd 100644 --- 
a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -393,13 +393,19 @@ impl EpochManager { async move { self_clone.check_sol_balance_periodically().await } }); + let queue_metrics_handle = tokio::spawn({ + let self_clone = Arc::clone(&self); + async move { self_clone.update_queue_metrics_periodically().await } + }); + let _guard = scopeguard::guard( ( current_previous_handle, tree_discovery_handle, balance_check_handle, + queue_metrics_handle, ), - |(h2, h3, h4)| { + |(h2, h3, h4, h5)| { info!( event = "background_tasks_aborting", run_id = %self.run_id, @@ -408,6 +414,7 @@ impl EpochManager { h2.abort(); h3.abort(); h4.abort(); + h5.abort(); }, ); @@ -491,6 +498,50 @@ impl EpochManager { result } + /// Periodically updates queue_length and queue_capacity Prometheus gauges + /// so Grafana dashboards can show queue trends over time. + async fn update_queue_metrics_periodically(self: Arc) -> Result<()> { + let interval_secs = self.config.general_config.tree_discovery_interval_seconds; + if interval_secs == 0 { + return Ok(()); + } + // Use same interval as tree discovery (default 30s) + let mut interval = tokio::time::interval(Duration::from_secs(interval_secs)); + // Skip first tick — let tree discovery populate the tree list first + interval.tick().await; + + loop { + interval.tick().await; + + let trees = self.trees.lock().await; + let trees_snapshot: Vec<_> = trees.clone(); + drop(trees); + + if trees_snapshot.is_empty() { + continue; + } + + for tree_type in [ + TreeType::StateV1, + TreeType::AddressV1, + TreeType::StateV2, + TreeType::AddressV2, + ] { + if let Err(e) = + crate::run_queue_info(self.config.clone(), &trees_snapshot, tree_type).await + { + debug!( + event = "queue_metrics_update_failed", + run_id = %self.run_id, + tree_type = ?tree_type, + error = ?e, + "Failed to update queue metrics" + ); + } + } + } + } + async fn check_sol_balance_periodically(self: Arc) -> Result<()> { let interval_duration = Duration::from_secs(300); let 
mut interval = tokio::time::interval(interval_duration); @@ -2147,13 +2198,12 @@ impl EpochManager { } estimated_slot = self.slot_tracker.estimated_current_slot(); - let sleep_duration_ms = if items_processed_this_iteration > 0 { - self.config.general_config.sleep_after_processing_ms - } else { - self.config.general_config.sleep_when_idle_ms - }; - - tokio::time::sleep(Duration::from_millis(sleep_duration_ms)).await; + if items_processed_this_iteration == 0 { + // No items processed. Short sleep before re-checking — the queue + // may grow above min_queue_items within this light slot. + tokio::time::sleep(Duration::from_secs(5)).await; + } + // When items were processed, loop immediately to fetch the next batch. } Ok(()) } @@ -2993,7 +3043,6 @@ impl EpochManager { compute_unit_limit: Some(self.config.transaction_config.cu_limit), enable_priority_fees: self.config.transaction_config.enable_priority_fees, max_concurrent_sends: Some(self.config.transaction_config.max_concurrent_sends), - queue_item_count: 0, }, queue_config: self.config.queue_config, retry_config: RetryConfig { @@ -3015,6 +3064,7 @@ impl EpochManager { }, enable_presort: self.config.enable_v1_multi_nullify && !self.address_lookup_tables.is_empty(), + work_item_batch_size: self.config.work_item_batch_size, }; let alt_snapshot = (*self.address_lookup_tables).clone(); diff --git a/forester/src/processor/v1/config.rs b/forester/src/processor/v1/config.rs index 893d950601..41f1bc0c08 100644 --- a/forester/src/processor/v1/config.rs +++ b/forester/src/processor/v1/config.rs @@ -2,10 +2,6 @@ use light_client::rpc::RetryConfig; use crate::config::QueueConfig; -/// Maximum queue size for which multi-nullify grouping is enabled. -/// Above this threshold, fall back to single-nullify for more reliable throughput. 
-pub const MULTI_NULLIFY_MAX_QUEUE_SIZE: usize = 10_000; - #[derive(Debug, Clone, Copy)] pub struct CapConfig { pub rec_fee_microlamports_per_cu: u64, @@ -31,6 +27,8 @@ pub struct SendBatchedTransactionsConfig { /// leaf_index before chunking, so adjacent leaves land in the same batch /// for better dedup grouping. pub enable_presort: bool, + /// Number of queue items to process per batch cycle. + pub work_item_batch_size: usize, } #[derive(Debug, Clone, Copy)] @@ -40,7 +38,4 @@ pub struct BuildTransactionBatchConfig { pub compute_unit_limit: Option, pub enable_priority_fees: bool, pub max_concurrent_sends: Option, - /// Number of items in the queue at the time of batch preparation. - /// Used to disable multi-nullify when queue is very large (>10,000 items). - pub queue_item_count: usize, } diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs index f1beb34b58..8af0e9c4ba 100644 --- a/forester/src/processor/v1/helpers.rs +++ b/forester/src/processor/v1/helpers.rs @@ -43,6 +43,8 @@ pub struct LabeledInstruction { pub instruction: Instruction, /// Label for logging, e.g. "StateV1Nullify" or "StateV1MultiNullify(3)" pub label: String, + /// Number of nullifications in this instruction (1 for single, 2-4 for multi). 
+ pub nullify_count: u32, } /// Work items should be of only one type and tree @@ -377,6 +379,7 @@ pub async fn fetch_proofs_and_create_instructions( instructions.push(LabeledInstruction { instruction, label: "AddressV1Update".to_string(), + nullify_count: 1, }); } @@ -443,6 +446,7 @@ pub async fn fetch_proofs_and_create_instructions( item, proof, authority, derivation, epoch, ), label: "StateV1Nullify".to_string(), + nullify_count: 1, }); } else { let group_proofs: Vec<[[u8; 32]; 16]> = group_indices @@ -503,6 +507,7 @@ pub async fn fetch_proofs_and_create_instructions( instructions.push(LabeledInstruction { instruction, label: format!("StateV1MultiNullify({})", group_size), + nullify_count: group_size as u32, }); } } @@ -514,6 +519,7 @@ pub async fn fetch_proofs_and_create_instructions( instructions.push(LabeledInstruction { instruction: build_nullify_instruction(item, proof, authority, derivation, epoch), label: "StateV1Nullify".to_string(), + nullify_count: 1, }); } } diff --git a/forester/src/processor/v1/send_transaction.rs b/forester/src/processor/v1/send_transaction.rs index a0328c5877..de762e9cee 100644 --- a/forester/src/processor/v1/send_transaction.rs +++ b/forester/src/processor/v1/send_transaction.rs @@ -13,23 +13,18 @@ use light_client::{indexer::Indexer, rpc::Rpc}; use light_compressed_account::TreeType; use light_registry::utils::get_forester_epoch_pda_from_authority; use solana_sdk::{ - hash::Hash, pubkey::Pubkey, signature::{Keypair, Signature, Signer}, }; use tokio::time::Instant; -use tracing::{debug, error, info, trace, warn}; +use tracing::{error, info, trace, warn}; -const WORK_ITEM_BATCH_SIZE: usize = 100; use crate::{ epoch_manager::WorkItem, errors::ForesterError, metrics::increment_transactions_failed, priority_fee::PriorityFeeConfig, - processor::v1::{ - config::{SendBatchedTransactionsConfig, MULTI_NULLIFY_MAX_QUEUE_SIZE}, - tx_builder::TransactionBuilder, - }, + processor::v1::{config::SendBatchedTransactionsConfig, 
tx_builder::TransactionBuilder}, queue_helpers::fetch_queue_item_data, smart_transaction::{ConfirmationConfig, PreparedTransaction, SmartTransactionError}, Result, @@ -37,8 +32,6 @@ use crate::{ struct PreparedBatchData { work_items: Vec, - recent_blockhash: Hash, - last_valid_block_height: u64, priority_fee: Option, timeout_deadline: Instant, } @@ -146,17 +139,7 @@ pub async fn send_batched_transactions MULTI_NULLIFY_MAX_QUEUE_SIZE { - warn!( - tree = %tree_accounts.merkle_tree, - "v1 state multi-nullify disabled: queue_item_count {} exceeds threshold {}", - data.work_items.len(), MULTI_NULLIFY_MAX_QUEUE_SIZE - ); - } - + let build_config = config.build_transaction_batch_config; let max_concurrent_sends = build_config.max_concurrent_sends.unwrap_or(1).max(1); let effective_max_concurrent_sends = compute_effective_max_concurrent_sends(config, max_concurrent_sends, data.work_items.len()); @@ -165,107 +148,119 @@ pub async fn send_batched_transactions= data.timeout_deadline { - trace!(tree = %tree_accounts.merkle_tree, "Reached global timeout deadline before processing next chunk, stopping."); - break; - } + // Cap total items to stay within the merkle tree changelog capacity. 
+ const MAX_ITEMS_PER_CYCLE: usize = 1400; + let items_to_process = if data.work_items.len() > MAX_ITEMS_PER_CYCLE { + &data.work_items[..MAX_ITEMS_PER_CYCLE] + } else { + &data.work_items + }; - // Refresh blockhash if it's getting stale - if last_blockhash_refresh.elapsed() > BLOCKHASH_REFRESH_INTERVAL { - match pool.get_connection().await { - Ok(mut rpc) => match rpc.get_latest_blockhash().await { - Ok((new_hash, new_height)) => { - recent_blockhash = new_hash; - last_valid_block_height = new_height; - last_blockhash_refresh = Instant::now(); - debug!(tree = %tree_accounts.merkle_tree, "Refreshed blockhash"); - } - Err(e) => { - warn!(tree = %tree_accounts.merkle_tree, "Failed to refresh blockhash: {:?}", e); - } - }, - Err(e) => { - warn!(tree = %tree_accounts.merkle_tree, "Failed to get RPC for blockhash refresh: {:?}", e); + // Process all chunks concurrently: each chunk fetches proofs, builds, and sends in parallel. + let chunks: Vec> = items_to_process + .chunks(work_item_batch_size) + .map(|c| c.to_vec()) + .collect(); + + let num_chunks = chunks.len(); + info!( + tree = %tree_accounts.merkle_tree, + "Processing {} concurrent chunks of up to {} items each", + num_chunks, work_item_batch_size + ); + + let chunk_futures: Vec<_> = chunks + .into_iter() + .map(|work_chunk| { + let pool = Arc::clone(&pool); + let transaction_builder = Arc::clone(&transaction_builder); + let cancel_signal = Arc::clone(&operation_cancel_signal); + let num_sent = Arc::clone(&num_sent_transactions); + let payer = payer.insecure_clone(); + let derivation = *derivation; + let tree_id = tree_accounts.merkle_tree; + let timeout_deadline = data.timeout_deadline; + let confirmation_max_attempts = config.confirmation_max_attempts; + let confirmation_poll_interval = config.confirmation_poll_interval; + + async move { + // Safety margin: stop 3s before deadline to avoid sending txs + // that land after our light slot ends (ForesterNotEligible). 
+ let safe_deadline = timeout_deadline - std::time::Duration::from_secs(3); + if cancel_signal.load(Ordering::SeqCst) || Instant::now() >= safe_deadline { + return Ok(()); } - } - } - trace!(tree = %tree_accounts.merkle_tree, "Processing chunk of size {}", work_chunk.len()); - let build_start_time = Instant::now(); - - let (transactions_to_send, _) = match transaction_builder - .build_signed_transaction_batch( - payer, - derivation, - &recent_blockhash, - last_valid_block_height, - data.priority_fee, - work_chunk, - build_config, - ) - .await - { - Ok(res) => res, - Err(e) => { - error!(tree = %tree_accounts.merkle_tree, "Failed to build transaction batch: {:?}", e); - continue; - } - }; - trace!(tree = %tree_accounts.merkle_tree, "Built {} transactions in {:?}", transactions_to_send.len(), build_start_time.elapsed()); + // Each chunk gets a fresh blockhash + let (recent_blockhash, last_valid_block_height) = { + let mut rpc = pool.get_connection().await.map_err(ForesterError::from)?; + rpc.get_latest_blockhash().await.map_err(|e| { + ForesterError::General { error: format!("Failed to get blockhash: {:?}", e) } + })? 
+ }; + + let build_start_time = Instant::now(); + let (transactions_to_send, _) = match transaction_builder + .build_signed_transaction_batch( + &payer, + &derivation, + &recent_blockhash, + last_valid_block_height, + data.priority_fee, + &work_chunk, + build_config, + ) + .await + { + Ok(res) => res, + Err(e) => { + error!(tree = %tree_id, "Failed to build transaction batch: {:?}", e); + return Ok(()); + } + }; + trace!(tree = %tree_id, "Built {} transactions in {:?}", transactions_to_send.len(), build_start_time.elapsed()); - if Instant::now() >= data.timeout_deadline { - trace!(tree = %tree_accounts.merkle_tree, "Reached global timeout deadline after building transactions, stopping."); - break; - } + if transactions_to_send.is_empty() || Instant::now() >= safe_deadline { + return Ok(()); + } - if transactions_to_send.is_empty() { - trace!(tree = %tree_accounts.merkle_tree, "Built batch resulted in 0 transactions, skipping send for this chunk."); - continue; - } + let send_context = ChunkSendContext { + pool: Arc::clone(&pool), + max_concurrent_sends: effective_max_concurrent_sends, + timeout_deadline, + cancel_signal: Arc::clone(&cancel_signal), + num_sent_transactions: Arc::clone(&num_sent), + confirmation: ConfirmationConfig { + max_attempts: confirmation_max_attempts as u32, + poll_interval: confirmation_poll_interval, + }, + }; + + if let Err(e) = execute_transaction_chunk_sending(transactions_to_send, &send_context).await { + if e.is_forester_not_eligible() { + cancel_signal.store(true, Ordering::SeqCst); + return Err(ForesterError::NotEligible); + } + warn!(tree = %tree_id, error = ?e, "Chunk send finished with recoverable errors"); + } - let send_context = ChunkSendContext { - pool: Arc::clone(&pool), - max_concurrent_sends: effective_max_concurrent_sends, - timeout_deadline: data.timeout_deadline, - cancel_signal: Arc::clone(&operation_cancel_signal), - num_sent_transactions: Arc::clone(&num_sent_transactions), - confirmation: ConfirmationConfig { - 
max_attempts: config.confirmation_max_attempts as u32, - poll_interval: config.confirmation_poll_interval, - }, - }; - - if let Err(e) = execute_transaction_chunk_sending(transactions_to_send, &send_context).await - { - if e.is_forester_not_eligible() { - warn!( - tree = %tree_accounts.merkle_tree, - "Detected ForesterNotEligible while sending V1 transactions; stopping batch loop for re-schedule" - ); - return Err(ForesterError::NotEligible); + Ok::<(), ForesterError>(()) } - warn!( - tree = %tree_accounts.merkle_tree, - error = ?e, - "Chunk send finished with recoverable errors" - ); + }) + .collect(); + + let results = futures::future::join_all(chunk_futures).await; + for result in results { + if let Err(ForesterError::NotEligible) = result { + return Err(ForesterError::NotEligible); } } @@ -334,19 +329,15 @@ async fn prepare_batch_prerequisites( } } - let (recent_blockhash, last_valid_block_height, priority_fee) = { - let mut rpc = pool.get_connection().await.map_err(|e| { + let priority_fee = { + let rpc = pool.get_connection().await.map_err(|e| { error!( tree = %tree_id_str, - "Failed to get RPC for blockhash/priority fee: {:?}", + "Failed to get RPC for priority fee: {:?}", e ); ForesterError::RpcPool(e) })?; - let r_blockhash = rpc.get_latest_blockhash().await.map_err(|e| { - error!(tree = %tree_id_str, "Failed to get latest blockhash: {:?}", e); - ForesterError::Rpc(e) - })?; let forester_epoch_pda_pubkey = get_forester_epoch_pda_from_authority(derivation, transaction_builder.epoch()).0; let account_keys = vec![ @@ -355,13 +346,12 @@ async fn prepare_batch_prerequisites( tree_accounts.queue, tree_accounts.merkle_tree, ]; - let priority_fee = PriorityFeeConfig { + PriorityFeeConfig { compute_unit_price: config.build_transaction_batch_config.compute_unit_price, enable_priority_fees: config.build_transaction_batch_config.enable_priority_fees, } .resolve(&*rpc, account_keys) - .await?; - (r_blockhash.0, r_blockhash.1, priority_fee) + .await? 
}; let work_items: Vec = queue_item_data @@ -376,8 +366,6 @@ async fn prepare_batch_prerequisites( Ok(Some(PreparedBatchData { work_items, - recent_blockhash, - last_valid_block_height, priority_fee, timeout_deadline, })) diff --git a/forester/src/processor/v1/tx_builder.rs b/forester/src/processor/v1/tx_builder.rs index 7b1b92ad35..36fee1eafc 100644 --- a/forester/src/processor/v1/tx_builder.rs +++ b/forester/src/processor/v1/tx_builder.rs @@ -17,10 +17,7 @@ use crate::{ epoch_manager::WorkItem, processor::{ tx_cache::ProcessedHashCache, - v1::{ - config::{BuildTransactionBatchConfig, MULTI_NULLIFY_MAX_QUEUE_SIZE}, - helpers::fetch_proofs_and_create_instructions, - }, + v1::{config::BuildTransactionBatchConfig, helpers::fetch_proofs_and_create_instructions}, }, smart_transaction::{ create_smart_transaction, CreateSmartTransactionConfig, PreparedTransaction, @@ -127,9 +124,8 @@ impl TransactionBuilder for EpochManagerTransactions { .map(|&item| item.clone()) .collect::>(); - let use_multi_nullify = self.enable_v1_multi_nullify - && !self.address_lookup_tables.is_empty() - && config.queue_item_count <= MULTI_NULLIFY_MAX_QUEUE_SIZE; + let use_multi_nullify = + self.enable_v1_multi_nullify && !self.address_lookup_tables.is_empty(); let mut transactions = vec![]; let all_instructions = match fetch_proofs_and_create_instructions( payer.pubkey(), @@ -174,12 +170,22 @@ impl TransactionBuilder for EpochManagerTransactions { .iter() .map(|li| li.instruction.clone()) .collect(); + + // Dynamic CU based on number of nullifications in the instruction. 
+ let nullify_count: u32 = labeled_chunk.iter().map(|li| li.nullify_count).sum(); + let dynamic_cu_limit = Some(match nullify_count { + 1 => 300_000, + 2 => 600_000, + 3 => 900_000, + _ => 1_000_000, + }); + let prepared = create_smart_transaction(CreateSmartTransactionConfig { payer: payer.insecure_clone(), instructions, recent_blockhash: *recent_blockhash, compute_unit_price: priority_fee, - compute_unit_limit: config.compute_unit_limit, + compute_unit_limit: dynamic_cu_limit, last_valid_block_height, address_lookup_tables: self.address_lookup_tables.clone(), }) From fc3b88c40ab650340663aee4fdd70ee61ec771b0 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 20 Mar 2026 15:12:56 +0000 Subject: [PATCH 14/16] update dependencies in Cargo.lock --- Cargo.lock | 280 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 165 insertions(+), 115 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 070b02db66..51c3103bd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -520,7 +520,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", - "anstyle-parse", + "anstyle-parse 0.2.7", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstream" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" +dependencies = [ + "anstyle", + "anstyle-parse 1.0.0", "anstyle-query", "anstyle-wincon", "colorchoice", @@ -530,9 +545,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" [[package]] name = "anstyle-parse" @@ 
-543,6 +558,15 @@ dependencies = [ "utf8parse", ] +[[package]] +name = "anstyle-parse" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" +dependencies = [ + "utf8parse", +] + [[package]] name = "anstyle-query" version = "1.1.5" @@ -1109,11 +1133,12 @@ dependencies = [ [[package]] name = "borsh" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +checksum = "cfd1e3f8955a5d7de9fab72fc8373fade9fb8a703968cb200ae3dc6cf08e185a" dependencies = [ - "borsh-derive 1.6.0", + "borsh-derive 1.6.1", + "bytes", "cfg_aliases", ] @@ -1132,9 +1157,9 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" +checksum = "bfcfdc083699101d5a7965e49925975f2f55060f94f9a05e7187be95d530ca59" dependencies = [ "once_cell", "proc-macro-crate 3.5.0", @@ -1273,9 +1298,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.56" +version = "1.2.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" +checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423" dependencies = [ "find-msvc-tools", "jobserver", @@ -1353,9 +1378,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.60" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" +checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351" dependencies = [ "clap_builder", "clap_derive", @@ -1363,11 +1388,11 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.60" 
+version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" dependencies = [ - "anstream", + "anstream 1.0.0", "anstyle", "clap_lex", "strsim 0.11.1", @@ -1375,9 +1400,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.55" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" +checksum = "1110bd8a634a1ab8cb04345d8d878267d57c3cf1b38d91b71af6686408bbca6a" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1387,9 +1412,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" [[package]] name = "client-test" @@ -1440,9 +1465,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" [[package]] name = "combine" @@ -1806,8 +1831,18 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.21.3", + "darling_macro 0.21.3", +] + +[[package]] +name = "darling" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" +dependencies = [ + "darling_core 0.23.0", + "darling_macro 0.23.0", ] 
[[package]] @@ -1824,13 +1859,37 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "darling_core" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" +dependencies = [ + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.117", +] + [[package]] name = "darling_macro" version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "darling_core", + "darling_core 0.21.3", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "darling_macro" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" +dependencies = [ + "darling_core 0.23.0", "quote", "syn 2.0.117", ] @@ -1903,9 +1962,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", ] @@ -2230,7 +2289,7 @@ version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" dependencies = [ - "anstream", + "anstream 0.6.21", "anstyle", "env_filter", "jiff", @@ -2404,7 +2463,7 @@ dependencies = [ "base64 0.13.1", "borsh 0.10.4", "bs58", - "clap 4.5.60", + "clap 4.6.0", "create-address-test-program", "csdk-anchor-full-derived-test", "dashmap 6.1.0", @@ -3402,9 +3461,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" [[package]] name = "jiff" @@ -4016,7 +4075,7 @@ name = "light-instruction-decoder-derive" version = "0.4.0" dependencies = [ "bs58", - "darling", + "darling 0.21.3", "heck 0.5.0", "proc-macro2", "quote", @@ -4257,7 +4316,7 @@ name = "light-sdk-macros" version = "0.23.0" dependencies = [ "borsh 0.10.4", - "darling", + "darling 0.21.3", "light-account-checks", "light-compressed-account", "light-hasher", @@ -4965,9 +5024,9 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +checksum = "5d0bca838442ec211fa11de3a8b0e0e8f3a4522575b5c4c06ed722e005036f26" dependencies = [ "num_enum_derive", "rustversion", @@ -4975,9 +5034,9 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +checksum = "680998035259dcfcafe653688bf2aa6d3e2dc05e98be6ab46afb089dc84f1df8" dependencies = [ "proc-macro-crate 3.5.0", "proc-macro2", @@ -5002,9 +5061,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.3" +version = "1.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" [[package]] name = "once_cell_polyfill" @@ -5031,9 +5090,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.75" +version = "0.10.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +checksum = 
"951c002c75e16ea2c65b8c7e4d3d51d5530d8dfa7d060b4776828c88cfb18ecf" dependencies = [ "bitflags 2.11.0", "cfg-if", @@ -5072,9 +5131,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.111" +version = "0.9.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb" dependencies = [ "cc", "libc", @@ -5248,9 +5307,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pinocchio" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b971851087bc3699b001954ad02389d50c41405ece3548cbcafc88b3e20017a" +checksum = "b8afe4f39c0e25cc471b35b89963312791a5162d45a86578cbeaad9e5e7d1b3b" [[package]] name = "pinocchio-light-program-test" @@ -5405,9 +5464,9 @@ checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "portable-atomic-util" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" +checksum = "091397be61a01d4be58e7841595bd4bfedb15f1cd54977d79b8271e94ed799a3" dependencies = [ "portable-atomic", ] @@ -5467,7 +5526,7 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" dependencies = [ - "toml_edit 0.25.4+spec-1.1.0", + "toml_edit 0.25.5+spec-1.1.0", ] [[package]] @@ -6752,9 +6811,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.17.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381b283ce7bc6b476d903296fb59d0d36633652b633b27f64db4fb46dcbfc3b9" +checksum = "dd5414fad8e6907dbdd5bc441a50ae8d6e26151a03b1de04d89a5576de61d01f" dependencies = [ 
"serde_core", "serde_with_macros", @@ -6762,11 +6821,11 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.17.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d4e30573c8cb306ed6ab1dca8423eec9a463ea0e155f45399455e0368b27e0" +checksum = "d3db8978e608f1fe7357e211969fd9abdcae80bac1ba7a3369bb7eb6b404eb65" dependencies = [ - "darling", + "darling 0.23.0", "proc-macro2", "quote", "syn 2.0.117", @@ -7227,7 +7286,7 @@ version = "2.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68548570c38a021c724b5aa0112f45a54bdf7ff1b041a042848e034a95a96994" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "futures", "solana-account", "solana-banks-interface", @@ -7341,7 +7400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "718333bcd0a1a7aed6655aa66bef8d7fb047944922b2d3a18f49cbc13e73d004" dependencies = [ "borsh 0.10.4", - "borsh 1.6.0", + "borsh 1.6.1", ] [[package]] @@ -7656,7 +7715,7 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8432d2c4c22d0499aa06d62e4f7e333f81777b3d7c96050ae9e5cb71a8c3aee4" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "serde", "serde_derive", "solana-instruction", @@ -7965,7 +8024,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b96e9f0300fa287b545613f007dfe20043d7812bee255f418c1eb649c93b63" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "bytemuck", "bytemuck_derive", "five8", @@ -7994,7 +8053,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bab5682934bd1f65f8d2c16f21cb532526fcc1a09f796e2cacdb091eee5774ad" dependencies = [ "bincode", - "borsh 1.6.0", + "borsh 1.6.1", "getrandom 0.2.17", "js-sys", "num-traits", @@ -8411,7 +8470,7 @@ dependencies = [ "bincode", "blake3", "borsh 0.10.4", - "borsh 1.6.0", + "borsh 1.6.1", "bs58", "bytemuck", "console_error_panic_hook", @@ -8500,7 
+8559,7 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ee2e0217d642e2ea4bee237f37bd61bb02aec60da3647c48ff88f6556ade775" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-traits", "serde", "serde_derive", @@ -8584,7 +8643,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b62adb9c3261a052ca1f999398c388f1daf558a1b492f60a6d9e64857db4ff1" dependencies = [ "borsh 0.10.4", - "borsh 1.6.0", + "borsh 1.6.1", "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", @@ -9010,7 +9069,7 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baa3120b6cdaa270f39444f5093a90a7b03d296d362878f7a6991d6de3bbe496" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "libsecp256k1", "solana-define-syscall 2.3.0", "thiserror 2.0.18", @@ -9188,7 +9247,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5269e89fde216b4d7e1d1739cf5303f8398a1ff372a81232abbee80e554a838c" dependencies = [ "borsh 0.10.4", - "borsh 1.6.0", + "borsh 1.6.1", "num-traits", "serde", "serde_derive", @@ -9589,7 +9648,7 @@ dependencies = [ "agave-reserved-account-keys", "base64 0.22.1", "bincode", - "borsh 1.6.0", + "borsh 1.6.1", "bs58", "log", "serde", @@ -9870,7 +9929,7 @@ version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76fee7d65013667032d499adc3c895e286197a35a0d3a4643c80e7fd3e9969e3" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-derive 0.4.2", "num-traits", "solana-program", @@ -9886,7 +9945,7 @@ version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae179d4a26b3c7a20c839898e6aed84cb4477adf108a366c95532f058aea041b" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-derive 0.4.2", "num-traits", "solana-program", @@ -10008,7 +10067,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d994afaf86b779104b4a95ba9ca75b8ced3fdb17ee934e38cb69e72afbe17799" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "bytemuck", "bytemuck_derive", "num-derive 0.4.2", @@ -10446,7 +10505,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfb9c89dbc877abd735f05547dcf9e6e12c00c11d6d74d8817506cab4c99fdbb" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-derive 0.4.2", "num-traits", "solana-borsh", @@ -10467,7 +10526,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "304d6e06f0de0c13a621464b1fd5d4b1bebf60d15ca71a44d3839958e0da16ee" dependencies = [ - "borsh 1.6.0", + "borsh 1.6.1", "num-derive 0.4.2", "num-traits", "solana-borsh", @@ -10845,9 +10904,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.26.0" +version = "3.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand 2.3.0", "getrandom 0.4.2", @@ -10934,38 +10993,21 @@ dependencies = [ [[package]] name = "time" -<<<<<<< HEAD -version = "0.3.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" -======= version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" ->>>>>>> ebaaabf32 (fix: pin time <0.3.46 for Solana platform-tools Cargo 1.84 compatibility) dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -<<<<<<< HEAD -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" - -[[package]] -name = 
"time-macros" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" -======= version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" @@ -10975,7 +11017,6 @@ name = "time-macros" version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" ->>>>>>> ebaaabf32 (fix: pin time <0.3.46 for Solana platform-tools Cargo 1.84 compatibility) dependencies = [ "num-conv", "time-core", @@ -11012,9 +11053,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" dependencies = [ "tinyvec_macros", ] @@ -11192,17 +11233,17 @@ dependencies = [ [[package]] name = "toml" -version = "1.0.6+spec-1.1.0" +version = "1.0.7+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399b1124a3c9e16766831c6bba21e50192572cdd98706ea114f9502509686ffc" +checksum = "dd28d57d8a6f6e458bc0b8784f8fdcc4b99a437936056fa122cb234f18656a96" dependencies = [ "indexmap", "serde_core", "serde_spanned 1.0.4", - "toml_datetime 1.0.0+spec-1.1.0", + "toml_datetime 1.0.1+spec-1.1.0", "toml_parser", "toml_writer", - "winnow", + "winnow 1.0.0", ] [[package]] @@ -11216,9 +11257,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "1.0.0+spec-1.1.0" +version = "1.0.1+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" +checksum = "9b320e741db58cac564e26c607d3cc1fdc4a88fd36c879568c07856ed83ff3e9" dependencies = 
[ "serde_core", ] @@ -11234,28 +11275,28 @@ dependencies = [ "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", - "winnow", + "winnow 0.7.15", ] [[package]] name = "toml_edit" -version = "0.25.4+spec-1.1.0" +version = "0.25.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7193cbd0ce53dc966037f54351dbbcf0d5a642c7f0038c382ef9e677ce8c13f2" +checksum = "8ca1a40644a28bce036923f6a431df0b34236949d111cc07cb6dca830c9ef2e1" dependencies = [ "indexmap", - "toml_datetime 1.0.0+spec-1.1.0", + "toml_datetime 1.0.1+spec-1.1.0", "toml_parser", - "winnow", + "winnow 1.0.0", ] [[package]] name = "toml_parser" -version = "1.0.9+spec-1.1.0" +version = "1.0.10+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420" dependencies = [ - "winnow", + "winnow 1.0.0", ] [[package]] @@ -11266,9 +11307,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.6+spec-1.1.0" +version = "1.0.7+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" +checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d" [[package]] name = "tower" @@ -11401,9 +11442,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.22" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" dependencies = [ "matchers", "nu-ansi-term", @@ -11438,7 +11479,7 @@ dependencies = [ "serde_json", "target-triple", "termcolor", - "toml 1.0.6+spec-1.1.0", + "toml 1.0.7+spec-1.1.0", ] [[package]] @@ -12322,6 
+12363,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -12487,7 +12537,7 @@ dependencies = [ "base64 0.13.1", "bs58", "chrono", - "clap 4.5.60", + "clap 4.6.0", "dirs", "groth16-solana", "light-batched-merkle-tree", @@ -12555,18 +12605,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.42" +version = "0.8.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2578b716f8a7a858b7f02d5bd870c14bf4ddbbcf3a4c05414ba6503640505e3" +checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.42" +version = "0.8.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e6cc098ea4d3bd6246687de65af3f920c430e236bee1e3bf2e441463f08a02f" +checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89" dependencies = [ "proc-macro2", "quote", From 69f2d92c8d70ec933006a1f121a657ac988e041f Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 20 Mar 2026 16:49:53 +0000 Subject: [PATCH 15/16] feat: set default work item batch size to 50 in EpochManager configuration --- forester/src/epoch_manager.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index 1c6764f7bd..a253b4bad3 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -4730,6 +4730,7 @@ mod tests { lookup_table_address: None, min_queue_items: None, enable_v1_multi_nullify: false, + work_item_batch_size: 50, } } From 3354c88ab9f0370d2ae8011c61c40a1c59a3b1dc Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 20 Mar 2026 17:32:35 +0000 Subject: [PATCH 16/16] fix tests --- 
forester/tests/e2e_test.rs | 1 + forester/tests/legacy/test_utils.rs | 1 + forester/tests/priority_fee_test.rs | 1 + forester/tests/test_utils.rs | 1 + 4 files changed, 4 insertions(+) diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index 295c4eeb82..c11500f29a 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -303,6 +303,7 @@ async fn e2e_test() { }), min_queue_items: None, enable_v1_multi_nullify: false, + work_item_batch_size: 50, }; let test_mode = TestMode::from_env(); diff --git a/forester/tests/legacy/test_utils.rs b/forester/tests/legacy/test_utils.rs index 4642bd3af7..fdac97ba34 100644 --- a/forester/tests/legacy/test_utils.rs +++ b/forester/tests/legacy/test_utils.rs @@ -125,6 +125,7 @@ pub fn forester_config() -> ForesterConfig { lookup_table_address: None, min_queue_items: None, enable_v1_multi_nullify: false, + work_item_batch_size: 50, } } diff --git a/forester/tests/priority_fee_test.rs b/forester/tests/priority_fee_test.rs index bb8149b94b..5067342d2f 100644 --- a/forester/tests/priority_fee_test.rs +++ b/forester/tests/priority_fee_test.rs @@ -103,6 +103,7 @@ async fn test_priority_fee_request() { fallback_indexer_url: None, rpc_pool_failure_threshold: 3, rpc_pool_primary_probe_interval_secs: 30, + work_item_batch_size: None, }; let config = ForesterConfig::new_for_start(&args).expect("Failed to create config"); diff --git a/forester/tests/test_utils.rs b/forester/tests/test_utils.rs index 358c25c6fe..4ae9352482 100644 --- a/forester/tests/test_utils.rs +++ b/forester/tests/test_utils.rs @@ -138,6 +138,7 @@ pub fn forester_config() -> ForesterConfig { lookup_table_address: None, min_queue_items: None, enable_v1_multi_nullify: false, + work_item_batch_size: 50, } }