From 98ae9f49fcc6ecac5f4fc458ec6b900b9d7b221c Mon Sep 17 00:00:00 2001 From: ch4r10t33r Date: Fri, 21 Nov 2025 16:49:24 +0000 Subject: [PATCH 1/2] fix: Corrected sequencer to work like op-node --- README.md | 807 ++++++++++++++++++++------------ src/api/server.zig | 146 +++++- src/core/conditional_tx.zig | 120 +++++ src/core/root.zig | 2 +- src/core/storage_trie.zig | 179 ------- src/l1/batch_parser.zig | 88 ++++ src/l1/client.zig | 88 ++++ src/l1/derivation.zig | 207 ++++++++ src/l1/execute_tx_builder.zig | 11 +- src/l1/root.zig | 2 + src/l2/engine_api_client.zig | 133 +++++- src/l2/payload_attrs.zig | 100 ++++ src/l2/root.zig | 8 +- src/l2/state_provider.zig | 4 +- src/l2/sync.zig | 286 ----------- src/l2/tx_forwarder.zig | 125 ----- src/main.zig | 38 +- src/mempool/mempool.zig | 36 +- src/sequencer/block_state.zig | 157 +++++++ src/sequencer/reorg_handler.zig | 119 +++++ src/sequencer/root.zig | 2 + src/sequencer/sequencer.zig | 237 +++++++--- src/validation/ingress.zig | 6 +- src/validation/transaction.zig | 49 +- 24 files changed, 1957 insertions(+), 993 deletions(-) create mode 100644 src/core/conditional_tx.zig delete mode 100644 src/core/storage_trie.zig create mode 100644 src/l1/batch_parser.zig create mode 100644 src/l1/derivation.zig create mode 100644 src/l2/payload_attrs.zig delete mode 100644 src/l2/sync.zig delete mode 100644 src/l2/tx_forwarder.zig create mode 100644 src/sequencer/block_state.zig create mode 100644 src/sequencer/reorg_handler.zig diff --git a/README.md b/README.md index 633b402..4904566 100644 --- a/README.md +++ b/README.md @@ -20,56 +20,125 @@ The Native Sequencer is a high-performance transaction sequencer designed for La ## Features -- **API Layer**: JSON-RPC/HTTP endpoint for transaction submission -- **Ingress/Validation**: Fast transaction validation pipeline (signature, nonce, gas, balance checks) -- **Mempool**: In-memory priority queue with write-ahead-log persistence -- **Sequencing Engine**: MEV-aware transaction 
ordering with configurable policies -- **Batch Formation**: Efficient batch building with gas limit management -- **L1 Submission**: Submit batches to L1 via JSON-RPC -- **State Management**: Track nonces, balances, and receipts -- **Observability**: Metrics endpoint for monitoring -- **Operator Controls**: Emergency halt, rate limiting, configuration management -- **ExecuteTx Support**: Stateless transaction type (0x05) with automatic forwarding to L1 geth +- **op-node Style Architecture**: Delegates execution to L2 geth via Engine API +- **L1 Derivation**: Derives safe blocks from L1 batches +- **Witness Generation**: Generates witness data for stateless execution on L1 +- **ExecuteTx Support**: Uses ExecuteTx transactions (type 0x05) for L1 submission +- **State Queries**: Queries L2 geth for state instead of maintaining local state +- **MEV-Aware Ordering**: Transaction ordering with MEV support +- **Metrics & Observability**: Prometheus-style metrics endpoint ## Architecture -The sequencer follows a modular architecture: - -``` -┌─────────────┐ -│ API Server │ ← JSON-RPC requests from users/relayers -└──────┬──────┘ - │ -┌──────▼──────┐ -│ Ingress │ ← Validates transactions -└──────┬──────┘ - │ -┌──────▼──────┐ -│ Mempool │ ← Priority queue of pending transactions -└──────┬──────┘ - │ -┌──────▼────────┐ -│ Sequencer │ ← Builds blocks from mempool -└──────┬────────┘ - │ -┌──────▼──────────┐ -│ Batch Builder │ ← Groups blocks into batches -└──────┬───────────┘ - │ -┌──────▼──────┐ -│ L1 Client │ ← Submits batches to L1 -└─────────────┘ -``` - -**Core Components**: -- **API Server**: Handles JSON-RPC requests from users/relayers -- **Ingress**: Validates and accepts transactions into mempool -- **Mempool**: Maintains priority queue of pending transactions -- **Sequencer**: Builds blocks from mempool transactions -- **Batch Builder**: Groups blocks into batches for L1 submission -- **L1 Client**: Submits batches to L1 blockchain -- **State Manager**: Tracks 
account state (nonces, balances) -- **Metrics**: Exposes observability metrics +The native-sequencer follows an op-node style architecture, delegating execution to L2 geth while handling consensus, transaction ordering, and L1 derivation. It uses ExecuteTx transactions for stateless execution on L1. + +### High-Level Flow + +``` +┌──────────────────┐ +│ native-sequencer │ (Consensus Layer) +└────────┬─────────┘ + │ 1. Request block building + │ engine_forkchoiceUpdated(payload_attrs) + ▼ +┌──────────────────┐ +│ L2 geth │ (Execution Layer) +│ │ 2. Build block +│ │ 3. Execute transactions +│ │ 4. Return payload +└────────┬─────────┘ + │ 5. Generate Witness data + │ (State trie nodes, contract code, block headers) + ▼ +┌──────────────────┐ +│ native-sequencer │ +│ │ 6. Build witness from execution +│ │ 7. Update fork choice +│ │ 8. Submit ExecuteTx transaction to L1 +└────────┬─────────┘ + │ + ▼ +┌──────────────────┐ +│ L1 │ (Stateless execution via EXECUTE precompile) +│ │ (Witness provides state for execution) +└──────────────────┘ +``` + +### Module Responsibilities + +#### Core Modules (`src/core/`) +- **`transaction.zig`**: Transaction data structures, RLP encoding/decoding, signature recovery +- **`transaction_execute.zig`**: ExecuteTx transaction type (0x05) implementation for stateless execution +- **`block.zig`**: Block data structures and serialization +- **`batch.zig`**: Batch data structures for grouping blocks +- **`witness.zig`**: Witness data structures (state trie nodes, contract code, block headers) +- **`witness_builder.zig`**: Witness generation from execution traces +- **`rlp.zig`**: RLP (Recursive Length Prefix) encoding/decoding for Ethereum data +- **`types.zig`**: Common type definitions (Hash, Address, etc.) 
+- **`signature.zig`**: ECDSA signature verification and recovery + +#### API Layer (`src/api/`) +- **`server.zig`**: JSON-RPC HTTP server for transaction submission and queries +- **`jsonrpc.zig`**: JSON-RPC protocol implementation +- **`http.zig`**: HTTP request/response handling + +#### Validation (`src/validation/`) +- **`ingress.zig`**: Transaction ingress handler - accepts and validates transactions +- **`transaction.zig`**: Transaction validator - validates signatures, nonces, balances using L2 geth state + +#### Mempool (`src/mempool/`) +- **`mempool.zig`**: Priority queue of pending transactions with gas price ordering +- **`wal.zig`**: Write-ahead log for mempool persistence + +#### Sequencer (`src/sequencer/`) +- **`sequencer.zig`**: Main sequencer logic - requests payloads from L2 geth, manages block state +- **`block_state.zig`**: Tracks safe/unsafe/finalized/head blocks (op-node style) +- **`execution.zig`**: Local execution engine for witness generation (not used in main block building path) +- **`mev.zig`**: MEV-aware transaction ordering +- **`reorg_handler.zig`**: Chain reorganization detection and handling + +#### L1 Integration (`src/l1/`) +- **`client.zig`**: L1 JSON-RPC client for batch submission and block queries +- **`derivation.zig`**: L1 derivation pipeline - derives L2 blocks from L1 batches (op-node style) +- **`batch_parser.zig`**: Parses L2 batch data from L1 transaction calldata +- **`execute_tx_builder.zig`**: Builds ExecuteTx transactions with witness data for L1 submission + +#### L2 Integration (`src/l2/`) +- **`engine_api_client.zig`**: Engine API client for requesting payloads from L2 geth +- **`payload_attrs.zig`**: Payload attributes builder for `engine_forkchoiceUpdated` +- **`state_provider.zig`**: State provider for querying L2 geth state (nonces, balances, code) + +#### Batch Management (`src/batch/`) +- **`builder.zig`**: Groups blocks into batches for L1 submission with size/gas limits + +#### State Management 
(`src/state/`) +- **`manager.zig`**: State manager for tracking nonces, balances, receipts (used for witness generation) +- **`state_root.zig`**: State root computation utilities + +#### Persistence (`src/persistence/`) +- **`lmdb.zig`**: LMDB database bindings for persistent state storage +- **`witness_storage.zig`**: Witness data storage and retrieval + +#### Configuration (`src/config/`) +- **`config.zig`**: Configuration management from environment variables + +#### Metrics (`src/metrics/`) +- **`metrics.zig`**: Metrics collection (transaction counts, blocks created, batches submitted) +- **`server.zig`**: Metrics HTTP server for Prometheus-style metrics + +#### Crypto (`src/crypto/`) +- **`hash.zig`**: Cryptographic hashing (Keccak256) +- **`secp256k1_wrapper.zig`**: ECDSA signature operations via libsecp256k1 +- **`signature.zig`**: Signature verification and address recovery + +### Architecture Characteristics + +1. **op-node Style**: Delegates execution to L2 geth via Engine API (same as op-node) +2. **L1 Derivation**: Derives safe blocks from L1 batches (op-node style) +3. **Safe/Unsafe Blocks**: Tracks safe (L1-derived) and unsafe (sequencer-proposed) blocks +4. **Witness Generation**: Generates witness data for stateless execution on L1 +5. **ExecuteTx Submission**: Uses ExecuteTx transactions (type 0x05) for L1 submission +6. **State Queries**: Queries L2 geth for state (nonces, balances) instead of maintaining local state ## Building @@ -132,69 +201,6 @@ docker stop sequencer docker rm sequencer ``` -#### Dockerfile Details - -The Dockerfile uses a multi-stage build: - -1. **Builder Stage**: Installs Zig 0.14.1 and builds the sequencer -2. 
**Runtime Stage**: Creates a minimal runtime image with just the binary - -#### Runtime Environment Variables - -The container accepts the following environment variables (all have defaults set in the Dockerfile): - -**API Configuration**: -- `API_HOST`: API server host (default: `0.0.0.0`) -- `API_PORT`: API server port (default: `6197`) - -**L1 Configuration**: -- `L1_RPC_URL`: L1 JSON-RPC endpoint (default: `http://host.docker.internal:8545`) -- `L1_CHAIN_ID`: L1 chain ID (default: `1`) -- `SEQUENCER_KEY`: Sequencer private key in hex format - -**Sequencer Configuration**: -- `BATCH_SIZE_LIMIT`: Maximum blocks per batch (default: `1000`) -- `BLOCK_GAS_LIMIT`: Gas limit per block (default: `30000000`) -- `BATCH_INTERVAL_MS`: Batch interval in milliseconds (default: `2000`) - -**Mempool Configuration**: -- `MEMPOOL_MAX_SIZE`: Maximum mempool size (default: `100000`) -- `MEMPOOL_WAL_PATH`: Write-ahead log path (default: `/app/data/mempool.wal`) - -**State Configuration**: -- `STATE_DB_PATH`: State database path (default: `/app/data/state.db`) - -**Observability**: -- `METRICS_PORT`: Metrics server port (default: `9090`) -- `ENABLE_TRACING`: Enable tracing (default: `false`) - -**Operator Controls**: -- `EMERGENCY_HALT`: Emergency halt flag (default: `false`) -- `RATE_LIMIT_PER_SECOND`: Rate limit per second (default: `1000`) - -#### Ports - -The container exposes two ports: -- **6197**: JSON-RPC API endpoint -- **9090**: Metrics endpoint - -#### Volumes - -The container uses a named volume `sequencer-data` to persist: -- Mempool write-ahead log (`mempool.wal`) -- State database (`state.db`) - -To use a host directory instead: -```bash -docker run -v /path/to/data:/app/data ... 
-``` - -#### Security - -- The container runs as a non-root user (`sequencer`, UID 1000) -- Only necessary runtime dependencies are included -- Source code is not included in the final image - #### Troubleshooting **Container won't start**: @@ -224,158 +230,211 @@ Ensure the data directory has correct permissions: sudo chown -R 1000:1000 /path/to/data ``` -#### Building for Different Architectures -**Build for ARM64** (Apple Silicon, Raspberry Pi): -```bash -docker buildx build --platform linux/arm64 -t native-sequencer:arm64 . -``` +## Running -**Build for AMD64**: -```bash -docker buildx build --platform linux/amd64 -t native-sequencer:amd64 . -``` +### Basic Usage -**Build multi-architecture image**: ```bash -docker buildx build --platform linux/amd64,linux/arm64 -t native-sequencer:latest --push . +# Run with default configuration +zig build run + +# Or run the built executable directly +./zig-out/bin/sequencer ``` -#### Deployment Considerations +### Configuration -For deployments, consider: +Configure the sequencer using environment variables. Key variables: -1. **Use a specific tag** instead of `latest` -2. **Set resource limits** -3. **Use secrets** for sensitive data like `SEQUENCER_KEY` -4. **Enable health checks** (currently placeholder) -5. **Set up log aggregation** -6. 
**Configure monitoring** for metrics endpoint +- **API**: `API_HOST`, `API_PORT` (default: `0.0.0.0:6197`) +- **L1**: `L1_RPC_URL`, `L1_CHAIN_ID`, `SEQUENCER_KEY` +- **L2**: `L2_RPC_URL`, `L2_ENGINE_API_PORT` (default: `http://localhost:8545:8551`) +- **Mempool**: `MEMPOOL_MAX_SIZE`, `MEMPOOL_WAL_PATH` +- **Batch**: `BATCH_SIZE_LIMIT`, `BATCH_INTERVAL_MS`, `BLOCK_GAS_LIMIT` +- **State**: `STATE_DB_PATH` +- **Metrics**: `METRICS_PORT` (default: `9090`) +- **Controls**: `EMERGENCY_HALT`, `RATE_LIMIT_PER_SECOND` -**Example: Docker with systemd service**: -```bash -# Create systemd service file -cat > /etc/systemd/system/sequencer.service <:/ ``` -## Running +Default: `http://0.0.0.0:6197/` -### Basic Usage +### JSON-RPC Methods -```bash -# Run with default configuration -zig build run +#### `eth_sendRawTransaction` -# Or run the built executable directly -./zig-out/bin/sequencer -``` +Submit a raw transaction to the sequencer. Supports both legacy transactions and ExecuteTx transactions (type 0x05). 
-### Configuration +**Request**: +```json +{ + "jsonrpc": "2.0", + "method": "eth_sendRawTransaction", + "params": ["0x"], + "id": 1 +} +``` -Configure the sequencer using environment variables: +**Parameters**: +- `params[0]` (string, required): Hex-encoded raw transaction bytes (with or without `0x` prefix) -```bash -# API Server Configuration -export API_HOST=0.0.0.0 # API server host (default: 0.0.0.0) -export API_PORT=6197 # API server port (default: 6197) +**Response (Success)**: +```json +{ + "jsonrpc": "2.0", + "result": "0x", + "id": 1 +} +``` -# L1 Configuration -export L1_RPC_URL=http://localhost:8545 # L1 JSON-RPC endpoint -export L1_CHAIN_ID=1 # L1 chain ID (default: 1) -export SEQUENCER_KEY= # Sequencer private key (hex) +**Response (Error)**: +```json +{ + "jsonrpc": "2.0", + "error": { + "code": -32602, + "message": "Invalid transaction encoding" + }, + "id": 1 +} +``` -# Metrics Configuration -export METRICS_PORT=9090 # Metrics server port (default: 9090) +**Transaction Types Supported**: -# Mempool Configuration -export MEMPOOL_MAX_SIZE=10000 # Maximum mempool size -export MEMPOOL_WAL_PATH=./wal # Write-ahead log path +1. **Legacy Transactions** (Standard Ethereum transactions): + - Validated for signature, nonce, balance, and gas price + - Added to mempool if valid + - Sequenced into blocks by the sequencer + - Returns transaction hash -# Batch Configuration -export BATCH_SIZE_LIMIT=100 # Maximum blocks per batch -export BATCH_INTERVAL_MS=1000 # Batch interval in milliseconds -export BLOCK_GAS_LIMIT=30000000 # Gas limit per block -``` +2. 
**ExecuteTx Transactions (Type 0x05)**: + - Stateless transactions designed for L1 execution + - Minimally validated (signature check for deduplication) + - Automatically forwarded to L1 geth via `eth_sendRawTransaction` + - Not stored in sequencer's mempool + - Returns transaction hash (from L1 if forwarded, or computed locally) -### Example +**Error Codes**: +- `-32602` (InvalidParams): Missing or invalid transaction data +- `-32000` (ServerError): Transaction validation failed, processing failed, or forwarding failed +**Example (Legacy Transaction)**: ```bash -# Set configuration -export API_PORT=6197 -export L1_RPC_URL=https://eth-mainnet.g.alchemy.com/v2/YOUR_API_KEY -export L1_CHAIN_ID=1 -export SEQUENCER_KEY=0x1234567890abcdef... +curl -X POST http://localhost:6197/ \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "eth_sendRawTransaction", + "params": ["0xf86c808502540be400825208943535353535353535353535353535353535353535880de0b6b3a76400008025a028ef61340bd939bc2195fe537567866003e1a15d3c71ff63e1590620aa636276a067cbe9d8997f761aecb703304b3800ccf555c9f3dc9e3c0a9f6eccdf15726f5f"], + "id": 1 + }' +``` -# Run sequencer -zig build run +**Example (ExecuteTx Transaction)**: +```bash +curl -X POST http://localhost:6197/ \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "eth_sendRawTransaction", + "params": ["0x05"], + "id": 1 + }' ``` -## API Endpoints +--- -### JSON-RPC Methods +#### `eth_sendRawTransactionConditional` -The sequencer exposes standard Ethereum JSON-RPC endpoints: +Submit a raw transaction with conditional inclusion criteria (EIP-7796). The transaction will only be included in a block if the specified conditions are met. 
-#### `eth_sendRawTransaction` +**Request**: +```json +{ + "jsonrpc": "2.0", + "method": "eth_sendRawTransactionConditional", + "params": [ + "0x", + { + "blockNumberMin": "0x42", + "blockNumberMax": "0x100", + "timestampMin": "0x1234567890", + "timestampMax": "0x123456789a" + } + ], + "id": 1 +} +``` -Submit a raw transaction to the sequencer. Supports both legacy transactions and ExecuteTx transactions (type 0x05). +**Parameters**: +- `params[0]` (string, required): Hex-encoded raw transaction bytes (with or without `0x` prefix) +- `params[1]` (object, required): Conditional options object with the following optional fields: + - `blockNumberMin` (string, optional): Minimum block number (hex string, e.g., `"0x42"`) + - `blockNumberMax` (string, optional): Maximum block number (hex string, e.g., `"0x100"`) + - `timestampMin` (string or integer, optional): Minimum block timestamp (hex string or integer) + - `timestampMax` (string or integer, optional): Maximum block timestamp (hex string or integer) -**Request**: +**Response (Success)**: ```json { "jsonrpc": "2.0", - "method": "eth_sendRawTransaction", - "params": ["0x..."], + "result": "0x", "id": 1 } ``` -**Response**: +**Response (Error)**: ```json { "jsonrpc": "2.0", - "result": "0x1234...", + "error": { + "code": -32602, + "message": "Failed to parse conditional options" + }, "id": 1 } ``` -**Transaction Types Supported**: -- **Legacy Transactions**: Standard Ethereum transactions that are validated, stored in mempool, and sequenced into blocks -- **ExecuteTx Transactions (Type 0x05)**: Stateless transactions that are forwarded directly to L1 geth for execution. These transactions include: - - Pre-state hash and witness data for stateless execution - - Withdrawals data - - Blob versioned hashes - - Standard EIP-1559 fields (chainId, nonce, gas, value, etc.) 
- -**ExecuteTx Handling**: -- ExecuteTx transactions are stateless and designed to be executed by L1 geth -- The sequencer performs minimal validation (signature check for deduplication) -- ExecuteTx transactions are automatically forwarded to L1 geth via `eth_sendRawTransaction` -- Full validation and execution is handled by L1 geth -- ExecuteTx transactions are not stored in the sequencer's mempool +**Conditional Inclusion**: +- The transaction is added to the mempool but will only be included in a block when all specified conditions are satisfied +- Conditions are checked against the current block number and timestamp when building blocks +- If conditions are not met, the transaction remains in the mempool until conditions are satisfied or it expires +- ExecuteTx transactions (type 0x05) do not support conditional submission + +**Error Codes**: +- `-32602` (InvalidParams): Missing or invalid transaction data or options +- `-32000` (ServerError): Transaction validation failed, processing failed, or insertion failed + +**Example**: +```bash +curl -X POST http://localhost:6197/ \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "eth_sendRawTransactionConditional", + "params": [ + "0xf86c808502540be400825208943535353535353535353535353535353535353535880de0b6b3a76400008025a028ef61340bd939bc2195fe537567866003e1a15d3c71ff63e1590620aa636276a067cbe9d8997f761aecb703304b3800ccf555c9f3dc9e3c0a9f6eccdf15726f5f", + { + "blockNumberMin": "0x100", + "blockNumberMax": "0x200" + } + ], + "id": 1 + }' +``` + +--- #### `eth_getTransactionReceipt` @@ -386,11 +445,51 @@ Get transaction receipt by transaction hash. 
{ "jsonrpc": "2.0", "method": "eth_getTransactionReceipt", - "params": ["0x..."], + "params": ["0x"], + "id": 1 +} +``` + +**Parameters**: +- `params[0]` (string, required): Transaction hash (hex-encoded, with or without `0x` prefix) + +**Response (Success)**: +```json +{ + "jsonrpc": "2.0", + "result": null, + "id": 1 +} +``` + +**Note**: Currently returns `null` as receipt storage is not yet implemented. Future versions will return full receipt data including block number, gas used, logs, etc. + +**Response (Error)**: +```json +{ + "jsonrpc": "2.0", + "error": { + "code": -32602, + "message": "Missing params" + }, "id": 1 } ``` +**Example**: +```bash +curl -X POST http://localhost:6197/ \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "eth_getTransactionReceipt", + "params": ["0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"], + "id": 1 + }' +``` + +--- + #### `eth_blockNumber` Get the current block number. @@ -405,28 +504,193 @@ Get the current block number. } ``` -### L1 Client Features +**Parameters**: None (empty array) + +**Response (Success)**: +```json +{ + "jsonrpc": "2.0", + "result": "0x0", + "id": 1 +} +``` + +**Note**: Currently returns `0x0` as block number tracking is not yet fully implemented. Future versions will return the actual current block number. + +**Example**: +```bash +curl -X POST http://localhost:6197/ \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "eth_blockNumber", + "params": [], + "id": 1 + }' +``` + +--- + +#### `debug_generateWitness` (Debug Endpoint) + +Generate witness data for a single transaction. This is a debug/testing endpoint for witness generation. 
+ +**Request**: +```json +{ + "jsonrpc": "2.0", + "method": "debug_generateWitness", + "params": ["0x"], + "id": 1 +} +``` + +**Parameters**: +- `params[0]` (string, required): Hex-encoded raw transaction bytes (with or without `0x` prefix) + +**Response (Success)**: +```json +{ + "jsonrpc": "2.0", + "result": { + "witness": "0x", + "witnessSize": 1234 + }, + "id": 1 +} +``` + +**Response Fields**: +- `witness` (string): Hex-encoded RLP-encoded witness data containing state trie nodes, contract code, and block headers +- `witnessSize` (integer): Size of the witness in bytes + +**Response (Error)**: +```json +{ + "jsonrpc": "2.0", + "error": { + "code": -32000, + "message": "Sequencer not available for witness generation" + }, + "id": 1 +} +``` + +**Error Codes**: +- `-32602` (InvalidParams): Missing or invalid transaction data +- `-32000` (ServerError): Sequencer not available, transaction execution failed, or witness generation failed + +**Note**: This endpoint executes the transaction locally to track state access. In production, witness generation happens during ExecuteTx building. 
-The sequencer includes a full-featured HTTP client for L1 communication: +**Example**: +```bash +curl -X POST http://localhost:6197/ \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "debug_generateWitness", + "params": ["0xf86c808502540be400825208943535353535353535353535353535353535353535880de0b6b3a76400008025a028ef61340bd939bc2195fe537567866003e1a15d3c71ff63e1590620aa636276a067cbe9d8997f761aecb703304b3800ccf555c9f3dc9e3c0a9f6eccdf15726f5f"], + "id": 1 + }' +``` -- **Standard Transaction Submission**: `eth_sendRawTransaction` for submitting batches to L1 -- **Conditional Transaction Submission**: `eth_sendRawTransactionConditional` (EIP-7796) for conditional batch submission with block number constraints -- **Transaction Receipt Polling**: `eth_getTransactionReceipt` for tracking batch inclusion -- **Block Number Queries**: `eth_blockNumber` for L1 state synchronization -- **Automatic Confirmation Waiting**: `waitForInclusion()` method for polling transaction confirmations +--- -#### Conditional Transaction Submission +#### `debug_generateBlockWitness` (Debug Endpoint) -The sequencer supports EIP-7796 conditional transaction submission, allowing batches to be submitted with preconditions: +Generate witness data for a block. This is a debug/testing endpoint for block witness generation. -```zig -const options = l1.Client.ConditionalOptions{ - .block_number_max = 1000000, // Only include if block <= 1000000 -}; -const tx_hash = try l1_client.submitBatchConditional(batch, options); +**Request**: +```json +{ + "jsonrpc": "2.0", + "method": "debug_generateBlockWitness", + "params": ["latest"] +} ``` -This feature enables more efficient batch submission by allowing the sequencer to specify maximum block numbers for inclusion, reducing the need for extensive simulations. 
+or + +```json +{ + "jsonrpc": "2.0", + "method": "debug_generateBlockWitness", + "params": ["0x42"] +} +``` + +**Parameters**: +- `params[0]` (string or integer, required): Block number as hex string (`"0x42"`), decimal integer (`42`), or `"latest"` for the latest block + +**Response (Success)**: +```json +{ + "jsonrpc": "2.0", + "result": { + "witness": "0x", + "witnessSize": 5678, + "blockNumber": 42, + "transactionCount": 5 + }, + "id": 1 +} +``` + +**Response Fields**: +- `witness` (string): Hex-encoded RLP-encoded witness data containing state trie nodes, contract code, and block headers for all transactions in the block +- `witnessSize` (integer): Size of the witness in bytes +- `blockNumber` (integer): Block number for which witness was generated +- `transactionCount` (integer): Number of transactions in the block + +**Response (Error)**: +```json +{ + "jsonrpc": "2.0", + "error": { + "code": -32000, + "message": "Failed to build block" + }, + "id": 1 +} +``` + +**Error Codes**: +- `-32602` (InvalidParams): Missing or invalid block number +- `-32000` (ServerError): Sequencer not available or failed to build block + +**Note**: This endpoint builds a new block from the mempool and generates witness for it. In production, witness generation happens during ExecuteTx batch building. 
+ +**Example**: +```bash +curl -X POST http://localhost:6197/ \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "debug_generateBlockWitness", + "params": ["latest"], + "id": 1 + }' +``` + +--- + +### JSON-RPC Error Codes + +The sequencer uses standard JSON-RPC 2.0 error codes: + +| Code | Name | Description | +|------|------|-------------| +| `-32700` | ParseError | Invalid JSON was received | +| `-32600` | InvalidRequest | The JSON sent is not a valid Request object | +| `-32601` | MethodNotFound | The method does not exist | +| `-32602` | InvalidParams | Invalid method parameters | +| `-32603` | InternalError | Internal JSON-RPC error | +| `-32000` | ServerError | Server error (validation failures, processing errors) | + +### HTTP Status Codes + +- `200 OK`: Request processed successfully (even if JSON-RPC returns an error) +- `404 Not Found`: Invalid HTTP method or path (must be POST `/`) ### Metrics @@ -441,26 +705,13 @@ Available metrics: ## Development Status -This is an experimental implementation. The following features are implemented or in progress: - -- ✅ Core sequencer architecture +This is experimental software. 
Core features are implemented: +- ✅ op-node style architecture (L1 derivation, Engine API, safe/unsafe blocks) - ✅ Transaction validation and mempool -- ✅ Batch formation and L1 submission -- ✅ Basic state management -- ✅ RLP encoding/decoding (complete implementation with tests) -- ✅ Docker support -- ✅ HTTP server implementation (Zig 0.14.1 networking APIs) -- ✅ HTTP client for L1 communication (JSON-RPC support) -- ✅ Conditional transaction submission (EIP-7796 support) -- ✅ ExecuteTx transaction type support (type 0x05) -- ✅ ExecuteTx JSON serialization/deserialization -- ✅ ExecuteTx forwarding to L1 geth -- ⏳ Complete ECDSA signature verification and recovery (basic implementation) -- ⏳ Full transaction execution engine -- ✅ LMDB integration for persistence -- ⏳ WebSocket/gRPC support for real-time subscriptions -- ⏳ Complete MEV bundle detection -- ⏳ Proper error handling and retry logic +- ✅ Batch formation and L1 submission via ExecuteTx +- ✅ LMDB persistence +- ✅ Witness generation for stateless execution +- ⏳ L1 subscription monitoring (WebSocket support) - ⏳ Comprehensive testing ## Linting @@ -489,63 +740,19 @@ zig build lint-fix ### CI/CD Integration -A comprehensive GitHub Actions workflow (`.github/workflows/ci.yml`) automatically runs on: -- Push to main/master/develop branches -- Pull requests targeting main/master/develop branches - -The CI pipeline includes: - -#### Linting & Testing -- **Code formatting validation** (`zig fmt --check`) -- **AST syntax checks** for key modules (`zig ast-check`) -- **Unit tests** (`zig build test`) - -#### Multi-Platform Builds -- **Linux (x86_64)**: Builds and verifies binary for Linux -- **macOS (x86_64)**: Builds and verifies binary for Intel Macs -- **macOS (ARM64)**: Builds and verifies binary for Apple Silicon -- **Windows (x86_64)**: Builds and verifies binary for Windows - -#### Docker Build Validation -- **Multi-architecture Docker builds**: Tests Docker image builds for `linux/amd64` (ARM64 builds are 
currently disabled in CI) -- **Image verification**: Validates Docker image structure and metadata -- **Runtime testing**: Verifies that the Docker image can start and contains the expected binary - -The workflow will fail if: -- Code is not properly formatted -- AST checks reveal syntax or type errors -- Unit tests fail -- Build fails on any platform -- Docker image build or validation fails +GitHub Actions workflow (`.github/workflows/ci.yml`) runs linting, testing, and multi-platform builds (Linux, macOS, Windows) on push/PR. ## Technical Details -### Networking Implementation - -The sequencer uses Zig 0.14.1's standard library networking APIs: - -- **HTTP Server**: Built on `std.net.Server` and `std.net.Stream` for accepting JSON-RPC connections -- **HTTP Client**: Uses `std.net.tcpConnectToAddress` for L1 RPC communication -- **Connection Handling**: Thread-based concurrent request handling with proper resource cleanup -- **RLP Transaction Parsing**: Full RLP decoding support for transaction deserialization - ### ExecuteTx Transaction Support -The sequencer supports ExecuteTx transactions (type 0x05), a stateless transaction type designed for execution by L1 geth nodes. 
Key features: - -- **Transaction Type**: EIP-2718 typed transaction with type prefix `0x05` -- **RLP Encoding/Decoding**: Full RLP serialization support matching go-ethereum's ExecuteTx format -- **JSON Serialization**: Complete JSON-RPC serialization/deserialization for ExecuteTx fields -- **Signature Recovery**: ECDSA signature verification and sender address recovery -- **L1 Forwarding**: Automatic forwarding to L1 geth via `eth_sendRawTransaction` -- **Minimal Validation**: Only signature check for deduplication (full validation done by L1 geth) - -ExecuteTx transactions include: -- Standard EIP-1559 fields (chainId, nonce, gas, gasTipCap, gasFeeCap, value, to, data) -- ExecuteTx-specific fields (preStateHash, witness, withdrawals, coinbase, blockNumber, timestamp, blobHashes) -- Signature components (v, r, s) +The sequencer supports ExecuteTx transactions (type 0x05) for stateless execution on L1. ExecuteTx transactions are: +- **Stateless**: Designed for execution by L1 geth nodes +- **Forwarded to L1**: Automatically forwarded to L1 geth via `eth_sendRawTransaction` +- **Minimally Validated**: Only signature check for deduplication (full validation by L1 geth) +- **Not Mempooled**: Not stored in sequencer's mempool -See `src/core/transaction_execute.zig` for the complete implementation. +ExecuteTx includes pre-state hash, witness data, withdrawals, and standard EIP-1559 fields. See `src/core/transaction_execute.zig` for implementation details. 
## Known Issues & Workarounds

// --- src/api/server.zig: method dispatch (adds the conditional endpoint) ---
// Handle method
if (std.mem.eql(u8, request.method, "eth_sendRawTransaction")) {
    return try self.handleSendRawTransaction(&request);
} else if (std.mem.eql(u8, request.method, "eth_sendRawTransactionConditional")) {
    return try self.handleSendRawTransactionConditional(&request);
} else if (std.mem.eql(u8, request.method, "eth_getTransactionReceipt")) {
    return try self.handleGetTransactionReceipt(&request);
} else if (std.mem.eql(u8, request.method, "eth_blockNumber")) {

/// Handle eth_sendRawTransactionConditional (EIP-7796).
/// Params: [rawTxHex, conditionalOptionsObject]. Decodes and validates the
/// transaction, parses the conditional options, and inserts the pair into
/// the mempool. Returns the 0x-prefixed transaction hash on success.
fn handleSendRawTransactionConditional(self: *JsonRpcServer, request: *const jsonrpc.JsonRpcRequest) ![]u8 {
    self.metrics.incrementTransactionsReceived();

    // Params must be a JSON array of at least [txHex, options].
    const params = request.params orelse {
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Missing params");
    };

    const params_array = switch (params) {
        .array => |arr| arr,
        else => {
            return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Invalid params - expected array");
        },
    };

    if (params_array.items.len < 2) {
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Missing transaction data or options");
    }

    // First param: hex-encoded raw transaction.
    const tx_hex = switch (params_array.items[0]) {
        .string => |s| s,
        else => {
            return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Invalid transaction format");
        },
    };

    // Second param: conditional options object.
    const options_json = switch (params_array.items[1]) {
        .object => |obj| std.json.Value{ .object = obj },
        else => {
            return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Invalid options format - expected object");
        },
    };

    const conditional_options = core.conditional_tx.ConditionalOptions.fromJson(self.allocator, options_json) catch {
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Failed to parse conditional options");
    };

    // Decode the hex payload (optional 0x prefix). An odd number of hex
    // digits is malformed; reject it instead of silently dropping the
    // trailing nibble (fix: previous loop truncated odd-length input).
    // A bad hex digit is client error, not an internal one (fix: previous
    // code propagated the parse error instead of answering InvalidParams).
    const hex_start: usize = if (std.mem.startsWith(u8, tx_hex, "0x")) 2 else 0;
    const hex_data = tx_hex[hex_start..];
    if (hex_data.len % 2 != 0) {
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Invalid transaction encoding");
    }

    var tx_bytes = std.ArrayList(u8).init(self.allocator);
    defer tx_bytes.deinit();

    var i: usize = 0;
    while (i < hex_data.len) : (i += 2) {
        const byte = std.fmt.parseInt(u8, hex_data[i .. i + 2], 16) catch {
            return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Invalid transaction encoding");
        };
        try tx_bytes.append(byte);
    }

    const tx_bytes_slice = try tx_bytes.toOwnedSlice();
    defer self.allocator.free(tx_bytes_slice);

    // ExecuteTx (EIP-2718 type 0x05) does not support conditional submission.
    if (tx_bytes_slice.len > 0 and tx_bytes_slice[0] == core.transaction.ExecuteTxType) {
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "ExecuteTx transactions do not support conditional submission");
    }

    // Parse legacy transaction.
    const tx = core.transaction.Transaction.fromRaw(self.allocator, tx_bytes_slice) catch {
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.InvalidParams, "Invalid transaction encoding");
    };
    // NOTE(review): acceptTransaction below is documented as possibly
    // consuming the transaction; if it can retain tx.data, this unconditional
    // free is a use-after-free and the clone+insert below double-inserts.
    // Confirm acceptTransaction's ownership contract.
    defer self.allocator.free(tx.data);

    // Validate transaction first.
    const result = self.ingress_handler.acceptTransaction(tx) catch {
        self.metrics.incrementTransactionsRejected();
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.ServerError, "Transaction processing failed");
    };

    if (result != .valid) {
        self.metrics.incrementTransactionsRejected();
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.ServerError, "Transaction validation failed");
    }

    // Clone transaction data for the mempool entry (the mempool owns the clone).
    const tx_data_clone = try self.allocator.dupe(u8, tx.data);
    const tx_clone = core.transaction.Transaction{
        .nonce = tx.nonce,
        .gas_price = tx.gas_price,
        .gas_limit = tx.gas_limit,
        .to = tx.to,
        .value = tx.value,
        .data = tx_data_clone,
        .v = tx.v,
        .r = tx.r,
        .s = tx.s,
    };

    // Insert transaction with conditional options into mempool.
    const inserted = self.ingress_handler.mempool.insertWithConditions(tx_clone, conditional_options) catch {
        self.allocator.free(tx_data_clone);
        self.metrics.incrementTransactionsRejected();
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.ServerError, "Failed to insert conditional transaction");
    };

    if (!inserted) {
        self.allocator.free(tx_data_clone);
        self.metrics.incrementTransactionsRejected();
        return try jsonrpc.JsonRpcResponse.errorResponse(self.allocator, request.id, jsonrpc.ErrorCode.ServerError, "Transaction already in mempool");
    }

    self.metrics.incrementTransactionsAccepted();

    // Respond with the 0x-prefixed transaction hash.
    const tx_hash = try tx.hash(self.allocator);
    const hash_bytes = core.types.hashToBytes(tx_hash);
    var hex_buf: [66]u8 = undefined; // "0x" + 64 hex chars
    hex_buf[0] = '0';
    hex_buf[1] = 'x';
    const hex_digits = "0123456789abcdef";
    for (hash_bytes, 0..) |b, j| {
        hex_buf[2 + j * 2] = hex_digits[b >> 4];
        hex_buf[2 + j * 2 + 1] = hex_digits[b & 0xf];
    }
    const hash_hex = try self.allocator.dupe(u8, &hex_buf);
    defer self.allocator.free(hash_hex);

    const result_value = std.json.Value{ .string = hash_hex };
    return try jsonrpc.JsonRpcResponse.success(self.allocator, request.id, result_value);
}

/// Stub: receipt lookup is not yet backed by a state manager.
fn handleGetTransactionReceipt(self: *JsonRpcServer, request: *const jsonrpc.JsonRpcRequest) ![]u8 {
    // In production, fetch receipt from state manager
    const result_value = std.json.Value{ .null = {} };
    // … (function continues unchanged past this diff hunk)

// --- src/api/server.zig: debug witness endpoints build a local engine ---
// In the op-node architecture execution is delegated to L2 geth, but the
// debug endpoints still execute locally to track state access for witnesses.
var witness_builder = core.witness_builder.WitnessBuilder.init(self.allocator);
defer witness_builder.deinit();

var exec_engine = @import("../sequencer/execution.zig").ExecutionEngine.init(self.allocator, sequencer.state_manager);
exec_engine.witness_builder = &witness_builder;

// Execute transaction (this will track state access)

// --- second witness hunk: block-level witness generation ---
var witness_builder = core.witness_builder.WitnessBuilder.init(self.allocator);
defer witness_builder.deinit();

var exec_engine = @import("../sequencer/execution.zig").ExecutionEngine.init(self.allocator, sequencer.state_manager);
exec_engine.witness_builder = &witness_builder;

// Generate witness for the block
try witness_builder.generateBlockWitness(&block, &exec_engine);

// Build witness
_ = try witness_builder.buildWitness(sequencer.state_manager, null);

// --- new file: src/core/conditional_tx.zig ---
@@
// Conditional transaction options (EIP-7796)
// Supports conditional transaction submission with block number and timestamp constraints

const std = @import("std");
const types = @import("types.zig");
const transaction = @import("transaction.zig"); // moved up from the bottom of the file

/// Conditional options for transaction submission.
pub const ConditionalOptions = struct {
    block_number_min: ?u64 = null,
    block_number_max: ?u64 = null,
    timestamp_min: ?u64 = null,
    timestamp_max: ?u64 = null,
    // known_accounts: ?std.json.Value = null, // Future: support account state checks

    pub fn deinit(self: *ConditionalOptions) void {
        _ = self;
        // No cleanup needed for now
    }

    /// Check if conditions are satisfied given current block state.
    /// Absent constraints always pass; present ones are inclusive bounds.
    pub fn checkConditions(self: *const ConditionalOptions, current_block_number: u64, current_timestamp: u64) bool {
        if (self.block_number_min) |min| {
            if (current_block_number < min) return false;
        }
        if (self.block_number_max) |max| {
            if (current_block_number > max) return false;
        }
        if (self.timestamp_min) |min| {
            if (current_timestamp < min) return false;
        }
        if (self.timestamp_max) |max| {
            if (current_timestamp > max) return false;
        }
        return true;
    }

    /// Parse one JSON quantity: a hex string (with or without "0x") or a
    /// non-negative JSON integer. Returns error.UnsupportedType for any other
    /// JSON type (callers map it to a field-specific error); parseInt errors
    /// (InvalidCharacter/Overflow) propagate unchanged.
    fn parseQuantity(value: std.json.Value) !u64 {
        switch (value) {
            .string => |s| {
                const hex_start: usize = if (std.mem.startsWith(u8, s, "0x")) 2 else 0;
                return try std.fmt.parseInt(u64, s[hex_start..], 16);
            },
            .integer => |i| {
                // Fix: a negative integer previously hit an unchecked
                // @intCast (safety panic in safe builds); reject it instead.
                if (i < 0) return error.UnsupportedType;
                return @intCast(i);
            },
            else => return error.UnsupportedType,
        }
    }

    /// Parse conditional options from JSON-RPC params.
    /// All four fields uniformly accept hex strings or integers
    /// (fix: blockNumberMin/Max previously rejected JSON integers while the
    /// timestamp fields accepted them).
    pub fn fromJson(allocator: std.mem.Allocator, options_json: std.json.Value) !ConditionalOptions {
        _ = allocator; // reserved for future fields (e.g. knownAccounts)

        const options_obj = switch (options_json) {
            .object => |obj| obj,
            else => return error.InvalidOptionsFormat,
        };

        var options = ConditionalOptions{};

        if (options_obj.get("blockNumberMin")) |value| {
            options.block_number_min = parseQuantity(value) catch |err| switch (err) {
                error.UnsupportedType => return error.InvalidBlockNumberFormat,
                else => |e| return e,
            };
        }
        if (options_obj.get("blockNumberMax")) |value| {
            options.block_number_max = parseQuantity(value) catch |err| switch (err) {
                error.UnsupportedType => return error.InvalidBlockNumberFormat,
                else => |e| return e,
            };
        }
        if (options_obj.get("timestampMin")) |value| {
            options.timestamp_min = parseQuantity(value) catch |err| switch (err) {
                error.UnsupportedType => return error.InvalidTimestampFormat,
                else => |e| return e,
            };
        }
        if (options_obj.get("timestampMax")) |value| {
            options.timestamp_max = parseQuantity(value) catch |err| switch (err) {
                error.UnsupportedType => return error.InvalidTimestampFormat,
                else => |e| return e,
            };
        }

        return options;
    }
};

/// Conditional transaction entry (transaction + conditions).
pub const ConditionalTx = struct {
    tx: transaction.Transaction,
    conditions: ConditionalOptions,

    pub fn deinit(self: *ConditionalTx, allocator: std.mem.Allocator) void {
        self.tx.deinit(allocator);
        self.conditions.deinit();
    }
};

// --- src/core/root.zig: export swap (storage_trie removed in this patch) ---
pub const rlp = @import("rlp.zig");
pub const witness = @import("witness.zig");
pub const witness_builder = @import("witness_builder.zig");
pub const trie = @import("trie.zig");
pub const conditional_tx =
@import("conditional_tx.zig"); diff --git a/src/core/storage_trie.zig b/src/core/storage_trie.zig deleted file mode 100644 index 4699866..0000000 --- a/src/core/storage_trie.zig +++ /dev/null @@ -1,179 +0,0 @@ -// Storage trie support for contract storage slots -// Each contract has its own storage trie for tracking storage values - -const std = @import("std"); -const types = @import("types.zig"); -const trie_module = @import("trie.zig"); -const crypto_hash = @import("../crypto/hash.zig"); - -pub const StorageTrie = struct { - allocator: std.mem.Allocator, - contract_address: types.Address, - trie: trie_module.MerklePatriciaTrie, - /// Track accessed storage slots for witness generation - accessed_slots: std.ArrayList(u256), - - const Self = @This(); - - pub fn init(allocator: std.mem.Allocator, contract_address: types.Address) Self { - return .{ - .allocator = allocator, - .contract_address = contract_address, - .trie = trie_module.MerklePatriciaTrie.init(allocator), - .accessed_slots = std.ArrayList(u256).init(allocator), - }; - } - - pub fn deinit(self: *Self) void { - self.trie.deinit(); - self.accessed_slots.deinit(); - } - - /// Put storage value at slot - pub fn put(self: *Self, slot: u256, value: u256) !void { - // Track slot access - try self.trackSlotAccess(slot); - - // Convert slot to bytes - const slot_bytes = types.u256ToBytes(slot); - const value_bytes = types.u256ToBytes(value); - - // Store in trie - try self.trie.put(&slot_bytes, &value_bytes); - } - - /// Get storage value at slot - pub fn get(self: *Self, slot: u256) !?u256 { - // Track slot access - try self.trackSlotAccess(slot); - - // Convert slot to bytes - const slot_bytes = types.u256ToBytes(slot); - - // Get from trie - const value_bytes = self.trie.get(&slot_bytes) catch return null; - if (value_bytes == null) { - return null; - } - - // Convert bytes to u256 - if (value_bytes.?.len != 32) { - return error.InvalidStorageValue; - } - - var value_bytes_array: [32]u8 = undefined; - 
@memcpy(&value_bytes_array, value_bytes.?[0..32]); - - return types.u256FromBytes(value_bytes_array); - } - - /// Compute storage root hash - pub fn rootHash(self: *Self) !types.Hash { - return try self.trie.rootHash(); - } - - /// Generate storage trie nodes for witness - pub fn generateWitnessNodes(self: *Self, slots: []const u256) !std.ArrayList(*trie_module.Node) { - var result = std.ArrayList(*trie_module.Node).init(self.allocator); - - for (slots) |slot| { - const slot_bytes = types.u256ToBytes(slot); - const nodes = try self.trie.generateWitnessNodes(&slot_bytes); - defer nodes.deinit(); - - for (nodes.items) |node| { - try result.append(node); - } - } - - return result; - } - - /// Handle storage slot access tracking - pub fn trackSlotAccess(self: *Self, slot: u256) !void { - // Avoid duplicates - for (self.accessed_slots.items) |accessed_slot| { - if (accessed_slot == slot) { - return; - } - } - try self.accessed_slots.append(slot); - } - - /// Get all accessed storage slots - pub fn getAccessedSlots(self: *const Self) []const u256 { - return self.accessed_slots.items; - } - - /// Clear accessed slots tracking - pub fn clearAccessedSlots(self: *Self) void { - self.accessed_slots.clearAndFree(); - } -}; - -/// Storage trie manager for multiple contracts -pub const StorageTrieManager = struct { - allocator: std.mem.Allocator, - /// Map contract address to its storage trie - storage_tries: std.HashMap(types.Address, *StorageTrie, std.hash_map.AutoContext(types.Address), std.hash_map.default_max_load_percentage), - - const Self = @This(); - - pub fn init(allocator: std.mem.Allocator) Self { - return .{ - .allocator = allocator, - .storage_tries = std.HashMap(types.Address, *StorageTrie, std.hash_map.AutoContext(types.Address), std.hash_map.default_max_load_percentage).init(allocator), - }; - } - - pub fn deinit(self: *Self) void { - var iter = self.storage_tries.iterator(); - while (iter.next()) |entry| { - entry.value_ptr.*.deinit(); - 
self.allocator.destroy(entry.value_ptr.*); - } - self.storage_tries.deinit(); - } - - /// Get or create storage trie for contract - pub fn getStorageTrie(self: *Self, contract_address: types.Address) !*StorageTrie { - if (self.storage_tries.get(contract_address)) |trie| { - return trie; - } - - // Create new storage trie - const trie = try self.allocator.create(StorageTrie); - trie.* = StorageTrie.init(self.allocator, contract_address); - try self.storage_tries.put(contract_address, trie); - - return trie; - } - - /// Put storage value for contract - pub fn put(self: *Self, contract_address: types.Address, slot: u256, value: u256) !void { - const trie = try self.getStorageTrie(contract_address); - try trie.put(slot, value); - } - - /// Get storage value for contract - pub fn get(self: *Self, contract_address: types.Address, slot: u256) !?u256 { - const trie = self.storage_tries.get(contract_address) orelse return null; - return try trie.get(slot); - } - - /// Get storage root for contract - pub fn getStorageRoot(self: *Self, contract_address: types.Address) !?types.Hash { - const trie = self.storage_tries.get(contract_address) orelse return null; - return try trie.rootHash(); - } - - /// Generate witness nodes for all accessed storage slots - pub fn generateWitnessNodes(self: *Self, contract_address: types.Address) !std.ArrayList(*trie_module.Node) { - const trie = self.storage_tries.get(contract_address) orelse { - return std.ArrayList(*trie_module.Node).init(self.allocator); - }; - - const accessed_slots = trie.getAccessedSlots(); - return try trie.generateWitnessNodes(accessed_slots); - } -}; diff --git a/src/l1/batch_parser.zig b/src/l1/batch_parser.zig new file mode 100644 index 0000000..aed2857 --- /dev/null +++ b/src/l1/batch_parser.zig @@ -0,0 +1,88 @@ +// L1 Batch Parser +// Parses L2 batch data from L1 transaction calldata (op-node style) + +const std = @import("std"); +const core = @import("../core/root.zig"); +const types = @import("../core/types.zig"); 
const rlp = @import("../core/rlp.zig");

/// Parsed batch payload: the L2 transactions carried by one L1 batch tx.
/// Owns `l2_transactions` and each transaction's `data`.
pub const BatchData = struct {
    l2_transactions: []core.transaction.Transaction,
    allocator: std.mem.Allocator,

    pub fn deinit(self: *BatchData) void {
        for (self.l2_transactions) |*tx| {
            self.allocator.free(tx.data);
        }
        self.allocator.free(self.l2_transactions);
    }
};

/// Parses L2 batches out of L1 transaction calldata (op-node style).
pub const BatchParser = struct {
    allocator: std.mem.Allocator,
    batch_inbox_address: ?types.Address,

    const Self = @This();

    pub fn init(allocator: std.mem.Allocator, batch_inbox_address: ?types.Address) Self {
        return .{
            .allocator = allocator,
            .batch_inbox_address = batch_inbox_address,
        };
    }

    /// Parse batch from L1 transaction calldata.
    /// In Optimism, batches are submitted to a batch inbox contract and the
    /// calldata contains RLP-encoded L2 transactions.
    /// TODO: decode the RLP batch structure, extract and validate the L2
    /// transactions. Currently always returns an empty batch.
    pub fn parseBatchFromL1Tx(self: *Self, l1_tx_calldata: []const u8) !?BatchData {
        _ = l1_tx_calldata; // Unused until real parsing lands

        return BatchData{
            .l2_transactions = try self.allocator.alloc(core.transaction.Transaction, 0),
            .allocator = self.allocator,
        };
    }

    /// Deep-copy the batch's L2 transactions. Caller owns the returned slice
    /// and each transaction's `data`.
    pub fn extractL2Transactions(self: *Self, batch: BatchData) ![]core.transaction.Transaction {
        const txs = try self.allocator.alloc(core.transaction.Transaction, batch.l2_transactions.len);
        var copied: usize = 0;
        // Fix: the previous version leaked the earlier copies when a later
        // dupe failed mid-loop; free all partial work on any error.
        errdefer {
            for (txs[0..copied]) |tx| self.allocator.free(tx.data);
            self.allocator.free(txs);
        }
        for (batch.l2_transactions, 0..) |tx, i| {
            const data_copy = try self.allocator.dupe(u8, tx.data);
            txs[i] = core.transaction.Transaction{
                .nonce = tx.nonce,
                .gas_price = tx.gas_price,
                .gas_limit = tx.gas_limit,
                .to = tx.to,
                .value = tx.value,
                .data = data_copy,
                .v = tx.v,
                .r = tx.r,
                .s = tx.s,
            };
            copied = i + 1;
        }
        return txs;
    }

    /// Minimal structural validation: every transaction must carry gas.
    pub fn validateBatch(self: *Self, batch: BatchData) bool {
        _ = self;
        for (batch.l2_transactions) |tx| {
            if (tx.gas_limit == 0) {
                return false;
            }
        }
        return true;
    }
};

// --- src/l1/client.zig additions ---

pub const L1BlockTx = struct {
    to: ?[]const u8,
    data: []const u8,
};

pub const L1Block = struct {
    number: u64,
    hash: core.types.Hash,
    parent_hash: core.types.Hash,
    timestamp: u64,
    transactions: []L1BlockTx,
};

/// Get an L1 block by number (used by the derivation pipeline for batch
/// parsing). Caller owns the returned `transactions` (`to` and `data` slices).
/// NOTE(review): parseFromSliceLeaky allocates into self.allocator and those
/// allocations are never freed — this leaks on every call; consider an
/// ArenaAllocator scoped to this function. TODO confirm.
pub fn getBlockByNumber(self: *Client, block_number: u64, include_txs: bool) !L1Block {
    const block_hex = try std.fmt.allocPrint(self.allocator, "0x{x}", .{block_number});
    defer self.allocator.free(block_hex);

    var params = std.json.Array.init(self.allocator);
    defer params.deinit();
    try params.append(std.json.Value{ .string = block_hex });
    try params.append(std.json.Value{ .bool = include_txs });

    const result = try self.callRpc("eth_getBlockByNumber", std.json.Value{ .array = params });
    defer self.allocator.free(result);

    // Parse the JSON-RPC envelope into a minimal typed view.
    const parsed = try std.json.parseFromSliceLeaky(
        struct {
            result: struct {
                number: []const u8,
                hash: []const u8,
                parentHash: []const u8,
                timestamp: []const u8,
                transactions: []struct { to: ?[]const u8, input: []const u8 },
            },
        },
        self.allocator,
        result,
        .{},
    );

    const hex_start: usize = if (std.mem.startsWith(u8, parsed.result.number, "0x")) 2 else 0;
    const number = try std.fmt.parseInt(u64, parsed.result.number[hex_start..], 16);

    const hash = try self.hexToHash(parsed.result.hash);
    const parent_hash = try self.hexToHash(parsed.result.parentHash);
    const timestamp_hex_start: usize = if (std.mem.startsWith(u8, parsed.result.timestamp, "0x")) 2 else 0;
    const timestamp = try std.fmt.parseInt(u64, parsed.result.timestamp[timestamp_hex_start..], 16);

    // Deep-copy transactions out of the (leaky) parse result.
    const txs = try self.allocator.alloc(L1BlockTx, parsed.result.transactions.len);
    for (parsed.result.transactions, 0..) |tx, i| {
        txs[i] = L1BlockTx{
            .to = if (tx.to) |to| try self.allocator.dupe(u8, to) else null,
            .data = try self.allocator.dupe(u8, tx.input),
        };
    }

    return L1Block{
        .number = number,
        .hash = hash,
        .parent_hash = parent_hash,
        .timestamp = timestamp,
        .transactions = txs,
    };
}

/// Decode a 32-byte hash from a 64-digit hex string (optional 0x prefix).
fn hexToHash(self: *Client, hex: []const u8) !core.types.Hash {
    _ = self;
    const hex_start: usize = if (std.mem.startsWith(u8, hex, "0x")) 2 else 0;
    const hex_data = hex[hex_start..];

    if (hex_data.len != 64) {
        return error.InvalidHashLength;
    }

    var result: [32]u8 = undefined;
    // Parse two hex digits per byte (simpler than the previous per-nibble
    // parse-and-shift construction).
    for (&result, 0..) |*byte, i| {
        byte.* = try std.fmt.parseInt(u8, hex_data[i * 2 .. i * 2 + 2], 16);
    }

    return core.types.hashFromBytes(result);
}

// (getTransactionReceipt continues unchanged in src/l1/client.zig)

// --- new file: src/l1/derivation.zig ---
// L1 Derivation Pipeline
// Derives L2 blocks from L1 blocks and receipts (op-node style)

const std = @import("std");
const core = @import("../core/root.zig");
const types = @import("../core/types.zig");
const client = @import("client.zig");
const batch_parser = @import("batch_parser.zig");

/// L2 data derived from a single L1 block.
pub const DerivedL2Data = struct {
    block_number: u64,
    transactions: []core.transaction.Transaction,
    timestamp: u64,
    l1_block_number: u64,
    l1_block_hash: types.Hash,
};

/// Tracks derivation progress: the last L1 block consumed and the safe L2 head.
pub const L1Derivation = struct {
    allocator: std.mem.Allocator,
    l1_client: *client.Client,
    batch_parser: batch_parser.BatchParser,
    current_l1_block: u64 = 0,
    safe_l2_block: u64 = 0,
    last_derived_l1_block: u64 = 0,

    const Self = @This();

    pub fn init(allocator: std.mem.Allocator, l1_cli: *client.Client) Self {
        return .{
            .allocator = allocator,
            .l1_client = l1_cli,
            .batch_parser = batch_parser.BatchParser.init(allocator, null), // TODO: Set batch inbox address
            .current_l1_block = 0,
            .safe_l2_block = 0,
            .last_derived_l1_block = 0,
        };
    }

    pub fn deinit(self: *Self) void {
        _ = self;
        // No cleanup needed
    }

    /// Fetch and cache the latest L1 block number.
    pub fn getCurrentL1Block(self: *Self) !u64 {
        const block_number = try self.l1_client.getLatestBlockNumber();
        self.current_l1_block = block_number;
        return block_number;
    }

    /// Derive L2 data from an L1 block: parses batches from the L1
    /// transactions and aggregates the contained L2 transactions.
    pub fn
deriveL2FromL1(self: *Self, l1_block_number: u64) !?DerivedL2Data {
        // Skip blocks we have already consumed.
        if (l1_block_number <= self.last_derived_l1_block) {
            return null;
        }

        // Fetch the L1 block with full transactions; fall back to a
        // simplified (empty) derivation when the fetch fails.
        const l1_block = self.l1_client.getBlockByNumber(l1_block_number, true) catch |err| {
            std.log.warn("[L1Derivation] Failed to get L1 block #{d}: {any}, using simplified derivation", .{ l1_block_number, err });
            return self.deriveL2FromL1Simplified(l1_block_number);
        };
        defer {
            for (l1_block.transactions) |tx| {
                if (tx.to) |to| self.allocator.free(to);
                self.allocator.free(tx.data);
            }
            self.allocator.free(l1_block.transactions);
        }

        // Derive L2 block number (1:1 with L1 for now, can be adjusted).
        const l2_block_number = l1_block_number;

        // Aggregate L2 transactions from every batch found in this L1 block.
        var all_l2_txs = std.ArrayList(core.transaction.Transaction).init(self.allocator);
        defer {
            // toOwnedSlice() empties the list on the success path, so this
            // cleanup only frees anything on error exits.
            for (all_l2_txs.items) |*tx| {
                self.allocator.free(tx.data);
            }
            all_l2_txs.deinit();
        }

        // TODO: filter to the batch inbox address once configured;
        // for now every L1 tx is treated as a potential batch.
        for (l1_block.transactions) |l1_tx| {
            const batch_data_opt = self.batch_parser.parseBatchFromL1Tx(l1_tx.data) catch |err| {
                std.log.debug("[L1Derivation] Failed to parse batch from L1 tx: {any}", .{err});
                continue;
            };

            const bd = batch_data_opt orelse continue;
            var batch_data = bd; // Mutable copy so deinit can run
            defer batch_data.deinit();

            // Extract L2 transactions from batch.
            const l2_txs = self.batch_parser.extractL2Transactions(batch_data) catch continue;
            defer {
                for (l2_txs) |*tx| {
                    self.allocator.free(tx.data);
                }
                self.allocator.free(l2_txs);
            }

            for (l2_txs) |tx| {
                const data_copy = try self.allocator.dupe(u8, tx.data);
                // Fix: free the fresh copy if append fails (previously leaked).
                errdefer self.allocator.free(data_copy);
                try all_l2_txs.append(core.transaction.Transaction{
                    .nonce = tx.nonce,
                    .gas_price = tx.gas_price,
                    .gas_limit = tx.gas_limit,
                    .to = tx.to,
                    .value = tx.value,
                    .data = data_copy,
                    .v = tx.v,
                    .r = tx.r,
                    .s = tx.s,
                });
            }
        }

        self.last_derived_l1_block = l1_block_number;

        return DerivedL2Data{
            .block_number = l2_block_number,
            .transactions = try all_l2_txs.toOwnedSlice(),
            .timestamp = l1_block.timestamp,
            .l1_block_number = l1_block_number,
            .l1_block_hash = l1_block.hash,
        };
    }

    /// Fallback derivation when the L1 block cannot be fetched: an empty L2
    /// block stamped with the current wall-clock time and a zero L1 hash.
    fn deriveL2FromL1Simplified(self: *Self, l1_block_number: u64) !?DerivedL2Data {
        const l2_block_number = l1_block_number;
        const transactions = try self.allocator.alloc(core.transaction.Transaction, 0);
        const l1_hash = types.hashFromBytes([_]u8{0} ** 32);

        self.last_derived_l1_block = l1_block_number;

        return DerivedL2Data{
            .block_number = l2_block_number,
            .transactions = transactions,
            .timestamp = @intCast(std.time.timestamp()),
            .l1_block_number = l1_block_number,
            .l1_block_hash = l1_hash,
        };
    }

    /// Handle an L1 reorg by rewinding derivation state to the common ancestor.
    pub fn handleReorg(self: *Self, new_l1_block: u64, common_ancestor: u64) !void {
        std.log.warn("[L1Derivation] L1 reorg detected: new_block={d}, common_ancestor={d}", .{ new_l1_block, common_ancestor });

        if (common_ancestor < self.last_derived_l1_block) {
            self.last_derived_l1_block = common_ancestor;
            self.current_l1_block = common_ancestor;

            // Rewind the safe L2 head as well when it is past the ancestor.
            if (common_ancestor < self.safe_l2_block) {
                self.safe_l2_block = common_ancestor;
            }
        }
    }

    /// Advance the safe block (monotonically non-decreasing).
    pub fn updateSafeBlock(self: *Self, l2_block_number: u64) void {
        if (l2_block_number > self.safe_l2_block) {
            self.safe_l2_block = l2_block_number;
        }
    }

    /// Get safe L2 block number.
    pub fn getSafeBlock(self: *Self) u64 {
        return self.safe_l2_block;
    }

    // NOTE(review): duplicates Client.hexToHash and is currently unused
    // inside derivation.zig — consider sharing a single implementation.
    fn hexToHash(self: *Self, hex: []const u8) !types.Hash {
        _ = self;
        const hex_start: usize = if (std.mem.startsWith(u8, hex, "0x")) 2 else 0;
        const hex_data = hex[hex_start..];

        if (hex_data.len != 64) {
            return error.InvalidHashLength;
        }

        var result: [32]u8 = undefined;
        for (&result, 0..) |*byte, i| {
            byte.* = try std.fmt.parseInt(u8, hex_data[i * 2 .. i * 2 + 2], 16);
        }

        return types.hashFromBytes(result);
    }
};

// --- src/l1/execute_tx_builder.zig: build() changes ---
// `sequencer` parameter is now unused (execution is delegated to L2 geth);
// a local ExecutionEngine is created purely for witness generation:
//     sequencer: *const sequencer_module.Sequencer, // Unused in op-node architecture
if (batch.blocks.len > 0) {
    var temp_exec_engine =
@import("../sequencer/execution.zig").ExecutionEngine.init(self.allocator, @constCast(state_manager)); temp_exec_engine.witness_builder = &witness_builder; for (batch.blocks) |block| { try witness_builder.generateBlockWitness(&block, &temp_exec_engine); } } + _ = sequencer; // Unused in op-node architecture // Build witness _ = try witness_builder.buildWitness(state_manager, null); diff --git a/src/l1/root.zig b/src/l1/root.zig index 6b35684..c8600fa 100644 --- a/src/l1/root.zig +++ b/src/l1/root.zig @@ -1,2 +1,4 @@ pub const Client = @import("client.zig").Client; pub const execute_tx_builder = @import("execute_tx_builder.zig"); +pub const derivation = @import("derivation.zig"); +pub const batch_parser = @import("batch_parser.zig"); diff --git a/src/l2/engine_api_client.zig b/src/l2/engine_api_client.zig index 58b64a6..f202913 100644 --- a/src/l2/engine_api_client.zig +++ b/src/l2/engine_api_client.zig @@ -132,8 +132,31 @@ pub const EngineApiClient = struct { return status; } + /// Execution payload returned from engine_getPayload + pub const ExecutionPayload = struct { + block_hash: types.Hash, + block_number: u64, + parent_hash: types.Hash, + timestamp: u64, + fee_recipient: types.Address, + state_root: types.Hash, + receipts_root: types.Hash, + logs_bloom: [256]u8, + prev_randao: types.Hash, + gas_limit: u64, + gas_used: u64, + transactions: [][]const u8, // RLP-encoded transactions + + pub fn deinit(self: *ExecutionPayload, allocator: std.mem.Allocator) void { + for (self.transactions) |tx| { + allocator.free(tx); + } + allocator.free(self.transactions); + } + }; + /// Get payload from L2 geth via engine_getPayload - pub fn getPayload(self: *Self, payload_id: []const u8) !struct { block_hash: types.Hash, block_number: u64 } { + pub fn getPayload(self: *Self, payload_id: []const u8) !ExecutionPayload { std.log.info("[EngineAPI] Calling engine_getPayload with payload_id: {s}", .{payload_id}); var params = std.json.Array.init(self.allocator); @@ -148,12 +171,22 @@ 
pub const EngineApiClient = struct { const result = try self.callRpc("engine_getPayload", std.json.Value{ .array = params }); defer self.allocator.free(result); - // Parse response + // Parse full execution payload response const parsed = try std.json.parseFromSliceLeaky( struct { result: struct { blockHash: []const u8, blockNumber: []const u8, + parentHash: []const u8, + timestamp: []const u8, + feeRecipient: []const u8, + stateRoot: []const u8, + receiptsRoot: []const u8, + logsBloom: []const u8, + prevRandao: []const u8, + gasLimit: []const u8, + gasUsed: []const u8, + transactions: [][]const u8, }, }, self.allocator, @@ -162,25 +195,72 @@ pub const EngineApiClient = struct { ); const block_hash = try self.hexToHash(parsed.result.blockHash); + const parent_hash = try self.hexToHash(parsed.result.parentHash); + const state_root = try self.hexToHash(parsed.result.stateRoot); + const receipts_root = try self.hexToHash(parsed.result.receiptsRoot); + const prev_randao = try self.hexToHash(parsed.result.prevRandao); + const fee_recipient = try self.hexToAddress(parsed.result.feeRecipient); + const hex_start: usize = if (std.mem.startsWith(u8, parsed.result.blockNumber, "0x")) 2 else 0; const block_number = try std.fmt.parseInt(u64, parsed.result.blockNumber[hex_start..], 16); + const timestamp_start: usize = if (std.mem.startsWith(u8, parsed.result.timestamp, "0x")) 2 else 0; + const timestamp = try std.fmt.parseInt(u64, parsed.result.timestamp[timestamp_start..], 16); + + const gas_limit_start: usize = if (std.mem.startsWith(u8, parsed.result.gasLimit, "0x")) 2 else 0; + const gas_limit = try std.fmt.parseInt(u64, parsed.result.gasLimit[gas_limit_start..], 16); + + const gas_used_start: usize = if (std.mem.startsWith(u8, parsed.result.gasUsed, "0x")) 2 else 0; + const gas_used = try std.fmt.parseInt(u64, parsed.result.gasUsed[gas_used_start..], 16); + + // Parse logs bloom + var logs_bloom: [256]u8 = undefined; + const bloom_hex_start: usize = if 
(std.mem.startsWith(u8, parsed.result.logsBloom, "0x")) 2 else 0; + const bloom_hex = parsed.result.logsBloom[bloom_hex_start..]; + if (bloom_hex.len != 512) { + return error.InvalidLogsBloomLength; + } + var i: usize = 0; + while (i < 256) : (i += 1) { + const high = try std.fmt.parseInt(u8, bloom_hex[i * 2 .. i * 2 + 1], 16); + const low = try std.fmt.parseInt(u8, bloom_hex[i * 2 + 1 .. i * 2 + 2], 16); + logs_bloom[i] = (high << 4) | low; + } + + // Clone transactions + const transactions = try self.allocator.alloc([]const u8, parsed.result.transactions.len); + for (parsed.result.transactions, 0..) |tx_hex, idx| { + transactions[idx] = try self.allocator.dupe(u8, tx_hex); + } + const block_hash_hex = try self.hashToHex(block_hash); defer self.allocator.free(block_hash_hex); - std.log.info("[EngineAPI] engine_getPayload response: block_hash={s}, block_number={d}", .{ + std.log.info("[EngineAPI] engine_getPayload response: block_hash={s}, block_number={d}, {d} txs", .{ block_hash_hex, block_number, + transactions.len, }); - return .{ + return ExecutionPayload{ .block_hash = block_hash, .block_number = block_number, + .parent_hash = parent_hash, + .timestamp = timestamp, + .fee_recipient = fee_recipient, + .state_root = state_root, + .receipts_root = receipts_root, + .logs_bloom = logs_bloom, + .prev_randao = prev_randao, + .gas_limit = gas_limit, + .gas_used = gas_used, + .transactions = transactions, }; } - /// Update fork choice state via engine_forkchoiceUpdated - pub fn forkchoiceUpdated(self: *Self, head_block_hash: types.Hash, safe_block_hash: types.Hash, finalized_block_hash: types.Hash) !ForkChoiceUpdateResponse { + /// Update fork choice state via engine_forkchoiceUpdated (with optional payload attributes) + /// If payload_attrs is provided, requests L2 geth to build a payload + pub fn forkchoiceUpdated(self: *Self, head_block_hash: types.Hash, safe_block_hash: types.Hash, finalized_block_hash: types.Hash, payload_attrs: ?std.json.ObjectMap) 
!ForkChoiceUpdateResponse { const head_hex = try self.hashToHex(head_block_hash); defer self.allocator.free(head_hex); const safe_hex = try self.hashToHex(safe_block_hash); @@ -204,16 +284,18 @@ pub const EngineApiClient = struct { try fork_choice_obj.put("safeBlockHash", std.json.Value{ .string = safe_hex }); try fork_choice_obj.put("finalizedBlockHash", std.json.Value{ .string = finalized_hex }); - // Payload attributes (optional) - var payload_attrs_obj = std.json.ObjectMap.init(self.allocator); - defer payload_attrs_obj.deinit(); - const timestamp = @as(u64, @intCast(std.time.timestamp())); - try payload_attrs_obj.put("timestamp", std.json.Value{ .string = try std.fmt.allocPrint(self.allocator, "0x{x}", .{timestamp}) }); - try payload_attrs_obj.put("prevRandao", std.json.Value{ .string = try self.hashToHex(types.hashFromBytes([_]u8{0} ** 32)) }); - try payload_attrs_obj.put("suggestedFeeRecipient", std.json.Value{ .string = try self.addressToHex(types.addressFromBytes([_]u8{0} ** 20)) }); - - try params.append(std.json.Value{ .object = fork_choice_obj }); - try params.append(std.json.Value{ .object = payload_attrs_obj }); + // Payload attributes (optional - if provided, L2 geth will build payload) + if (payload_attrs) |attrs| { + try params.append(std.json.Value{ .object = fork_choice_obj }); + try params.append(std.json.Value{ .object = attrs }); + } else { + // No payload attributes - just update fork choice + try params.append(std.json.Value{ .object = fork_choice_obj }); + // Add empty payload attributes + var empty_attrs = std.json.ObjectMap.init(self.allocator); + defer empty_attrs.deinit(); + try params.append(std.json.Value{ .object = empty_attrs }); + } const result = try self.callRpc("engine_forkchoiceUpdated", std.json.Value{ .array = params }); defer self.allocator.free(result); @@ -498,4 +580,23 @@ pub const EngineApiClient = struct { return types.hashFromBytes(result); } + + fn hexToAddress(_: *Self, hex: []const u8) !types.Address { + const 
hex_start: usize = if (std.mem.startsWith(u8, hex, "0x")) 2 else 0; + const hex_data = hex[hex_start..]; + + if (hex_data.len != 40) { + return error.InvalidAddressLength; + } + + var result: [20]u8 = undefined; + var i: usize = 0; + while (i < 20) : (i += 1) { + const high = try std.fmt.parseInt(u8, hex_data[i * 2 .. i * 2 + 1], 16); + const low = try std.fmt.parseInt(u8, hex_data[i * 2 + 1 .. i * 2 + 2], 16); + result[i] = (high << 4) | low; + } + + return types.addressFromBytes(result); + } }; diff --git a/src/l2/payload_attrs.zig b/src/l2/payload_attrs.zig new file mode 100644 index 0000000..5b5814f --- /dev/null +++ b/src/l2/payload_attrs.zig @@ -0,0 +1,100 @@ +// Payload Attributes Builder +// Builds payload attributes for engine_forkchoiceUpdated (op-node style) + +const std = @import("std"); +const core = @import("../core/root.zig"); +const types = @import("../core/types.zig"); +const transaction = @import("../core/transaction.zig"); + +pub const PayloadAttributes = struct { + timestamp: u64, + prev_randao: types.Hash, + suggested_fee_recipient: types.Address, + transactions: []transaction.Transaction, + withdrawals: []void = &[_]void{}, // Empty for now + + pub fn deinit(self: *PayloadAttributes, allocator: std.mem.Allocator) void { + allocator.free(self.transactions); + } +}; + +pub const PayloadAttributesBuilder = struct { + allocator: std.mem.Allocator, + + const Self = @This(); + + pub fn init(allocator: std.mem.Allocator) Self { + return .{ + .allocator = allocator, + }; + } + + /// Build payload attributes from transactions + pub fn build(self: *Self, transactions: []transaction.Transaction, fee_recipient: types.Address) !PayloadAttributes { + // Clone transactions + const txs = try self.allocator.dupe(transaction.Transaction, transactions); + + return PayloadAttributes{ + .timestamp = @intCast(std.time.timestamp()), + .prev_randao = types.hashFromBytes([_]u8{0} ** 32), // TODO: Get from L1 + .suggested_fee_recipient = fee_recipient, + .transactions 
= txs, + .withdrawals = &[_]void{}, + }; + } + + /// Convert to JSON-RPC format for engine_forkchoiceUpdated + pub fn toJsonRpc(self: *Self, attrs: PayloadAttributes) !std.json.ObjectMap { + var obj = std.json.ObjectMap.init(self.allocator); + + // Timestamp + const timestamp_hex = try std.fmt.allocPrint(self.allocator, "0x{x}", .{attrs.timestamp}); + defer self.allocator.free(timestamp_hex); + try obj.put("timestamp", std.json.Value{ .string = timestamp_hex }); + + // PrevRandao + const prev_randao_bytes = types.hashToBytes(attrs.prev_randao); + const prev_randao_hex = try self.hashToHex(&prev_randao_bytes); + defer self.allocator.free(prev_randao_hex); + try obj.put("prevRandao", std.json.Value{ .string = prev_randao_hex }); + + // SuggestedFeeRecipient + const fee_recipient_bytes = types.addressToBytes(attrs.suggested_fee_recipient); + const fee_recipient_hex = try self.hashToHex(&fee_recipient_bytes); + defer self.allocator.free(fee_recipient_hex); + try obj.put("suggestedFeeRecipient", std.json.Value{ .string = fee_recipient_hex }); + + // Transactions (serialize to RLP hex) + var tx_array = std.json.Array.init(self.allocator); + defer tx_array.deinit(); + for (attrs.transactions) |tx| { + const tx_rlp = try tx.serialize(self.allocator); + defer self.allocator.free(tx_rlp); + const tx_hex = try self.hashToHex(tx_rlp); + defer self.allocator.free(tx_hex); + try tx_array.append(std.json.Value{ .string = tx_hex }); + } + try obj.put("transactions", std.json.Value{ .array = tx_array }); + + // Withdrawals (empty for now) + var withdrawals_array = std.json.Array.init(self.allocator); + defer withdrawals_array.deinit(); + try obj.put("withdrawals", std.json.Value{ .array = withdrawals_array }); + + return obj; + } + + fn hashToHex(self: *Self, bytes: []const u8) ![]u8 { + var result = std.ArrayList(u8).init(self.allocator); + defer result.deinit(); + + try result.appendSlice("0x"); + const hex_digits = "0123456789abcdef"; + for (bytes) |byte| { + try 
result.append(hex_digits[byte >> 4]); + try result.append(hex_digits[byte & 0xf]); + } + + return result.toOwnedSlice(); + } +}; diff --git a/src/l2/root.zig b/src/l2/root.zig index aa6b518..b817014 100644 --- a/src/l2/root.zig +++ b/src/l2/root.zig @@ -1,9 +1,9 @@ pub const engine_api_client = @import("engine_api_client.zig"); pub const state_provider = @import("state_provider.zig"); -pub const tx_forwarder = @import("tx_forwarder.zig"); -pub const sync = @import("sync.zig"); +pub const payload_attrs = @import("payload_attrs.zig"); pub const EngineApiClient = engine_api_client.EngineApiClient; pub const StateProvider = state_provider.StateProvider; -pub const TransactionForwarder = tx_forwarder.TransactionForwarder; -pub const BlockSync = sync.BlockSync; +pub const PayloadAttributesBuilder = payload_attrs.PayloadAttributesBuilder; +pub const PayloadAttributes = payload_attrs.PayloadAttributes; +pub const ExecutionPayload = engine_api_client.ExecutionPayload; diff --git a/src/l2/state_provider.zig b/src/l2/state_provider.zig index a4308ac..a1cdd2b 100644 --- a/src/l2/state_provider.zig +++ b/src/l2/state_provider.zig @@ -153,7 +153,7 @@ pub const StateProvider = struct { var params = std.json.Array.init(self.allocator); defer params.deinit(); try params.append(std.json.Value{ .string = block_hex }); - try params.append(std.json.Value{ .boolean = include_txs }); + try params.append(std.json.Value{ .bool = include_txs }); const result = try self.callRpc("eth_getBlockByNumber", std.json.Value{ .array = params }); defer self.allocator.free(result); @@ -317,7 +317,7 @@ pub const StateProvider = struct { .string => |s| { return try std.fmt.allocPrint(self.allocator, "\"{s}\"", .{s}); }, - .boolean => |b| { + .bool => |b| { return try std.fmt.allocPrint(self.allocator, "{}", .{b}); }, else => return error.UnsupportedJsonType, diff --git a/src/l2/sync.zig b/src/l2/sync.zig deleted file mode 100644 index e48577c..0000000 --- a/src/l2/sync.zig +++ /dev/null @@ -1,286 +0,0 @@ 
-// Block synchronization for syncing sequencer blocks to L2 geth - -const std = @import("std"); -const core = @import("../core/root.zig"); -const types = @import("../core/types.zig"); -const block_module = @import("../core/block.zig"); -const engine_api = @import("engine_api_client.zig"); -const state_provider = @import("state_provider.zig"); - -pub const BlockSync = struct { - allocator: std.mem.Allocator, - engine_client: *engine_api.EngineApiClient, - state_provider: *state_provider.StateProvider, - head_block_hash: ?types.Hash = null, - safe_block_hash: ?types.Hash = null, - finalized_block_hash: ?types.Hash = null, - - const Self = @This(); - - pub fn init(allocator: std.mem.Allocator, engine: *engine_api.EngineApiClient, state: *state_provider.StateProvider) Self { - return .{ - .allocator = allocator, - .engine_client = engine, - .state_provider = state, - .head_block_hash = null, - .safe_block_hash = null, - .finalized_block_hash = null, - }; - } - - pub fn deinit(self: *Self) void { - _ = self; - // No cleanup needed - } - - /// Sync block to L2 geth via engine_newPayload - pub fn syncBlock(self: *Self, block: *const block_module.Block) !engine_api.PayloadStatus { - std.log.info("[BlockSync] Syncing block #{d} to L2 geth", .{block.number}); - - // Submit block to L2 geth - const status = try self.engine_client.newPayload(block); - - // Update fork choice if block is valid - if (std.mem.eql(u8, status.status, "VALID")) { - const block_hash = block.hash(); - self.head_block_hash = block_hash; - - std.log.info("[BlockSync] Block #{d} accepted, updating fork choice", .{block.number}); - - // Update fork choice state - _ = try self.updateForkChoice(block_hash, block_hash, block_hash); - } else { - std.log.warn("[BlockSync] Block #{d} sync failed with status: {s}", .{ block.number, status.status }); - if (status.validation_error) |err| { - std.log.warn("[BlockSync] Validation error: {s}", .{err}); - } - } - - return status; - } - - /// Update fork choice state 
in L2 geth - pub fn updateForkChoice(self: *Self, head_hash: types.Hash, safe_hash: types.Hash, finalized_hash: types.Hash) !engine_api.ForkChoiceUpdateResponse { - std.log.info("[BlockSync] Updating fork choice state", .{}); - - const response = try self.engine_client.forkchoiceUpdated(head_hash, safe_hash, finalized_hash); - - // Update local fork choice state - self.head_block_hash = head_hash; - self.safe_block_hash = safe_hash; - self.finalized_block_hash = finalized_hash; - - std.log.info("[BlockSync] Fork choice updated successfully", .{}); - - return response; - } - - /// Handle chain reorganization (reorg) - pub fn handleReorg(self: *Self, new_head_hash: types.Hash, common_ancestor_hash: types.Hash) !void { - // Update fork choice to new head - // Safe and finalized blocks remain unchanged unless explicitly updated - const safe_hash = self.safe_block_hash orelse common_ancestor_hash; - const finalized_hash = self.finalized_block_hash orelse common_ancestor_hash; - - _ = try self.updateForkChoice(new_head_hash, safe_hash, finalized_hash); - } - - /// Get L2 geth sync status - pub fn getSyncStatus(self: *Self) !struct { - synced: bool, - current_block: u64, - highest_block: u64, - } { - // Query L2 geth for sync status via eth_syncing - const result = try self.state_provider.callRpc("eth_syncing", std.json.Value{ .array = std.json.Array.init(self.allocator) }); - defer self.allocator.free(result); - - // Parse response - const parsed = try std.json.parseFromSliceLeaky( - struct { - result: union(enum) { - boolean: bool, - object: struct { - currentBlock: []const u8, - highestBlock: []const u8, - }, - }, - }, - self.allocator, - result, - .{}, - ); - - switch (parsed.result) { - .boolean => |synced| { - if (synced) { - // Still syncing, but we don't have block numbers - return .{ - .synced = false, - .current_block = 0, - .highest_block = 0, - }; - } else { - // Synced, get current block number - const current_block = try 
self.state_provider.getBlockByNumber(0, false); // Get latest - return .{ - .synced = true, - .current_block = current_block.number, - .highest_block = current_block.number, - }; - } - }, - .object => |sync_info| { - const hex_start: usize = if (std.mem.startsWith(u8, sync_info.currentBlock, "0x")) 2 else 0; - const current = try std.fmt.parseInt(u64, sync_info.currentBlock[hex_start..], 16); - const highest_hex_start: usize = if (std.mem.startsWith(u8, sync_info.highestBlock, "0x")) 2 else 0; - const highest = try std.fmt.parseInt(u64, sync_info.highestBlock[highest_hex_start..], 16); - - return .{ - .synced = false, - .current_block = current, - .highest_block = highest, - }; - }, - } - } - - /// Sync multiple blocks in sequence - pub fn syncBlocks(self: *Self, blocks: []const block_module.Block) !void { - for (blocks) |block| { - const status = try self.syncBlock(&block); - - if (!std.mem.eql(u8, status.status, "VALID")) { - std.log.warn("Block #{d} sync failed: {s}", .{ block.number, status.status }); - if (status.validation_error) |err| { - std.log.warn("Validation error: {s}", .{err}); - } - // Continue syncing other blocks - } - } - } - - /// Call RPC via state provider (helper method) - fn callRpcViaStateProvider(self: *Self, method: []const u8, params: std.json.Value) ![]u8 { - // Parse URL - const url_parts = try self.parseUrl(self.state_provider.l2_rpc_url); - const host = url_parts.host; - const port = url_parts.port; - - // Connect to L2 RPC - const address = try std.net.Address.parseIp(host, port); - const stream = try std.net.tcpConnectToAddress(address); - defer stream.close(); - - // Build JSON-RPC request - var request_json = std.ArrayList(u8).init(self.allocator); - defer request_json.deinit(); - - try request_json.writer().print( - \\{{"jsonrpc":"2.0","method":"{s}","params":{s},"id":1}} - , .{ method, try self.jsonValueToString(params) }); - - const request_body = try request_json.toOwnedSlice(); - defer self.allocator.free(request_body); - - 
// Build HTTP request - var http_request = std.ArrayList(u8).init(self.allocator); - defer http_request.deinit(); - - try http_request.writer().print( - \\POST / HTTP/1.1\r - \\Host: {s}:{d}\r - \\Content-Type: application/json\r - \\Content-Length: {d}\r - \\\r - \\{s} - , .{ host, port, request_body.len, request_body }); - - const http_request_bytes = try http_request.toOwnedSlice(); - defer self.allocator.free(http_request_bytes); - - // Send request - try stream.writeAll(http_request_bytes); - - // Read response - var response_buffer: [8192]u8 = undefined; - const bytes_read = try stream.read(&response_buffer); - const response = response_buffer[0..bytes_read]; - - // Parse HTTP response - const body_start = std.mem.indexOf(u8, response, "\r\n\r\n") orelse return error.InvalidResponse; - const json_body = response[body_start + 4 ..]; - - // Return JSON body (caller will free) - return try self.allocator.dupe(u8, json_body); - } - - const UrlParts = struct { - host: []const u8, - port: u16, - }; - - fn parseUrl(self: *Self, url: []const u8) !UrlParts { - _ = self; - // Simple URL parsing - assumes http://host:port format - if (!std.mem.startsWith(u8, url, "http://")) { - return error.InvalidUrl; - } - - const host_start = 7; // Skip "http://" - const colon_idx = std.mem.indexOfScalar(u8, url[host_start..], ':') orelse { - // No port specified, use default 8545 - return UrlParts{ .host = url[host_start..], .port = 8545 }; - }; - - const host = url[host_start .. host_start + colon_idx]; - const port_str = url[host_start + colon_idx + 1 ..]; - const port = try std.fmt.parseInt(u16, port_str, 10); - - return UrlParts{ .host = host, .port = port }; - } - - fn jsonValueToString(self: *Self, value: std.json.Value) ![]const u8 { - // Simple JSON serialization for params - switch (value) { - .array => |arr| { - var result = std.ArrayList(u8).init(self.allocator); - defer result.deinit(); - try result.append('['); - for (arr.items, 0..) 
|item, i| { - if (i > 0) try result.append(','); - const item_str = try self.jsonValueToString(item); - defer self.allocator.free(item_str); - try result.writer().print("{s}", .{item_str}); - } - try result.append(']'); - return result.toOwnedSlice(); - }, - .object => |obj| { - var result = std.ArrayList(u8).init(self.allocator); - defer result.deinit(); - try result.append('{'); - var first = true; - var it = obj.iterator(); - while (it.next()) |entry| { - if (!first) try result.append(','); - first = false; - const key_str = try std.fmt.allocPrint(self.allocator, "\"{s}\"", .{entry.key_ptr.*}); - defer self.allocator.free(key_str); - const val_str = try self.jsonValueToString(entry.value_ptr.*); - defer self.allocator.free(val_str); - try result.writer().print("{s}:{s}", .{ key_str, val_str }); - } - try result.append('}'); - return result.toOwnedSlice(); - }, - .string => |s| { - return try std.fmt.allocPrint(self.allocator, "\"{s}\"", .{s}); - }, - .boolean => |b| { - return try std.fmt.allocPrint(self.allocator, "{}", .{b}); - }, - else => return error.UnsupportedJsonType, - } - } -}; diff --git a/src/l2/tx_forwarder.zig b/src/l2/tx_forwarder.zig deleted file mode 100644 index 06dbb21..0000000 --- a/src/l2/tx_forwarder.zig +++ /dev/null @@ -1,125 +0,0 @@ -// Transaction forwarder for handling transactions from L2 geth - -const std = @import("std"); -const core = @import("../core/root.zig"); -const types = @import("../core/types.zig"); -const validation = @import("../validation/root.zig"); -const mempool = @import("../mempool/root.zig"); -const engine_api = @import("engine_api_client.zig"); - -pub const TransactionForwarder = struct { - allocator: std.mem.Allocator, - ingress_handler: *validation.ingress.Ingress, - mempool: *mempool.Mempool, - engine_client: *engine_api.EngineApiClient, - - const Self = @This(); - - pub fn init(allocator: std.mem.Allocator, ingress: *validation.ingress.Ingress, mp: *mempool.Mempool, engine: *engine_api.EngineApiClient) Self { 
- return .{ - .allocator = allocator, - .ingress_handler = ingress, - .mempool = mp, - .engine_client = engine, - }; - } - - pub fn deinit(self: *Self) void { - _ = self; - // No cleanup needed - } - - /// Forward transaction from L2 geth to sequencer - /// This is called when L2 geth receives a transaction and forwards it to the sequencer - pub const ForwardResult = struct { - accepted: bool, - tx_hash: ?types.Hash, - error_message: ?[]const u8, - }; - - pub fn forwardTransaction(self: *Self, raw_tx_hex: []const u8) !ForwardResult { - // Decode hex transaction - const raw_tx = try self.hexToBytes(raw_tx_hex); - defer self.allocator.free(raw_tx); - - // Parse transaction - const tx = core.transaction.Transaction.fromRaw(self.allocator, raw_tx) catch |err| { - const error_msg = try std.fmt.allocPrint(self.allocator, "Invalid transaction: {any}", .{err}); - return ForwardResult{ - .accepted = false, - .tx_hash = null, - .error_message = error_msg, - }; - }; - defer self.allocator.free(tx.data); - - // Validate and accept transaction - const validation_result = self.ingress_handler.acceptTransaction(tx) catch |err| { - const error_msg = try std.fmt.allocPrint(self.allocator, "Validation error: {any}", .{err}); - return ForwardResult{ - .accepted = false, - .tx_hash = null, - .error_message = error_msg, - }; - }; - - if (validation_result != .accepted) { - const error_msg = switch (validation_result) { - .invalid => "Transaction is invalid", - .duplicate => "Transaction already in mempool", - .insufficient_funds => "Insufficient funds", - .nonce_too_low => "Nonce too low", - .nonce_too_high => "Nonce too high", - .gas_price_too_low => "Gas price too low", - .accepted => unreachable, - }; - - const error_str = try std.fmt.allocPrint(self.allocator, "{s}", .{error_msg}); - return ForwardResult{ - .accepted = false, - .tx_hash = null, - .error_message = error_str, - }; - } - - // Get transaction hash - const tx_hash = try tx.hash(self.allocator); - // tx_hash is u256, no 
need to free - - return ForwardResult{ - .accepted = true, - .tx_hash = tx_hash, - .error_message = null, - }; - } - - /// Submit sequenced block back to L2 geth via engine_newPayload - pub fn submitBlockToL2(self: *Self, block: *const core.block.Block) !engine_api.PayloadStatus { - std.log.info("[TxForwarder] Submitting block #{d} to L2 geth via engine_newPayload", .{block.number}); - const status = try self.engine_client.newPayload(block); - std.log.info("[TxForwarder] Block #{d} submission result: {s}", .{ block.number, status.status }); - return status; - } - - /// Convert hex string to bytes - fn hexToBytes(self: *Self, hex: []const u8) ![]u8 { - const hex_start: usize = if (std.mem.startsWith(u8, hex, "0x")) 2 else 0; - const hex_data = hex[hex_start..]; - - if (hex_data.len % 2 != 0) { - return error.InvalidHexLength; - } - - var result = std.ArrayList(u8).init(self.allocator); - defer result.deinit(); - - var i: usize = 0; - while (i < hex_data.len) : (i += 2) { - const high = try std.fmt.parseInt(u8, hex_data[i .. i + 1], 16); - const low = try std.fmt.parseInt(u8, hex_data[i + 1 .. 
i + 2], 16); - try result.append((high << 4) | low); - } - - return result.toOwnedSlice(); - } -}; diff --git a/src/main.zig b/src/main.zig index b53330a..a24b413 100644 --- a/src/main.zig +++ b/src/main.zig @@ -80,14 +80,30 @@ pub fn main() !void { defer batch_builder.deinit(); std.log.info("Batch builder initialized (size_limit={d}, gas_limit={d})", .{ cfg.batch_size_limit, cfg.block_gas_limit }); - var ingress_handler = lib.validation.ingress.Ingress.init(allocator, &mp, &state_manager); + // Initialize L2 state provider for validation queries (op-node style) + var state_provider = lib.l2.StateProvider.init(allocator, cfg.l2_rpc_url); + std.log.info("L2 state provider initialized (rpc_url={s})", .{cfg.l2_rpc_url}); - var seq = lib.sequencer.Sequencer.init(allocator, &cfg, &mp, &state_manager, &batch_builder); + // Initialize ingress with state manager (for witness generation) and state provider (for validation) + var ingress_handler = lib.validation.ingress.Ingress.init(allocator, &mp, &state_manager, &state_provider); + // Initialize L1 client for derivation var l1_client = lib.l1.Client.init(allocator, &cfg); defer l1_client.deinit(); std.log.info("L1 client initialized (rpc_url={s}, chain_id={d})", .{ cfg.l1_rpc_url, cfg.l1_chain_id }); + // Initialize L1 derivation pipeline + var l1_derivation = lib.l1.derivation.L1Derivation.init(allocator, &l1_client); + std.log.info("L1 derivation pipeline initialized", .{}); + + // Initialize L2 Engine API client + var engine_client = lib.l2.EngineApiClient.init(allocator, cfg.l2_rpc_url, cfg.l2_engine_api_port); + std.log.info("L2 Engine API client initialized (rpc_url={s}, engine_port={d})", .{ cfg.l2_rpc_url, cfg.l2_engine_api_port }); + + // Initialize sequencer with op-node style components + var seq = lib.sequencer.Sequencer.init(allocator, &cfg, &mp, &state_manager, &batch_builder, &l1_derivation, &engine_client); + defer seq.deinit(); + var m = lib.metrics.Metrics.init(allocator); // Start API server @@ -117,15 
+133,23 @@ fn sequencingLoop(seq: *lib.sequencer.Sequencer, batch_builder: *lib.batch.Build while (true) { std.Thread.sleep(cfg.batch_interval_ms * std.time.ns_per_ms); - // Build block (even if empty - this increments block number and maintains chain continuity) - // Note: Empty blocks change local L2 state (block number, parent hash) but are not submitted to L1 - // This saves L1 gas costs while maintaining proper L2 chain progression + // Update safe blocks from L1 derivation (op-node style) + if (l1_client.getLatestBlockNumber()) |block_num| { + seq.updateSafeBlock(block_num) catch |err| { + std.log.warn("Failed to update safe block from L1: {any}", .{err}); + }; + } else |err| { + std.log.warn("Failed to get L1 block number: {any}", .{err}); + } + + // Build unsafe block (sequencer-proposed) via payload request to L2 geth (op-node style) + // This requests L2 geth to build a block with transactions from mempool const block = seq.buildBlock() catch |err| { - std.log.err("Error building block: {any}", .{err}); + std.log.err("Error building block (payload request failed): {any}", .{err}); continue; }; m.incrementBlocksCreated(); - std.log.info("Block #{d} created: {d} transactions, {d} gas used", .{ block.number, block.transactions.len, block.gas_used }); + std.log.info("Block #{d} created via L2 geth payload: {d} transactions, {d} gas used", .{ block.number, block.transactions.len, block.gas_used }); // Only add blocks with transactions to batch (empty blocks advance L2 state but aren't submitted to L1) if (block.transactions.len > 0) { diff --git a/src/mempool/mempool.zig b/src/mempool/mempool.zig index ea57554..f16abe4 100644 --- a/src/mempool/mempool.zig +++ b/src/mempool/mempool.zig @@ -2,6 +2,7 @@ const std = @import("std"); const core = @import("../core/root.zig"); const config = @import("../config/root.zig"); const wal = @import("wal.zig"); +const conditional_tx = @import("../core/conditional_tx.zig"); // Metadata for priority queue comparison const 
EntryMetadata = struct { @@ -148,6 +149,8 @@ pub const Mempool = struct { // Use HashMap where usize is the first transaction index for that sender // Then scan storage for all transactions from that sender by_sender: std.HashMap(core.types.Address, usize, std.hash_map.AutoContext(core.types.Address), std.hash_map.default_max_load_percentage), + // Conditional transactions: hash -> conditional options + conditional_txs: std.HashMap(core.types.Hash, conditional_tx.ConditionalOptions, std.hash_map.AutoContext(core.types.Hash), std.hash_map.default_max_load_percentage), wal: ?wal.WriteAheadLog = null, size: usize = 0, @@ -165,6 +168,7 @@ pub const Mempool = struct { .metadata = std.HashMap(core.types.Hash, EntryMetadata, std.hash_map.AutoContext(core.types.Hash), std.hash_map.default_max_load_percentage).init(allocator), .entries = std.PriorityQueue(QueueEntry, void, compareQueueEntry).init(allocator, {}), .by_sender = std.HashMap(core.types.Address, usize, std.hash_map.AutoContext(core.types.Address), std.hash_map.default_max_load_percentage).init(allocator), + .conditional_txs = std.HashMap(core.types.Hash, conditional_tx.ConditionalOptions, std.hash_map.AutoContext(core.types.Hash), std.hash_map.default_max_load_percentage).init(allocator), }; // Initialize WAL if configured @@ -181,6 +185,13 @@ pub const Mempool = struct { // Clean up custom storage self.storage.deinit(); + // Clean up conditional transactions + var cond_iter = self.conditional_txs.iterator(); + while (cond_iter.next()) |entry| { + entry.value_ptr.*.deinit(); + } + self.conditional_txs.deinit(); + self.by_sender.deinit(); // Clear priority queue @@ -195,7 +206,13 @@ pub const Mempool = struct { } } + /// Insert a regular transaction pub fn insert(self: *Mempool, tx: core.transaction.Transaction) !bool { + return self.insertWithConditions(tx, null); + } + + /// Insert a transaction with conditional options + pub fn insertWithConditions(self: *Mempool, tx: core.transaction.Transaction, 
conditions_opt: ?conditional_tx.ConditionalOptions) !bool { // Check size limit if (self.size >= self.config.mempool_max_size) { return error.MempoolFull; @@ -225,6 +242,11 @@ pub const Mempool = struct { .received_at = @intCast(now), }); + // Store conditional options if provided + if (conditions_opt) |*cond| { + try self.conditional_txs.put(tx_hash, cond.*); + } + // Write to WAL if (self.wal) |*wal_instance| { // Create temporary entry for WAL @@ -291,7 +313,9 @@ pub const Mempool = struct { return tx; } - pub fn getTopN(self: *Mempool, gas_limit: u64, max_count: usize) ![]core.transaction.Transaction { + /// Get top N transactions, checking conditional transaction conditions + /// current_block_number and current_timestamp are used to filter conditional transactions + pub fn getTopN(self: *Mempool, gas_limit: u64, max_count: usize, current_block_number: u64, current_timestamp: u64) ![]core.transaction.Transaction { var result = std.ArrayList(core.transaction.Transaction).init(self.allocator); errdefer result.deinit(); @@ -314,6 +338,15 @@ pub const Mempool = struct { continue; }; + // Check conditional transaction conditions + if (self.conditional_txs.get(entry.hash)) |conditions| { + if (!conditions.checkConditions(current_block_number, current_timestamp)) { + // Conditions not met, put back in queue + try temp_queue.add(entry); + continue; + } + } + if (tx.gas_limit <= remaining_gas and count < max_count) { try result.append(tx); remaining_gas -= tx.gas_limit; @@ -321,6 +354,7 @@ pub const Mempool = struct { // Remove from maps since we're using it _ = self.by_hash.remove(entry.hash); _ = self.metadata.remove(entry.hash); + _ = self.conditional_txs.remove(entry.hash); } else { // Put back in queue try temp_queue.add(entry); diff --git a/src/sequencer/block_state.zig b/src/sequencer/block_state.zig new file mode 100644 index 0000000..75c7572 --- /dev/null +++ b/src/sequencer/block_state.zig @@ -0,0 +1,157 @@ +// Block State Management (Safe/Unsafe blocks) +// 
Tracks safe blocks (derived from L1) and unsafe blocks (sequencer-proposed) + +const std = @import("std"); +const core = @import("../core/root.zig"); +const types = @import("../core/types.zig"); + +pub const BlockState = struct { + allocator: std.mem.Allocator, + safe_block: ?core.block.Block = null, + unsafe_block: ?core.block.Block = null, + finalized_block: ?core.block.Block = null, + head_block: ?core.block.Block = null, + + const Self = @This(); + + pub fn init(allocator: std.mem.Allocator) Self { + return .{ + .allocator = allocator, + .safe_block = null, + .unsafe_block = null, + .finalized_block = null, + .head_block = null, + }; + } + + pub fn deinit(self: *Self) void { + if (self.safe_block) |*block| { + self.allocator.free(block.transactions); + } + if (self.unsafe_block) |*block| { + self.allocator.free(block.transactions); + } + if (self.finalized_block) |*block| { + self.allocator.free(block.transactions); + } + if (self.head_block) |*block| { + self.allocator.free(block.transactions); + } + } + + /// Set safe block (derived from L1) + pub fn setSafeBlock(self: *Self, block: core.block.Block) !void { + // Free old safe block + if (self.safe_block) |*old_block| { + self.allocator.free(old_block.transactions); + } + + // Clone block + const transactions = try self.allocator.dupe(core.transaction.Transaction, block.transactions); + var safe_block = block; + safe_block.transactions = transactions; + + self.safe_block = safe_block; + std.log.info("[BlockState] Safe block updated to #{d}", .{block.number}); + } + + /// Set unsafe block (sequencer-proposed) + pub fn setUnsafeBlock(self: *Self, block: core.block.Block) !void { + // Free old unsafe block + if (self.unsafe_block) |*old_block| { + self.allocator.free(old_block.transactions); + } + + // Clone block + const transactions = try self.allocator.dupe(core.transaction.Transaction, block.transactions); + var unsafe_block = block; + unsafe_block.transactions = transactions; + + self.unsafe_block = 
unsafe_block; + std.log.info("[BlockState] Unsafe block updated to #{d}", .{block.number}); + } + + /// Set finalized block + pub fn setFinalizedBlock(self: *Self, block: core.block.Block) !void { + // Free old finalized block + if (self.finalized_block) |*old_block| { + self.allocator.free(old_block.transactions); + } + + // Clone block + const transactions = try self.allocator.dupe(core.transaction.Transaction, block.transactions); + var finalized_block = block; + finalized_block.transactions = transactions; + + self.finalized_block = finalized_block; + std.log.info("[BlockState] Finalized block updated to #{d}", .{block.number}); + } + + /// Set head block + pub fn setHeadBlock(self: *Self, block: core.block.Block) !void { + // Free old head block + if (self.head_block) |*old_block| { + self.allocator.free(old_block.transactions); + } + + // Clone block + const transactions = try self.allocator.dupe(core.transaction.Transaction, block.transactions); + var head_block = block; + head_block.transactions = transactions; + + self.head_block = head_block; + std.log.info("[BlockState] Head block updated to #{d}", .{block.number}); + } + + /// Get safe block + pub fn getSafeBlock(self: *const Self) ?core.block.Block { + return self.safe_block; + } + + /// Get unsafe block + pub fn getUnsafeBlock(self: *const Self) ?core.block.Block { + return self.unsafe_block; + } + + /// Get finalized block + pub fn getFinalizedBlock(self: *const Self) ?core.block.Block { + return self.finalized_block; + } + + /// Get head block + pub fn getHeadBlock(self: *const Self) ?core.block.Block { + return self.head_block; + } + + /// Get safe block hash + pub fn getSafeBlockHash(self: *const Self) ?types.Hash { + if (self.safe_block) |block| { + return block.hash(); + } + return null; + } + + /// Get unsafe block hash + pub fn getUnsafeBlockHash(self: *const Self) ?types.Hash { + if (self.unsafe_block) |block| { + return block.hash(); + } + return null; + } + + /// Get finalized block hash + 
pub fn getFinalizedBlockHash(self: *const Self) ?types.Hash { + if (self.finalized_block) |block| { + return block.hash(); + } + return null; + } + + /// Get head block hash + pub fn getHeadBlockHash(self: *const Self) ?types.Hash { + if (self.head_block) |block| { + return block.hash(); + } + return null; + } +}; diff --git a/src/sequencer/reorg_handler.zig b/src/sequencer/reorg_handler.zig new file mode 100644 index 0000000..9ded0b3 --- /dev/null +++ b/src/sequencer/reorg_handler.zig @@ -0,0 +1,119 @@ +// Reorg Handler +// Handles chain reorganizations for L1 and L2 chains (op-node style) + +const std = @import("std"); +const core = @import("../core/root.zig"); +const types = @import("../core/types.zig"); +const block_state = @import("block_state.zig"); + +pub const ReorgHandler = struct { + allocator: std.mem.Allocator, + block_state: *block_state.BlockState, + // Store recent L1 block hashes for reorg detection + l1_block_hashes: std.HashMap(u64, types.Hash, std.hash_map.AutoContext(u64), std.hash_map.default_max_load_percentage), + max_stored_blocks: u64 = 100, // Store last 100 blocks for reorg detection + + const Self = @This(); + + pub fn init(allocator: std.mem.Allocator, bs: *block_state.BlockState) Self { + return .{ + .allocator = allocator, + .block_state = bs, + .l1_block_hashes = std.HashMap(u64, types.Hash, std.hash_map.AutoContext(u64), std.hash_map.default_max_load_percentage).init(allocator), + }; + } + + pub fn deinit(self: *Self) void { + self.l1_block_hashes.deinit(); + } + + /// Store L1 block hash for reorg detection + pub fn storeL1BlockHash(self: *Self, block_number: u64, block_hash: types.Hash) !void { + try self.l1_block_hashes.put(block_number, block_hash); + + // Clean up old blocks (keep only last max_stored_blocks) + if (self.l1_block_hashes.count() > self.max_stored_blocks) { + var to_remove = std.ArrayList(u64).init(self.allocator); + defer to_remove.deinit(); + + var it = self.l1_block_hashes.iterator(); + while (it.next()) 
|entry| { + if (entry.key_ptr.* < block_number - self.max_stored_blocks) { + try to_remove.append(entry.key_ptr.*); + } + } + + for (to_remove.items) |block_num| { + _ = self.l1_block_hashes.remove(block_num); + } + } + } + + /// Detect L1 reorg by comparing block hashes + /// Returns common ancestor block number if reorg detected, null otherwise + pub fn detectL1Reorg(self: *Self, expected_block_number: u64, actual_block_hash: types.Hash) !?u64 { + // Check if we have stored hash for this block + if (self.l1_block_hashes.get(expected_block_number)) |expected_hash| { + // Compare hashes + if (!std.mem.eql(u8, &types.hashToBytes(expected_hash), &types.hashToBytes(actual_block_hash))) { + std.log.warn("[ReorgHandler] L1 reorg detected at block #{d}", .{expected_block_number}); + + // Find common ancestor by checking previous blocks + var check_block = expected_block_number; + while (check_block > 0) : (check_block -= 1) { + if (self.l1_block_hashes.get(check_block)) |_| { + // Found a stored block, assume it's the common ancestor + return check_block; + } + } + + // No common ancestor found, return genesis + return 0; + } + } + + // Store this block hash for future reorg detection + try self.storeL1BlockHash(expected_block_number, actual_block_hash); + + return null; // No reorg detected + } + + /// Detect L2 reorg by comparing block hashes + /// Returns common ancestor block number if reorg detected, null otherwise + pub fn detectL2Reorg(self: *Self, expected_block_number: u64, actual_block_hash: types.Hash) !?u64 { + // Get current head block + const head_hash = self.block_state.getHeadBlockHash(); + + if (head_hash) |current_hash| { + // Compare with expected hash + if (!std.mem.eql(u8, &types.hashToBytes(current_hash), &types.hashToBytes(actual_block_hash))) { + std.log.warn("[ReorgHandler] L2 reorg detected at block #{d}", .{expected_block_number}); + + // For now, return previous block as common ancestor + // In production, would traverse chain to find actual 
common ancestor + if (expected_block_number > 0) { + return expected_block_number - 1; + } + return 0; // Genesis + } + } + + return null; // No reorg detected + } + + /// Handle L2 reorg by resetting block state + pub fn handleL2Reorg(self: *Self, common_ancestor: u64) !void { + std.log.warn("[ReorgHandler] L2 reorg detected, resetting to block #{d}", .{common_ancestor}); + + // Reset head block to common ancestor + // In production, would: + // 1. Fetch block at common ancestor from storage + // 2. Reset head block to that block + // 3. Reset safe/unsafe blocks if they're after common ancestor + // 4. Clear blocks after common ancestor + + // For now, just log the reorg + // Full implementation would require block storage/retrieval + _ = self; + } +}; diff --git a/src/sequencer/root.zig b/src/sequencer/root.zig index eb141a7..ee68520 100644 --- a/src/sequencer/root.zig +++ b/src/sequencer/root.zig @@ -1,2 +1,4 @@ pub const Sequencer = @import("sequencer.zig").Sequencer; pub const mev = @import("mev.zig"); +pub const block_state = @import("block_state.zig"); +pub const reorg_handler = @import("reorg_handler.zig"); diff --git a/src/sequencer/sequencer.zig b/src/sequencer/sequencer.zig index 06ff1ea..dd06bdc 100644 --- a/src/sequencer/sequencer.zig +++ b/src/sequencer/sequencer.zig @@ -1,3 +1,6 @@ +// Sequencer refactored for op-node style architecture +// Requests payloads from L2 geth instead of building blocks directly + const std = @import("std"); const core = @import("../core/root.zig"); const mempool = @import("../mempool/root.zig"); @@ -5,22 +8,11 @@ const batch = @import("../batch/root.zig"); const state = @import("../state/root.zig"); const config = @import("../config/root.zig"); const mev = @import("mev.zig"); -const execution = @import("execution.zig"); - -fn formatHash(hash: core.types.Hash) []const u8 { - // Format hash as hex string for logging - const bytes = core.types.hashToBytes(hash); - var buffer: [66]u8 = undefined; // "0x" + 64 hex chars - 
buffer[0] = '0'; - buffer[1] = 'x'; - // Format each byte as hex - for (bytes, 0..) |byte, i| { - const hex_chars = "0123456789abcdef"; - buffer[2 + i * 2] = hex_chars[byte >> 4]; - buffer[2 + i * 2 + 1] = hex_chars[byte & 0xf]; - } - return buffer[0..66]; -} +const block_state = @import("block_state.zig"); +const l1_derivation = @import("../l1/derivation.zig"); +const l2_engine = @import("../l2/engine_api_client.zig"); +const l2_payload = @import("../l2/payload_attrs.zig"); +const types = @import("../core/types.zig"); pub const Sequencer = struct { allocator: std.mem.Allocator, @@ -32,9 +24,23 @@ pub const Sequencer = struct { current_block_number: u64 = 0, parent_hash: core.types.Hash = core.types.hashFromBytes([_]u8{0} ** 32), - execution_engine: execution.ExecutionEngine, - - pub fn init(allocator: std.mem.Allocator, cfg: *const config.Config, mp: *mempool.Mempool, sm: *state.StateManager, bb: *batch.Builder) Sequencer { + // op-node style components + block_state: block_state.BlockState, + l1_derivation: *l1_derivation.L1Derivation, + engine_client: *l2_engine.EngineApiClient, + payload_builder: l2_payload.PayloadAttributesBuilder, + + const Self = @This(); + + pub fn init( + allocator: std.mem.Allocator, + cfg: *const config.Config, + mp: *mempool.Mempool, + sm: *state.StateManager, + bb: *batch.Builder, + derivation: *l1_derivation.L1Derivation, + engine: *l2_engine.EngineApiClient, + ) Sequencer { return .{ .allocator = allocator, .config = cfg, @@ -42,74 +48,191 @@ pub const Sequencer = struct { .state_manager = sm, .batch_builder = bb, .mev_orderer = mev.MEVOrderer.init(allocator), - .execution_engine = execution.ExecutionEngine.init(allocator, sm), + .block_state = block_state.BlockState.init(allocator), + .l1_derivation = derivation, + .engine_client = engine, + .payload_builder = l2_payload.PayloadAttributesBuilder.init(allocator), }; } - pub fn buildBlock(self: *Sequencer) !core.block.Block { - // Get top transactions from mempool - const txs = try 
self.mempool.getTopN(self.config.block_gas_limit, self.config.batch_size_limit); + pub fn deinit(self: *Self) void { + self.block_state.deinit(); + } + + /// Request payload from L2 geth (op-node style) + /// Returns payload_id if successful + pub fn requestPayload(self: *Self) !?[]const u8 { + // Get transactions from mempool (check conditional transaction conditions) + const current_block = if (self.block_state.head_block) |head| head.number else self.current_block_number; + const current_timestamp = @as(u64, @intCast(std.time.timestamp())); + const txs = try self.mempool.getTopN(self.config.block_gas_limit, self.config.batch_size_limit, current_block, current_timestamp); defer self.allocator.free(txs); // Apply MEV ordering const mev_txs = try self.mev_orderer.order(txs); defer self.allocator.free(mev_txs); - // Build block - var gas_used: u64 = 0; - var valid_txs = std.ArrayList(core.transaction.Transaction).init(self.allocator); - defer valid_txs.deinit(); + // Build payload attributes + const fee_recipient = types.addressFromBytes([_]u8{0} ** 20); // Default coinbase + var payload_attrs = try self.payload_builder.build(mev_txs, fee_recipient); + defer payload_attrs.deinit(self.allocator); - for (mev_txs) |tx| { - // Check if transaction fits in block gas limit - const estimated_gas = tx.gas_limit; - if (gas_used + estimated_gas > self.config.block_gas_limit) break; + // Convert to JSON-RPC format + var payload_attrs_json = try self.payload_builder.toJsonRpc(payload_attrs); + defer payload_attrs_json.deinit(); - // Execute transaction - const exec_result = self.execution_engine.executeTransaction(tx) catch |err| { - std.log.warn("Transaction execution error: {any}", .{err}); - continue; - }; + // Get fork choice state + const head_hash = self.block_state.getHeadBlockHash() orelse self.parent_hash; + const safe_hash = self.block_state.getSafeBlockHash() orelse self.parent_hash; + const finalized_hash = self.block_state.getFinalizedBlockHash() orelse 
self.parent_hash; - // Skip failed transactions - if (!exec_result.success) { - const tx_hash = tx.hash(self.allocator) catch continue; - std.log.warn("Transaction execution failed (hash={s}, gas_used={d})", .{ formatHash(tx_hash), exec_result.gas_used }); - continue; + // Request payload via engine_forkchoiceUpdated + const response = try self.engine_client.forkchoiceUpdated(head_hash, safe_hash, finalized_hash, payload_attrs_json); + + if (response.payload_id) |payload_id| { + std.log.info("[Sequencer] Payload requested, payload_id: {s}", .{payload_id}); + return payload_id; + } + + return null; + } + + /// Get built payload from L2 geth + pub fn getPayload(self: *Self, payload_id: []const u8) !l2_engine.EngineApiClient.ExecutionPayload { + return try self.engine_client.getPayload(payload_id); + } + + /// Convert execution payload to block + pub fn payloadToBlock(self: *Self, payload: l2_engine.EngineApiClient.ExecutionPayload) !core.block.Block { + // Parse transactions from RLP hex + var transactions = std.ArrayList(core.transaction.Transaction).init(self.allocator); + errdefer { + for (transactions.items) |*tx| { + self.allocator.free(tx.data); } + transactions.deinit(); + } + + // Parse each transaction from RLP hex string + for (payload.transactions) |tx_hex| { + // Convert hex string to bytes + const tx_bytes = try self.hexToBytes(tx_hex); + defer self.allocator.free(tx_bytes); + + // Decode transaction from RLP + const tx = core.transaction.Transaction.fromRaw(self.allocator, tx_bytes) catch |err| { + std.log.warn("[Sequencer] Failed to parse transaction from payload: {any}, skipping", .{err}); + continue; // Skip invalid transactions + }; + + try transactions.append(tx); + } + + const block = core.block.Block{ + .number = payload.block_number, + .parent_hash = payload.parent_hash, + .timestamp = payload.timestamp, + .transactions = try transactions.toOwnedSlice(), + .gas_used = payload.gas_used, + .gas_limit = payload.gas_limit, + .state_root = 
payload.state_root, + .receipts_root = payload.receipts_root, + .logs_bloom = payload.logs_bloom, + }; + + return block; + } + + /// Update safe block from L1 derivation + pub fn updateSafeBlock(self: *Self, l1_block_number: u64) !void { + // Derive L2 from L1 + if (try self.l1_derivation.deriveL2FromL1(l1_block_number)) |derived| { + // Create block from derived data + const block = core.block.Block{ + .number = derived.block_number, + .parent_hash = self.parent_hash, + .timestamp = derived.timestamp, + .transactions = derived.transactions, + .gas_used = 0, + .gas_limit = self.config.block_gas_limit, + .state_root = core.types.hashFromBytes([_]u8{0} ** 32), + .receipts_root = core.types.hashFromBytes([_]u8{0} ** 32), + .logs_bloom = [_]u8{0} ** 256, + }; + + try self.block_state.setSafeBlock(block); + self.l1_derivation.updateSafeBlock(derived.block_number); + } + } + + /// Build unsafe block (sequencer-proposed, not yet on L1) - op-node style + /// Requests payload from L2 geth instead of building directly + pub fn buildBlock(self: *Self) !core.block.Block { + // Request payload from L2 geth + const payload_id_opt = try self.requestPayload(); + if (payload_id_opt) |payload_id| { + // Get built payload + var payload = try self.getPayload(payload_id); + defer payload.deinit(self.allocator); + + // Convert to block + const block = try self.payloadToBlock(payload); - // Check if execution fits in block gas limit - if (gas_used + exec_result.gas_used > self.config.block_gas_limit) break; + // Update unsafe block + try self.block_state.setUnsafeBlock(block); - // Apply state changes (execution engine already updated state) - // Create receipt - const tx_hash = try tx.hash(self.allocator); - _ = try self.state_manager.applyTransaction(tx, exec_result.gas_used); + // Update head + try self.block_state.setHeadBlock(block); - gas_used += exec_result.gas_used; - try valid_txs.append(tx); + self.parent_hash = block.hash(); + self.current_block_number = block.number; - // 
Remove from mempool - // tx_hash is u256 (not allocated), no need to free - _ = try self.mempool.remove(tx_hash); + // Remove transactions from mempool (they're now in the block) + for (block.transactions) |tx| { + const tx_hash = tx.hash(self.allocator) catch continue; + _ = self.mempool.remove(tx_hash) catch {}; + } + + return block; } + // Fallback: create empty block if payload request fails const block = core.block.Block{ .number = self.current_block_number, .parent_hash = self.parent_hash, .timestamp = @intCast(std.time.timestamp()), - .transactions = try valid_txs.toOwnedSlice(), - .gas_used = gas_used, + .transactions = try self.allocator.alloc(core.transaction.Transaction, 0), + .gas_used = 0, .gas_limit = self.config.block_gas_limit, - .state_root = core.types.hashFromBytes([_]u8{0} ** 32), // In production, compute from state - .receipts_root = core.types.hashFromBytes([_]u8{0} ** 32), // In production, compute from receipts + .state_root = core.types.hashFromBytes([_]u8{0} ** 32), + .receipts_root = core.types.hashFromBytes([_]u8{0} ** 32), .logs_bloom = [_]u8{0} ** 256, }; - try self.state_manager.finalizeBlock(block); self.parent_hash = block.hash(); self.current_block_number += 1; return block; } + + fn hexToBytes(self: *Self, hex: []const u8) ![]u8 { + const hex_start: usize = if (std.mem.startsWith(u8, hex, "0x")) 2 else 0; + const hex_data = hex[hex_start..]; + + if (hex_data.len % 2 != 0) { + return error.InvalidHexLength; + } + + var result = std.ArrayList(u8).init(self.allocator); + defer result.deinit(); + + var i: usize = 0; + while (i < hex_data.len) : (i += 2) { + const high = try std.fmt.parseInt(u8, hex_data[i .. i + 1], 16); + const low = try std.fmt.parseInt(u8, hex_data[i + 1 .. 
i + 2], 16); + try result.append((high << 4) | low); + } + + return result.toOwnedSlice(); + } }; diff --git a/src/validation/ingress.zig b/src/validation/ingress.zig index 4242cc8..db14fb6 100644 --- a/src/validation/ingress.zig +++ b/src/validation/ingress.zig @@ -2,6 +2,7 @@ const std = @import("std"); const core = @import("../core/root.zig"); const mempool = @import("../mempool/root.zig"); const state = @import("../state/root.zig"); +const l2_state = @import("../l2/state_provider.zig"); const validator = @import("transaction.zig"); pub const Ingress = struct { @@ -9,11 +10,12 @@ pub const Ingress = struct { mempool: *mempool.Mempool, validator: validator.TransactionValidator, - pub fn init(allocator: std.mem.Allocator, mp: *mempool.Mempool, sm: *state.StateManager) Ingress { + /// Initialize with state manager (for witness generation) and optional state provider (for validation) + pub fn init(allocator: std.mem.Allocator, mp: *mempool.Mempool, sm: ?*state.StateManager, sp: ?*l2_state.StateProvider) Ingress { return .{ .allocator = allocator, .mempool = mp, - .validator = validator.TransactionValidator.init(allocator, sm), + .validator = validator.TransactionValidator.init(allocator, sm, sp), }; } diff --git a/src/validation/transaction.zig b/src/validation/transaction.zig index c9b53f0..f47ad78 100644 --- a/src/validation/transaction.zig +++ b/src/validation/transaction.zig @@ -2,6 +2,7 @@ const std = @import("std"); const core = @import("../core/root.zig"); const crypto = @import("../crypto/root.zig"); const state = @import("../state/root.zig"); +const l2_state = @import("../l2/state_provider.zig"); pub const ValidationResult = enum { valid, @@ -16,12 +17,14 @@ pub const ValidationResult = enum { pub const TransactionValidator = struct { allocator: std.mem.Allocator, - state_manager: *state.StateManager, + state_manager: ?*state.StateManager, // Optional - kept for witness generation + state_provider: ?*l2_state.StateProvider, // Optional - used for validation 
queries (op-node style) - pub fn init(allocator: std.mem.Allocator, sm: *state.StateManager) TransactionValidator { + pub fn init(allocator: std.mem.Allocator, sm: ?*state.StateManager, sp: ?*l2_state.StateProvider) TransactionValidator { return .{ .allocator = allocator, .state_manager = sm, + .state_provider = sp, }; } @@ -35,15 +38,49 @@ pub const TransactionValidator = struct { // 2. Get sender const sender = try tx.sender(); - // 3. Check nonce - const expected_nonce = try self.state_manager.getNonce(sender); + // 3. Check nonce - prefer state provider (L2 geth) over local state manager + const expected_nonce: u64 = if (self.state_provider) |sp| blk: { + // Query L2 geth for nonce (op-node style) + break :blk sp.getNonce(sender, "latest") catch |err| { + std.log.warn("[Validator] Failed to get nonce from L2 geth: {any}, falling back to local state", .{err}); + // Fallback to local state manager if available + if (self.state_manager) |sm| { + break :blk try sm.getNonce(sender); + } else { + return .invalid_nonce; + } + }; + } else if (self.state_manager) |sm| blk: { + // Use local state manager as fallback + break :blk try sm.getNonce(sender); + } else { + return error.NoStateSource; + }; + if (tx.nonce < expected_nonce) { return .invalid_nonce; } - // 4. Check balance (for value transfer) + // 4. 
Check balance (for value transfer) - prefer state provider (L2 geth) if (tx.value > 0) { - const balance = try self.state_manager.getBalance(sender); + const balance: u256 = if (self.state_provider) |sp| blk: { + // Query L2 geth for balance (op-node style) + break :blk sp.getBalance(sender, "latest") catch |err| { + std.log.warn("[Validator] Failed to get balance from L2 geth: {any}, falling back to local state", .{err}); + // Fallback to local state manager if available + if (self.state_manager) |sm| { + break :blk try sm.getBalance(sender); + } else { + return .insufficient_balance; + } + }; + } else if (self.state_manager) |sm| blk: { + // Use local state manager as fallback + break :blk try sm.getBalance(sender); + } else { + return error.NoStateSource; + }; + const total_cost = tx.value + (tx.gas_price * tx.gas_limit); if (balance < total_cost) { return .insufficient_balance; From 8fe23c3d28e3ef0f8590b6bf92f7f7a230ce97a0 Mon Sep 17 00:00:00 2001 From: ch4r10t33r Date: Fri, 21 Nov 2025 19:05:41 +0000 Subject: [PATCH 2/2] fix: Fixed bugs in L1/L2 API consumptions --- README.md | 66 ++++- build.zig | 5 +- src/config/config.zig | 40 ++- src/crypto/jwt.zig | 90 +++++++ src/crypto/root.zig | 1 + src/l1/client.zig | 119 +++++++-- src/l2/engine_api_client.zig | 491 ++++++++++++++++++++++++++++------- src/l2/payload_attrs.zig | 60 ++++- src/l2/state_provider.zig | 4 +- src/main.zig | 9 +- src/sequencer/sequencer.zig | 16 +- 11 files changed, 764 insertions(+), 137 deletions(-) create mode 100644 src/crypto/jwt.zig diff --git a/README.md b/README.md index 4904566..e7b8760 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ The native-sequencer follows an op-node style architecture, delegating execution │ native-sequencer │ (Consensus Layer) └────────┬─────────┘ │ 1. 
Request block building - │ engine_forkchoiceUpdated(payload_attrs) + │ engine_forkchoiceUpdatedV3(payload_attrs) ▼ ┌──────────────────┐ │ L2 geth │ (Execution Layer) @@ -104,8 +104,8 @@ The native-sequencer follows an op-node style architecture, delegating execution - **`execute_tx_builder.zig`**: Builds ExecuteTx transactions with witness data for L1 submission #### L2 Integration (`src/l2/`) -- **`engine_api_client.zig`**: Engine API client for requesting payloads from L2 geth -- **`payload_attrs.zig`**: Payload attributes builder for `engine_forkchoiceUpdated` +- **`engine_api_client.zig`**: Engine API client for requesting payloads from L2 geth with full field parsing (extraData, baseFeePerGas, withdrawals, blobGasUsed, excessBlobGas) and JWT authentication +- **`payload_attrs.zig`**: Payload attributes builder for `engine_forkchoiceUpdatedV3` - **`state_provider.zig`**: State provider for querying L2 geth state (nonces, balances, code) #### Batch Management (`src/batch/`) @@ -130,6 +130,7 @@ The native-sequencer follows an op-node style architecture, delegating execution - **`hash.zig`**: Cryptographic hashing (Keccak256) - **`secp256k1_wrapper.zig`**: ECDSA signature operations via libsecp256k1 - **`signature.zig`**: Signature verification and address recovery +- **`jwt.zig`**: JWT token generation for Engine API authentication (HMAC-SHA256) ### Architecture Characteristics @@ -154,9 +155,14 @@ The native-sequencer follows an op-node style architecture, delegating execution ### Build Commands ```bash -# Build the sequencer executable +# Build the sequencer executable (native architecture) zig build +# Build for specific target +zig build -Dtarget=x86_64-linux-gnu.2.38 # Linux x86_64 +zig build -Dtarget=aarch64-macos # macOS ARM64 +zig build -Dtarget=x86_64-windows # Windows x86_64 + # Build and run zig build run @@ -173,7 +179,17 @@ zig build fmt zig build lint-fix ``` -The build output will be in `zig-out/bin/sequencer`. 
+The build output will be in `zig-out/bin/sequencer` (or `sequencer.exe` on Windows). + +### Cross-Compilation Support + +The sequencer supports cross-compilation to multiple platforms: + +- **macOS**: Native ARM64 builds supported. x86_64 cross-compilation from ARM64 may require architecture-matched libraries. +- **Windows**: Cross-compilation supported (LMDB disabled, uses in-memory state). +- **Linux**: Cross-compilation requires proper setup with cross-compilation toolchains and libraries. + +**Note**: LMDB linking is automatically handled based on the target platform. Windows builds use in-memory state instead of LMDB persistence. ### Docker Build @@ -248,8 +264,8 @@ zig build run Configure the sequencer using environment variables. Key variables: - **API**: `API_HOST`, `API_PORT` (default: `0.0.0.0:6197`) -- **L1**: `L1_RPC_URL`, `L1_CHAIN_ID`, `SEQUENCER_KEY` -- **L2**: `L2_RPC_URL`, `L2_ENGINE_API_PORT` (default: `http://localhost:8545:8551`) +- **L1**: `L1_RPC_URL`, `L1_CHAIN_ID` (default: `61971`), `SEQUENCER_KEY` +- **L2**: `L2_RPC_URL` (default: `http://localhost:18545`), `L2_ENGINE_API_PORT` (default: `18551`), `L2_CHAIN_ID` (default: `61972`), `L2_JWT_SECRET` (hex-encoded 32-byte secret for Engine API authentication) - **Mempool**: `MEMPOOL_MAX_SIZE`, `MEMPOOL_WAL_PATH` - **Batch**: `BATCH_SIZE_LIMIT`, `BATCH_INTERVAL_MS`, `BLOCK_GAS_LIMIT` - **State**: `STATE_DB_PATH` @@ -711,6 +727,9 @@ This is experimental software. 
Core features are implemented: - ✅ Batch formation and L1 submission via ExecuteTx - ✅ LMDB persistence - ✅ Witness generation for stateless execution +- ✅ Engine API full compatibility (parses all fields: extraData, baseFeePerGas, withdrawals, blobGasUsed, excessBlobGas) +- ✅ JWT authentication for Engine API +- ✅ Cross-platform builds (macOS ARM64, Windows x86_64, Linux x86_64) - ⏳ L1 subscription monitoring (WebSocket support) - ⏳ Comprehensive testing @@ -754,6 +773,39 @@ The sequencer supports ExecuteTx transactions (type 0x05) for stateless executio ExecuteTx includes pre-state hash, witness data, withdrawals, and standard EIP-1559 fields. See `src/core/transaction_execute.zig` for implementation details. +### Engine API Compatibility + +The sequencer's Engine API client (`src/l2/engine_api_client.zig`) provides full compatibility with geth's Engine API: + +- **Full Field Parsing**: Parses all fields returned by geth, including: + - `extraData`: Block extra data + - `baseFeePerGas`: Base fee per gas (EIP-1559) + - `withdrawals`: Validator withdrawals array (Shanghai/Cancun upgrade) + - `blobGasUsed`: Blob gas used (Cancun upgrade) + - `excessBlobGas`: Excess blob gas (Cancun upgrade) +- **JWT Authentication**: Supports JWT token authentication for Engine API endpoints (configured via `L2_JWT_SECRET`) +- **Backward Compatibility**: Handles optional fields gracefully (null values for pre-upgrade blocks) +- **Memory Management**: Proper cleanup of all parsed data structures + +### JWT Authentication + +The sequencer supports JWT authentication for Engine API communication with L2 geth. Configure the JWT secret using the `L2_JWT_SECRET` environment variable: + +```bash +# Generate a random 32-byte hex secret +openssl rand -hex 32 + +# Set in environment +export L2_JWT_SECRET=0x<hex-secret> + +# Or pass directly to the sequencer +L2_JWT_SECRET=0x<hex-secret> ./zig-out/bin/sequencer +``` + +**Important**: The same JWT secret must be configured in both the sequencer and L2 geth. 
See your L2 geth documentation for JWT configuration. + +**Note**: If `L2_JWT_SECRET` is not provided, the sequencer will attempt Engine API calls without authentication. This may fail if L2 geth requires JWT authentication. + ## Known Issues & Workarounds ### Linux Build Requirements diff --git a/build.zig b/build.zig index 3adb433..d7f8aee 100644 --- a/build.zig +++ b/build.zig @@ -22,8 +22,9 @@ pub fn build(b: *std.Build) void { comp.addLibraryPath(.{ .cwd_relative = "/usr/lib/x86_64-linux-gnu" }); comp.addLibraryPath(.{ .cwd_relative = "/usr/x86_64-linux-gnu/lib" }); } - // For macOS, let Zig's linkSystemLibrary find the library automatically - // (Homebrew libraries are in standard locations) + // For macOS, try to link LMDB - if cross-compiling to different arch, it will fail gracefully + // (Homebrew installs architecture-specific libraries, so cross-compilation may not work) + // We let the linker fail if the library architecture doesn't match comp.linkSystemLibrary("lmdb"); } }.add; diff --git a/src/config/config.zig b/src/config/config.zig index b1bb72b..5ae403b 100644 --- a/src/config/config.zig +++ b/src/config/config.zig @@ -7,12 +7,13 @@ pub const Config = struct { // L1 Connection l1_rpc_url: []const u8 = "http://localhost:8545", - l1_chain_id: u64 = 1, + l1_chain_id: u64 = 61971, // L2 Connection - l2_rpc_url: []const u8 = "http://localhost:8545", - l2_engine_api_port: u16 = 8551, - l2_chain_id: u64 = 1337, + l2_rpc_url: []const u8 = "http://localhost:18545", + l2_engine_api_port: u16 = 18551, + l2_chain_id: u64 = 61972, + l2_jwt_secret: ?[32]u8 = null, // JWT secret for Engine API authentication (32 bytes) // Sequencer sequencer_private_key: ?[32]u8 = null, @@ -51,15 +52,46 @@ pub const Config = struct { config.l1_rpc_url = url; } else |_| {} + if (std.process.getEnvVarOwned(allocator, "L1_CHAIN_ID")) |chain_id_str| { + defer allocator.free(chain_id_str); + config.l1_chain_id = try std.fmt.parseInt(u64, chain_id_str, 10); + } else |_| {} + if 
(std.process.getEnvVarOwned(allocator, "L2_RPC_URL")) |url| { config.l2_rpc_url = url; } else |_| {} + if (std.process.getEnvVarOwned(allocator, "L2_CHAIN_ID")) |chain_id_str| { + defer allocator.free(chain_id_str); + config.l2_chain_id = try std.fmt.parseInt(u64, chain_id_str, 10); + } else |_| {} + if (std.process.getEnvVarOwned(allocator, "L2_ENGINE_API_PORT")) |port_str| { config.l2_engine_api_port = try std.fmt.parseInt(u16, port_str, 10); allocator.free(port_str); } else |_| {} + if (std.process.getEnvVarOwned(allocator, "L2_JWT_SECRET")) |secret_hex| { + defer allocator.free(secret_hex); + // Parse hex secret (remove 0x prefix if present) + const hex_start: usize = if (std.mem.startsWith(u8, secret_hex, "0x")) 2 else 0; + const hex_data = secret_hex[hex_start..]; + + if (hex_data.len != 64) { + return error.InvalidJWTSecret; + } + + var secret_bytes: [32]u8 = undefined; + var i: usize = 0; + while (i < 32) : (i += 1) { + const high = try std.fmt.parseInt(u8, hex_data[i * 2 .. i * 2 + 1], 16); + const low = try std.fmt.parseInt(u8, hex_data[i * 2 + 1 .. 
i * 2 + 2], 16); + secret_bytes[i] = (high << 4) | low; + } + + config.l2_jwt_secret = secret_bytes; + } else |_| {} + if (std.process.getEnvVarOwned(allocator, "SEQUENCER_KEY")) |key_hex| { defer allocator.free(key_hex); // Parse hex key (remove 0x prefix if present) diff --git a/src/crypto/jwt.zig b/src/crypto/jwt.zig new file mode 100644 index 0000000..741c231 --- /dev/null +++ b/src/crypto/jwt.zig @@ -0,0 +1,90 @@ +// JWT token generation for Engine API authentication +// Implements HS256 JWT signing with minimal claims (iat only) + +const std = @import("std"); +const crypto = std.crypto; + +const base64_url_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"; + +/// Base64 URL encoding (without padding) +fn base64UrlEncode(allocator: std.mem.Allocator, data: []const u8) ![]const u8 { + var result = std.ArrayList(u8).init(allocator); + defer result.deinit(); + + var i: usize = 0; + while (i < data.len) : (i += 3) { + var buf: [3]u8 = undefined; + var buf_len: usize = 0; + + // Read up to 3 bytes + while (buf_len < 3 and i + buf_len < data.len) { + buf[buf_len] = data[i + buf_len]; + buf_len += 1; + } + + // Encode to base64 + if (buf_len == 3) { + const b1 = buf[0] >> 2; + const b2 = ((buf[0] & 0x03) << 4) | (buf[1] >> 4); + const b3 = ((buf[1] & 0x0f) << 2) | (buf[2] >> 6); + const b4 = buf[2] & 0x3f; + + try result.append(base64_url_chars[b1]); + try result.append(base64_url_chars[b2]); + try result.append(base64_url_chars[b3]); + try result.append(base64_url_chars[b4]); + } else if (buf_len == 2) { + const b1 = buf[0] >> 2; + const b2 = ((buf[0] & 0x03) << 4) | (buf[1] >> 4); + const b3 = (buf[1] & 0x0f) << 2; + + try result.append(base64_url_chars[b1]); + try result.append(base64_url_chars[b2]); + try result.append(base64_url_chars[b3]); + } else if (buf_len == 1) { + const b1 = buf[0] >> 2; + const b2 = (buf[0] & 0x03) << 4; + + try result.append(base64_url_chars[b1]); + try result.append(base64_url_chars[b2]); + } + } + + return 
result.toOwnedSlice();
+}
+
+/// Generate JWT token for Engine API authentication
+/// Uses HS256 signing with minimal claims (iat only)
+/// Returns a JWT token string (format: header.payload.signature)
+pub fn generateEngineAPIToken(allocator: std.mem.Allocator, secret: [32]u8) ![]const u8 {
+    // JWT Header: {"alg":"HS256","typ":"JWT"}
+    const header_json = "{\"alg\":\"HS256\",\"typ\":\"JWT\"}";
+    const header_b64 = try base64UrlEncode(allocator, header_json);
+    defer allocator.free(header_b64);
+
+    // JWT Payload: {"iat":<unix-timestamp>}
+    const current_time = @as(i64, @intCast(std.time.timestamp()));
+    const payload_json = try std.fmt.allocPrint(allocator, "{{\"iat\":{d}}}", .{current_time});
+    defer allocator.free(payload_json);
+    const payload_b64 = try base64UrlEncode(allocator, payload_json);
+    defer allocator.free(payload_b64);
+
+    // Combine header and payload
+    const unsigned_token = try std.fmt.allocPrint(allocator, "{s}.{s}", .{ header_b64, payload_b64 });
+    defer allocator.free(unsigned_token);
+
+    // Sign with HMAC-SHA256
+    var hmac_result: [32]u8 = undefined;
+    var hmac = crypto.auth.hmac.sha2.HmacSha256.init(&secret);
+    hmac.update(unsigned_token);
+    hmac.final(&hmac_result);
+
+    // Encode signature
+    const signature_b64 = try base64UrlEncode(allocator, &hmac_result);
+    defer allocator.free(signature_b64);
+
+    // Combine all parts
+    const token = try std.fmt.allocPrint(allocator, "{s}.{s}", .{ unsigned_token, signature_b64 });
+
+    return token;
+}
diff --git a/src/crypto/root.zig b/src/crypto/root.zig
index e6eb773..0a1af8b 100644
--- a/src/crypto/root.zig
+++ b/src/crypto/root.zig
@@ -3,3 +3,4 @@ pub const signature = @import("signature.zig");
 pub const keccak = @import("keccak.zig");
 pub const secp256k1 = @import("secp256k1_wrapper.zig");
 pub const utils = @import("utils.zig");
+pub const jwt = @import("jwt.zig");
diff --git a/src/l1/client.zig b/src/l1/client.zig
index e071a87..032475b 100644
--- a/src/l1/client.zig
+++ b/src/l1/client.zig
@@ -305,21 +305,35 
@@ pub const Client = struct { var params = std.json.Array.init(self.allocator); defer params.deinit(); - const result = try self.callRpc("eth_blockNumber", std.json.Value{ .array = params }); + const result = self.callRpc("eth_blockNumber", std.json.Value{ .array = params }) catch |err| { + // Connection errors should be handled by caller + return err; + }; defer self.allocator.free(result); + // Check if response is empty + if (result.len == 0) { + return error.EmptyResponse; + } + // Parse result - should be hex string - const parsed = try std.json.parseFromSliceLeaky( + const parsed = std.json.parseFromSliceLeaky( struct { result: []const u8 }, self.allocator, result, .{}, - ); + ) catch |err| { + std.log.err("[L1Client] Failed to parse eth_blockNumber response: {any}, response: {s}", .{ err, result }); + return error.UnexpectedToken; + }; // Convert hex string to u64 const hex_str = parsed.result; const hex_start: usize = if (std.mem.startsWith(u8, hex_str, "0x")) 2 else 0; - return try std.fmt.parseInt(u64, hex_str[hex_start..], 16); + return std.fmt.parseInt(u64, hex_str[hex_start..], 16) catch |err| { + std.log.err("[L1Client] Failed to parse block number hex '{s}': {any}", .{ hex_str, err }); + return err; + }; } pub const L1BlockTx = struct { @@ -450,8 +464,13 @@ pub const Client = struct { const port = url_parts.port; // Connect to L1 RPC - const address = try std.net.Address.parseIp(host, port); - const stream = try std.net.tcpConnectToAddress(address); + // Resolve hostname to IP address (handle "localhost" -> "127.0.0.1") + const ip_address = if (std.mem.eql(u8, host, "localhost")) "127.0.0.1" else host; + const address = try std.net.Address.parseIp(ip_address, port); + const stream = std.net.tcpConnectToAddress(address) catch |err| { + std.log.debug("[L1Client] Failed to connect to {s}:{d} for {s}: {any}", .{ host, port, method, err }); + return err; + }; defer stream.close(); // Build JSON-RPC request @@ -465,18 +484,27 @@ pub const Client = struct 
{ const request_body = try request_json.toOwnedSlice(); defer self.allocator.free(request_body); - // Build HTTP request + // Build HTTP request - use same pattern as engine_api_client for consistency var http_request = std.ArrayList(u8).init(self.allocator); defer http_request.deinit(); - try http_request.writer().print( - \\POST / HTTP/1.1\r - \\Host: {s}:{d}\r - \\Content-Type: application/json\r - \\Content-Length: {d}\r - \\\r - \\{s} - , .{ host, port, request_body.len, request_body }); + // Request line + try http_request.writer().print("POST / HTTP/1.1\r\n", .{}); + + // Headers - write each line separately with explicit \r\n + try http_request.writer().print("Host: {s}:{d}\r\n", .{ host, port }); + try http_request.writer().print("Content-Type: application/json\r\n", .{}); + try http_request.writer().print("Content-Length: {d}\r\n", .{request_body.len}); + try http_request.writer().print("Connection: close\r\n", .{}); + + // End of headers + try http_request.writer().print("\r\n", .{}); + + // Append request body directly + try http_request.writer().writeAll(request_body); + + // Debug: log request for troubleshooting + std.log.debug("[L1Client] Sending HTTP request to {s}:{d} for {s}, Content-Length: {d}", .{ host, port, method, request_body.len }); const http_request_bytes = try http_request.toOwnedSlice(); defer self.allocator.free(http_request_bytes); @@ -486,13 +514,70 @@ pub const Client = struct { // Read response var response_buffer: [8192]u8 = undefined; - const bytes_read = try stream.read(&response_buffer); + const bytes_read = stream.read(&response_buffer) catch |err| { + std.log.debug("[L1Client] Failed to read response from {s}:{d} for {s}: {any}", .{ host, port, method, err }); + return err; + }; + + if (bytes_read == 0) { + return error.EmptyResponse; + } + const response = response_buffer[0..bytes_read]; // Parse HTTP response - const body_start = std.mem.indexOf(u8, response, "\r\n\r\n") orelse return error.InvalidResponse; + const 
body_start = std.mem.indexOf(u8, response, "\r\n\r\n") orelse { + std.log.err("[L1Client] Invalid HTTP response format from {s}:{d} for {s}, response: {s}", .{ host, port, method, response }); + return error.InvalidResponse; + }; + + // Check HTTP status code + const status_line_end = std.mem.indexOf(u8, response, "\r\n") orelse { + std.log.err("[L1Client] Invalid HTTP response format from {s}:{d} for {s}", .{ host, port, method }); + return error.InvalidResponse; + }; + const status_line = response[0..status_line_end]; + if (!std.mem.startsWith(u8, status_line, "HTTP/1.1 200") and !std.mem.startsWith(u8, status_line, "HTTP/1.0 200")) { + std.log.err("[L1Client] HTTP error response from {s}:{d} for {s}: {s}", .{ host, port, method, status_line }); + + // Log request details for debugging + const request_preview = if (http_request_bytes.len > 200) http_request_bytes[0..200] else http_request_bytes; + std.log.debug("[L1Client] HTTP request (first {d} bytes): {s}", .{ request_preview.len, request_preview }); + + // Try to extract error message from body + const json_body = response[body_start + 4 ..]; + if (json_body.len > 0) { + std.log.debug("[L1Client] Error response body: {s}", .{json_body}); + // Try to parse as JSON-RPC error response + const parsed_error = std.json.parseFromSliceLeaky( + struct { + @"error": ?struct { + code: i32, + message: []const u8, + }, + }, + self.allocator, + json_body, + .{ .ignore_unknown_fields = true }, + ) catch null; + if (parsed_error) |err| { + if (err.@"error") |e| { + std.log.err("[L1Client] JSON-RPC error: code={d}, message={s}", .{ e.code, e.message }); + } + } + } + // Log full response for debugging (truncate if too long) + const response_preview = if (response.len > 500) response[0..500] else response; + std.log.debug("[L1Client] Full HTTP response (first {d} bytes): {s}", .{ response_preview.len, response_preview }); + return error.HttpError; + } + const json_body = response[body_start + 4 ..]; + if (json_body.len == 0) { 
+ return error.EmptyResponse; + } + // Return JSON body (caller will free) return try self.allocator.dupe(u8, json_body); } diff --git a/src/l2/engine_api_client.zig b/src/l2/engine_api_client.zig index f202913..9387b47 100644 --- a/src/l2/engine_api_client.zig +++ b/src/l2/engine_api_client.zig @@ -1,23 +1,26 @@ // Engine API client for L2 geth communication -// Implements Engine API endpoints: engine_newPayload, engine_getPayload, engine_forkchoiceUpdated +// Implements Engine API endpoints: engine_newPayload, engine_getPayload, engine_forkchoiceUpdatedV3 const std = @import("std"); const core = @import("../core/root.zig"); const types = @import("../core/types.zig"); const block_module = @import("../core/block.zig"); +const crypto = @import("../crypto/root.zig"); pub const EngineApiClient = struct { allocator: std.mem.Allocator, l2_rpc_url: []const u8, l2_engine_api_port: u16, + jwt_secret: ?[32]u8 = null, // JWT secret for Engine API authentication const Self = @This(); - pub fn init(allocator: std.mem.Allocator, l2_rpc_url: []const u8, l2_engine_api_port: u16) Self { + pub fn init(allocator: std.mem.Allocator, l2_rpc_url: []const u8, l2_engine_api_port: u16, jwt_secret: ?[32]u8) Self { return .{ .allocator = allocator, .l2_rpc_url = l2_rpc_url, .l2_engine_api_port = l2_engine_api_port, + .jwt_secret = jwt_secret, }; } @@ -132,6 +135,20 @@ pub const EngineApiClient = struct { return status; } + /// Withdrawal represents a validator withdrawal (EIP-4895) + pub const Withdrawal = struct { + index: u64, + validator_index: u64, + address: types.Address, + amount: u64, // Amount in Gwei + + pub fn deinit(self: *Withdrawal, allocator: std.mem.Allocator) void { + _ = self; + _ = allocator; + // No dynamic memory to free + } + }; + /// Execution payload returned from engine_getPayload pub const ExecutionPayload = struct { block_hash: types.Hash, @@ -146,12 +163,22 @@ pub const EngineApiClient = struct { gas_limit: u64, gas_used: u64, transactions: [][]const u8, // 
RLP-encoded transactions + extra_data: []const u8, // Block extra data + base_fee_per_gas: ?u256, // Base fee per gas (EIP-1559), null if not present + withdrawals: []Withdrawal, // Withdrawals array (Shanghai/Cancun), empty if not present + blob_gas_used: ?u64, // Blob gas used (Cancun), null if not present + excess_blob_gas: ?u64, // Excess blob gas (Cancun), null if not present pub fn deinit(self: *ExecutionPayload, allocator: std.mem.Allocator) void { for (self.transactions) |tx| { allocator.free(tx); } allocator.free(self.transactions); + allocator.free(self.extra_data); + for (self.withdrawals) |*w| { + w.deinit(allocator); + } + allocator.free(self.withdrawals); } }; @@ -171,10 +198,13 @@ pub const EngineApiClient = struct { const result = try self.callRpc("engine_getPayload", std.json.Value{ .array = params }); defer self.allocator.free(result); - // Parse full execution payload response - const parsed = try std.json.parseFromSliceLeaky( + // Debug: log raw response for troubleshooting + std.log.debug("[EngineAPI] Raw engine_getPayload response: {s}", .{result}); + + // Parse full execution payload response - parse all fields including extra ones + const parsed = std.json.parseFromSliceLeaky( struct { - result: struct { + result: ?struct { blockHash: []const u8, blockNumber: []const u8, parentHash: []const u8, @@ -187,36 +217,65 @@ pub const EngineApiClient = struct { gasLimit: []const u8, gasUsed: []const u8, transactions: [][]const u8, + extraData: ?[]const u8, // Optional - may not be present in older versions + baseFeePerGas: ?[]const u8, // Optional - EIP-1559, null for pre-London blocks + withdrawals: ?[]struct { + index: []const u8, + validatorIndex: []const u8, + address: []const u8, + amount: []const u8, + }, // Optional - Shanghai/Cancun upgrade + blobGasUsed: ?[]const u8, // Optional - Cancun upgrade + excessBlobGas: ?[]const u8, // Optional - Cancun upgrade + }, + @"error": ?struct { + code: i32, + message: []const u8, }, }, self.allocator, 
result, - .{}, - ); + .{ .ignore_unknown_fields = true }, + ) catch |err| { + std.log.err("[EngineAPI] Failed to parse engine_getPayload response: {any}, response: {s}", .{ err, result }); + return err; + }; + + // Check for error response + if (parsed.@"error") |err| { + std.log.err("[EngineAPI] engine_getPayload error response: code={d}, message={s}", .{ err.code, err.message }); + return error.EngineApiError; + } - const block_hash = try self.hexToHash(parsed.result.blockHash); - const parent_hash = try self.hexToHash(parsed.result.parentHash); - const state_root = try self.hexToHash(parsed.result.stateRoot); - const receipts_root = try self.hexToHash(parsed.result.receiptsRoot); - const prev_randao = try self.hexToHash(parsed.result.prevRandao); - const fee_recipient = try self.hexToAddress(parsed.result.feeRecipient); + // Check if result is present + const result_data = parsed.result orelse { + std.log.err("[EngineAPI] engine_getPayload response missing 'result' field, response: {s}", .{result}); + return error.MissingField; + }; + + const block_hash = try self.hexToHash(result_data.blockHash); + const parent_hash = try self.hexToHash(result_data.parentHash); + const state_root = try self.hexToHash(result_data.stateRoot); + const receipts_root = try self.hexToHash(result_data.receiptsRoot); + const prev_randao = try self.hexToHash(result_data.prevRandao); + const fee_recipient = try self.hexToAddress(result_data.feeRecipient); - const hex_start: usize = if (std.mem.startsWith(u8, parsed.result.blockNumber, "0x")) 2 else 0; - const block_number = try std.fmt.parseInt(u64, parsed.result.blockNumber[hex_start..], 16); + const hex_start: usize = if (std.mem.startsWith(u8, result_data.blockNumber, "0x")) 2 else 0; + const block_number = try std.fmt.parseInt(u64, result_data.blockNumber[hex_start..], 16); - const timestamp_start: usize = if (std.mem.startsWith(u8, parsed.result.timestamp, "0x")) 2 else 0; - const timestamp = try std.fmt.parseInt(u64, 
parsed.result.timestamp[timestamp_start..], 16); + const timestamp_start: usize = if (std.mem.startsWith(u8, result_data.timestamp, "0x")) 2 else 0; + const timestamp = try std.fmt.parseInt(u64, result_data.timestamp[timestamp_start..], 16); - const gas_limit_start: usize = if (std.mem.startsWith(u8, parsed.result.gasLimit, "0x")) 2 else 0; - const gas_limit = try std.fmt.parseInt(u64, parsed.result.gasLimit[gas_limit_start..], 16); + const gas_limit_start: usize = if (std.mem.startsWith(u8, result_data.gasLimit, "0x")) 2 else 0; + const gas_limit = try std.fmt.parseInt(u64, result_data.gasLimit[gas_limit_start..], 16); - const gas_used_start: usize = if (std.mem.startsWith(u8, parsed.result.gasUsed, "0x")) 2 else 0; - const gas_used = try std.fmt.parseInt(u64, parsed.result.gasUsed[gas_used_start..], 16); + const gas_used_start: usize = if (std.mem.startsWith(u8, result_data.gasUsed, "0x")) 2 else 0; + const gas_used = try std.fmt.parseInt(u64, result_data.gasUsed[gas_used_start..], 16); // Parse logs bloom var logs_bloom: [256]u8 = undefined; - const bloom_hex_start: usize = if (std.mem.startsWith(u8, parsed.result.logsBloom, "0x")) 2 else 0; - const bloom_hex = parsed.result.logsBloom[bloom_hex_start..]; + const bloom_hex_start: usize = if (std.mem.startsWith(u8, result_data.logsBloom, "0x")) 2 else 0; + const bloom_hex = result_data.logsBloom[bloom_hex_start..]; if (bloom_hex.len != 512) { return error.InvalidLogsBloomLength; } @@ -228,18 +287,71 @@ pub const EngineApiClient = struct { } // Clone transactions - const transactions = try self.allocator.alloc([]const u8, parsed.result.transactions.len); - for (parsed.result.transactions, 0..) |tx_hex, idx| { + const transactions = try self.allocator.alloc([]const u8, result_data.transactions.len); + for (result_data.transactions, 0..) 
|tx_hex, idx| { transactions[idx] = try self.allocator.dupe(u8, tx_hex); } + // Parse extraData (optional) + const extra_data = if (result_data.extraData) |ed| try self.hexToBytes(ed) else try self.allocator.alloc(u8, 0); + + // Parse baseFeePerGas (optional, EIP-1559) + const base_fee_per_gas: ?u256 = if (result_data.baseFeePerGas) |bfpg| blk: { + const bfpg_bytes = try self.hexToBytes(bfpg); + defer self.allocator.free(bfpg_bytes); + // Convert bytes to u256 (big-endian) + var value: u256 = 0; + for (bfpg_bytes) |byte| { + value = (value << 8) | byte; + } + break :blk value; + } else null; + + // Parse withdrawals (optional, Shanghai/Cancun) + const withdrawals = if (result_data.withdrawals) |wds| blk: { + const withdrawals_array = try self.allocator.alloc(Withdrawal, wds.len); + for (wds, 0..) |wd, idx| { + const index_hex_start: usize = if (std.mem.startsWith(u8, wd.index, "0x")) 2 else 0; + const index = try std.fmt.parseInt(u64, wd.index[index_hex_start..], 16); + + const validator_index_hex_start: usize = if (std.mem.startsWith(u8, wd.validatorIndex, "0x")) 2 else 0; + const validator_index = try std.fmt.parseInt(u64, wd.validatorIndex[validator_index_hex_start..], 16); + + const address = try self.hexToAddress(wd.address); + + const amount_hex_start: usize = if (std.mem.startsWith(u8, wd.amount, "0x")) 2 else 0; + const amount = try std.fmt.parseInt(u64, wd.amount[amount_hex_start..], 16); + + withdrawals_array[idx] = Withdrawal{ + .index = index, + .validator_index = validator_index, + .address = address, + .amount = amount, + }; + } + break :blk withdrawals_array; + } else try self.allocator.alloc(Withdrawal, 0); + + // Parse blobGasUsed (optional, Cancun) + const blob_gas_used: ?u64 = if (result_data.blobGasUsed) |bgu| blk: { + const bgu_hex_start: usize = if (std.mem.startsWith(u8, bgu, "0x")) 2 else 0; + break :blk try std.fmt.parseInt(u64, bgu[bgu_hex_start..], 16); + } else null; + + // Parse excessBlobGas (optional, Cancun) + const excess_blob_gas: 
?u64 = if (result_data.excessBlobGas) |ebg| blk: { + const ebg_hex_start: usize = if (std.mem.startsWith(u8, ebg, "0x")) 2 else 0; + break :blk try std.fmt.parseInt(u64, ebg[ebg_hex_start..], 16); + } else null; + const block_hash_hex = try self.hashToHex(block_hash); defer self.allocator.free(block_hash_hex); - std.log.info("[EngineAPI] engine_getPayload response: block_hash={s}, block_number={d}, {d} txs", .{ + std.log.info("[EngineAPI] engine_getPayload response: block_hash={s}, block_number={d}, {d} txs, {d} withdrawals", .{ block_hash_hex, block_number, transactions.len, + withdrawals.len, }); return ExecutionPayload{ @@ -255,11 +367,17 @@ pub const EngineApiClient = struct { .gas_limit = gas_limit, .gas_used = gas_used, .transactions = transactions, + .extra_data = extra_data, + .base_fee_per_gas = base_fee_per_gas, + .withdrawals = withdrawals, + .blob_gas_used = blob_gas_used, + .excess_blob_gas = excess_blob_gas, }; } - /// Update fork choice state via engine_forkchoiceUpdated (with optional payload attributes) + /// Update fork choice state via engine_forkchoiceUpdatedV3 (with optional payload attributes) /// If payload_attrs is provided, requests L2 geth to build a payload + /// Note: payload_attrs ownership is NOT transferred - caller must manage its memory pub fn forkchoiceUpdated(self: *Self, head_block_hash: types.Hash, safe_block_hash: types.Hash, finalized_block_hash: types.Hash, payload_attrs: ?std.json.ObjectMap) !ForkChoiceUpdateResponse { const head_hex = try self.hashToHex(head_block_hash); defer self.allocator.free(head_hex); @@ -268,70 +386,179 @@ pub const EngineApiClient = struct { const finalized_hex = try self.hashToHex(finalized_block_hash); defer self.allocator.free(finalized_hex); - std.log.info("[EngineAPI] Calling engine_forkchoiceUpdated: head={s}, safe={s}, finalized={s}", .{ + std.log.info("[EngineAPI] Calling engine_forkchoiceUpdatedV3: head={s}, safe={s}, finalized={s}", .{ head_hex, safe_hex, finalized_hex, }); - var params = 
std.json.Array.init(self.allocator); - defer params.deinit(); + // Serialize params to JSON string before building HTTP request + // This ensures all string references are valid during serialization + var params_json_str = std.ArrayList(u8).init(self.allocator); + defer params_json_str.deinit(); - // Fork choice state - var fork_choice_obj = std.json.ObjectMap.init(self.allocator); - defer fork_choice_obj.deinit(); - try fork_choice_obj.put("headBlockHash", std.json.Value{ .string = head_hex }); - try fork_choice_obj.put("safeBlockHash", std.json.Value{ .string = safe_hex }); - try fork_choice_obj.put("finalizedBlockHash", std.json.Value{ .string = finalized_hex }); + try params_json_str.append('['); - // Payload attributes (optional - if provided, L2 geth will build payload) + // Fork choice state object + try params_json_str.append('{'); + try params_json_str.writer().print("\"headBlockHash\":\"{s}\"", .{head_hex}); + try params_json_str.append(','); + try params_json_str.writer().print("\"safeBlockHash\":\"{s}\"", .{safe_hex}); + try params_json_str.append(','); + try params_json_str.writer().print("\"finalizedBlockHash\":\"{s}\"", .{finalized_hex}); + try params_json_str.append('}'); + + // Payload attributes (optional) if (payload_attrs) |attrs| { - try params.append(std.json.Value{ .object = fork_choice_obj }); - try params.append(std.json.Value{ .object = attrs }); + try params_json_str.append(','); + const attrs_str = try self.jsonValueToString(std.json.Value{ .object = attrs }); + defer self.allocator.free(attrs_str); + try params_json_str.writer().print("{s}", .{attrs_str}); } else { - // No payload attributes - just update fork choice - try params.append(std.json.Value{ .object = fork_choice_obj }); - // Add empty payload attributes - var empty_attrs = std.json.ObjectMap.init(self.allocator); - defer empty_attrs.deinit(); - try params.append(std.json.Value{ .object = empty_attrs }); + try params_json_str.append(','); + try 
params_json_str.appendSlice("{}"); } - const result = try self.callRpc("engine_forkchoiceUpdated", std.json.Value{ .array = params }); + try params_json_str.append(']'); + const params_json = try params_json_str.toOwnedSlice(); + defer self.allocator.free(params_json); + + // Build JSON-RPC request directly with serialized params + var request_json = std.ArrayList(u8).init(self.allocator); + defer request_json.deinit(); + + try request_json.writer().print( + \\{{"jsonrpc":"2.0","method":"engine_forkchoiceUpdatedV3","params":{s},"id":1}} + , .{params_json}); + + const request_body = try request_json.toOwnedSlice(); + defer self.allocator.free(request_body); + + // Now make the HTTP call + const result = try self.callRpcWithBody("engine_forkchoiceUpdatedV3", request_body); defer self.allocator.free(result); - // Parse response - const parsed = try std.json.parseFromSliceLeaky( - struct { - result: struct { - payloadStatus: struct { - status: []const u8, - latestValidHash: ?[]const u8, - validationError: ?[]const u8, - }, - payloadId: ?[]const u8, - }, + // Debug: log raw response for troubleshooting + std.log.debug("[EngineAPI] Raw engine_forkchoiceUpdatedV3 response: {s}", .{result}); + + // Parse response - handle both success and error responses + // Use a two-step approach: first parse as generic JSON, then extract fields + var parsed_generic = try std.json.parseFromSlice(std.json.Value, self.allocator, result, .{}); + defer parsed_generic.deinit(); + + // Check for error response first + if (parsed_generic.value.object.get("error")) |err_val| { + const err_obj = err_val.object; + const err_code = if (err_obj.get("code")) |c| switch (c) { + .integer => |i| @as(i32, @intCast(i)), + else => return error.InvalidErrorCode, + } else return error.MissingErrorCode; + + const err_msg = if (err_obj.get("message")) |m| switch (m) { + .string => |s| s, + else => return error.InvalidErrorMessage, + } else return error.MissingErrorMessage; + + std.log.err("[EngineAPI] 
engine_forkchoiceUpdatedV3 error response: code={d}, message={s}", .{ err_code, err_msg }); + + // Log error data if present + if (err_obj.get("data")) |data_val| { + if (data_val == .object) { + if (data_val.object.get("err")) |err_data| { + if (err_data == .string) { + std.log.err("[EngineAPI] Error details: {s}", .{err_data.string}); + } + } + } + } + + // Provide helpful error messages for common issues + if (err_code == -32601) { + std.log.err("[EngineAPI] Method not found - ensure L2 geth has Engine API enabled (--authrpc.addr, --authrpc.port, --authrpc.jwtsecret)", .{}); + } else if (err_code == -38003) { + std.log.err("[EngineAPI] Invalid payload attributes - check that all required fields are present (timestamp, prevRandao, suggestedFeeRecipient, parentBeaconBlockRoot)", .{}); + } + + return error.EngineApiError; + } + + // Parse success response + const result_val = parsed_generic.value.object.get("result") orelse { + std.log.err("[EngineAPI] engine_forkchoiceUpdatedV3 response missing both 'result' and 'error' fields, response: {s}", .{result}); + return error.MissingField; + }; + + const result_obj = switch (result_val) { + .object => |o| o, + else => { + std.log.err("[EngineAPI] engine_forkchoiceUpdatedV3 result is not an object, response: {s}", .{result}); + return error.InvalidResponse; }, - self.allocator, - result, - .{}, - ); + }; + + // Extract payloadStatus + const payload_status_val = result_obj.get("payloadStatus") orelse { + std.log.err("[EngineAPI] engine_forkchoiceUpdatedV3 result missing 'payloadStatus' field, response: {s}", .{result}); + return error.MissingField; + }; + + const payload_status_obj = switch (payload_status_val) { + .object => |o| o, + else => { + std.log.err("[EngineAPI] engine_forkchoiceUpdatedV3 payloadStatus is not an object, response: {s}", .{result}); + return error.InvalidResponse; + }, + }; + const status_str = if (payload_status_obj.get("status")) |s| switch (s) { + .string => |str| str, + else => return 
error.InvalidStatus, + } else return error.MissingField; + + const latest_valid_hash = if (payload_status_obj.get("latestValidHash")) |h| switch (h) { + .string => |str| str, + .null => null, + else => null, + } else null; + + const validation_error = if (payload_status_obj.get("validationError")) |e| switch (e) { + .string => |str| str, + .null => null, + else => null, + } else null; + + const payload_id = if (result_obj.get("payloadId")) |pid| switch (pid) { + .string => |str| str, + .null => null, + else => null, + } else null; + + // Create response directly const response = ForkChoiceUpdateResponse{ .payload_status = PayloadStatus{ - .status = parsed.result.payloadStatus.status, - .latest_valid_hash = parsed.result.payloadStatus.latestValidHash, - .validation_error = parsed.result.payloadStatus.validationError, + .status = status_str, + .latest_valid_hash = latest_valid_hash, + .validation_error = validation_error, }, - .payload_id = parsed.result.payloadId, + .payload_id = payload_id, }; - std.log.info("[EngineAPI] engine_forkchoiceUpdated response: status={s}", .{ + std.log.info("[EngineAPI] engine_forkchoiceUpdatedV3 response: status={s}", .{ response.payload_status.status, }); + // Log warning if status is INVALID + if (std.mem.eql(u8, response.payload_status.status, "INVALID")) { + std.log.warn("[EngineAPI] Fork choice update returned INVALID status - fork choice state may be invalid", .{}); + if (response.payload_status.validation_error) |err| { + std.log.err("[EngineAPI] Validation error: {s}", .{err}); + } + } + if (response.payload_id) |pid| { std.log.info("[EngineAPI] Payload ID: {s}", .{pid}); + } else { + std.log.warn("[EngineAPI] No payload ID returned - payload attributes may have been invalid or missing", .{}); } if (response.payload_status.latest_valid_hash) |hash| { @@ -391,50 +618,62 @@ pub const EngineApiClient = struct { }; } - /// Call JSON-RPC endpoint - fn callRpc(self: *Self, method: []const u8, params: std.json.Value) ![]u8 { + /// 
Call JSON-RPC endpoint with pre-serialized request body + fn callRpcWithBody(self: *Self, method: []const u8, request_body: []const u8) ![]u8 { const start_time = std.time.nanoTimestamp(); // Parse URL const url_parts = try self.parseUrl(self.l2_rpc_url); const host = url_parts.host; - const port = if (std.mem.eql(u8, method[0..7], "engine_")) self.l2_engine_api_port else url_parts.port; + const port = if (std.mem.startsWith(u8, method, "engine_")) self.l2_engine_api_port else url_parts.port; std.log.debug("[EngineAPI] Connecting to {s}:{d} for method {s}", .{ host, port, method }); // Connect to L2 RPC - const address = try std.net.Address.parseIp(host, port); + // Resolve hostname to IP address (handle "localhost" -> "127.0.0.1") + const ip_address = if (std.mem.eql(u8, host, "localhost")) "127.0.0.1" else host; + const address = try std.net.Address.parseIp(ip_address, port); const stream = std.net.tcpConnectToAddress(address) catch |err| { - std.log.err("[EngineAPI] Failed to connect to {s}:{d} for {s}: {any}", .{ host, port, method, err }); + if (err == error.ConnectionRefused) { + std.log.debug("[EngineAPI] Connection refused to {s}:{d} for {s} - L2 geth may not be running", .{ host, port, method }); + } else { + std.log.err("[EngineAPI] Failed to connect to {s}:{d} for {s}: {any}", .{ host, port, method, err }); + } return err; }; defer stream.close(); std.log.debug("[EngineAPI] Connected to {s}:{d}", .{ host, port }); - // Build JSON-RPC request - var request_json = std.ArrayList(u8).init(self.allocator); - defer request_json.deinit(); - - try request_json.writer().print( - \\{{"jsonrpc":"2.0","method":"{s}","params":{s},"id":1}} - , .{ method, try self.jsonValueToString(params) }); + // Generate JWT token if JWT secret is configured (required for Engine API) + var jwt_token: []const u8 = ""; + defer if (jwt_token.len > 0) self.allocator.free(jwt_token); - const request_body = try request_json.toOwnedSlice(); - defer self.allocator.free(request_body); + if 
(self.jwt_secret) |secret| { + jwt_token = try crypto.jwt.generateEngineAPIToken(self.allocator, secret); + } - // Build HTTP request + // Build HTTP request - append directly to avoid format string issues var http_request = std.ArrayList(u8).init(self.allocator); defer http_request.deinit(); - try http_request.writer().print( - \\POST / HTTP/1.1\r - \\Host: {s}:{d}\r - \\Content-Type: application/json\r - \\Content-Length: {d}\r - \\\r - \\{s} - , .{ host, port, request_body.len, request_body }); + // Write HTTP headers + try http_request.writer().print("POST / HTTP/1.1\r\n", .{}); + try http_request.writer().print("Host: {s}:{d}\r\n", .{ host, port }); + try http_request.writer().print("Content-Type: application/json\r\n", .{}); + try http_request.writer().print("Content-Length: {d}\r\n", .{request_body.len}); + try http_request.writer().print("Connection: close\r\n", .{}); + + // Add Authorization header if JWT is configured + if (jwt_token.len > 0) { + try http_request.writer().print("Authorization: Bearer {s}\r\n", .{jwt_token}); + } + + // End of headers + try http_request.writer().print("\r\n", .{}); + + // Append request body directly (not via format string to avoid segfault) + try http_request.writer().writeAll(request_body); const http_request_bytes = try http_request.toOwnedSlice(); defer self.allocator.free(http_request_bytes); @@ -456,11 +695,28 @@ pub const EngineApiClient = struct { // Parse HTTP response const body_start = std.mem.indexOf(u8, response, "\r\n\r\n") orelse { + std.log.err("[EngineAPI] Invalid HTTP response format for {s}, response: {s}", .{ method, response }); + return error.InvalidResponse; + }; + + // Check HTTP status code + const status_line_end = std.mem.indexOf(u8, response, "\r\n") orelse { std.log.err("[EngineAPI] Invalid HTTP response format for {s}", .{method}); return error.InvalidResponse; }; + const status_line = response[0..status_line_end]; + if (!std.mem.startsWith(u8, status_line, "HTTP/1.1 200")) { + 
std.log.err("[EngineAPI] HTTP error response for {s}: {s}", .{ method, status_line }); + return error.HttpError; + } + const json_body = response[body_start + 4 ..]; + if (json_body.len == 0) { + std.log.err("[EngineAPI] Empty JSON body in response for {s}", .{method}); + return error.EmptyResponse; + } + const elapsed_ms = (@as(f64, @floatFromInt(std.time.nanoTimestamp() - start_time)) / 1_000_000.0); std.log.debug("[EngineAPI] {s} completed in {d:.2}ms, response size: {d} bytes", .{ method, @@ -472,6 +728,26 @@ pub const EngineApiClient = struct { return try self.allocator.dupe(u8, json_body); } + /// Call JSON-RPC endpoint (legacy method - kept for compatibility) + fn callRpc(self: *Self, method: []const u8, params: std.json.Value) ![]u8 { + // Serialize params first + const params_str = try self.jsonValueToString(params); + defer self.allocator.free(params_str); + + // Build request body + var request_json = std.ArrayList(u8).init(self.allocator); + defer request_json.deinit(); + + try request_json.writer().print( + \\{{"jsonrpc":"2.0","method":"{s}","params":{s},"id":1}} + , .{ method, params_str }); + + const request_body = try request_json.toOwnedSlice(); + defer self.allocator.free(request_body); + + return self.callRpcWithBody(method, request_body); + } + const UrlParts = struct { host: []const u8, port: u16, @@ -532,7 +808,22 @@ pub const EngineApiClient = struct { return result.toOwnedSlice(); }, .string => |s| { - return try std.fmt.allocPrint(self.allocator, "\"{s}\"", .{s}); + // Escape special characters in JSON strings + var result = std.ArrayList(u8).init(self.allocator); + defer result.deinit(); + try result.append('"'); + for (s) |char| { + switch (char) { + '"' => try result.appendSlice("\\\""), + '\\' => try result.appendSlice("\\\\"), + '\n' => try result.appendSlice("\\n"), + '\r' => try result.appendSlice("\\r"), + '\t' => try result.appendSlice("\\t"), + else => try result.append(char), + } + } + try result.append('"'); + return 
result.toOwnedSlice(); }, else => return error.UnsupportedJsonType, } @@ -599,4 +890,24 @@ pub const EngineApiClient = struct { return types.addressFromBytes(result); } + + /// Convert hex string to bytes (variable length) + fn hexToBytes(self: *Self, hex: []const u8) ![]u8 { + const hex_start: usize = if (std.mem.startsWith(u8, hex, "0x")) 2 else 0; + const hex_data = hex[hex_start..]; + + if (hex_data.len % 2 != 0) { + return error.InvalidHexLength; + } + + const result = try self.allocator.alloc(u8, hex_data.len / 2); + var i: usize = 0; + while (i < hex_data.len) : (i += 2) { + const high = try std.fmt.parseInt(u8, hex_data[i .. i + 1], 16); + const low = try std.fmt.parseInt(u8, hex_data[i + 1 .. i + 2], 16); + result[i / 2] = (high << 4) | low; + } + + return result; + } }; diff --git a/src/l2/payload_attrs.zig b/src/l2/payload_attrs.zig index 5b5814f..7b81b21 100644 --- a/src/l2/payload_attrs.zig +++ b/src/l2/payload_attrs.zig @@ -1,5 +1,5 @@ // Payload Attributes Builder -// Builds payload attributes for engine_forkchoiceUpdated (op-node style) +// Builds payload attributes for engine_forkchoiceUpdatedV3 (op-node style) const std = @import("std"); const core = @import("../core/root.zig"); @@ -12,6 +12,7 @@ pub const PayloadAttributes = struct { suggested_fee_recipient: types.Address, transactions: []transaction.Transaction, withdrawals: []void = &[_]void{}, // Empty for now + parent_beacon_block_root: ?types.Hash = null, // Required for V3 (Cancun), optional for V1/V2 pub fn deinit(self: *PayloadAttributes, allocator: std.mem.Allocator) void { allocator.free(self.transactions); @@ -40,50 +41,89 @@ pub const PayloadAttributesBuilder = struct { .suggested_fee_recipient = fee_recipient, .transactions = txs, .withdrawals = &[_]void{}, + .parent_beacon_block_root = types.hashFromBytes([_]u8{0} ** 32), // Default to zero hash for now (required for V3) }; } - /// Convert to JSON-RPC format for engine_forkchoiceUpdated + /// Convert to JSON-RPC format for 
engine_forkchoiceUpdatedV3 + /// Note: The returned ObjectMap owns the string memory - caller must use deinitJsonRpc to free pub fn toJsonRpc(self: *Self, attrs: PayloadAttributes) !std.json.ObjectMap { var obj = std.json.ObjectMap.init(self.allocator); // Timestamp const timestamp_hex = try std.fmt.allocPrint(self.allocator, "0x{x}", .{attrs.timestamp}); - defer self.allocator.free(timestamp_hex); + // String is stored in JSON object - will be freed in deinitJsonRpc try obj.put("timestamp", std.json.Value{ .string = timestamp_hex }); // PrevRandao const prev_randao_bytes = types.hashToBytes(attrs.prev_randao); const prev_randao_hex = try self.hashToHex(&prev_randao_bytes); - defer self.allocator.free(prev_randao_hex); + // String is stored in JSON object - will be freed in deinitJsonRpc try obj.put("prevRandao", std.json.Value{ .string = prev_randao_hex }); // SuggestedFeeRecipient const fee_recipient_bytes = types.addressToBytes(attrs.suggested_fee_recipient); const fee_recipient_hex = try self.hashToHex(&fee_recipient_bytes); - defer self.allocator.free(fee_recipient_hex); + // String is stored in JSON object - will be freed in deinitJsonRpc try obj.put("suggestedFeeRecipient", std.json.Value{ .string = fee_recipient_hex }); // Transactions (serialize to RLP hex) var tx_array = std.json.Array.init(self.allocator); - defer tx_array.deinit(); + // Array is stored in JSON object - will be freed in deinitJsonRpc for (attrs.transactions) |tx| { const tx_rlp = try tx.serialize(self.allocator); - defer self.allocator.free(tx_rlp); const tx_hex = try self.hashToHex(tx_rlp); - defer self.allocator.free(tx_hex); + // String is stored in JSON array - will be freed in deinitJsonRpc + self.allocator.free(tx_rlp); // Free RLP bytes immediately try tx_array.append(std.json.Value{ .string = tx_hex }); } try obj.put("transactions", std.json.Value{ .array = tx_array }); // Withdrawals (empty for now) - var withdrawals_array = std.json.Array.init(self.allocator); - defer 
withdrawals_array.deinit(); + const withdrawals_array = std.json.Array.init(self.allocator); + // Array is stored in JSON object - will be freed in deinitJsonRpc try obj.put("withdrawals", std.json.Value{ .array = withdrawals_array }); + // ParentBeaconBlockRoot (required for V3/Cancun) + if (attrs.parent_beacon_block_root) |beacon_root| { + const beacon_root_bytes = types.hashToBytes(beacon_root); + const beacon_root_hex = try self.hashToHex(&beacon_root_bytes); + // String is stored in JSON object - will be freed in deinitJsonRpc + try obj.put("parentBeaconBlockRoot", std.json.Value{ .string = beacon_root_hex }); + } else { + // For V3, we still need to provide it (use zero hash as default) + const zero_hash_hex = try self.hashToHex(&([_]u8{0} ** 32)); + try obj.put("parentBeaconBlockRoot", std.json.Value{ .string = zero_hash_hex }); + } + return obj; } + /// Properly deinitialize a JSON-RPC object map created by toJsonRpc + /// Frees all string values stored in the map + pub fn deinitJsonRpc(allocator: std.mem.Allocator, obj: *std.json.ObjectMap) void { + var it = obj.iterator(); + while (it.next()) |entry| { + switch (entry.value_ptr.*) { + .string => |s| { + allocator.free(s); + }, + .array => |arr| { + // Free strings in array + for (arr.items) |item| { + switch (item) { + .string => |str| allocator.free(str), + else => {}, + } + } + arr.deinit(); + }, + else => {}, + } + } + obj.deinit(); + } + fn hashToHex(self: *Self, bytes: []const u8) ![]u8 { var result = std.ArrayList(u8).init(self.allocator); defer result.deinit(); diff --git a/src/l2/state_provider.zig b/src/l2/state_provider.zig index a1cdd2b..7e7b74d 100644 --- a/src/l2/state_provider.zig +++ b/src/l2/state_provider.zig @@ -208,7 +208,9 @@ pub const StateProvider = struct { const port = url_parts.port; // Connect to L2 RPC - const address = try std.net.Address.parseIp(host, port); + // Resolve hostname to IP address (handle "localhost" -> "127.0.0.1") + const ip_address = if (std.mem.eql(u8, host, 
"localhost")) "127.0.0.1" else host; + const address = try std.net.Address.parseIp(ip_address, port); const stream = try std.net.tcpConnectToAddress(address); defer stream.close(); diff --git a/src/main.zig b/src/main.zig index a24b413..62f15c8 100644 --- a/src/main.zig +++ b/src/main.zig @@ -97,8 +97,13 @@ pub fn main() !void { std.log.info("L1 derivation pipeline initialized", .{}); // Initialize L2 Engine API client - var engine_client = lib.l2.EngineApiClient.init(allocator, cfg.l2_rpc_url, cfg.l2_engine_api_port); - std.log.info("L2 Engine API client initialized (rpc_url={s}, engine_port={d})", .{ cfg.l2_rpc_url, cfg.l2_engine_api_port }); + var engine_client = lib.l2.EngineApiClient.init(allocator, cfg.l2_rpc_url, cfg.l2_engine_api_port, cfg.l2_jwt_secret); + if (cfg.l2_jwt_secret) |_| { + std.log.info("L2 Engine API client initialized (rpc_url={s}, engine_port={d}, jwt_auth=enabled)", .{ cfg.l2_rpc_url, cfg.l2_engine_api_port }); + } else { + std.log.warn("L2 Engine API client initialized without JWT authentication - Engine API calls may fail", .{}); + std.log.info("L2 Engine API client initialized (rpc_url={s}, engine_port={d}, jwt_auth=disabled)", .{ cfg.l2_rpc_url, cfg.l2_engine_api_port }); + } // Initialize sequencer with op-node style components var seq = lib.sequencer.Sequencer.init(allocator, &cfg, &mp, &state_manager, &batch_builder, &l1_derivation, &engine_client); diff --git a/src/sequencer/sequencer.zig b/src/sequencer/sequencer.zig index dd06bdc..62dbfd2 100644 --- a/src/sequencer/sequencer.zig +++ b/src/sequencer/sequencer.zig @@ -78,15 +78,17 @@ pub const Sequencer = struct { defer payload_attrs.deinit(self.allocator); // Convert to JSON-RPC format - var payload_attrs_json = try self.payload_builder.toJsonRpc(payload_attrs); - defer payload_attrs_json.deinit(); + const payload_attrs_json = try self.payload_builder.toJsonRpc(payload_attrs); + // Note: payload_attrs_json ownership is transferred to forkchoiceUpdated + // It will be deinitialized 
in forkchoiceUpdated's defer block // Get fork choice state const head_hash = self.block_state.getHeadBlockHash() orelse self.parent_hash; const safe_hash = self.block_state.getSafeBlockHash() orelse self.parent_hash; const finalized_hash = self.block_state.getFinalizedBlockHash() orelse self.parent_hash; - // Request payload via engine_forkchoiceUpdated + // Request payload via engine_forkchoiceUpdatedV3 + // payload_attrs_json is moved into params array and will be deinitialized there const response = try self.engine_client.forkchoiceUpdated(head_hash, safe_hash, finalized_hash, payload_attrs_json); if (response.payload_id) |payload_id| { @@ -169,7 +171,13 @@ pub const Sequencer = struct { /// Requests payload from L2 geth instead of building directly pub fn buildBlock(self: *Self) !core.block.Block { // Request payload from L2 geth - const payload_id_opt = try self.requestPayload(); + const payload_id_opt = self.requestPayload() catch |err| { + // Log connection errors with more context + if (err == error.ConnectionRefused) { + std.log.warn("[Sequencer] L2 geth Engine API not available at {s}:{d}. Is L2 geth running?", .{ self.config.l2_rpc_url, self.config.l2_engine_api_port }); + } + return err; + }; if (payload_id_opt) |payload_id| { // Get built payload var payload = try self.getPayload(payload_id);