From cbb645179a0ea6a67c03d22b7c862aa3e22fdf87 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 30 Oct 2025 15:30:44 +0100 Subject: [PATCH 01/39] feat: forced inclusion for executor --- .mockery.yaml | 7 +- block/components.go | 3 + block/internal/common/event.go | 7 ++ block/internal/executing/executor.go | 31 +++++- .../internal/executing/executor_lazy_test.go | 3 + .../internal/executing/executor_logic_test.go | 3 + .../executing/executor_restart_test.go | 5 + block/internal/executing/executor_test.go | 11 +- block/internal/reaping/reaper_test.go | 1 + block/internal/syncing/da_retriever.go | 69 ++++++++++-- block/internal/syncing/da_retriever_test.go | 83 ++++++++++++++ block/internal/syncing/syncer.go | 5 +- block/internal/syncing/syncer_backoff_test.go | 6 +- .../internal/syncing/syncer_benchmark_test.go | 2 +- block/internal/syncing/syncer_mock.go | 104 +++++++++++++++--- block/internal/syncing/syncer_test.go | 4 +- core/sequencer/sequencing.go | 6 +- go.mod | 2 + pkg/config/config.go | 33 ++++-- pkg/config/config_test.go | 4 +- pkg/config/defaults.go | 16 +-- types/state.go | 3 +- 22 files changed, 335 insertions(+), 73 deletions(-) diff --git a/.mockery.yaml b/.mockery.yaml index 31883ab545..93c015b0ea 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -48,7 +48,7 @@ packages: filename: external/hstore.go github.com/evstack/ev-node/block/internal/syncing: interfaces: - daRetriever: + DaRetrieverI: config: dir: ./block/internal/syncing pkgname: syncing @@ -65,8 +65,3 @@ packages: dir: ./block/internal/common pkgname: common filename: broadcaster_mock.go - p2pHandler: - config: - dir: ./block/internal/syncing - pkgname: syncing - filename: syncer_mock.go diff --git a/block/components.go b/block/components.go index 3ee2062acf..7e6649090b 100644 --- a/block/components.go +++ b/block/components.go @@ -208,10 +208,13 @@ func NewAggregatorComponents( // error channel for critical failures errorCh := make(chan error, 1) + daRetriever := 
syncing.NewDARetriever(da, cacheManager, config, genesis, logger) + executor, err := executing.NewExecutor( store, exec, sequencer, + daRetriever, signer, cacheManager, metrics, diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 69d0300f9f..05c560b724 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -21,3 +21,10 @@ type DAHeightEvent struct { // Source indicates where this event originated from (DA or P2P) Source EventSource } + +// ForcedIncluded represents a forced inclusion event for caching +type ForcedIncludedEvent struct { + Txs [][]byte + StartDaHeight uint64 + EndDaHeight uint64 +} diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 4fef5db38d..206bf54bef 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -15,6 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/syncing" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" @@ -27,10 +28,11 @@ import ( // Executor handles block production, transaction processing, and state management type Executor struct { // Core components - store store.Store - exec coreexecutor.Executor - sequencer coresequencer.Sequencer - signer signer.Signer + store store.Store + exec coreexecutor.Executor + sequencer coresequencer.Sequencer + signer signer.Signer + daRetriever syncing.DaRetrieverI // Shared components cache cache.Manager @@ -71,6 +73,7 @@ func NewExecutor( store store.Store, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, + daRetriever syncing.DaRetrieverI, signer signer.Signer, cache cache.Manager, metrics *common.Metrics, @@ -99,6 +102,7 @@ func NewExecutor( store: store, exec: exec, sequencer: sequencer, + daRetriever: daRetriever, 
signer: signer, cache: cache, metrics: metrics, @@ -199,7 +203,7 @@ func (e *Executor) initializeState() error { LastBlockHeight: e.genesis.InitialHeight - 1, LastBlockTime: e.genesis.StartTime, AppHash: stateRoot, - DAHeight: 0, + DAHeight: e.genesis.DAStartHeight, } } @@ -330,6 +334,12 @@ func (e *Executor) produceBlock() error { } } + // fetch forced included txs + forcedIncludedTxsEvent, err := e.daRetriever.RetrieveForcedIncludedTxsFromDA(e.ctx, currentState.DAHeight) + if err != nil { + e.logger.Error().Err(err).Msg("failed to retrieve forced included txs") + } + var ( header *types.SignedHeader data *types.Data @@ -356,6 +366,12 @@ func (e *Executor) produceBlock() error { return fmt.Errorf("failed to retrieve batch: %w", err) } + // append forced included txs to batch data + // TODO(@julienrbrt): if the batch is at size, adding more txs isn't what we want. + // maybe we need to add a limit to retrieveBatch based on the forced included txs size. + // for the poc this is ok as is. + if forcedIncludedTxsEvent != nil { batchData.Transactions = append(batchData.Transactions, forcedIncludedTxsEvent.Txs...) } + header, data, err = e.createBlock(e.ctx, newHeight, batchData) if err != nil { return fmt.Errorf("failed to create block: %w", err) @@ -379,6 +395,11 @@ func (e *Executor) produceBlock() error { return fmt.Errorf("failed to apply block: %w", err) } + // update da height, based on last retrieved. + if forcedIncludedTxsEvent != nil && forcedIncludedTxsEvent.EndDaHeight > newState.DAHeight { + newState.DAHeight = forcedIncludedTxsEvent.EndDaHeight + } + + // signing the header is done after applying the block // as for signing, the state of the block may be required by the signature payload provider.
signature, err := e.signHeader(header.Header) diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index b72f0a856b..70226bbeb4 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -14,6 +14,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/syncing" coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -56,6 +57,7 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { memStore, mockExec, mockSeq, + syncing.NewMockDaRetrieverI(t), signerWrapper, cacheManager, metrics, @@ -166,6 +168,7 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { memStore, mockExec, mockSeq, + syncing.NewMockDaRetrieverI(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 9aa79d0c43..a2a5789b1c 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -16,6 +16,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/syncing" coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -78,6 +79,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { memStore, mockExec, mockSeq, + syncing.NewMockDaRetrieverI(t), signerWrapper, cacheManager, metrics, @@ -165,6 +167,7 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { memStore, mockExec, mockSeq, + syncing.NewMockDaRetrieverI(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_restart_test.go 
b/block/internal/executing/executor_restart_test.go index 3f0e8b500c..9f764ee7e6 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -14,6 +14,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/syncing" coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -56,6 +57,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { memStore, mockExec1, mockSeq1, + syncing.NewMockDaRetrieverI(t), signerWrapper, cacheManager, metrics, @@ -175,6 +177,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { memStore, // same store mockExec2, mockSeq2, + syncing.NewMockDaRetrieverI(t), signerWrapper, cacheManager, metrics, @@ -273,6 +276,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { memStore, mockExec1, mockSeq1, + syncing.NewMockDaRetrieverI(t), signerWrapper, cacheManager, metrics, @@ -325,6 +329,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { memStore, mockExec2, mockSeq2, + syncing.NewMockDaRetrieverI(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index e310c6d40d..4ffa9cdfc7 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -13,6 +13,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/syncing" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" @@ -46,8 +47,9 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { // Create executor with broadcasters executor, err := NewExecutor( memStore, - nil, // nil executor (we're not testing execution) - nil, // 
nil sequencer (we're not testing sequencing) + nil, // nil executor (we're not testing execution) + nil, // nil sequencer (we're not testing sequencing) + syncing.NewMockDaRetrieverI(t), testSigner, // test signer (required for executor) cacheManager, metrics, @@ -96,8 +98,9 @@ func TestExecutor_NilBroadcasters(t *testing.T) { // Create executor with nil broadcasters (light node scenario) executor, err := NewExecutor( memStore, - nil, // nil executor - nil, // nil sequencer + nil, // nil executor + nil, // nil sequencer + syncing.NewMockDaRetrieverI(t), testSigner, // test signer (required for executor) cacheManager, metrics, diff --git a/block/internal/reaping/reaper_test.go b/block/internal/reaping/reaper_test.go index 0bb152e9d2..74b1d629de 100644 --- a/block/internal/reaping/reaper_test.go +++ b/block/internal/reaping/reaper_test.go @@ -40,6 +40,7 @@ func newTestExecutor(t *testing.T) *executing.Executor { nil, // store (unused) nil, // core executor (unused) nil, // sequencer (unused) + nil, // daretriever (unused) s, // signer (required) nil, // cache (unused) nil, // metrics (unused) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index de67e1fd1c..0a4338891f 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -30,8 +30,12 @@ type DARetriever struct { logger zerolog.Logger // calculate namespaces bytes once and reuse them - namespaceBz []byte - namespaceDataBz []byte + namespaceBz []byte + namespaceDataBz []byte + namespaceForcedInclusionBz []byte + + hasForcedInclusionNs bool + daEpochSize uint64 // transient cache, only full event need to be passed to the syncer // on restart, will be refetch as da height is updated by syncer @@ -47,15 +51,26 @@ func NewDARetriever( genesis genesis.Genesis, logger zerolog.Logger, ) *DARetriever { + forcedInclusionNs := config.DA.GetForcedInclusionNamespace() + hasForcedInclusionNs := forcedInclusionNs != "" + + var 
namespaceForcedInclusionBz []byte + if hasForcedInclusionNs { + namespaceForcedInclusionBz = coreda.NamespaceFromString(forcedInclusionNs).Bytes() + } + return &DARetriever{ - da: da, - cache: cache, - genesis: genesis, - logger: logger.With().Str("component", "da_retriever").Logger(), - namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), - pendingHeaders: make(map[uint64]*types.SignedHeader), - pendingData: make(map[uint64]*types.Data), + da: da, + cache: cache, + genesis: genesis, + logger: logger.With().Str("component", "da_retriever").Logger(), + namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), + namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), + namespaceForcedInclusionBz: namespaceForcedInclusionBz, + hasForcedInclusionNs: hasForcedInclusionNs, + daEpochSize: config.DA.ForcedInclusionDAEpoch, + pendingHeaders: make(map[uint64]*types.SignedHeader), + pendingData: make(map[uint64]*types.Data), } } @@ -76,6 +91,40 @@ func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co return r.processBlobs(ctx, blobsResp.Data, daHeight), nil } +// RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. +// It fetches from the daHeight for the da epoch range defined in the config. 
+func (r *DARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { + if !r.hasForcedInclusionNs { + return nil, fmt.Errorf("forced inclusion namespace not configured") + } + + event := &common.ForcedIncludedEvent{ + StartDaHeight: daHeight, + } + + r.logger.Debug().Uint64("da_height", daHeight).Uint64("range", r.daEpochSize).Msg("retrieving forced included transactions from DA") + + for epochHeight := daHeight + 1; epochHeight <= daHeight+r.daEpochSize; epochHeight++ { + result := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochHeight, r.namespaceForcedInclusionBz, defaultDATimeout) + + // quickly break if we are too ahead. + if result.Code == coreda.StatusHeightFromFuture { + break + } + + if result.Code == coreda.StatusSuccess { + if err := r.validateBlobResponse(result, epochHeight); !errors.Is(err, coreda.ErrBlobNotFound) && err != nil { + return nil, err + } + + event.EndDaHeight = epochHeight + event.Txs = append(event.Txs, result.Data...)
+ } + } + + return event, nil +} + // fetchBlobs retrieves blobs from the DA layer func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { // Retrieve from both namespaces diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 0d97d5940f..71929a7436 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -421,3 +421,86 @@ func Test_isEmptyDataExpected(t *testing.T) { h.DataHash = common.DataHashForEmptyTxs assert.True(t, isEmptyDataExpected(h)) } + +func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + + // Prepare forced inclusion transaction data + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 3) + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 1 // Limit to 1 iteration for test + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + mockDA := testmocks.NewMockDA(t) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(5679), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + result, err := 
r.RetrieveForcedIncludedTxsFromDA(context.Background(), 5678) + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Txs, 1) + assert.Equal(t, dataBin, result.Txs[0]) +} + +func TestDARetriever_FetchForcedIncludedTxs_NoNamespaceConfigured(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + + cfg := config.DefaultConfig() + // Leave ForcedInclusionNamespace empty + + r := NewDARetriever(nil, cm, cfg, gen, zerolog.Nop()) + + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1234) + require.Error(t, err) + require.Nil(t, result) +} + +func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 1 // Limit to 1 iteration for test + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + mockDA := testmocks.NewMockDA(t) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(10000), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 9999) + require.NoError(t, 
err) + require.NotNil(t, result) + require.Empty(t, result.Txs) +} diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index bdd496ea8c..2ddcaceebb 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -22,8 +22,9 @@ import ( "github.com/evstack/ev-node/types" ) -type daRetriever interface { +type DaRetrieverI interface { RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) + RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) } type p2pHandler interface { @@ -62,7 +63,7 @@ type Syncer struct { errorCh chan<- error // Channel to report critical execution client failures // Handlers - daRetriever daRetriever + daRetriever DaRetrieverI p2pHandler p2pHandler // Logging diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 7a9e80dbbd..be1913ee42 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -69,7 +69,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { syncer.ctx = ctx // Setup mocks - daRetriever := newMockdaRetriever(t) + daRetriever := NewMockDaRetrieverI(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler @@ -167,7 +167,7 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) gen := syncer.genesis - daRetriever := newMockdaRetriever(t) + daRetriever := NewMockDaRetrieverI(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler @@ -260,7 +260,7 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { syncer := setupTestSyncer(t, 500*time.Millisecond) syncer.ctx = ctx - daRetriever := newMockdaRetriever(t) + daRetriever := NewMockDaRetrieverI(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler diff 
--git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 8c9cfea362..07baf8f155 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -131,7 +131,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay } // Mock DA retriever to emit exactly totalHeights events, then HFF and cancel - daR := newMockdaRetriever(b) + daR := NewMockDaRetrieverI(b) for i := uint64(0); i < totalHeights; i++ { daHeight := i + daHeightOffset daR.On("RetrieveFromDA", mock.Anything, daHeight). diff --git a/block/internal/syncing/syncer_mock.go b/block/internal/syncing/syncer_mock.go index 85cad46960..7b9f39958a 100644 --- a/block/internal/syncing/syncer_mock.go +++ b/block/internal/syncing/syncer_mock.go @@ -11,13 +11,13 @@ import ( mock "github.com/stretchr/testify/mock" ) -// newMockdaRetriever creates a new instance of mockdaRetriever. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// NewMockDaRetrieverI creates a new instance of MockDaRetrieverI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
-func newMockdaRetriever(t interface { +func NewMockDaRetrieverI(t interface { mock.TestingT Cleanup(func()) -}) *mockdaRetriever { - mock := &mockdaRetriever{} +}) *MockDaRetrieverI { + mock := &MockDaRetrieverI{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) @@ -25,21 +25,89 @@ func newMockdaRetriever(t interface { return mock } -// mockdaRetriever is an autogenerated mock type for the daRetriever type -type mockdaRetriever struct { +// MockDaRetrieverI is an autogenerated mock type for the DaRetrieverI type +type MockDaRetrieverI struct { mock.Mock } -type mockdaRetriever_Expecter struct { +type MockDaRetrieverI_Expecter struct { mock *mock.Mock } -func (_m *mockdaRetriever) EXPECT() *mockdaRetriever_Expecter { - return &mockdaRetriever_Expecter{mock: &_m.Mock} +func (_m *MockDaRetrieverI) EXPECT() *MockDaRetrieverI_Expecter { + return &MockDaRetrieverI_Expecter{mock: &_m.Mock} } -// RetrieveFromDA provides a mock function for the type mockdaRetriever -func (_mock *mockdaRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { +// RetrieveForcedIncludedTxsFromDA provides a mock function for the type MockDaRetrieverI +func (_mock *MockDaRetrieverI) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { + ret := _mock.Called(ctx, daHeight) + + if len(ret) == 0 { + panic("no return value specified for RetrieveForcedIncludedTxsFromDA") + } + + var r0 *common.ForcedIncludedEvent + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (*common.ForcedIncludedEvent, error)); ok { + return returnFunc(ctx, daHeight) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) *common.ForcedIncludedEvent); ok { + r0 = returnFunc(ctx, daHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*common.ForcedIncludedEvent) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = 
returnFunc(ctx, daHeight) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveForcedIncludedTxsFromDA' +type MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call struct { + *mock.Call +} + +// RetrieveForcedIncludedTxsFromDA is a helper method to define mock.On call +// - ctx context.Context +// - daHeight uint64 +func (_e *MockDaRetrieverI_Expecter) RetrieveForcedIncludedTxsFromDA(ctx interface{}, daHeight interface{}) *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call { + return &MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call{Call: _e.mock.On("RetrieveForcedIncludedTxsFromDA", ctx, daHeight)} +} + +func (_c *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call) Return(forcedIncludedEvent *common.ForcedIncludedEvent, err error) *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call { + _c.Call.Return(forcedIncludedEvent, err) + return _c +} + +func (_c *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error)) *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call { + _c.Call.Return(run) + return _c +} + +// RetrieveFromDA provides a mock function for the type MockDaRetrieverI +func (_mock *MockDaRetrieverI) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { ret := _mock.Called(ctx, daHeight) if len(ret) == 0 { @@ -66,19 +134,19 @@ func (_mock 
*mockdaRetriever) RetrieveFromDA(ctx context.Context, daHeight uint6 return r0, r1 } -// mockdaRetriever_RetrieveFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveFromDA' -type mockdaRetriever_RetrieveFromDA_Call struct { +// MockDaRetrieverI_RetrieveFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveFromDA' +type MockDaRetrieverI_RetrieveFromDA_Call struct { *mock.Call } // RetrieveFromDA is a helper method to define mock.On call // - ctx context.Context // - daHeight uint64 -func (_e *mockdaRetriever_Expecter) RetrieveFromDA(ctx interface{}, daHeight interface{}) *mockdaRetriever_RetrieveFromDA_Call { - return &mockdaRetriever_RetrieveFromDA_Call{Call: _e.mock.On("RetrieveFromDA", ctx, daHeight)} +func (_e *MockDaRetrieverI_Expecter) RetrieveFromDA(ctx interface{}, daHeight interface{}) *MockDaRetrieverI_RetrieveFromDA_Call { + return &MockDaRetrieverI_RetrieveFromDA_Call{Call: _e.mock.On("RetrieveFromDA", ctx, daHeight)} } -func (_c *mockdaRetriever_RetrieveFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *mockdaRetriever_RetrieveFromDA_Call { +func (_c *MockDaRetrieverI_RetrieveFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDaRetrieverI_RetrieveFromDA_Call { _c.Call.Run(func(args mock.Arguments) { var arg0 context.Context if args[0] != nil { @@ -96,12 +164,12 @@ func (_c *mockdaRetriever_RetrieveFromDA_Call) Run(run func(ctx context.Context, return _c } -func (_c *mockdaRetriever_RetrieveFromDA_Call) Return(dAHeightEvents []common.DAHeightEvent, err error) *mockdaRetriever_RetrieveFromDA_Call { +func (_c *MockDaRetrieverI_RetrieveFromDA_Call) Return(dAHeightEvents []common.DAHeightEvent, err error) *MockDaRetrieverI_RetrieveFromDA_Call { _c.Call.Return(dAHeightEvents, err) return _c } -func (_c *mockdaRetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) 
([]common.DAHeightEvent, error)) *mockdaRetriever_RetrieveFromDA_Call { +func (_c *MockDaRetrieverI_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error)) *MockDaRetrieverI_RetrieveFromDA_Call { _c.Call.Return(run) return _c } diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 002ca13962..9c8bf6846c 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -360,7 +360,7 @@ func TestSyncLoopPersistState(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) syncerInst1.ctx = ctx - daRtrMock, p2pHndlMock := newMockdaRetriever(t), newMockp2pHandler(t) + daRtrMock, p2pHndlMock := NewMockDaRetrieverI(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeaderRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() p2pHndlMock.On("ProcessDataRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() syncerInst1.daRetriever, syncerInst1.p2pHandler = daRtrMock, p2pHndlMock @@ -441,7 +441,7 @@ func TestSyncLoopPersistState(t *testing.T) { ctx, cancel = context.WithCancel(t.Context()) t.Cleanup(cancel) syncerInst2.ctx = ctx - daRtrMock, p2pHndlMock = newMockdaRetriever(t), newMockp2pHandler(t) + daRtrMock, p2pHndlMock = NewMockDaRetrieverI(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeaderRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() p2pHndlMock.On("ProcessDataRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() syncerInst2.daRetriever, syncerInst2.p2pHandler = daRtrMock, p2pHndlMock diff --git a/core/sequencer/sequencing.go b/core/sequencer/sequencing.go index 006c892d8f..055f01483e 100644 --- a/core/sequencer/sequencing.go +++ b/core/sequencer/sequencing.go @@ -7,15 +7,15 @@ import ( "time" ) -// Sequencer is a generic interface for a sequencer +// Sequencer defines the minimal sequencing interface 
used by the block executor. type Sequencer interface { - // SubmitBatchTxs submits a batch of transactions from to sequencer + // SubmitBatchTxs submits a batch of transactions from executor to sequencer // Id is the unique identifier for the target chain // Batch is the batch of transactions to submit // returns an error if any from the sequencer SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) - // GetNextBatch returns the next batch of transactions from sequencer to + // GetNextBatch returns the next batch of transactions from sequencer and from DA to // Id is the unique identifier for the target chain // LastBatchHash is the cryptographic hash of the last batch received by the // MaxBytes is the maximum number of bytes to return in the batch diff --git a/go.mod b/go.mod index 73a0a33966..5845146b56 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,8 @@ module github.com/evstack/ev-node go 1.24.6 +replace github.com/evstack/ev-node/core => ./core + retract v0.12.0 // Published by accident require ( diff --git a/pkg/config/config.go b/pkg/config/config.go index eef506b075..48e9fa40e7 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -68,6 +68,10 @@ const ( FlagDANamespace = FlagPrefixEvnode + "da.namespace" // FlagDADataNamespace is a flag for specifying the DA data namespace ID FlagDADataNamespace = FlagPrefixEvnode + "da.data_namespace" + // FlagDAForcedInclusionNamespace is a flag for specifying the DA forced inclusion namespace ID + FlagDAForcedInclusionNamespace = FlagPrefixEvnode + "da.forced_inclusion_namespace" + // FlagDAForcedInclusionDAEpoch is a flag for specifying the DA forced inclusion DA epoch + FlagDAForcedInclusionDAEpoch = FlagPrefixEvnode + "da.forced_inclusion_da_epoch" // FlagDASubmitOptions is a flag for data availability submit options FlagDASubmitOptions = FlagPrefixEvnode + "da.submit_options" // FlagDAMempoolTTL is a flag for specifying the DA mempool TTL @@ -157,16 +161,18 @@ type 
Config struct { // DAConfig contains all Data Availability configuration parameters type DAConfig struct { - Address string `mapstructure:"address" yaml:"address" comment:"Address of the data availability layer service (host:port). This is the endpoint where Rollkit will connect to submit and retrieve data."` - AuthToken string `mapstructure:"auth_token" yaml:"auth_token" comment:"Authentication token for the data availability layer service. Required if the DA service needs authentication."` - GasPrice float64 `mapstructure:"gas_price" yaml:"gas_price" comment:"Gas price for data availability transactions. Use -1 for automatic gas price determination. Higher values may result in faster inclusion."` - GasMultiplier float64 `mapstructure:"gas_multiplier" yaml:"gas_multiplier" comment:"Multiplier applied to gas price when retrying failed DA submissions. Values > 1 increase gas price on retries to improve chances of inclusion."` - SubmitOptions string `mapstructure:"submit_options" yaml:"submit_options" comment:"Additional options passed to the DA layer when submitting data. Format depends on the specific DA implementation being used."` - Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. When a DataNamespace is provided, only the header is sent to this namespace."` - DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. Use this to speed-up light clients."` - BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. 
Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` - MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` - MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. Higher values provide more resilience but can delay error reporting."` + Address string `mapstructure:"address" yaml:"address" comment:"Address of the data availability layer service (host:port). This is the endpoint where Rollkit will connect to submit and retrieve data."` + AuthToken string `mapstructure:"auth_token" yaml:"auth_token" comment:"Authentication token for the data availability layer service. Required if the DA service needs authentication."` + GasPrice float64 `mapstructure:"gas_price" yaml:"gas_price" comment:"Gas price for data availability transactions. Use -1 for automatic gas price determination. Higher values may result in faster inclusion."` + GasMultiplier float64 `mapstructure:"gas_multiplier" yaml:"gas_multiplier" comment:"Multiplier applied to gas price when retrying failed DA submissions. Values > 1 increase gas price on retries to improve chances of inclusion."` + SubmitOptions string `mapstructure:"submit_options" yaml:"submit_options" comment:"Additional options passed to the DA layer when submitting data. Format depends on the specific DA implementation being used."` + Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. When a DataNamespace is provided, only the header is sent to this namespace."` + DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. 
Use this to speed-up light clients."` + ForcedInclusionNamespace string `mapstructure:"forced_inclusion_namespace" yaml:"forced_inclusion_namespace" comment:"Namespace ID for forced inclusion transactions on the DA layer."` + ForcedInclusionDAEpoch uint64 `mapstructure:"forced_inclusion_da_epoch" yaml:"forced_inclusion_da_epoch" comment:"DA epoch for forced inclusion transactions on the DA layer."` + BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` + MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` + MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. Higher values provide more resilience but can delay error reporting."` } // GetNamespace returns the namespace for header submissions. 
@@ -183,6 +189,11 @@ func (d *DAConfig) GetDataNamespace() string { return d.GetNamespace() } +// GetForcedInclusionNamespace returns the namespace for forced inclusion transactions +func (d *DAConfig) GetForcedInclusionNamespace() string { + return d.ForcedInclusionNamespace +} + // NodeConfig contains all Rollkit specific configuration parameters type NodeConfig struct { // Node mode configuration @@ -327,6 +338,8 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Float64(FlagDAGasMultiplier, def.DA.GasMultiplier, "DA gas price multiplier for retrying blob transactions") cmd.Flags().String(FlagDANamespace, def.DA.Namespace, "DA namespace for header (or blob) submissions") cmd.Flags().String(FlagDADataNamespace, def.DA.DataNamespace, "DA namespace for data submissions") + cmd.Flags().String(FlagDAForcedInclusionNamespace, def.DA.ForcedInclusionNamespace, "DA namespace for forced inclusion transactions") + cmd.Flags().Uint64(FlagDAForcedInclusionDAEpoch, def.DA.ForcedInclusionDAEpoch, "DA epoch for forced inclusion transactions (i.e: how many DA blocks processed to include transactions)") cmd.Flags().String(FlagDASubmitOptions, def.DA.SubmitOptions, "DA submit options") cmd.Flags().Uint64(FlagDAMempoolTTL, def.DA.MempoolTTL, "number of DA blocks until transaction is dropped from the mempool") cmd.Flags().Int(FlagDAMaxSubmitAttempts, def.DA.MaxSubmitAttempts, "maximum number of attempts to submit data to the DA layer before giving up") diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 7116e26362..0417ae405b 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -72,6 +72,8 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, flags, FlagDAGasPrice, DefaultConfig().DA.GasPrice) assertFlagValue(t, flags, FlagDAGasMultiplier, DefaultConfig().DA.GasMultiplier) assertFlagValue(t, flags, FlagDANamespace, DefaultConfig().DA.Namespace) + assertFlagValue(t, flags, FlagDADataNamespace, DefaultConfig().DA.DataNamespace) + 
assertFlagValue(t, flags, FlagDAForcedInclusionNamespace, DefaultConfig().DA.ForcedInclusionNamespace) assertFlagValue(t, flags, FlagDASubmitOptions, DefaultConfig().DA.SubmitOptions) assertFlagValue(t, flags, FlagDAMempoolTTL, DefaultConfig().DA.MempoolTTL) assertFlagValue(t, flags, FlagDAMaxSubmitAttempts, DefaultConfig().DA.MaxSubmitAttempts) @@ -104,7 +106,7 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, flags, FlagRPCAddress, DefaultConfig().RPC.Address) // Count the number of flags we're explicitly checking - expectedFlagCount := 39 // Update this number if you add more flag checks above + expectedFlagCount := 40 // Update this number if you add more flag checks above // Get the actual number of flags (both regular and persistent) actualFlagCount := 0 diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index 0e20e2a874..305a2eeb89 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -71,13 +71,15 @@ func DefaultConfig() Config { ReadinessMaxBlocksBehind: calculateReadinessMaxBlocksBehind(defaultBlockTime.Duration, defaultReadinessWindowSeconds), }, DA: DAConfig{ - Address: "http://localhost:7980", - BlockTime: DurationWrapper{6 * time.Second}, - GasPrice: -1, - GasMultiplier: 0, - MaxSubmitAttempts: 30, - Namespace: randString(10), - DataNamespace: "", + Address: "http://localhost:7980", + BlockTime: DurationWrapper{6 * time.Second}, + GasPrice: -1, + GasMultiplier: 0, + MaxSubmitAttempts: 30, + Namespace: randString(10), + DataNamespace: "", + ForcedInclusionNamespace: "", + ForcedInclusionDAEpoch: 50, }, Instrumentation: DefaultInstrumentationConfig(), Log: LogConfig{ diff --git a/types/state.go b/types/state.go index 374b119c92..c16c2b5137 100644 --- a/types/state.go +++ b/types/state.go @@ -25,7 +25,8 @@ type State struct { LastBlockHeight uint64 LastBlockTime time.Time - // DAHeight identifies DA block containing the latest applied Evolve block. 
+ // DAHeight identifies DA block containing the latest applied Evolve block for a syncing node. + // In the case of an aggregator, this corresponds to the last fetched DA block height for forced included transactions. DAHeight uint64 // the latest AppHash we've received from calling abci.Commit() From eb5144c271b8225f9f91e62449f807662fc40087 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 30 Oct 2025 15:44:24 +0100 Subject: [PATCH 02/39] add disclaimer --- core/execution/execution.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/execution/execution.go b/core/execution/execution.go index 896e2d65af..5085ebe578 100644 --- a/core/execution/execution.go +++ b/core/execution/execution.go @@ -52,6 +52,7 @@ type Executor interface { // Requirements: // - Must validate state transition against previous state root // - Must handle empty transaction list + // - Must gracefully handle gibberish transactions // - Must maintain deterministic execution // - Must respect context cancellation/timeout // - The rest of the rules are defined by the specific execution layer From 3dd46e8943339221574f03b0bf13fade4451e4dd Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 5 Nov 2025 10:17:50 +0100 Subject: [PATCH 03/39] add for sync node --- block/internal/executing/executor.go | 2 +- block/internal/syncing/da_retriever.go | 5 +- block/internal/syncing/syncer.go | 65 +++- .../syncing/syncer_forced_inclusion_test.go | 357 ++++++++++++++++++ block/internal/syncing/syncer_test.go | 2 + 5 files changed, 428 insertions(+), 3 deletions(-) create mode 100644 block/internal/syncing/syncer_forced_inclusion_test.go diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index ec02e6dadd..aa3f359419 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -336,7 +336,7 @@ func (e *Executor) produceBlock() error { // fetch forced included txs forcedIncludedTxsEvent, err :=
e.daRetriever.RetrieveForcedIncludedTxsFromDA(e.ctx, currentState.DAHeight) - if err != nil { + if err != nil && !errors.Is(err, syncing.ErrForceInclusionNotConfigured) { e.logger.Error().Err(err).Msg("failed to retrieve forced included txs") } diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 0a4338891f..b9e4d81f8a 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -91,11 +91,14 @@ func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co return r.processBlobs(ctx, blobsResp.Data, daHeight), nil } +// ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. +var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") + // RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. // It fetches from the daHeight for the da epoch range defined in the config. func (r *DARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { if !r.hasForcedInclusionNs { - return nil, fmt.Errorf("forced inclusion namespace not configured") + return nil, ErrForceInclusionNotConfigured } event := &common.ForcedIncludedEvent{ diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 1b9f188f4e..2f63d5b4aa 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -5,11 +5,12 @@ import ( "context" "errors" "fmt" - pubsub "github.com/libp2p/go-libp2p-pubsub" "sync" "sync/atomic" "time" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/rs/zerolog" "golang.org/x/sync/errgroup" @@ -472,6 +473,12 @@ func (s *Syncer) trySyncNextBlock(event *common.DAHeightEvent) error { return err } + // Verify forced inclusion transactions if configured + if err := s.verifyForcedInclusionTxs(currentState, data); err != nil { + 
s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("forced inclusion verification failed") + // TODO(@julienrbrt): Eventually halt the syncer and request the node to be started using the based sequencer. + } + // Apply block newState, err := s.applyBlock(header.Header, data, currentState) if err != nil { @@ -589,6 +596,62 @@ func (s *Syncer) validateBlock(currState types.State, data *types.Data, header * return nil } +// verifyForcedInclusionTxs verifies that all forced inclusion transactions from DA are included in the block +func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { + if s.daRetriever == nil { + return nil + } + + // Retrieve forced inclusion transactions from DA + forcedIncludedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight) + if err != nil { + if errors.Is(err, ErrForceInclusionNotConfigured) { + s.logger.Debug().Msg("forced inclusion namespace not configured, skipping verification") + return nil + } + + return fmt.Errorf("failed to retrieve forced included txs from DA: %w", err) + } + + // If no forced inclusion transactions found, nothing to verify + if len(forcedIncludedTxsEvent.Txs) == 0 { + s.logger.Debug().Uint64("da_height", currentState.DAHeight).Msg("no forced inclusion transactions to verify") + return nil + } + + blockTxMap := make(map[string]bool) + for _, tx := range data.Txs { + blockTxMap[string(tx)] = true + } + + // Check if all forced inclusion transactions are present in the block + var missingTxs [][]byte + for _, forcedTx := range forcedIncludedTxsEvent.Txs { + if !blockTxMap[string(forcedTx)] { + missingTxs = append(missingTxs, forcedTx) + } + } + + if len(missingTxs) > 0 { + s.logger.Error(). + Uint64("height", data.Height()). + Uint64("da_height", currentState.DAHeight). + Uint64("da_epoch_start", forcedIncludedTxsEvent.StartDaHeight). + Uint64("da_epoch_end", forcedIncludedTxsEvent.EndDaHeight). 
+ Int("missing_count", len(missingTxs)). + Int("total_forced", len(forcedIncludedTxsEvent.Txs)). + Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions missing from block") + return fmt.Errorf("sequencer is malicious: %d forced inclusion transactions not included in block", len(missingTxs)) + } + + s.logger.Debug(). + Uint64("height", data.Height()). + Int("forced_txs", len(forcedIncludedTxsEvent.Txs)). + Msg("all forced inclusion transactions verified in block") + + return nil +} + // sendCriticalError sends a critical error to the error channel without blocking func (s *Syncer) sendCriticalError(err error) { if s.errorCh != nil { diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go new file mode 100644 index 0000000000..2ddf890197 --- /dev/null +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -0,0 +1,357 @@ +package syncing + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block/internal/cache" + "github.com/evstack/ev-node/block/internal/common" + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/store" + testmocks "github.com/evstack/ev-node/test/mocks" + "github.com/evstack/ev-node/types" +) + +func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + } + + cfg := 
config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 1 + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob (SignedData) in DA + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + // Create block data that includes the forced transaction blob as a single transaction + data := makeData(gen.ChainID, 1, 1) + data.Txs[0] = types.Tx(dataBin) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since all forced txs are included + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} + +func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { + ds := 
dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 1 + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob (SignedData) in DA + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + // Create block data that does NOT include the forced transaction blob + data := makeData(gen.ChainID, 1, 2) + data.Txs[0] 
= types.Tx([]byte("regular_tx_1")) + data.Txs[1] = types.Tx([]byte("regular_tx_2")) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should fail since forced tx blob is missing + err = s.verifyForcedInclusionTxs(currentState, data) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "1 forced inclusion transactions not included") +} + +func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 1 + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return two forced inclusion transaction blobs + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create two forced inclusion transaction blobs in DA + dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1, dataBin2}, nil).Once() + + // Create block data that includes only one of the forced transaction blobs + data := makeData(gen.ChainID, 1, 2) + data.Txs[0] = types.Tx(dataBin1) + data.Txs[1] = types.Tx([]byte("regular_tx")) + // dataBin2 is missing + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should fail since one forced tx is missing + err = s.verifyForcedInclusionTxs(currentState, data) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "1 forced inclusion transactions not included") +} + +func 
TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 1 + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return no forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Create block data + data := makeData(gen.ChainID, 1, 2) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since no forced txs to verify + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} + +func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st 
:= store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + } + + cfg := config.DefaultConfig() + // Leave ForcedInclusionNamespace empty + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Create block data + data := makeData(gen.ChainID, 1, 2) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since namespace not configured + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index c0c14f53b4..59a9b5cb8f 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -406,6 +406,7 @@ func TestSyncLoopPersistState(t *testing.T) { DaHeight: daHeight, }} daRtrMock.On("RetrieveFromDA", mock.Anything, daHeight).Return(evts, nil) + daRtrMock.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything).Return(&common.ForcedIncludedEvent{Txs: [][]byte{}}, nil).Maybe() prevHeaderHash = sigHeader.Hash() hasher := sha512.New() hasher.Write(prevAppHash) @@ -478,6 +479,7 @@ func TestSyncLoopPersistState(t *testing.T) { 
p2pHndlMock.On("ProcessDataRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() syncerInst2.daRetriever, syncerInst2.p2pHandler = daRtrMock, p2pHndlMock + daRtrMock.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything).Return(&common.ForcedIncludedEvent{Txs: [][]byte{}}, nil).Maybe() daRtrMock.On("RetrieveFromDA", mock.Anything, mock.Anything). Run(func(arg mock.Arguments) { cancel() From 0f8d7d54181a4f4f0a0ac018043b6de0bc55d11e Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 5 Nov 2025 10:18:44 +0100 Subject: [PATCH 04/39] updates --- go.mod | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.mod b/go.mod index 88532de701..440911fd28 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,6 @@ module github.com/evstack/ev-node go 1.24.6 -replace github.com/evstack/ev-node/core => ./core - retract v0.12.0 // Published by accident require ( From c0a38c78c6bed8b61dd569a799fc1dda4cd66f21 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 5 Nov 2025 15:16:39 +0100 Subject: [PATCH 05/39] based --- apps/evm/single/cmd/run.go | 90 +++-- apps/evm/single/go.mod | 2 +- apps/grpc/single/cmd/run.go | 81 +++- apps/grpc/single/go.mod | 4 +- apps/testapp/cmd/run.go | 92 +++-- apps/testapp/go.mod | 2 +- block/internal/common/event.go | 16 +- block/internal/executing/executor.go | 16 +- block/internal/syncing/p2p_handler.go | 5 + block/internal/syncing/syncer.go | 12 +- block/public.go | 33 +- pkg/config/config.go | 20 +- pkg/config/config_test.go | 56 ++- sequencers/based/README.md | 90 +++++ sequencers/based/based.go | 226 +++++++++++ sequencers/based/based_test.go | 518 ++++++++++++++++++++++++++ 16 files changed, 1173 insertions(+), 90 deletions(-) create mode 100644 sequencers/based/README.md create mode 100644 sequencers/based/based.go create mode 100644 sequencers/based/based_test.go diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 80ea927822..37b99036d6 100644 --- a/apps/evm/single/cmd/run.go +++ 
b/apps/evm/single/cmd/run.go @@ -7,23 +7,27 @@ import ( "os" "path/filepath" - "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/da/jsonrpc" - "github.com/evstack/ev-node/node" - "github.com/evstack/ev-node/sequencers/single" - "github.com/ethereum/go-ethereum/common" + "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/spf13/cobra" - "github.com/evstack/ev-node/execution/evm" - + "github.com/evstack/ev-node/block" + "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/core/execution" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/da/jsonrpc" + "github.com/evstack/ev-node/execution/evm" + "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" genesispkg "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + "github.com/evstack/ev-node/sequencers/single" ) var RunCmd = &cobra.Command{ @@ -73,21 +77,8 @@ var RunCmd = &cobra.Command{ logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - singleMetrics, err := single.DefaultMetricsProvider(nodeConfig.Instrumentation.IsPrometheusEnabled())(genesis.ChainID) - if err != nil { - return err - } - - sequencer, err := single.NewSequencer( - context.Background(), - logger, - datastore, - &daJrpc.DA, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) + // Create sequencer based on configuration + sequencer, err := createSequencer(context.Background(), logger, datastore, &daJrpc.DA, nodeConfig, genesis) if err != nil { return err } @@ -111,6 +102,61 @@ func init() { addFlags(RunCmd) } +// createSequencer creates a sequencer based on the configuration. 
+// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. +func createSequencer( + ctx context.Context, + logger zerolog.Logger, + datastore datastore.Batching, + da da.DA, + nodeConfig config.Config, + genesis genesis.Genesis, +) (coresequencer.Sequencer, error) { + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + + adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) + basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", nodeConfig.DA.ForcedInclusionDAEpoch). 
+ Msg("based sequencer initialized") + + return basedSeq, nil + } + + singleMetrics, err := single.NopMetrics() + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + da, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + ) + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer: %w", err) + } + + return sequencer, nil +} + func createExecutionClient(cmd *cobra.Command) (execution.Executor, error) { // Read execution client parameters from flags ethURL, err := cmd.Flags().GetString(evm.FlagEvmEthURL) diff --git a/apps/evm/single/go.mod b/apps/evm/single/go.mod index f1351da31c..e65a2e79ed 100644 --- a/apps/evm/single/go.mod +++ b/apps/evm/single/go.mod @@ -15,6 +15,7 @@ require ( github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 github.com/evstack/ev-node/sequencers/single v1.0.0-beta.3 github.com/ipfs/go-datastore v0.9.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 ) @@ -146,7 +147,6 @@ require ( github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index b99e3b2fcd..0bb79a7b9a 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -1,22 +1,29 @@ package cmd import ( + "context" "fmt" "path/filepath" + "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/spf13/cobra" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/core/execution" + 
coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/da/jsonrpc" executiongrpc "github.com/evstack/ev-node/execution/grpc" "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" rollgenesis "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" "github.com/evstack/ev-node/sequencers/single" ) @@ -73,23 +80,8 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - // Create metrics provider - singleMetrics, err := single.DefaultMetricsProvider(nodeConfig.Instrumentation.IsPrometheusEnabled())(genesis.ChainID) - if err != nil { - return err - } - - // Create sequencer - sequencer, err := single.NewSequencer( - cmd.Context(), - logger, - datastore, - &daJrpc.DA, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) + // Create sequencer based on configuration + sequencer, err := createSequencer(cmd.Context(), logger, datastore, &daJrpc.DA, nodeConfig, genesis) if err != nil { return err } @@ -119,6 +111,61 @@ func init() { addGRPCFlags(RunCmd) } +// createSequencer creates a sequencer based on the configuration. +// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. 
+func createSequencer( + ctx context.Context, + logger zerolog.Logger, + datastore datastore.Batching, + da da.DA, + nodeConfig config.Config, + genesis genesis.Genesis, +) (coresequencer.Sequencer, error) { + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + + adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) + basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", nodeConfig.DA.ForcedInclusionDAEpoch). + Msg("based sequencer initialized") + + return basedSeq, nil + } + + singleMetrics, err := single.NopMetrics() + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + da, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + ) + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer: %w", err) + } + + return sequencer, nil +} + // createGRPCExecutionClient creates a new gRPC execution client from command flags func createGRPCExecutionClient(cmd *cobra.Command) (execution.Executor, error) { // Get the gRPC executor URL from flags diff --git a/apps/grpc/single/go.mod b/apps/grpc/single/go.mod index d21ddf1c44..e80b8a1a99 100644 --- a/apps/grpc/single/go.mod +++ b/apps/grpc/single/go.mod @@ -8,6 +8,8 @@ require ( github.com/evstack/ev-node/da v1.0.0-beta.5 github.com/evstack/ev-node/execution/grpc v0.0.0 github.com/evstack/ev-node/sequencers/single 
v1.0.0-beta.3 + github.com/ipfs/go-datastore v0.9.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 ) @@ -48,7 +50,6 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/boxo v0.35.0 // indirect github.com/ipfs/go-cid v0.5.0 // indirect - github.com/ipfs/go-datastore v0.9.0 // indirect github.com/ipfs/go-ds-badger4 v0.1.8 // indirect github.com/ipfs/go-log/v2 v2.8.1 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect @@ -122,7 +123,6 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect - github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index 061b8075ca..c20f6a1631 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -5,17 +5,23 @@ import ( "fmt" "path/filepath" + "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/spf13/cobra" kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/da/jsonrpc" "github.com/evstack/ev-node/node" - rollcmd "github.com/evstack/ev-node/pkg/cmd" + "github.com/evstack/ev-node/pkg/cmd" + "github.com/evstack/ev-node/pkg/config" genesispkg "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" "github.com/evstack/ev-node/sequencers/single" ) @@ -23,16 +29,16 @@ var RunCmd = &cobra.Command{ Use: "start", Aliases: []string{"node", "run"}, Short: "Run the testapp node", - RunE: func(cmd *cobra.Command, args 
[]string) error { - nodeConfig, err := rollcmd.ParseConfig(cmd) + RunE: func(command *cobra.Command, args []string) error { + nodeConfig, err := cmd.ParseConfig(command) if err != nil { return err } - logger := rollcmd.SetupLogger(nodeConfig.Log) + logger := cmd.SetupLogger(nodeConfig.Log) // Get KV endpoint flag - kvEndpoint, _ := cmd.Flags().GetString(flagKVEndpoint) + kvEndpoint, _ := command.Flags().GetString(flagKVEndpoint) if kvEndpoint == "" { logger.Info().Msg("KV endpoint flag not set, using default from http_server") } @@ -51,7 +57,7 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, nodeConfig.DA.GasPrice, nodeConfig.DA.GasMultiplier, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, nodeConfig.DA.GasPrice, nodeConfig.DA.GasMultiplier, cmd.DefaultMaxBlobSize) if err != nil { return err } @@ -66,11 +72,6 @@ var RunCmd = &cobra.Command{ return err } - singleMetrics, err := single.NopMetrics() - if err != nil { - return err - } - // Start the KV executor HTTP server if kvEndpoint != "" { // Only start if endpoint is provided httpServer := kvexecutor.NewHTTPServer(executor, kvEndpoint) @@ -92,16 +93,8 @@ var RunCmd = &cobra.Command{ logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - sequencer, err := single.NewSequencer( - ctx, - logger, - datastore, - &daJrpc.DA, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) + // Create sequencer based on configuration + sequencer, err := createSequencer(ctx, logger, datastore, &daJrpc.DA, nodeConfig, genesis) if err != nil { return err } @@ -111,6 +104,61 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, 
cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return cmd.StartNode(logger, command, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } + +// createSequencer creates a sequencer based on the configuration. +// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. +func createSequencer( + ctx context.Context, + logger zerolog.Logger, + datastore datastore.Batching, + da da.DA, + nodeConfig config.Config, + genesis genesispkg.Genesis, +) (coresequencer.Sequencer, error) { + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + + adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) + basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", nodeConfig.DA.ForcedInclusionDAEpoch). 
+ Msg("based sequencer initialized") + + return basedSeq, nil + } + + singleMetrics, err := single.NopMetrics() + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + da, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + ) + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer: %w", err) + } + + return sequencer, nil +} diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 6603ce3222..ea0bc6d0f7 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -18,6 +18,7 @@ require ( github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/evstack/ev-node/sequencers/single v0.0.0-00010101000000-000000000000 github.com/ipfs/go-datastore v0.9.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 github.com/stretchr/testify v1.11.1 ) @@ -131,7 +132,6 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect - github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 05c560b724..3fad0e6fba 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -1,6 +1,10 @@ package common -import "github.com/evstack/ev-node/types" +import ( + "context" + + "github.com/evstack/ev-node/types" +) // EventSource represents the origin of a block event type EventSource string @@ -12,8 +16,14 @@ const ( SourceP2P EventSource = "P2P" ) +// DARetriever defines the interface for retrieving events from the DA layer +type DARetriever interface { + RetrieveFromDA(ctx context.Context, daHeight 
uint64) ([]DAHeightEvent, error) + RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) +} + // DAHeightEvent represents a DA event for caching -type DAHeightEvent struct { +type DAHeightEvent = struct { Header *types.SignedHeader Data *types.Data // DaHeight corresponds to the highest DA included height between the Header and Data. @@ -23,7 +33,7 @@ type DAHeightEvent struct { } // ForcedIncluded represents a forced inclusion event for caching -type ForcedIncludedEvent struct { +type ForcedIncludedEvent = struct { Txs [][]byte StartDaHeight uint64 EndDaHeight uint64 diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index aa3f359419..b2ae3122f3 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -32,7 +32,7 @@ type Executor struct { exec coreexecutor.Executor sequencer coresequencer.Sequencer signer signer.Signer - daRetriever syncing.DaRetrieverI + daRetriever common.DARetriever // Shared components cache cache.Manager @@ -73,7 +73,7 @@ func NewExecutor( store store.Store, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, - daRetriever syncing.DaRetrieverI, + daRetriever common.DARetriever, signer signer.Signer, cache cache.Manager, metrics *common.Metrics, @@ -335,9 +335,15 @@ func (e *Executor) produceBlock() error { } // fetch forced included txs - forcedIncludedTxsEvent, err := e.daRetriever.RetrieveForcedIncludedTxsFromDA(e.ctx, currentState.DAHeight) - if err != nil && !errors.Is(err, syncing.ErrForceInclusionNotConfigured) { - e.logger.Error().Err(err).Msg("failed to retrieve forced included txs") + var ( + forcedIncludedTxsEvent *common.ForcedIncludedEvent + err error + ) + if !e.config.Node.BasedSequencer { + forcedIncludedTxsEvent, err = e.daRetriever.RetrieveForcedIncludedTxsFromDA(e.ctx, currentState.DAHeight) + if err != nil && !errors.Is(err, syncing.ErrForceInclusionNotConfigured) { + 
e.logger.Error().Err(err).Msg("failed to retrieve forced included txs") + } } var ( diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 551254ff4b..3c77334ec6 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -16,6 +16,11 @@ import ( "github.com/evstack/ev-node/types" ) +type p2pHandler interface { + ProcessHeaderRange(ctx context.Context, fromHeight, toHeight uint64, heightInCh chan<- common.DAHeightEvent) + ProcessDataRange(ctx context.Context, fromHeight, toHeight uint64, heightInCh chan<- common.DAHeightEvent) +} + // P2PHandler handles all P2P operations for the syncer type P2PHandler struct { headerStore goheader.Store[*types.SignedHeader] diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 2f63d5b4aa..7f4b9985cc 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -25,16 +25,6 @@ import ( "github.com/evstack/ev-node/types" ) -type DaRetrieverI interface { - RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) - RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) -} - -type p2pHandler interface { - ProcessHeaderRange(ctx context.Context, fromHeight, toHeight uint64, heightInCh chan<- common.DAHeightEvent) - ProcessDataRange(ctx context.Context, fromHeight, toHeight uint64, heightInCh chan<- common.DAHeightEvent) -} - // Syncer handles block synchronization from DA and P2P sources. 
 type Syncer struct {
 	// Core components
@@ -66,7 +56,7 @@ type Syncer struct {
 	errorCh chan<- error // Channel to report critical execution client failures
 
 	// Handlers
-	daRetriever DaRetrieverI
+	daRetriever common.DARetriever
 	p2pHandler  p2pHandler
 
 	// Logging
diff --git a/block/public.go b/block/public.go
index 8bfc4c1674..534a77a1e4 100644
--- a/block/public.go
+++ b/block/public.go
@@ -1,6 +1,16 @@
 package block
 
-import "github.com/evstack/ev-node/block/internal/common"
+import (
+	"fmt"
+
+	"github.com/evstack/ev-node/block/internal/cache"
+	"github.com/evstack/ev-node/block/internal/common"
+	"github.com/evstack/ev-node/block/internal/syncing"
+	coreda "github.com/evstack/ev-node/core/da"
+	"github.com/evstack/ev-node/pkg/config"
+	"github.com/evstack/ev-node/pkg/genesis"
+	"github.com/rs/zerolog"
+)
 
 // BlockOptions defines the options for creating block components
 type BlockOptions = common.BlockOptions
@@ -22,3 +32,24 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
 func NopMetrics() *Metrics {
 	return common.NopMetrics()
 }
+
+// NewDARetriever creates a new DA retriever instance.
+func NewDARetriever( + da coreda.DA, + config config.Config, + genesis genesis.Genesis, + logger zerolog.Logger, +) (common.DARetriever, error) { + cacheManager, err := cache.NewManager(config, nil /* pending data not used */, logger) + if err != nil { + return nil, fmt.Errorf("failed to create cache manager: %w", err) + } + + return syncing.NewDARetriever( + da, + cacheManager, + config, + genesis, + logger, + ), nil +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 48e9fa40e7..d089929bbe 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -33,6 +33,8 @@ const ( // FlagAggregator is a flag for running node in aggregator mode FlagAggregator = FlagPrefixEvnode + "node.aggregator" + // FlagBasedSequencer is a flag for enabling based sequencer mode (requires aggregator mode) + FlagBasedSequencer = FlagPrefixEvnode + "node.based_sequencer" // FlagLight is a flag for running the node in light mode FlagLight = FlagPrefixEvnode + "node.light" // FlagBlockTime is a flag for specifying the block time @@ -197,8 +199,9 @@ func (d *DAConfig) GetForcedInclusionNamespace() string { // NodeConfig contains all Rollkit specific configuration parameters type NodeConfig struct { // Node mode configuration - Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` - Light bool `yaml:"light" comment:"Run node in light mode"` + Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` + BasedSequencer bool `yaml:"based_sequencer" comment:"Run node with based sequencer (fetches transactions only from DA forced inclusion namespace). Requires aggregator mode to be enabled."` + Light bool `yaml:"light" comment:"Run node in light mode"` // Block management configuration BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Block time (duration). 
Examples: \"500ms\", \"1s\", \"5s\", \"1m\", \"2m30s\", \"10m\"."` @@ -253,6 +256,11 @@ func (c *Config) Validate() error { return fmt.Errorf("could not create directory %q: %w", fullDir, err) } + // Validate based sequencer requires aggregator mode + if c.Node.BasedSequencer && !c.Node.Aggregator { + return fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + // Validate namespaces if err := validateNamespace(c.DA.GetNamespace()); err != nil { return fmt.Errorf("could not validate namespace (%s): %w", c.DA.GetNamespace(), err) @@ -320,8 +328,9 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Bool(FlagClearCache, def.ClearCache, "clear the cache") // Node configuration flags - cmd.Flags().Bool(FlagAggregator, def.Node.Aggregator, "run node in aggregator mode") - cmd.Flags().Bool(FlagLight, def.Node.Light, "run light client") + cmd.Flags().Bool(FlagAggregator, def.Node.Aggregator, "run node as an aggregator") + cmd.Flags().Bool(FlagBasedSequencer, def.Node.BasedSequencer, "run node with based sequencer (requires aggregator mode)") + cmd.Flags().Bool(FlagLight, def.Node.Light, "run node in light mode") cmd.Flags().Duration(FlagBlockTime, def.Node.BlockTime.Duration, "block time (for aggregator mode)") cmd.Flags().String(FlagTrustedHash, def.Node.TrustedHash, "initial trusted hash to start the header exchange service") cmd.Flags().Bool(FlagLazyAggregator, def.Node.LazyMode, "produce blocks only when transactions are available or after lazy block time") @@ -366,6 +375,9 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().String(FlagSignerType, def.Signer.SignerType, "type of signer to use (file, grpc)") cmd.Flags().String(FlagSignerPath, def.Signer.SignerPath, "path to the signer file or address") cmd.Flags().String(FlagSignerPassphraseFile, "", "path to file containing the signer passphrase (required for file signer and if aggregator is enabled)") + + // flag constraints + cmd.MarkFlagsMutuallyExclusive(FlagLight, FlagAggregator) } // Load 
loads the node configuration in the following order of precedence: diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 0417ae405b..af27889a1b 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -106,7 +106,7 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, flags, FlagRPCAddress, DefaultConfig().RPC.Address) // Count the number of flags we're explicitly checking - expectedFlagCount := 40 // Update this number if you add more flag checks above + expectedFlagCount := 46 // Update this number if you add more flag checks above // Get the actual number of flags (both regular and persistent) actualFlagCount := 0 @@ -378,3 +378,57 @@ func assertFlagValue(t *testing.T, flags *pflag.FlagSet, name string, expectedVa } } } + +func TestBasedSequencerValidation(t *testing.T) { + tests := []struct { + name string + aggregator bool + basedSeq bool + expectError bool + errorMsg string + }{ + { + name: "based sequencer without aggregator should fail", + aggregator: false, + basedSeq: true, + expectError: true, + errorMsg: "based sequencer mode requires aggregator mode to be enabled", + }, + { + name: "based sequencer with aggregator should pass", + aggregator: true, + basedSeq: true, + expectError: false, + }, + { + name: "aggregator without based sequencer should pass", + aggregator: true, + basedSeq: false, + expectError: false, + }, + { + name: "neither aggregator nor based sequencer should pass", + aggregator: false, + basedSeq: false, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := DefaultConfig() + cfg.RootDir = t.TempDir() + cfg.Node.Aggregator = tt.aggregator + cfg.Node.BasedSequencer = tt.basedSeq + + err := cfg.Validate() + + if tt.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/sequencers/based/README.md b/sequencers/based/README.md new file mode 100644 index 
0000000000..d833768d81 --- /dev/null +++ b/sequencers/based/README.md @@ -0,0 +1,90 @@ +# Based Sequencer + +## Overview + +The Based Sequencer is a sequencer implementation that retrieves transactions exclusively from the Data Availability (DA) layer via the forced inclusion mechanism. Unlike traditional sequencers that accept transactions from mempools or external sources, the based sequencer only processes transactions that have been posted to the DA layer's forced inclusion namespace. + +## What is a Based Sequencer? + +A "based" sequencer (also known as "based rollup") is a rollup architecture where transaction ordering is derived entirely from the base layer (DA layer) rather than from a centralized sequencer. This provides several benefits: + +- **Censorship Resistance**: Users can submit transactions directly to DA, bypassing the sequencer +- **Decentralization**: No single entity controls transaction ordering +- **Liveness**: The rollup continues operating as long as the DA layer is available +- **Trustless**: Users don't need to trust the sequencer to include their transactions + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Based Sequencer │ +│ │ +│ ┌────────────────┐ ┌─────────────────┐ │ +│ │ Transaction │ │ DA Retriever │ │ +│ │ Queue │◄────────│ (Interface) │ │ +│ └────────────────┘ └─────────────────┘ │ +│ │ │ │ +│ │ │ │ +│ ▼ ▼ │ +│ ┌────────────────┐ ┌─────────────────┐ │ +│ │ GetNextBatch │ │ Fetch Forced │ │ +│ │ (Method) │────────►│ Inclusion Txs │ │ +│ └────────────────┘ └─────────────────┘ │ +│ │ │ +└──────────────────────────────────────┼───────────────────────┘ + │ + ▼ + ┌─────────────────┐ + │ DA Layer │ + │ (Forced Inc. 
│ + │ Namespace) │ + └─────────────────┘ +``` + +## Features + +- **DA-Only Transaction Source**: Fetches transactions exclusively from DA forced inclusion namespace +- **Batch Size Management**: Respects MaxBytes limits when creating batches +- **Transaction Queue**: Buffers transactions when they exceed batch size limits +- **DA Height Tracking**: Maintains synchronization with DA layer height +- **Concurrent-Safe**: Thread-safe operations with mutex protection +- **Automatic Height Management**: Handles "height from future" errors gracefully + +## Interface Compliance + +The Based Sequencer implements the `core/sequencer.Sequencer` interface: + +```go +type Sequencer interface { + SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) + GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) + VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) +} +``` + +## Configuration + +The based sequencer uses the following configuration from `config.Config`: + +- `DA.ForcedInclusionNamespace`: Namespace for forced inclusion transactions +- `DA.ForcedInclusionDAEpoch`: Number of DA blocks to scan per fetch + +If `ForcedInclusionNamespace` is not configured, the sequencer returns empty batches. + +## Performance Considerations + +- **Batching**: Transactions are batched to reduce DA queries +- **Queue**: In-memory queue prevents repeated DA fetches +- **Mutex Protection**: Thread-safe but may block on concurrent access +- **DA Epoch**: Configure `ForcedInclusionDAEpoch` to balance freshness vs. 
efficiency + +## Comparison to Traditional Sequencer + +| Feature | Traditional Sequencer | Based Sequencer | +| --------------------- | --------------------- | --------------- | +| Transaction Source | Mempool, RPC | DA Layer Only | +| Censorship Resistance | Low | High | +| Centralization | High | Low | +| Latency | Low | Higher | +| MEV Opportunity | High | Low | +| Trust Requirements | High | Low | diff --git a/sequencers/based/based.go b/sequencers/based/based.go new file mode 100644 index 0000000000..5d02de0a21 --- /dev/null +++ b/sequencers/based/based.go @@ -0,0 +1,226 @@ +package based + +import ( + "context" + "errors" + "strings" + "sync" + "time" + + "github.com/rs/zerolog" + + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" +) + +// ErrForceInclusionNotConfigured is returned when forced inclusion is not configured +var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") + +// ForcedInclusionEvent represents forced inclusion transactions retrieved from DA +type ForcedInclusionEvent = struct { + Txs [][]byte + StartDaHeight uint64 + EndDaHeight uint64 +} + +// DARetriever defines the interface for retrieving forced inclusion transactions from DA +// This interface is intentionally generic to allow different implementations +type DARetriever interface { + RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) +} + +// DARetrieverAdapter adapts any retriever that returns a compatible event type +type DARetrieverAdapter struct { + retrieveFunc func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) +} + +// NewDARetrieverAdapter creates a new adapter with a custom retrieval function +func NewDARetrieverAdapter(retrieveFunc func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error)) *DARetrieverAdapter { + 
return &DARetrieverAdapter{ + retrieveFunc: retrieveFunc, + } +} + +// RetrieveForcedIncludedTxsFromDA implements the DARetriever interface +func (a *DARetrieverAdapter) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { + return a.retrieveFunc(ctx, daHeight) +} + +// BasedSequencer is a sequencer that only retrieves transactions from the DA layer +// via the forced inclusion mechanism. It does not accept transactions from the reaper. +type BasedSequencer struct { + daRetriever DARetriever + da coreda.DA + config config.Config + genesis genesis.Genesis + logger zerolog.Logger + + mu sync.RWMutex + currentHeight uint64 + daHeight uint64 + txQueue [][]byte +} + +// NewBasedSequencer creates a new based sequencer instance +func NewBasedSequencer( + daRetriever DARetriever, + da coreda.DA, + config config.Config, + genesis genesis.Genesis, + logger zerolog.Logger, +) *BasedSequencer { + return &BasedSequencer{ + daRetriever: daRetriever, + da: da, + config: config, + genesis: genesis, + logger: logger.With().Str("component", "based_sequencer").Logger(), + currentHeight: 0, + daHeight: genesis.DAStartHeight, + txQueue: make([][]byte, 0), + } +} + +// SubmitBatchTxs does nothing for a based sequencer as it only pulls from DA +// This satisfies the Sequencer interface but transactions submitted here are ignored +func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.SubmitBatchTxsRequest) (*coresequencer.SubmitBatchTxsResponse, error) { + s.logger.Debug().Msg("based sequencer ignores submitted transactions - only DA transactions are processed") + return &coresequencer.SubmitBatchTxsResponse{}, nil +} + +// GetNextBatch retrieves the next batch of transactions from the DA layer +// It fetches forced inclusion transactions and returns them as the next batch +func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) 
{ + s.mu.Lock() + defer s.mu.Unlock() + + // If we have transactions in the queue, return them first + if len(s.txQueue) > 0 { + batch := s.createBatchFromQueue(req.MaxBytes) + if len(batch.Transactions) > 0 { + s.logger.Debug(). + Int("tx_count", len(batch.Transactions)). + Int("remaining", len(s.txQueue)). + Msg("returning batch from queue") + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } + } + + // Fetch forced inclusion transactions from DA + s.logger.Debug().Uint64("da_height", s.daHeight).Msg("fetching forced inclusion transactions from DA") + + forcedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) + if err != nil { + if strings.Contains(err.Error(), ErrForceInclusionNotConfigured.Error()) { + s.logger.Error().Msg("forced inclusion not configured, returning empty batch") + return &coresequencer.GetNextBatchResponse{ + Batch: &coresequencer.Batch{Transactions: nil}, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } + + if errors.Is(err, coreda.ErrHeightFromFuture) { + s.logger.Debug().Uint64("da_height", s.daHeight).Msg("DA height from future, incrementing") + return &coresequencer.GetNextBatchResponse{ + Batch: &coresequencer.Batch{Transactions: nil}, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } + + s.logger.Error().Err(err).Uint64("da_height", s.daHeight).Msg("failed to retrieve forced inclusion transactions") + return nil, err + } + + // Update DA height based on the retrieved event + if forcedTxsEvent.EndDaHeight > s.daHeight { + s.daHeight = forcedTxsEvent.EndDaHeight + } else if forcedTxsEvent.StartDaHeight > s.daHeight { + s.daHeight = forcedTxsEvent.StartDaHeight + } + + // Add transactions to queue + s.txQueue = append(s.txQueue, forcedTxsEvent.Txs...) + + s.logger.Info(). + Int("tx_count", len(forcedTxsEvent.Txs)). + Uint64("da_height_start", forcedTxsEvent.StartDaHeight). 
+ Uint64("da_height_end", forcedTxsEvent.EndDaHeight). + Msg("retrieved forced inclusion transactions from DA") + + batch := s.createBatchFromQueue(req.MaxBytes) + + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil +} + +// createBatchFromQueue creates a batch from the transaction queue respecting MaxBytes +func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Batch { + if len(s.txQueue) == 0 { + return &coresequencer.Batch{Transactions: nil} + } + + var batch [][]byte + var totalBytes uint64 + + for i, tx := range s.txQueue { + txSize := uint64(len(tx)) + if totalBytes+txSize > maxBytes && len(batch) > 0 { + // Would exceed max bytes, stop here + s.txQueue = s.txQueue[i:] + break + } + + batch = append(batch, tx) + totalBytes += txSize + + // If this is the last transaction, clear the queue + if i == len(s.txQueue)-1 { + s.ClearQueue() + } + } + + return &coresequencer.Batch{Transactions: batch} +} + +// VerifyBatch verifies a batch of transactions +// For a based sequencer, we always return true as all transactions come from DA +func (s *BasedSequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBatchRequest) (*coresequencer.VerifyBatchResponse, error) { + return &coresequencer.VerifyBatchResponse{ + Status: true, + }, nil +} + +// SetDAHeight sets the current DA height for the sequencer +// This should be called when the sequencer needs to sync to a specific DA height +func (s *BasedSequencer) SetDAHeight(height uint64) { + s.mu.Lock() + defer s.mu.Unlock() + s.daHeight = height + s.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +} + +// GetDAHeight returns the current DA height +func (s *BasedSequencer) GetDAHeight() uint64 { + s.mu.RLock() + defer s.mu.RUnlock() + return s.daHeight +} + +// ClearQueue clears the transaction queue +func (s *BasedSequencer) ClearQueue() { + s.mu.Lock() + defer s.mu.Unlock() + s.txQueue = 
s.txQueue[:0] + s.logger.Debug().Msg("transaction queue cleared") +} diff --git a/sequencers/based/based_test.go b/sequencers/based/based_test.go new file mode 100644 index 0000000000..297c5fe32d --- /dev/null +++ b/sequencers/based/based_test.go @@ -0,0 +1,518 @@ +package based + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" +) + +// MockDARetriever is a mock implementation of DARetriever for testing +type MockDARetriever struct { + mock.Mock +} + +func (m *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { + args := m.Called(ctx, daHeight) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*ForcedInclusionEvent), args.Error(1) +} + +// MockDA is a mock implementation of DA for testing +type MockDA struct { + mock.Mock +} + +func (m *MockDA) Submit(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) SubmitWithOptions(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte, options []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace, options) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + args := m.Called(ctx, height, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return 
args.Get(0).(*coreda.GetIDsResult), args.Error(1) +} + +func (m *MockDA) Get(ctx context.Context, ids [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetProofs(ctx context.Context, ids [][]byte, namespace []byte) ([]coreda.Proof, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]coreda.Proof), args.Error(1) +} + +func (m *MockDA) Validate(ctx context.Context, ids [][]byte, proofs []coreda.Proof, namespace []byte) ([]bool, error) { + args := m.Called(ctx, ids, proofs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]bool), args.Error(1) +} + +func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GasPrice(ctx context.Context) (float64, error) { + args := m.Called(ctx) + return args.Get(0).(float64), args.Error(1) +} + +func (m *MockDA) GasMultiplier(ctx context.Context) (float64, error) { + args := m.Called(ctx) + return args.Get(0).(float64), args.Error(1) +} + +func TestNewBasedSequencer(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + require.NotNil(t, seq) + assert.Equal(t, uint64(100), seq.daHeight) + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ChainID: "test-chain"} + + seq 
:= NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Submit should succeed but be ignored + req := coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + } + + resp, err := seq.SubmitBatchTxs(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + + // Queue should still be empty + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Mock retriever to return forced inclusion transactions + forcedTxs := &ForcedInclusionEvent{ + Txs: [][]byte{[]byte("forced_tx1"), []byte("forced_tx2")}, + StartDaHeight: 101, + EndDaHeight: 105, + } + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
+ Return(forcedTxs, nil).Once() + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 10000, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("forced_tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("forced_tx2"), resp.Batch.Transactions[1]) + + // DA height should be updated + assert.Equal(t, uint64(105), seq.GetDAHeight()) + + mockRetriever.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Mock retriever to return empty transactions + emptyEvent := &ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, + } + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). + Return(emptyEvent, nil).Once() + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 10000, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + mockRetriever.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Mock retriever to return not configured error + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
+ Return(nil, errors.New("forced inclusion namespace not configured")).Once() + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 10000, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Nil(t, resp.Batch.Transactions) + + mockRetriever.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Mock retriever to return height from future error + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). + Return(nil, coreda.ErrHeightFromFuture).Once() + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 10000, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Nil(t, resp.Batch.Transactions) + + // DA height should be incremented + assert.Equal(t, uint64(101), seq.GetDAHeight()) + + mockRetriever.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Create transactions that will exceed max bytes + tx1 := make([]byte, 50) + tx2 := make([]byte, 50) + tx3 := make([]byte, 50) + + forcedTxs := &ForcedInclusionEvent{ + Txs: [][]byte{tx1, tx2, tx3}, + StartDaHeight: 101, + EndDaHeight: 105, + } + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
+ Return(forcedTxs, nil).Once() + + // Request with max bytes that only fits 2 transactions + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 100, // Only fits 2 transactions + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + + // Third transaction should still be in queue + assert.Equal(t, 1, len(seq.txQueue)) + + // Next request should return the remaining transaction + req2 := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 100, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions)) + assert.Equal(t, 0, len(seq.txQueue)) + + mockRetriever.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Pre-populate the queue + seq.txQueue = [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 10000, + } + + // Should return from queue without calling retriever + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) + assert.Equal(t, 0, len(seq.txQueue)) + + // No expectations on retriever since it shouldn't be called + mockRetriever.AssertExpectations(t) +} + +func 
TestBasedSequencer_VerifyBatch(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ChainID: "test-chain"} + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.VerifyBatchRequest{ + Id: []byte("test-chain"), + BatchData: [][]byte{[]byte("tx1")}, + } + + resp, err := seq.VerifyBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + assert.True(t, resp.Status) +} + +func TestBasedSequencer_SetDAHeight(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + seq.SetDAHeight(200) + assert.Equal(t, uint64(200), seq.GetDAHeight()) +} + +func TestBasedSequencer_ClearQueue(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ChainID: "test-chain"} + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Pre-populate the queue + seq.txQueue = [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")} + assert.Equal(t, 3, len(seq.txQueue)) + + seq.ClearQueue() + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_ConcurrentAccess(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Mock retriever to return transactions + forcedTxs := &ForcedInclusionEvent{ + Txs: [][]byte{[]byte("tx1")}, + StartDaHeight: 101, + EndDaHeight: 105, + } + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
+ Return(forcedTxs, nil).Maybe() + + // Test concurrent access + done := make(chan bool, 3) + + // Concurrent GetNextBatch calls + go func() { + req := coresequencer.GetNextBatchRequest{Id: []byte("test-chain"), MaxBytes: 1000} + _, _ = seq.GetNextBatch(context.Background(), req) + done <- true + }() + + // Concurrent SetDAHeight calls + go func() { + seq.SetDAHeight(200) + done <- true + }() + + // Concurrent GetDAHeight calls + go func() { + _ = seq.GetDAHeight() + done <- true + }() + + // Wait for all goroutines + timeout := time.After(5 * time.Second) + for i := 0; i < 3; i++ { + select { + case <-done: + case <-timeout: + t.Fatal("test timed out") + } + } +} + +func TestDARetrieverAdapter(t *testing.T) { + called := false + expectedEvent := &ForcedInclusionEvent{ + Txs: [][]byte{[]byte("tx1")}, + StartDaHeight: 100, + EndDaHeight: 105, + } + + adapter := NewDARetrieverAdapter(func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { + called = true + assert.Equal(t, uint64(100), daHeight) + return expectedEvent, nil + }) + + event, err := adapter.RetrieveForcedIncludedTxsFromDA(context.Background(), 100) + require.NoError(t, err) + assert.True(t, called) + assert.Equal(t, expectedEvent, event) +} + +func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { + mockRetriever := new(MockDARetriever) + mockDA := new(MockDA) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Mock retriever to return an unexpected error + expectedErr := errors.New("unexpected DA error") + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
+ Return(nil, expectedErr).Once() + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 10000, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.Error(t, err) + assert.Nil(t, resp) + assert.Equal(t, expectedErr, err) + + mockRetriever.AssertExpectations(t) +} From 97345be422d46c1c96cb9cf99387904af7a21456 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 5 Nov 2025 15:30:23 +0100 Subject: [PATCH 06/39] fix mock --- .mockery.yaml | 10 +- block/internal/common/da_retriever_mock.go | 174 ++++++++++++++++++ .../internal/executing/executor_lazy_test.go | 5 +- .../internal/executing/executor_logic_test.go | 5 +- .../executing/executor_restart_test.go | 9 +- block/internal/executing/executor_test.go | 5 +- block/internal/syncing/syncer_backoff_test.go | 6 +- .../internal/syncing/syncer_benchmark_test.go | 2 +- block/internal/syncing/syncer_mock.go | 163 ---------------- block/internal/syncing/syncer_test.go | 4 +- 10 files changed, 195 insertions(+), 188 deletions(-) create mode 100644 block/internal/common/da_retriever_mock.go diff --git a/.mockery.yaml b/.mockery.yaml index 93c015b0ea..1f87c57e00 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -48,11 +48,6 @@ packages: filename: external/hstore.go github.com/evstack/ev-node/block/internal/syncing: interfaces: - DaRetrieverI: - config: - dir: ./block/internal/syncing - pkgname: syncing - filename: syncer_mock.go p2pHandler: config: dir: ./block/internal/syncing @@ -65,3 +60,8 @@ packages: dir: ./block/internal/common pkgname: common filename: broadcaster_mock.go + DARetriever: + config: + dir: ./block/internal/common + pkgname: common + filename: da_retriever_mock.go diff --git a/block/internal/common/da_retriever_mock.go b/block/internal/common/da_retriever_mock.go new file mode 100644 index 0000000000..c83ad564db --- /dev/null +++ b/block/internal/common/da_retriever_mock.go @@ -0,0 +1,174 @@ +// Code generated by mockery; DO NOT EDIT. 
+// github.com/vektra/mockery +// template: testify + +package common + +import ( + "context" + + mock "github.com/stretchr/testify/mock" +) + +// NewMockDARetriever creates a new instance of MockDARetriever. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockDARetriever(t interface { + mock.TestingT + Cleanup(func()) +}) *MockDARetriever { + mock := &MockDARetriever{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockDARetriever is an autogenerated mock type for the DARetriever type +type MockDARetriever struct { + mock.Mock +} + +type MockDARetriever_Expecter struct { + mock *mock.Mock +} + +func (_m *MockDARetriever) EXPECT() *MockDARetriever_Expecter { + return &MockDARetriever_Expecter{mock: &_m.Mock} +} + +// RetrieveForcedIncludedTxsFromDA provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) { + ret := _mock.Called(ctx, daHeight) + + if len(ret) == 0 { + panic("no return value specified for RetrieveForcedIncludedTxsFromDA") + } + + var r0 *ForcedIncludedEvent + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (*ForcedIncludedEvent, error)); ok { + return returnFunc(ctx, daHeight) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) *ForcedIncludedEvent); ok { + r0 = returnFunc(ctx, daHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ForcedIncludedEvent) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = returnFunc(ctx, daHeight) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'RetrieveForcedIncludedTxsFromDA' +type MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call struct { + *mock.Call +} + +// RetrieveForcedIncludedTxsFromDA is a helper method to define mock.On call +// - ctx context.Context +// - daHeight uint64 +func (_e *MockDARetriever_Expecter) RetrieveForcedIncludedTxsFromDA(ctx interface{}, daHeight interface{}) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { + return &MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call{Call: _e.mock.On("RetrieveForcedIncludedTxsFromDA", ctx, daHeight)} +} + +func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) Return(v *ForcedIncludedEvent, err error) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error)) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { + _c.Call.Return(run) + return _c +} + +// RetrieveFromDA provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]DAHeightEvent, error) { + ret := _mock.Called(ctx, daHeight) + + if len(ret) == 0 { + panic("no return value specified for RetrieveFromDA") + } + + var r0 []DAHeightEvent + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) ([]DAHeightEvent, error)); ok { + return returnFunc(ctx, daHeight) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) []DAHeightEvent); ok { + r0 
= returnFunc(ctx, daHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]DAHeightEvent) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = returnFunc(ctx, daHeight) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockDARetriever_RetrieveFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveFromDA' +type MockDARetriever_RetrieveFromDA_Call struct { + *mock.Call +} + +// RetrieveFromDA is a helper method to define mock.On call +// - ctx context.Context +// - daHeight uint64 +func (_e *MockDARetriever_Expecter) RetrieveFromDA(ctx interface{}, daHeight interface{}) *MockDARetriever_RetrieveFromDA_Call { + return &MockDARetriever_RetrieveFromDA_Call{Call: _e.mock.On("RetrieveFromDA", ctx, daHeight)} +} + +func (_c *MockDARetriever_RetrieveFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDARetriever_RetrieveFromDA_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockDARetriever_RetrieveFromDA_Call) Return(vs []DAHeightEvent, err error) *MockDARetriever_RetrieveFromDA_Call { + _c.Call.Return(vs, err) + return _c +} + +func (_c *MockDARetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]DAHeightEvent, error)) *MockDARetriever_RetrieveFromDA_Call { + _c.Call.Return(run) + return _c +} diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index 70226bbeb4..df72885239 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -14,7 +14,6 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - 
"github.com/evstack/ev-node/block/internal/syncing" coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -57,7 +56,7 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { memStore, mockExec, mockSeq, - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -168,7 +167,7 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { memStore, mockExec, mockSeq, - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index a2a5789b1c..945bc4ace7 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -16,7 +16,6 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/syncing" coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -79,7 +78,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { memStore, mockExec, mockSeq, - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -167,7 +166,7 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { memStore, mockExec, mockSeq, - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index 9f764ee7e6..a950d6dc21 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -14,7 +14,6 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - 
"github.com/evstack/ev-node/block/internal/syncing" coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -57,7 +56,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { memStore, mockExec1, mockSeq1, - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -177,7 +176,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { memStore, // same store mockExec2, mockSeq2, - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -276,7 +275,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { memStore, mockExec1, mockSeq1, - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -329,7 +328,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { memStore, mockExec2, mockSeq2, - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index 4ffa9cdfc7..fc73b94a0c 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -13,7 +13,6 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/syncing" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" @@ -49,7 +48,7 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { memStore, nil, // nil executor (we're not testing execution) nil, // nil sequencer (we're not testing sequencing) - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), testSigner, // test signer (required for executor) cacheManager, metrics, @@ -100,7 +99,7 @@ func TestExecutor_NilBroadcasters(t *testing.T) { memStore, nil, // 
nil executor nil, // nil sequencer - syncing.NewMockDaRetrieverI(t), + common.NewMockDARetriever(t), testSigner, // test signer (required for executor) cacheManager, metrics, diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 501786a840..2ed5b17559 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -69,7 +69,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { syncer.ctx = ctx // Setup mocks - daRetriever := NewMockDaRetrieverI(t) + daRetriever := common.NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler @@ -167,7 +167,7 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) gen := syncer.genesis - daRetriever := NewMockDaRetrieverI(t) + daRetriever := common.NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler @@ -260,7 +260,7 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { syncer := setupTestSyncer(t, 500*time.Millisecond) syncer.ctx = ctx - daRetriever := NewMockDaRetrieverI(t) + daRetriever := common.NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 20c2eb66bc..77745c3a7d 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -131,7 +131,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay } // Mock DA retriever to emit exactly totalHeights events, then HFF and cancel - daR := NewMockDaRetrieverI(b) + daR := common.NewMockDARetriever(b) for i := uint64(0); i < totalHeights; i++ { daHeight := i + daHeightOffset daR.On("RetrieveFromDA", mock.Anything, daHeight). 
diff --git a/block/internal/syncing/syncer_mock.go b/block/internal/syncing/syncer_mock.go index 7b9f39958a..f20622c20e 100644 --- a/block/internal/syncing/syncer_mock.go +++ b/block/internal/syncing/syncer_mock.go @@ -11,169 +11,6 @@ import ( mock "github.com/stretchr/testify/mock" ) -// NewMockDaRetrieverI creates a new instance of MockDaRetrieverI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockDaRetrieverI(t interface { - mock.TestingT - Cleanup(func()) -}) *MockDaRetrieverI { - mock := &MockDaRetrieverI{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} - -// MockDaRetrieverI is an autogenerated mock type for the DaRetrieverI type -type MockDaRetrieverI struct { - mock.Mock -} - -type MockDaRetrieverI_Expecter struct { - mock *mock.Mock -} - -func (_m *MockDaRetrieverI) EXPECT() *MockDaRetrieverI_Expecter { - return &MockDaRetrieverI_Expecter{mock: &_m.Mock} -} - -// RetrieveForcedIncludedTxsFromDA provides a mock function for the type MockDaRetrieverI -func (_mock *MockDaRetrieverI) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { - ret := _mock.Called(ctx, daHeight) - - if len(ret) == 0 { - panic("no return value specified for RetrieveForcedIncludedTxsFromDA") - } - - var r0 *common.ForcedIncludedEvent - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (*common.ForcedIncludedEvent, error)); ok { - return returnFunc(ctx, daHeight) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) *common.ForcedIncludedEvent); ok { - r0 = returnFunc(ctx, daHeight) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*common.ForcedIncludedEvent) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = returnFunc(ctx, daHeight) - } else { - r1 = ret.Error(1) - } - 
return r0, r1 -} - -// MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveForcedIncludedTxsFromDA' -type MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call struct { - *mock.Call -} - -// RetrieveForcedIncludedTxsFromDA is a helper method to define mock.On call -// - ctx context.Context -// - daHeight uint64 -func (_e *MockDaRetrieverI_Expecter) RetrieveForcedIncludedTxsFromDA(ctx interface{}, daHeight interface{}) *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call { - return &MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call{Call: _e.mock.On("RetrieveForcedIncludedTxsFromDA", ctx, daHeight)} -} - -func (_c *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 uint64 - if args[1] != nil { - arg1 = args[1].(uint64) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call) Return(forcedIncludedEvent *common.ForcedIncludedEvent, err error) *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call { - _c.Call.Return(forcedIncludedEvent, err) - return _c -} - -func (_c *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error)) *MockDaRetrieverI_RetrieveForcedIncludedTxsFromDA_Call { - _c.Call.Return(run) - return _c -} - -// RetrieveFromDA provides a mock function for the type MockDaRetrieverI -func (_mock *MockDaRetrieverI) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { - ret := _mock.Called(ctx, daHeight) - - if len(ret) == 0 { - panic("no return value specified for RetrieveFromDA") - } - - var r0 []common.DAHeightEvent - var 
r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) ([]common.DAHeightEvent, error)); ok { - return returnFunc(ctx, daHeight) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) []common.DAHeightEvent); ok { - r0 = returnFunc(ctx, daHeight) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]common.DAHeightEvent) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = returnFunc(ctx, daHeight) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDaRetrieverI_RetrieveFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveFromDA' -type MockDaRetrieverI_RetrieveFromDA_Call struct { - *mock.Call -} - -// RetrieveFromDA is a helper method to define mock.On call -// - ctx context.Context -// - daHeight uint64 -func (_e *MockDaRetrieverI_Expecter) RetrieveFromDA(ctx interface{}, daHeight interface{}) *MockDaRetrieverI_RetrieveFromDA_Call { - return &MockDaRetrieverI_RetrieveFromDA_Call{Call: _e.mock.On("RetrieveFromDA", ctx, daHeight)} -} - -func (_c *MockDaRetrieverI_RetrieveFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDaRetrieverI_RetrieveFromDA_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 uint64 - if args[1] != nil { - arg1 = args[1].(uint64) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *MockDaRetrieverI_RetrieveFromDA_Call) Return(dAHeightEvents []common.DAHeightEvent, err error) *MockDaRetrieverI_RetrieveFromDA_Call { - _c.Call.Return(dAHeightEvents, err) - return _c -} - -func (_c *MockDaRetrieverI_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error)) *MockDaRetrieverI_RetrieveFromDA_Call { - _c.Call.Return(run) - return _c -} - // newMockp2pHandler creates a new instance of mockp2pHandler. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func newMockp2pHandler(t interface { diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 59a9b5cb8f..50abf4dba2 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -381,7 +381,7 @@ func TestSyncLoopPersistState(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) syncerInst1.ctx = ctx - daRtrMock, p2pHndlMock := NewMockDaRetrieverI(t), newMockp2pHandler(t) + daRtrMock, p2pHndlMock := common.NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeaderRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() p2pHndlMock.On("ProcessDataRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() syncerInst1.daRetriever, syncerInst1.p2pHandler = daRtrMock, p2pHndlMock @@ -474,7 +474,7 @@ func TestSyncLoopPersistState(t *testing.T) { ctx, cancel = context.WithCancel(t.Context()) t.Cleanup(cancel) syncerInst2.ctx = ctx - daRtrMock, p2pHndlMock = NewMockDaRetrieverI(t), newMockp2pHandler(t) + daRtrMock, p2pHndlMock = common.NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeaderRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() p2pHndlMock.On("ProcessDataRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() syncerInst2.daRetriever, syncerInst2.p2pHandler = daRtrMock, p2pHndlMock From 2c5fdbf7bdd61b37ec40f4b7f76d1be3e0641e37 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 5 Nov 2025 15:35:05 +0100 Subject: [PATCH 07/39] updates --- block/components.go | 9 +++++++++ block/internal/syncing/syncer.go | 10 ++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/block/components.go b/block/components.go index f3d88342c5..acdd69e946 100644 --- 
a/block/components.go +++ b/block/components.go @@ -246,6 +246,15 @@ func NewAggregatorComponents( return nil, fmt.Errorf("failed to create reaper: %w", err) } + if config.Node.BasedSequencer { // no submissions needed for bases sequencer + return &Components{ + Executor: executor, + Reaper: reaper, + Cache: cacheManager, + errorCh: errorCh, + }, nil + } + // Create DA submitter for aggregator nodes (with signer for submission) daSubmitter := submitting.NewDASubmitter(da, config, genesis, blockOpts, metrics, logger) submitter := submitting.NewSubmitter( diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 7f4b9985cc..9c88d40c7e 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -466,7 +466,11 @@ func (s *Syncer) trySyncNextBlock(event *common.DAHeightEvent) error { // Verify forced inclusion transactions if configured if err := s.verifyForcedInclusionTxs(currentState, data); err != nil { s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("forced inclusion verification failed") - // TODO(@julienrbrt): Eventually halt the syncer and request the node to be started using the based sequencer. + if errors.Is(err, errMaliciousProposer) { + s.logger.Error().Msg("Restart with based sequencer.") + s.cache.RemoveHeaderDAIncluded(headerHash) + return err + } } // Apply block @@ -586,6 +590,8 @@ func (s *Syncer) validateBlock(currState types.State, data *types.Data, header * return nil } +var errMaliciousProposer = errors.New("malicious proposer detected") + // verifyForcedInclusionTxs verifies that all forced inclusion transactions from DA are included in the block func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { if s.daRetriever == nil { @@ -631,7 +637,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. Int("missing_count", len(missingTxs)). Int("total_forced", len(forcedIncludedTxsEvent.Txs)). 
Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions missing from block") - return fmt.Errorf("sequencer is malicious: %d forced inclusion transactions not included in block", len(missingTxs)) + return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions not included in block", len(missingTxs))) } s.logger.Debug(). From 16889f5d2f928d47d65384e2aee147a59fba1048 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 5 Nov 2025 16:52:48 +0100 Subject: [PATCH 08/39] move to sequencer --- .mockery.yaml | 10 +- apps/evm/single/cmd/run.go | 31 ++++ apps/evm/single/go.mod | 6 +- apps/evm/single/go.sum | 4 - apps/grpc/single/cmd/run.go | 30 ++++ apps/grpc/single/go.mod | 14 +- apps/grpc/single/go.sum | 7 +- apps/testapp/cmd/run.go | 31 ++++ apps/testapp/go.mod | 4 +- apps/testapp/go.sum | 3 +- block/components.go | 3 - block/internal/common/event.go | 8 - block/internal/executing/executor.go | 45 ++--- .../internal/executing/executor_lazy_test.go | 2 - .../internal/executing/executor_logic_test.go | 2 - .../executing/executor_restart_test.go | 4 - block/internal/executing/executor_test.go | 10 +- block/internal/reaping/reaper_test.go | 1 - block/internal/syncing/da_retriever.go | 32 ++-- .../{common => syncing}/da_retriever_mock.go | 31 ++-- block/internal/syncing/da_retriever_test.go | 2 +- block/internal/syncing/syncer.go | 2 +- block/internal/syncing/syncer_backoff_test.go | 6 +- .../internal/syncing/syncer_benchmark_test.go | 2 +- block/internal/syncing/syncer_test.go | 4 +- block/public.go | 2 +- core/sequencer/dummy.go | 5 + core/sequencer/sequencing.go | 4 + go.mod | 4 +- go.sum | 5 +- pkg/cmd/run_node.go | 2 +- sequencers/based/based.go | 14 +- sequencers/based/based_test.go | 20 +-- sequencers/single/go.mod | 2 +- sequencers/single/go.sum | 3 +- sequencers/single/sequencer.go | 154 +++++++++++++++--- sequencers/single/sequencer_test.go | 13 +- test/mocks/sequencer.go | 40 +++++ 38 files changed, 380 
insertions(+), 182 deletions(-) rename block/internal/{common => syncing}/da_retriever_mock.go (82%) diff --git a/.mockery.yaml b/.mockery.yaml index 1f87c57e00..8f139231cb 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -48,6 +48,11 @@ packages: filename: external/hstore.go github.com/evstack/ev-node/block/internal/syncing: interfaces: + DARetriever: + config: + dir: ./block/internal/syncing + pkgname: syncing + filename: da_retriever_mock.go p2pHandler: config: dir: ./block/internal/syncing @@ -60,8 +65,3 @@ packages: dir: ./block/internal/common pkgname: common filename: broadcaster_mock.go - DARetriever: - config: - dir: ./block/internal/common - pkgname: common - filename: da_retriever_mock.go diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 37b99036d6..df4c880b0d 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -140,6 +140,29 @@ func createSequencer( return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) } + // Create DA retriever for forced inclusion support + var daRetriever single.DARetriever + if nodeConfig.Node.Aggregator { + commonDARetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + + // Adapter function to convert between common and single event types + adapterFunc := func(ctx context.Context, daHeight uint64) (*single.ForcedInclusionEvent, error) { + event, err := commonDARetriever.RetrieveForcedIncludedTxsFromDA(ctx, daHeight) + if err != nil { + return nil, err + } + return &single.ForcedInclusionEvent{ + Txs: event.Txs, + StartDaHeight: event.StartDaHeight, + EndDaHeight: event.EndDaHeight, + }, nil + } + daRetriever = single.NewDARetrieverAdapter(adapterFunc) + } + sequencer, err := single.NewSequencer( ctx, logger, @@ -149,11 +172,19 @@ func createSequencer( nodeConfig.Node.BlockTime.Duration, singleMetrics, nodeConfig.Node.Aggregator, + 1000, + daRetriever, + 
genesis, ) if err != nil { return nil, fmt.Errorf("failed to create single sequencer: %w", err) } + logger.Info(). + Bool("forced_inclusion_enabled", daRetriever != nil). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Msg("single sequencer initialized") + return sequencer, nil } diff --git a/apps/evm/single/go.mod b/apps/evm/single/go.mod index e65a2e79ed..1264f34727 100644 --- a/apps/evm/single/go.mod +++ b/apps/evm/single/go.mod @@ -4,7 +4,11 @@ go 1.24.6 replace github.com/celestiaorg/go-header => github.com/julienrbrt/go-header v0.0.0-20251008134330-747c8c192fa8 // TODO: to remove after https://github.com/celestiaorg/go-header/pull/347 -replace github.com/evstack/ev-node => ../../../ +replace ( + github.com/evstack/ev-node => ../../../ + github.com/evstack/ev-node/core => ../../../core + github.com/evstack/ev-node/sequencers/single => ../../../sequencers/single +) require ( github.com/celestiaorg/go-header v0.7.3 diff --git a/apps/evm/single/go.sum b/apps/evm/single/go.sum index c26f06a35f..59f8793e44 100644 --- a/apps/evm/single/go.sum +++ b/apps/evm/single/go.sum @@ -103,14 +103,10 @@ github.com/ethereum/go-ethereum v1.16.5 h1:GZI995PZkzP7ySCxEFaOPzS8+bd8NldE//1qv github.com/ethereum/go-ethereum v1.16.5/go.mod h1:kId9vOtlYg3PZk9VwKbGlQmSACB5ESPTBGT+M9zjmok= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= -github.com/evstack/ev-node/core v1.0.0-beta.4 h1:F/rqHCrZ+ViUY4I6RuoBVvkhYfosD68yo/6gCdGRdmo= -github.com/evstack/ev-node/core v1.0.0-beta.4/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/evstack/ev-node/da v1.0.0-beta.5 h1:rWs/H0Nkj9uwTqD7Tzu+PpaNHGFE71B1ZVKCFLx0yVI= github.com/evstack/ev-node/da v1.0.0-beta.5/go.mod h1:lJ7vGlczBwiqTaTE9C4zV9tEsQO+oi0sqyUyYzj3zpo= github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 h1:xo0mZz3CJtntP1RPLFDBubBKpNkqStImt9H9N0xysj8= 
github.com/evstack/ev-node/execution/evm v1.0.0-beta.3/go.mod h1:yazCKZaVczYwizfHYSQ4KIYqW0d42M7q7e9AxuSXV3s= -github.com/evstack/ev-node/sequencers/single v1.0.0-beta.3 h1:BT/UeH7Tf8z0btzomCzTbbDDZGAT8/yHcd6xY6P/aaw= -github.com/evstack/ev-node/sequencers/single v1.0.0-beta.3/go.mod h1:eCkDecdJ3s7TB3R5nFdPDyz7jjRmwYen6lGe9D2sSH4= github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index 0bb79a7b9a..41e4fbf927 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -149,6 +149,28 @@ func createSequencer( return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) } + // Create DA retriever for forced inclusion support + var daRetriever single.DARetriever + if nodeConfig.Node.Aggregator { + commonDARetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + // Adapter function to convert between common and single event types + adapterFunc := func(ctx context.Context, daHeight uint64) (*single.ForcedInclusionEvent, error) { + event, err := commonDARetriever.RetrieveForcedIncludedTxsFromDA(ctx, daHeight) + if err != nil { + return nil, err + } + return &single.ForcedInclusionEvent{ + Txs: event.Txs, + StartDaHeight: event.StartDaHeight, + EndDaHeight: event.EndDaHeight, + }, nil + } + daRetriever = single.NewDARetrieverAdapter(adapterFunc) + } + sequencer, err := single.NewSequencer( ctx, logger, @@ -158,11 +180,19 @@ func createSequencer( nodeConfig.Node.BlockTime.Duration, singleMetrics, nodeConfig.Node.Aggregator, + 1000, + daRetriever, + genesis, ) if err != nil { return nil, fmt.Errorf("failed to create single sequencer: %w", err) } + 
logger.Info(). + Bool("forced_inclusion_enabled", daRetriever != nil). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Msg("single sequencer initialized") + return sequencer, nil } diff --git a/apps/grpc/single/go.mod b/apps/grpc/single/go.mod index e80b8a1a99..44e2e20436 100644 --- a/apps/grpc/single/go.mod +++ b/apps/grpc/single/go.mod @@ -2,6 +2,13 @@ module github.com/evstack/ev-node/apps/grpc/single go 1.24.6 +replace ( + github.com/evstack/ev-node => ../../../ + github.com/evstack/ev-node/core => ../../../core + github.com/evstack/ev-node/execution/grpc => ../../../execution/grpc + github.com/evstack/ev-node/sequencers/single => ../../../sequencers/single +) + require ( github.com/evstack/ev-node v1.0.0-beta.9 github.com/evstack/ev-node/core v1.0.0-beta.4 @@ -73,7 +80,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -163,8 +170,3 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect ) - -replace ( - github.com/evstack/ev-node => ../../../ - github.com/evstack/ev-node/execution/grpc => ../../../execution/grpc -) diff --git a/apps/grpc/single/go.sum b/apps/grpc/single/go.sum index 60fae2c55a..aa1c134f35 100644 --- a/apps/grpc/single/go.sum +++ b/apps/grpc/single/go.sum @@ -62,12 +62,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node/core v1.0.0-beta.4 h1:F/rqHCrZ+ViUY4I6RuoBVvkhYfosD68yo/6gCdGRdmo= -github.com/evstack/ev-node/core v1.0.0-beta.4/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/evstack/ev-node/da v1.0.0-beta.5 h1:rWs/H0Nkj9uwTqD7Tzu+PpaNHGFE71B1ZVKCFLx0yVI= github.com/evstack/ev-node/da v1.0.0-beta.5/go.mod h1:lJ7vGlczBwiqTaTE9C4zV9tEsQO+oi0sqyUyYzj3zpo= -github.com/evstack/ev-node/sequencers/single v1.0.0-beta.3 h1:BT/UeH7Tf8z0btzomCzTbbDDZGAT8/yHcd6xY6P/aaw= -github.com/evstack/ev-node/sequencers/single v1.0.0-beta.3/go.mod h1:eCkDecdJ3s7TB3R5nFdPDyz7jjRmwYen6lGe9D2sSH4= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/filecoin-project/go-jsonrpc v0.9.0 h1:G47qEF52w7GholpI21vPSTVBFvsrip6geIoqNiqyZtQ= @@ -235,8 +231,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index c20f6a1631..c6bcb66937 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -146,6 +146,29 @@ func createSequencer( return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) } + // Create DA retriever for forced inclusion support + var daRetriever single.DARetriever + if nodeConfig.Node.Aggregator { + commonDARetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + + // Adapter function to convert between common and single event types + adapterFunc := func(ctx context.Context, daHeight uint64) (*single.ForcedInclusionEvent, error) { + event, err := commonDARetriever.RetrieveForcedIncludedTxsFromDA(ctx, daHeight) + if err != nil { + return nil, err + } + return &single.ForcedInclusionEvent{ + Txs: event.Txs, + StartDaHeight: event.StartDaHeight, + EndDaHeight: event.EndDaHeight, + }, nil + } + daRetriever = single.NewDARetrieverAdapter(adapterFunc) + } + sequencer, err := single.NewSequencer( ctx, logger, @@ -155,10 +178,18 @@ func createSequencer( nodeConfig.Node.BlockTime.Duration, singleMetrics, nodeConfig.Node.Aggregator, + 1000, + daRetriever, + genesis, ) if err != nil { return nil, fmt.Errorf("failed to create single sequencer: %w", err) } + logger.Info(). + Bool("forced_inclusion_enabled", daRetriever != nil). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
+ Msg("single sequencer initialized") + return sequencer, nil } diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index ea0bc6d0f7..3a2f42434b 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -15,7 +15,7 @@ require ( github.com/celestiaorg/go-header v0.7.3 github.com/evstack/ev-node v1.0.0-beta.9 github.com/evstack/ev-node/core v1.0.0-beta.4 - github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 + github.com/evstack/ev-node/da v1.0.0-beta.5 github.com/evstack/ev-node/sequencers/single v0.0.0-00010101000000-000000000000 github.com/ipfs/go-datastore v0.9.0 github.com/rs/zerolog v1.34.0 @@ -82,7 +82,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index 9f9519135f..eabd0c4654 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -229,8 +229,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod 
h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/block/components.go b/block/components.go index acdd69e946..9e9e6af425 100644 --- a/block/components.go +++ b/block/components.go @@ -211,13 +211,10 @@ func NewAggregatorComponents( // error channel for critical failures errorCh := make(chan error, 1) - daRetriever := syncing.NewDARetriever(da, cacheManager, config, genesis, logger) - executor, err := executing.NewExecutor( store, exec, sequencer, - daRetriever, signer, cacheManager, metrics, diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 3fad0e6fba..1117683a51 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -1,8 +1,6 @@ package common import ( - "context" - "github.com/evstack/ev-node/types" ) @@ -16,12 +14,6 @@ const ( SourceP2P EventSource = "P2P" ) -// DARetriever defines the interface for retrieving events from the DA layer -type DARetriever interface { - RetrieveFromDA(ctx context.Context, daHeight uint64) ([]DAHeightEvent, error) - RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) -} - // DAHeightEvent represents a DA event for caching type DAHeightEvent = struct { Header *types.SignedHeader diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index b2ae3122f3..ddfaa82e63 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -15,7 +15,6 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/syncing" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer 
"github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" @@ -28,11 +27,10 @@ import ( // Executor handles block production, transaction processing, and state management type Executor struct { // Core components - store store.Store - exec coreexecutor.Executor - sequencer coresequencer.Sequencer - signer signer.Signer - daRetriever common.DARetriever + store store.Store + exec coreexecutor.Executor + sequencer coresequencer.Sequencer + signer signer.Signer // Shared components cache cache.Manager @@ -73,7 +71,6 @@ func NewExecutor( store store.Store, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, - daRetriever common.DARetriever, signer signer.Signer, cache cache.Manager, metrics *common.Metrics, @@ -102,7 +99,6 @@ func NewExecutor( store: store, exec: exec, sequencer: sequencer, - daRetriever: daRetriever, signer: signer, cache: cache, metrics: metrics, @@ -334,18 +330,6 @@ func (e *Executor) produceBlock() error { } } - // fetch forced included txs - var ( - forcedIncludedTxsEvent *common.ForcedIncludedEvent - err error - ) - if !e.config.Node.BasedSequencer { - forcedIncludedTxsEvent, err = e.daRetriever.RetrieveForcedIncludedTxsFromDA(e.ctx, currentState.DAHeight) - if err != nil && !errors.Is(err, syncing.ErrForceInclusionNotConfigured) { - e.logger.Error().Err(err).Msg("failed to retrieve forced included txs") - } - } - var ( header *types.SignedHeader data *types.Data @@ -372,12 +356,6 @@ func (e *Executor) produceBlock() error { return fmt.Errorf("failed to retrieve batch: %w", err) } - // append forced included txs to batch data - // TODO(@julienrbrt): if the batch is at size, adding more txs isn't what we want. - // maybe we need to add a limit to retrieveBatch based on the forced included txs size. - // for the poc this is ok as is. - batchData.Transactions = append(batchData.Transactions, forcedIncludedTxsEvent.Txs...) 
- header, data, err = e.createBlock(e.ctx, newHeight, batchData) if err != nil { return fmt.Errorf("failed to create block: %w", err) @@ -401,11 +379,6 @@ func (e *Executor) produceBlock() error { return fmt.Errorf("failed to apply block: %w", err) } - // update da height, based on last retrieved. - if forcedIncludedTxsEvent.EndDaHeight > newState.DAHeight { - newState.DAHeight = forcedIncludedTxsEvent.EndDaHeight - } - // signing the header is done after applying the block // as for signing, the state of the block may be required by the signature payload provider. signature, err := e.signHeader(header.Header) @@ -466,9 +439,15 @@ func (e *Executor) produceBlock() error { // retrieveBatch gets the next batch of transactions from the sequencer func (e *Executor) retrieveBatch(ctx context.Context) (*BatchData, error) { + currentState := e.getLastState() + + // Update sequencer's DA height for forced inclusion tracking + e.sequencer.SetDAHeight(currentState.DAHeight) + req := coresequencer.GetNextBatchRequest{ - Id: []byte(e.genesis.ChainID), - MaxBytes: common.DefaultMaxBlobSize, + Id: []byte(e.genesis.ChainID), + MaxBytes: common.DefaultMaxBlobSize, + LastBatchData: [][]byte{}, // Can be populated if needed for sequencer context } res, err := e.sequencer.GetNextBatch(ctx, req) diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index df72885239..b72f0a856b 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -56,7 +56,6 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { memStore, mockExec, mockSeq, - common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -167,7 +166,6 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { memStore, mockExec, mockSeq, - common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_logic_test.go 
b/block/internal/executing/executor_logic_test.go index 945bc4ace7..9aa79d0c43 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -78,7 +78,6 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { memStore, mockExec, mockSeq, - common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -166,7 +165,6 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { memStore, mockExec, mockSeq, - common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index a950d6dc21..3f0e8b500c 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -56,7 +56,6 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { memStore, mockExec1, mockSeq1, - common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -176,7 +175,6 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { memStore, // same store mockExec2, mockSeq2, - common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -275,7 +273,6 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { memStore, mockExec1, mockSeq1, - common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, @@ -328,7 +325,6 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { memStore, mockExec2, mockSeq2, - common.NewMockDARetriever(t), signerWrapper, cacheManager, metrics, diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index fc73b94a0c..e310c6d40d 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -46,9 +46,8 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { // Create executor with broadcasters executor, err := NewExecutor( memStore, - nil, // nil executor (we're not testing execution) - nil, // nil 
sequencer (we're not testing sequencing) - common.NewMockDARetriever(t), + nil, // nil executor (we're not testing execution) + nil, // nil sequencer (we're not testing sequencing) testSigner, // test signer (required for executor) cacheManager, metrics, @@ -97,9 +96,8 @@ func TestExecutor_NilBroadcasters(t *testing.T) { // Create executor with nil broadcasters (light node scenario) executor, err := NewExecutor( memStore, - nil, // nil executor - nil, // nil sequencer - common.NewMockDARetriever(t), + nil, // nil executor + nil, // nil sequencer testSigner, // test signer (required for executor) cacheManager, metrics, diff --git a/block/internal/reaping/reaper_test.go b/block/internal/reaping/reaper_test.go index 23104d2eb9..d9dc701276 100644 --- a/block/internal/reaping/reaper_test.go +++ b/block/internal/reaping/reaper_test.go @@ -43,7 +43,6 @@ func newTestExecutor(t *testing.T) *executing.Executor { nil, // store (unused) nil, // core executor (unused) nil, // sequencer (unused) - nil, // daretriever (unused) s, // signer (required) nil, // cache (unused) nil, // metrics (unused) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index b9e4d81f8a..56c289101f 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -22,8 +22,14 @@ import ( // defaultDATimeout is the default timeout for DA retrieval operations const defaultDATimeout = 10 * time.Second -// DARetriever handles DA retrieval operations for syncing -type DARetriever struct { +// DARetriever defines the interface for retrieving events from the DA layer +type DARetriever interface { + RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) + RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) +} + +// daRetriever handles DA retrieval operations for syncing +type daRetriever struct { da coreda.DA cache cache.Manager genesis genesis.Genesis 
@@ -50,7 +56,7 @@ func NewDARetriever( config config.Config, genesis genesis.Genesis, logger zerolog.Logger, -) *DARetriever { +) *daRetriever { forcedInclusionNs := config.DA.GetForcedInclusionNamespace() hasForcedInclusionNs := forcedInclusionNs != "" @@ -59,7 +65,7 @@ func NewDARetriever( namespaceForcedInclusionBz = coreda.NamespaceFromString(forcedInclusionNs).Bytes() } - return &DARetriever{ + return &daRetriever{ da: da, cache: cache, genesis: genesis, @@ -75,7 +81,7 @@ func NewDARetriever( } // RetrieveFromDA retrieves blocks from the specified DA height and returns height events -func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { +func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { r.logger.Debug().Uint64("da_height", daHeight).Msg("retrieving from DA") blobsResp, err := r.fetchBlobs(ctx, daHeight) if err != nil { @@ -96,7 +102,7 @@ var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not // RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. // It fetches from the daHeight for the da epoch range defined in the config. 
-func (r *DARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { +func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { if !r.hasForcedInclusionNs { return nil, ErrForceInclusionNotConfigured } @@ -129,7 +135,7 @@ func (r *DARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei } // fetchBlobs retrieves blobs from the DA layer -func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { +func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { // Retrieve from both namespaces headerRes := types.RetrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceBz, defaultDATimeout) @@ -185,7 +191,7 @@ func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.R // validateBlobResponse validates a blob response from DA layer // those are the only error code returned by da.RetrieveWithHelpers -func (r *DARetriever) validateBlobResponse(res coreda.ResultRetrieve, daHeight uint64) error { +func (r *daRetriever) validateBlobResponse(res coreda.ResultRetrieve, daHeight uint64) error { switch res.Code { case coreda.StatusError: return fmt.Errorf("DA retrieval failed: %s", res.Message) @@ -202,7 +208,7 @@ func (r *DARetriever) validateBlobResponse(res coreda.ResultRetrieve, daHeight u } // processBlobs processes retrieved blobs to extract headers and data and returns height events -func (r *DARetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { +func (r *daRetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { // Decode all blobs for _, bz := range blobs { if len(bz) == 0 { @@ -271,7 +277,7 @@ func (r *DARetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight } // tryDecodeHeader attempts to decode a 
blob as a header -func (r *DARetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedHeader { +func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedHeader { header := new(types.SignedHeader) var headerPb pb.SignedHeader @@ -311,7 +317,7 @@ func (r *DARetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH } // tryDecodeData attempts to decode a blob as signed data -func (r *DARetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { +func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { var signedData types.SignedData if err := signedData.UnmarshalBinary(bz); err != nil { return nil @@ -342,7 +348,7 @@ func (r *DARetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { } // assertExpectedProposer validates the proposer address -func (r *DARetriever) assertExpectedProposer(proposerAddr []byte) error { +func (r *daRetriever) assertExpectedProposer(proposerAddr []byte) error { if string(proposerAddr) != string(r.genesis.ProposerAddress) { return fmt.Errorf("unexpected proposer: got %x, expected %x", proposerAddr, r.genesis.ProposerAddress) @@ -351,7 +357,7 @@ func (r *DARetriever) assertExpectedProposer(proposerAddr []byte) error { } // assertValidSignedData validates signed data using the configured signature provider -func (r *DARetriever) assertValidSignedData(signedData *types.SignedData) error { +func (r *daRetriever) assertValidSignedData(signedData *types.SignedData) error { if signedData == nil || signedData.Txs == nil { return errors.New("empty signed data") } diff --git a/block/internal/common/da_retriever_mock.go b/block/internal/syncing/da_retriever_mock.go similarity index 82% rename from block/internal/common/da_retriever_mock.go rename to block/internal/syncing/da_retriever_mock.go index c83ad564db..505987aee6 100644 --- a/block/internal/common/da_retriever_mock.go +++ b/block/internal/syncing/da_retriever_mock.go @@ -2,11 +2,12 @@ // github.com/vektra/mockery 
// template: testify -package common +package syncing import ( "context" + "github.com/evstack/ev-node/block/internal/common" mock "github.com/stretchr/testify/mock" ) @@ -38,23 +39,23 @@ func (_m *MockDARetriever) EXPECT() *MockDARetriever_Expecter { } // RetrieveForcedIncludedTxsFromDA provides a mock function for the type MockDARetriever -func (_mock *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) { +func (_mock *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { ret := _mock.Called(ctx, daHeight) if len(ret) == 0 { panic("no return value specified for RetrieveForcedIncludedTxsFromDA") } - var r0 *ForcedIncludedEvent + var r0 *common.ForcedIncludedEvent var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (*ForcedIncludedEvent, error)); ok { + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (*common.ForcedIncludedEvent, error)); ok { return returnFunc(ctx, daHeight) } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) *ForcedIncludedEvent); ok { + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) *common.ForcedIncludedEvent); ok { r0 = returnFunc(ctx, daHeight) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*ForcedIncludedEvent) + r0 = ret.Get(0).(*common.ForcedIncludedEvent) } } if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { @@ -95,34 +96,34 @@ func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) Run(run func(ctx return _c } -func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) Return(v *ForcedIncludedEvent, err error) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { +func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) Return(v *common.ForcedIncludedEvent, err error) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { _c.Call.Return(v, err) return _c } -func (_c 
*MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error)) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { +func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error)) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { _c.Call.Return(run) return _c } // RetrieveFromDA provides a mock function for the type MockDARetriever -func (_mock *MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]DAHeightEvent, error) { +func (_mock *MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { ret := _mock.Called(ctx, daHeight) if len(ret) == 0 { panic("no return value specified for RetrieveFromDA") } - var r0 []DAHeightEvent + var r0 []common.DAHeightEvent var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) ([]DAHeightEvent, error)); ok { + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) ([]common.DAHeightEvent, error)); ok { return returnFunc(ctx, daHeight) } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) []DAHeightEvent); ok { + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) []common.DAHeightEvent); ok { r0 = returnFunc(ctx, daHeight) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]DAHeightEvent) + r0 = ret.Get(0).([]common.DAHeightEvent) } } if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { @@ -163,12 +164,12 @@ func (_c *MockDARetriever_RetrieveFromDA_Call) Run(run func(ctx context.Context, return _c } -func (_c *MockDARetriever_RetrieveFromDA_Call) Return(vs []DAHeightEvent, err error) *MockDARetriever_RetrieveFromDA_Call { +func (_c *MockDARetriever_RetrieveFromDA_Call) Return(vs []common.DAHeightEvent, err error) *MockDARetriever_RetrieveFromDA_Call { _c.Call.Return(vs, err) return _c } -func (_c 
*MockDARetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]DAHeightEvent, error)) *MockDARetriever_RetrieveFromDA_Call { +func (_c *MockDARetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error)) *MockDARetriever_RetrieveFromDA_Call { _c.Call.Return(run) return _c } diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index af5734a4f6..ae6e73197a 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -255,7 +255,7 @@ func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { } func TestDARetriever_validateBlobResponse(t *testing.T) { - r := &DARetriever{logger: zerolog.Nop()} + r := &daRetriever{logger: zerolog.Nop()} // StatusSuccess -> nil err := r.validateBlobResponse(coreda.ResultRetrieve{BaseResult: coreda.BaseResult{Code: coreda.StatusSuccess}}, 1) assert.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 9c88d40c7e..66bdcae6ab 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -56,7 +56,7 @@ type Syncer struct { errorCh chan<- error // Channel to report critical execution client failures // Handlers - daRetriever common.DARetriever + daRetriever DARetriever p2pHandler p2pHandler // Logging diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 2ed5b17559..3cf918bf9a 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -69,7 +69,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { syncer.ctx = ctx // Setup mocks - daRetriever := common.NewMockDARetriever(t) + daRetriever := NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler @@ -167,7 +167,7 @@ func 
TestSyncer_BackoffResetOnSuccess(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) gen := syncer.genesis - daRetriever := common.NewMockDARetriever(t) + daRetriever := NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler @@ -260,7 +260,7 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { syncer := setupTestSyncer(t, 500*time.Millisecond) syncer.ctx = ctx - daRetriever := common.NewMockDARetriever(t) + daRetriever := NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 77745c3a7d..3c4c511f7b 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -131,7 +131,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay } // Mock DA retriever to emit exactly totalHeights events, then HFF and cancel - daR := common.NewMockDARetriever(b) + daR := NewMockDARetriever(b) for i := uint64(0); i < totalHeights; i++ { daHeight := i + daHeightOffset daR.On("RetrieveFromDA", mock.Anything, daHeight). 
diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 50abf4dba2..b80d08925e 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -381,7 +381,7 @@ func TestSyncLoopPersistState(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) syncerInst1.ctx = ctx - daRtrMock, p2pHndlMock := common.NewMockDARetriever(t), newMockp2pHandler(t) + daRtrMock, p2pHndlMock := NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeaderRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() p2pHndlMock.On("ProcessDataRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() syncerInst1.daRetriever, syncerInst1.p2pHandler = daRtrMock, p2pHndlMock @@ -474,7 +474,7 @@ func TestSyncLoopPersistState(t *testing.T) { ctx, cancel = context.WithCancel(t.Context()) t.Cleanup(cancel) syncerInst2.ctx = ctx - daRtrMock, p2pHndlMock = common.NewMockDARetriever(t), newMockp2pHandler(t) + daRtrMock, p2pHndlMock = NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeaderRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() p2pHndlMock.On("ProcessDataRange", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return().Maybe() syncerInst2.daRetriever, syncerInst2.p2pHandler = daRtrMock, p2pHndlMock diff --git a/block/public.go b/block/public.go index 534a77a1e4..f8e09531c9 100644 --- a/block/public.go +++ b/block/public.go @@ -39,7 +39,7 @@ func NewDARetriever( config config.Config, genesis genesis.Genesis, logger zerolog.Logger, -) (common.DARetriever, error) { +) (syncing.DARetriever, error) { cacheManager, err := cache.NewManager(config, nil /* pending data not used */, logger) if err != nil { return nil, fmt.Errorf("failed to create cache manager: %w", err) diff --git a/core/sequencer/dummy.go b/core/sequencer/dummy.go index 5f44dae2a8..5b0e16e51e 100644 --- 
a/core/sequencer/dummy.go +++ b/core/sequencer/dummy.go @@ -64,3 +64,8 @@ func (s *DummySequencer) VerifyBatch(ctx context.Context, req VerifyBatchRequest Status: true, }, nil } + +// SetDAHeight sets the current DA height for the sequencer +func (s *DummySequencer) SetDAHeight(height uint64) { + // No-op for dummy sequencer +} diff --git a/core/sequencer/sequencing.go b/core/sequencer/sequencing.go index 055f01483e..6d48880dce 100644 --- a/core/sequencer/sequencing.go +++ b/core/sequencer/sequencing.go @@ -27,6 +27,10 @@ type Sequencer interface { // BatchHash is the cryptographic hash of the batch to verify // returns a boolean indicating if the batch is valid and an error if any from the sequencer VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) + + // SetDAHeight sets the current DA height for the sequencer + // This allows the sequencer to track DA height for forced inclusion retrieval + SetDAHeight(height uint64) } // Batch is a collection of transactions diff --git a/go.mod b/go.mod index 440911fd28..40c94adcdc 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,8 @@ go 1.24.6 retract v0.12.0 // Published by accident +replace github.com/evstack/ev-node/core => ./core + require ( connectrpc.com/connect v1.19.1 connectrpc.com/grpcreflect v1.3.0 @@ -82,7 +84,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect diff --git a/go.sum b/go.sum index 9f6f5f2b25..2c1c2db026 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node/core v1.0.0-beta.4 h1:F/rqHCrZ+ViUY4I6RuoBVvkhYfosD68yo/6gCdGRdmo= -github.com/evstack/ev-node/core v1.0.0-beta.4/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -229,8 +227,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index e0efe0b652..d22627baa6 100644 --- a/pkg/cmd/run_node.go +++ 
b/pkg/cmd/run_node.go @@ -93,7 +93,7 @@ func StartNode( // create a new remote signer var signer signer.Signer - if nodeConfig.Signer.SignerType == "file" && nodeConfig.Node.Aggregator { + if nodeConfig.Signer.SignerType == "file" && (nodeConfig.Node.Aggregator && !nodeConfig.Node.BasedSequencer) { // Get passphrase file path passphraseFile, err := cmd.Flags().GetString(rollconf.FlagSignerPassphraseFile) if err != nil { diff --git a/sequencers/based/based.go b/sequencers/based/based.go index 5d02de0a21..a5cfee0af6 100644 --- a/sequencers/based/based.go +++ b/sequencers/based/based.go @@ -127,7 +127,9 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get } if errors.Is(err, coreda.ErrHeightFromFuture) { - s.logger.Debug().Uint64("da_height", s.daHeight).Msg("DA height from future, incrementing") + s.logger.Debug(). + Uint64("da_height", s.daHeight). + Msg("DA height from future, waiting for DA to produce block") return &coresequencer.GetNextBatchResponse{ Batch: &coresequencer.Batch{Transactions: nil}, Timestamp: time.Now(), @@ -186,7 +188,7 @@ func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Ba // If this is the last transaction, clear the queue if i == len(s.txQueue)-1 { - s.ClearQueue() + s.txQueue = s.txQueue[:0] } } @@ -216,11 +218,3 @@ func (s *BasedSequencer) GetDAHeight() uint64 { defer s.mu.RUnlock() return s.daHeight } - -// ClearQueue clears the transaction queue -func (s *BasedSequencer) ClearQueue() { - s.mu.Lock() - defer s.mu.Unlock() - s.txQueue = s.txQueue[:0] - s.logger.Debug().Msg("transaction queue cleared") -} diff --git a/sequencers/based/based_test.go b/sequencers/based/based_test.go index 297c5fe32d..8e71aebc86 100644 --- a/sequencers/based/based_test.go +++ b/sequencers/based/based_test.go @@ -269,8 +269,8 @@ func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { require.NotNil(t, resp.Batch) assert.Nil(t, resp.Batch.Transactions) - // DA height should be incremented - 
assert.Equal(t, uint64(101), seq.GetDAHeight()) + // DA height should NOT increment on ErrHeightFromFuture - we wait for DA to catch up + assert.Equal(t, uint64(100), seq.GetDAHeight()) mockRetriever.AssertExpectations(t) } @@ -399,22 +399,6 @@ func TestBasedSequencer_SetDAHeight(t *testing.T) { assert.Equal(t, uint64(200), seq.GetDAHeight()) } -func TestBasedSequencer_ClearQueue(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ChainID: "test-chain"} - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Pre-populate the queue - seq.txQueue = [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")} - assert.Equal(t, 3, len(seq.txQueue)) - - seq.ClearQueue() - assert.Equal(t, 0, len(seq.txQueue)) -} - func TestBasedSequencer_ConcurrentAccess(t *testing.T) { mockRetriever := new(MockDARetriever) mockDA := new(MockDA) diff --git a/sequencers/single/go.mod b/sequencers/single/go.mod index 8735545298..d7bd82f09b 100644 --- a/sequencers/single/go.mod +++ b/sequencers/single/go.mod @@ -68,7 +68,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect diff --git a/sequencers/single/go.sum b/sequencers/single/go.sum index 5262ff27e1..e53b045413 100644 --- a/sequencers/single/go.sum +++ b/sequencers/single/go.sum @@ -221,8 +221,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp 
v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index dbc5bc567c..f1b5dd8eb6 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -5,6 +5,8 @@ import ( "context" "errors" "fmt" + "strings" + "sync" "time" ds "github.com/ipfs/go-datastore" @@ -12,13 +14,45 @@ import ( coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/genesis" ) -// ErrInvalidId is returned when the chain id is invalid var ( + // ErrInvalidId is returned when the chain id is invalid ErrInvalidId = errors.New("invalid chain id") + // ErrForceInclusionNotConfigured is returned when forced inclusion is not configured + ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") ) +// ForcedInclusionEvent represents forced inclusion transactions retrieved from DA +type ForcedInclusionEvent = struct { + Txs [][]byte + StartDaHeight uint64 + EndDaHeight uint64 +} + +// DARetriever defines the interface for retrieving forced inclusion transactions from DA +type DARetriever interface { + RetrieveForcedIncludedTxsFromDA(ctx 
context.Context, daHeight uint64) (*ForcedInclusionEvent, error) +} + +// DARetrieverAdapter adapts any retriever that returns a compatible event type +type DARetrieverAdapter struct { + retrieveFunc func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) +} + +// NewDARetrieverAdapter creates a new adapter with a custom retrieval function +func NewDARetrieverAdapter(retrieveFunc func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error)) *DARetrieverAdapter { + return &DARetrieverAdapter{ + retrieveFunc: retrieveFunc, + } +} + +// RetrieveForcedIncludedTxsFromDA implements the DARetriever interface +func (a *DARetrieverAdapter) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { + return a.retrieveFunc(ctx, daHeight) +} + var _ coresequencer.Sequencer = &Sequencer{} // Sequencer implements core sequencing interface @@ -35,6 +69,12 @@ type Sequencer struct { queue *BatchQueue // single queue for immediate availability metrics *Metrics + + // Forced inclusion support + daRetriever DARetriever + genesis genesis.Genesis + mu sync.RWMutex + daHeight uint64 } // NewSequencer creates a new Single Sequencer @@ -47,30 +87,21 @@ func NewSequencer( batchTime time.Duration, metrics *Metrics, proposer bool, -) (*Sequencer, error) { - return NewSequencerWithQueueSize(ctx, logger, db, da, id, batchTime, metrics, proposer, 1000) -} - -// NewSequencerWithQueueSize creates a new Single Sequencer with configurable queue size -func NewSequencerWithQueueSize( - ctx context.Context, - logger zerolog.Logger, - db ds.Batching, - da coreda.DA, - id []byte, - batchTime time.Duration, - metrics *Metrics, - proposer bool, maxQueueSize int, + daRetriever DARetriever, + gen genesis.Genesis, ) (*Sequencer, error) { s := &Sequencer{ - logger: logger, - da: da, - batchTime: batchTime, - Id: id, - queue: NewBatchQueue(db, "batches", maxQueueSize), - metrics: metrics, - proposer: proposer, + logger: logger, + da: 
da, + batchTime: batchTime, + Id: id, + queue: NewBatchQueue(db, "batches", maxQueueSize), + metrics: metrics, + proposer: proposer, + daRetriever: daRetriever, + genesis: gen, + daHeight: gen.DAStartHeight, } loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -117,14 +148,77 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB return nil, ErrInvalidId } + // Retrieve forced inclusion transactions if DARetriever is configured + var forcedTxs [][]byte + if c.daRetriever != nil { + c.mu.Lock() + currentDAHeight := c.daHeight + c.mu.Unlock() + + forcedEvent, err := c.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, currentDAHeight) + if err != nil { + // If forced inclusion is not configured, continue without forced txs + if !strings.Contains(err.Error(), ErrForceInclusionNotConfigured.Error()) { + // If we get a height from future error, keep the current DA height and return batch + // We'll retry the same height on the next call until DA produces that block + if errors.Is(err, coreda.ErrHeightFromFuture) { + c.logger.Debug(). + Uint64("da_height", currentDAHeight). + Msg("DA height from future, waiting for DA to produce block") + + batch, err := c.queue.Next(ctx) + if err != nil { + return nil, err + } + + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } + + c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + // Continue without forced txs on other errors + } + } else { + forcedTxs = forcedEvent.Txs + + // Update DA height based on the retrieved event + c.mu.Lock() + if forcedEvent.EndDaHeight > c.daHeight { + c.daHeight = forcedEvent.EndDaHeight + } else if forcedEvent.StartDaHeight > c.daHeight { + c.daHeight = forcedEvent.StartDaHeight + } + c.mu.Unlock() + + c.logger.Info(). + Int("tx_count", len(forcedEvent.Txs)). 
+ Uint64("da_height_start", forcedEvent.StartDaHeight). + Uint64("da_height_end", forcedEvent.EndDaHeight). + Msg("retrieved forced inclusion transactions from DA") + } + } + batch, err := c.queue.Next(ctx) if err != nil { return nil, err } + // Prepend forced inclusion transactions to the batch + if len(forcedTxs) > 0 { + batch.Transactions = append(forcedTxs, batch.Transactions...) + c.logger.Debug(). + Int("forced_tx_count", len(forcedTxs)). + Int("total_tx_count", len(batch.Transactions)). + Msg("prepended forced inclusion transactions to batch") + } + return &coresequencer.GetNextBatchResponse{ Batch: batch, Timestamp: time.Now(), + BatchData: req.LastBatchData, }, nil } @@ -171,3 +265,19 @@ func (c *Sequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBat func (c *Sequencer) isValid(Id []byte) bool { return bytes.Equal(c.Id, Id) } + +// SetDAHeight sets the current DA height for the sequencer +// This should be called when the sequencer needs to sync to a specific DA height +func (c *Sequencer) SetDAHeight(height uint64) { + c.mu.Lock() + defer c.mu.Unlock() + c.daHeight = height + c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +} + +// GetDAHeight returns the current DA height +func (c *Sequencer) GetDAHeight() uint64 { + c.mu.RLock() + defer c.mu.RUnlock() + return c.daHeight +} diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 585339bcd2..1794d0d325 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -15,6 +15,7 @@ import ( coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/genesis" damocks "github.com/evstack/ev-node/test/mocks" ) @@ -26,7 +27,7 @@ func TestNewSequencer(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, 
[]byte("test1"), 10*time.Second, metrics, false) + seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, nil, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -59,7 +60,7 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false) + seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, nil, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -112,7 +113,7 @@ func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false) + seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, nil, genesis.Genesis{}) require.NoError(t, err, "Failed to create sequencer") defer func() { err := db.Close() @@ -385,7 +386,7 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false) + seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false, 1000, nil, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -643,7 +644,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { // Create sequencer with small queue size to trigger throttling quickly queueSize := 3 // Small for testing logger := zerolog.Nop() - seq, err := NewSequencerWithQueueSize( + seq, err := NewSequencer( context.Background(), logger, db, @@ -653,6 +654,8 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t 
*testing.T) { nil, // metrics true, // proposer queueSize, + nil, // daRetriever + genesis.Genesis{}, // genesis ) require.NoError(t, err) diff --git a/test/mocks/sequencer.go b/test/mocks/sequencer.go index c3894f846b..072d9a731f 100644 --- a/test/mocks/sequencer.go +++ b/test/mocks/sequencer.go @@ -106,6 +106,46 @@ func (_c *MockSequencer_GetNextBatch_Call) RunAndReturn(run func(ctx context.Con return _c } +// SetDAHeight provides a mock function for the type MockSequencer +func (_mock *MockSequencer) SetDAHeight(height uint64) { + _mock.Called(height) + return +} + +// MockSequencer_SetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDAHeight' +type MockSequencer_SetDAHeight_Call struct { + *mock.Call +} + +// SetDAHeight is a helper method to define mock.On call +// - height uint64 +func (_e *MockSequencer_Expecter) SetDAHeight(height interface{}) *MockSequencer_SetDAHeight_Call { + return &MockSequencer_SetDAHeight_Call{Call: _e.mock.On("SetDAHeight", height)} +} + +func (_c *MockSequencer_SetDAHeight_Call) Run(run func(height uint64)) *MockSequencer_SetDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockSequencer_SetDAHeight_Call) Return() *MockSequencer_SetDAHeight_Call { + _c.Call.Return() + return _c +} + +func (_c *MockSequencer_SetDAHeight_Call) RunAndReturn(run func(height uint64)) *MockSequencer_SetDAHeight_Call { + _c.Run(run) + return _c +} + // SubmitBatchTxs provides a mock function for the type MockSequencer func (_mock *MockSequencer) SubmitBatchTxs(ctx context.Context, req sequencer.SubmitBatchTxsRequest) (*sequencer.SubmitBatchTxsResponse, error) { ret := _mock.Called(ctx, req) From 4f808c8878460e1ff847816405344bff6e08db77 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 5 Nov 2025 16:56:48 +0100 Subject: [PATCH 09/39] simplify --- 
apps/evm/single/cmd/run.go | 35 ++++++---------------------------- apps/grpc/single/cmd/run.go | 34 ++++++--------------------------- apps/testapp/cmd/run.go | 38 ++++++++----------------------------- 3 files changed, 20 insertions(+), 87 deletions(-) diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index df4c880b0d..6efcf4309f 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -113,18 +113,18 @@ func createSequencer( nodeConfig config.Config, genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { + daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) + if nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA if !nodeConfig.Node.Aggregator { return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } - - adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) logger.Info(). 
@@ -140,29 +140,6 @@ func createSequencer( return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) } - // Create DA retriever for forced inclusion support - var daRetriever single.DARetriever - if nodeConfig.Node.Aggregator { - commonDARetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } - - // Adapter function to convert between common and single event types - adapterFunc := func(ctx context.Context, daHeight uint64) (*single.ForcedInclusionEvent, error) { - event, err := commonDARetriever.RetrieveForcedIncludedTxsFromDA(ctx, daHeight) - if err != nil { - return nil, err - } - return &single.ForcedInclusionEvent{ - Txs: event.Txs, - StartDaHeight: event.StartDaHeight, - EndDaHeight: event.EndDaHeight, - }, nil - } - daRetriever = single.NewDARetrieverAdapter(adapterFunc) - } - sequencer, err := single.NewSequencer( ctx, logger, diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index 41e4fbf927..bd67639b9a 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -122,18 +122,18 @@ func createSequencer( nodeConfig config.Config, genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { + daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) + if nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA if !nodeConfig.Node.Aggregator { return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } - - adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) 
basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) logger.Info(). @@ -149,28 +149,6 @@ func createSequencer( return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) } - // Create DA retriever for forced inclusion support - var daRetriever single.DARetriever - if nodeConfig.Node.Aggregator { - commonDARetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } - // Adapter function to convert between common and single event types - adapterFunc := func(ctx context.Context, daHeight uint64) (*single.ForcedInclusionEvent, error) { - event, err := commonDARetriever.RetrieveForcedIncludedTxsFromDA(ctx, daHeight) - if err != nil { - return nil, err - } - return &single.ForcedInclusionEvent{ - Txs: event.Txs, - StartDaHeight: event.StartDaHeight, - EndDaHeight: event.EndDaHeight, - }, nil - } - daRetriever = single.NewDARetrieverAdapter(adapterFunc) - } - sequencer, err := single.NewSequencer( ctx, logger, diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index c6bcb66937..8bb1403173 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -17,6 +17,7 @@ import ( "github.com/evstack/ev-node/node" "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" genesispkg "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" @@ -117,20 +118,20 @@ func createSequencer( datastore datastore.Batching, da da.DA, nodeConfig config.Config, - genesis genesispkg.Genesis, + genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { + daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) + if err != nil { + return nil, fmt.Errorf("failed to create DA retriever: %w", err) + } + adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) + if 
nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA if !nodeConfig.Node.Aggregator { return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } - - adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) logger.Info(). @@ -146,29 +147,6 @@ func createSequencer( return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) } - // Create DA retriever for forced inclusion support - var daRetriever single.DARetriever - if nodeConfig.Node.Aggregator { - commonDARetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } - - // Adapter function to convert between common and single event types - adapterFunc := func(ctx context.Context, daHeight uint64) (*single.ForcedInclusionEvent, error) { - event, err := commonDARetriever.RetrieveForcedIncludedTxsFromDA(ctx, daHeight) - if err != nil { - return nil, err - } - return &single.ForcedInclusionEvent{ - Txs: event.Txs, - StartDaHeight: event.StartDaHeight, - EndDaHeight: event.EndDaHeight, - }, nil - } - daRetriever = single.NewDARetrieverAdapter(adapterFunc) - } - sequencer, err := single.NewSequencer( ctx, logger, From 0c06e1c32fdb6b22a2d187c16e52f9b3772874af Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 5 Nov 2025 17:05:50 +0100 Subject: [PATCH 10/39] fix --- block/internal/executing/executor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index ddfaa82e63..9657e33202 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ 
-204,6 +204,7 @@ func (e *Executor) initializeState() error { } e.setLastState(state) + e.sequencer.SetDAHeight(state.DAHeight) // Initialize store height using batch for atomicity batch, err := e.store.NewBatch(e.ctx) @@ -439,10 +440,9 @@ func (e *Executor) produceBlock() error { // retrieveBatch gets the next batch of transactions from the sequencer func (e *Executor) retrieveBatch(ctx context.Context) (*BatchData, error) { - currentState := e.getLastState() // Update sequencer's DA height for forced inclusion tracking - e.sequencer.SetDAHeight(currentState.DAHeight) + // e.sequencer.SetDAHeight(currentState.DAHeight) req := coresequencer.GetNextBatchRequest{ Id: []byte(e.genesis.ChainID), From 0a7c258b8fd7ce4b7001df978b3541b379a3c180 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 6 Nov 2025 14:06:41 +0100 Subject: [PATCH 11/39] refactor: remove adaptor + add comments (feedback 1/n) --- apps/evm/single/cmd/run.go | 3 +-- apps/grpc/single/cmd/run.go | 3 +-- apps/testapp/cmd/run.go | 3 +-- sequencers/based/based.go | 19 ++----------------- sequencers/based/based_test.go | 20 -------------------- sequencers/single/sequencer.go | 17 ----------------- 6 files changed, 5 insertions(+), 60 deletions(-) diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 6efcf4309f..df35bbbcd0 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -117,7 +117,6 @@ func createSequencer( if err != nil { return nil, fmt.Errorf("failed to create DA retriever: %w", err) } - adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) if nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA @@ -125,7 +124,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) + basedSeq := based.NewBasedSequencer(daRetriever, da, nodeConfig, genesis, logger) 
logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index bd67639b9a..2ac93dec0e 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -126,7 +126,6 @@ func createSequencer( if err != nil { return nil, fmt.Errorf("failed to create DA retriever: %w", err) } - adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) if nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA @@ -134,7 +133,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) + basedSeq := based.NewBasedSequencer(daRetriever, da, nodeConfig, genesis, logger) logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index 8bb1403173..14b9cd3fa5 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -124,7 +124,6 @@ func createSequencer( if err != nil { return nil, fmt.Errorf("failed to create DA retriever: %w", err) } - adapter := based.NewDARetrieverAdapter(daRetriever.RetrieveForcedIncludedTxsFromDA) if nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA @@ -132,7 +131,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq := based.NewBasedSequencer(adapter, da, nodeConfig, genesis, logger) + basedSeq := based.NewBasedSequencer(daRetriever, da, nodeConfig, genesis, logger) logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
diff --git a/sequencers/based/based.go b/sequencers/based/based.go index a5cfee0af6..c45cf40afa 100644 --- a/sequencers/based/based.go +++ b/sequencers/based/based.go @@ -31,23 +31,6 @@ type DARetriever interface { RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) } -// DARetrieverAdapter adapts any retriever that returns a compatible event type -type DARetrieverAdapter struct { - retrieveFunc func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) -} - -// NewDARetrieverAdapter creates a new adapter with a custom retrieval function -func NewDARetrieverAdapter(retrieveFunc func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error)) *DARetrieverAdapter { - return &DARetrieverAdapter{ - retrieveFunc: retrieveFunc, - } -} - -// RetrieveForcedIncludedTxsFromDA implements the DARetriever interface -func (a *DARetrieverAdapter) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { - return a.retrieveFunc(ctx, daHeight) -} - // BasedSequencer is a sequencer that only retrieves transactions from the DA layer // via the forced inclusion mechanism. It does not accept transactions from the reaper. type BasedSequencer struct { @@ -126,6 +109,8 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get }, nil } + // If we get a height from future error, keep the current DA height and return batch + // We'll retry the same height on the next call until DA produces that block if errors.Is(err, coreda.ErrHeightFromFuture) { s.logger.Debug(). Uint64("da_height", s.daHeight). 
diff --git a/sequencers/based/based_test.go b/sequencers/based/based_test.go index 8e71aebc86..dc4a1b7287 100644 --- a/sequencers/based/based_test.go +++ b/sequencers/based/based_test.go @@ -452,26 +452,6 @@ func TestBasedSequencer_ConcurrentAccess(t *testing.T) { } } -func TestDARetrieverAdapter(t *testing.T) { - called := false - expectedEvent := &ForcedInclusionEvent{ - Txs: [][]byte{[]byte("tx1")}, - StartDaHeight: 100, - EndDaHeight: 105, - } - - adapter := NewDARetrieverAdapter(func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { - called = true - assert.Equal(t, uint64(100), daHeight) - return expectedEvent, nil - }) - - event, err := adapter.RetrieveForcedIncludedTxsFromDA(context.Background(), 100) - require.NoError(t, err) - assert.True(t, called) - assert.Equal(t, expectedEvent, event) -} - func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { mockRetriever := new(MockDARetriever) mockDA := new(MockDA) diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index f1b5dd8eb6..df9842f0d5 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -36,23 +36,6 @@ type DARetriever interface { RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) } -// DARetrieverAdapter adapts any retriever that returns a compatible event type -type DARetrieverAdapter struct { - retrieveFunc func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) -} - -// NewDARetrieverAdapter creates a new adapter with a custom retrieval function -func NewDARetrieverAdapter(retrieveFunc func(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error)) *DARetrieverAdapter { - return &DARetrieverAdapter{ - retrieveFunc: retrieveFunc, - } -} - -// RetrieveForcedIncludedTxsFromDA implements the DARetriever interface -func (a *DARetrieverAdapter) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, 
error) { - return a.retrieveFunc(ctx, daHeight) -} - var _ coresequencer.Sequencer = &Sequencer{} // Sequencer implements core sequencing interface From 2d6d933344c6d920bf9d6d53c48edfbd1bd2af4f Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 6 Nov 2025 14:15:25 +0100 Subject: [PATCH 12/39] fix: add description error and halt node (feedback 2/n) --- block/internal/syncing/syncer.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 66bdcae6ab..c7e1ce4d2d 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -398,6 +398,8 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { switch { case errors.Is(err, errInvalidBlock): // do not reschedule + case errors.Is(err, errMaliciousProposer): + s.sendCriticalError(fmt.Errorf("Sequencer malicious. Restart the node with --node.aggregator --node.based_sequencer or keep the chain halted: %w", err)) case errors.Is(err, errInvalidState): s.sendCriticalError(fmt.Errorf("invalid state detected (block-height %d, state-height %d) "+ "- block references do not match local state. 
Manual intervention required: %w", event.Header.Height(), @@ -467,7 +469,6 @@ func (s *Syncer) trySyncNextBlock(event *common.DAHeightEvent) error { if err := s.verifyForcedInclusionTxs(currentState, data); err != nil { s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("forced inclusion verification failed") if errors.Is(err, errMaliciousProposer) { - s.logger.Error().Msg("Restart with based sequencer.") s.cache.RemoveHeaderDAIncluded(headerHash) return err } From 1bef7d91dc61c9b56795dc704d08a553f99f3d8d Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 6 Nov 2025 17:31:57 +0100 Subject: [PATCH 13/39] refactor: add limits check and improvements (feedback 3/n) --- block/internal/syncing/da_retriever.go | 48 +++++++++++++++++++++++--- block/internal/syncing/syncer.go | 6 ++-- sequencers/based/based.go | 22 ++++++------ 3 files changed, 57 insertions(+), 19 deletions(-) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 56c289101f..7497968583 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -97,8 +97,13 @@ func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co return r.processBlobs(ctx, blobsResp.Data, daHeight), nil } -// ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. -var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") +var ( + // ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. + ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") + + // ErrForcedInclusionDataTooLarge is returned when forced inclusion data exceeds the maximum blob size. + ErrForcedInclusionDataTooLarge = errors.New("forced inclusion data exceeds maximum blob size limit") +) // RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. 
// It fetches from the daHeight for the da epoch range defined in the config. @@ -113,6 +118,9 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei r.logger.Debug().Uint64("da_height", daHeight).Uint64("range", r.daEpochSize).Msg("retrieving forced included transactions from DA") + var currentSize int + lastProcessedHeight := daHeight + for epochHeight := daHeight + 1; epochHeight <= daHeight+r.daEpochSize; epochHeight++ { result := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochHeight, r.namespaceForcedInclusionBz, defaultDATimeout) @@ -126,11 +134,43 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei return nil, err } - event.StartDaHeight = epochHeight - event.Txs = append(event.Txs, result.Data...) + for i, data := range result.Data { + if len(data) > common.DefaultMaxBlobSize { + r.logger.Debug(). + Uint64("da_height", epochHeight). + Int("index", i). + Uint64("blob_size", uint64(len(data))). + Msg("Following data exceeds maximum blob size. 
Skipping...") + continue + } + + // Calculate size of new data + var newDataSize int + for _, data := range result.Data { + newDataSize += len(data) + } + + // Check if adding this data would exceed max blob size + if currentSize+newDataSize > common.DefaultMaxBlobSize { + r.logger.Warn().Msg("forced inclusion data exceeds maximum blob size - reduce ForcedInclusionDAEpoch configuration if this happens often") + + // TODO(@julienrbrt): we need to keep track of which that haven't been included, so they are retried in the next epoch + + continue + } + + event.Txs = append(event.Txs, data) + currentSize += newDataSize + lastProcessedHeight = epochHeight + } + } } + // Set the DA height range based on what we actually processed + event.StartDaHeight = daHeight + event.EndDaHeight = lastProcessedHeight + return event, nil } diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index c7e1ce4d2d..863783bf8e 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -616,15 +616,15 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. 
return nil } - blockTxMap := make(map[string]bool) + blockTxMap := make(map[string]struct{}) for _, tx := range data.Txs { - blockTxMap[string(tx)] = true + blockTxMap[string(tx)] = struct{}{} } // Check if all forced inclusion transactions are present in the block var missingTxs [][]byte for _, forcedTx := range forcedIncludedTxsEvent.Txs { - if !blockTxMap[string(forcedTx)] { + if _, ok := blockTxMap[string(forcedTx)]; !ok { missingTxs = append(missingTxs, forcedTx) } } diff --git a/sequencers/based/based.go b/sequencers/based/based.go index c45cf40afa..3d0fba7c32 100644 --- a/sequencers/based/based.go +++ b/sequencers/based/based.go @@ -40,10 +40,9 @@ type BasedSequencer struct { genesis genesis.Genesis logger zerolog.Logger - mu sync.RWMutex - currentHeight uint64 - daHeight uint64 - txQueue [][]byte + mu sync.RWMutex + daHeight uint64 + txQueue [][]byte } // NewBasedSequencer creates a new based sequencer instance @@ -55,14 +54,13 @@ func NewBasedSequencer( logger zerolog.Logger, ) *BasedSequencer { return &BasedSequencer{ - daRetriever: daRetriever, - da: da, - config: config, - genesis: genesis, - logger: logger.With().Str("component", "based_sequencer").Logger(), - currentHeight: 0, - daHeight: genesis.DAStartHeight, - txQueue: make([][]byte, 0), + daRetriever: daRetriever, + da: da, + config: config, + genesis: genesis, + logger: logger.With().Str("component", "based_sequencer").Logger(), + daHeight: genesis.DAStartHeight, + txQueue: make([][]byte, 0), } } From c417d0a44553d10ae9b8431291b70c351a350c01 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 6 Nov 2025 17:50:08 +0100 Subject: [PATCH 14/39] test: add max size test --- block/internal/syncing/da_retriever_test.go | 111 ++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index ae6e73197a..78202cdcb4 100644 --- a/block/internal/syncing/da_retriever_test.go +++ 
b/block/internal/syncing/da_retriever_test.go @@ -504,3 +504,114 @@ func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { require.NotNil(t, result) require.Empty(t, result.Txs) } + +func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 3 // Test with multiple epochs + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create signed data blobs that will exceed DefaultMaxBlobSize when accumulated + // DefaultMaxBlobSize is 1.5MB = 1,572,864 bytes + // Each 700KB tx becomes ~719KB blob, so 2 blobs = ~1.44MB (fits), 3 blobs = ~2.16MB (exceeds) + d1 := &types.Data{ + Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 10, Time: uint64(time.Now().UnixNano())}, + Txs: make(types.Txs, 1), + } + d1.Txs[0] = make([]byte, 700*1024) // 700KB transaction + + payload1, err := d1.MarshalBinary() + require.NoError(t, err) + sig1, err := signer.Sign(payload1) + require.NoError(t, err) + sd1 := &types.SignedData{Data: *d1, Signature: sig1, Signer: types.Signer{PubKey: pub, Address: addr}} + dataBin1, err := sd1.MarshalBinary() + require.NoError(t, err) + + d2 := &types.Data{ + Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 11, Time: uint64(time.Now().UnixNano())}, + Txs: make(types.Txs, 1), + } + d2.Txs[0] = make([]byte, 700*1024) // 700KB transaction + + payload2, err := d2.MarshalBinary() + require.NoError(t, err) + sig2, err := signer.Sign(payload2) + require.NoError(t, err) + sd2 := 
&types.SignedData{Data: *d2, Signature: sig2, Signer: types.Signer{PubKey: pub, Address: addr}} + dataBin2, err := sd2.MarshalBinary() + require.NoError(t, err) + + d3 := &types.Data{ + Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 12, Time: uint64(time.Now().UnixNano())}, + Txs: make(types.Txs, 1), + } + d3.Txs[0] = make([]byte, 700*1024) // 700KB transaction + + payload3, err := d3.MarshalBinary() + require.NoError(t, err) + sig3, err := signer.Sign(payload3) + require.NoError(t, err) + sd3 := &types.SignedData{Data: *d3, Signature: sig3, Signer: types.Signer{PubKey: pub, Address: addr}} + dataBin3, err := sd3.MarshalBinary() + require.NoError(t, err) + + mockDA := testmocks.NewMockDA(t) + + // First epoch - should succeed + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1001), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1}, nil).Once() + + // Second epoch - should succeed + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1002), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi2")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin2}, nil).Once() + + // Third epoch - should be retrieved but cause error due to size limit + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1003), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi3")}, Timestamp: time.Now()}, nil).Once() + + 
mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin3}, nil).Once() + + r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) + + // Should succeed but skip the third blob due to size limit (using continue) + require.NoError(t, err) + require.NotNil(t, result) + + // Should only have 2 transactions, third is skipped due to size + require.Len(t, result.Txs, 2) + assert.Equal(t, dataBin1, result.Txs[0]) + assert.Equal(t, dataBin2, result.Txs[1]) + + // Verify total size is within limits + totalSize := len(dataBin1) + len(dataBin2) + assert.LessOrEqual(t, totalSize, int(common.DefaultMaxBlobSize)) + + // Verify that adding the third would have exceeded the limit + totalSizeWithThird := totalSize + len(dataBin3) + assert.Greater(t, totalSizeWithThird, int(common.DefaultMaxBlobSize)) +} From e0dc6b25ae6c007f0555f956591674f954d86fb0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 6 Nov 2025 21:44:08 +0100 Subject: [PATCH 15/39] feat: add epoch calculation, set sequencer height, improve fetching --- block/internal/executing/executor.go | 7 +- block/internal/syncing/da_retriever.go | 187 ++++++-- block/internal/syncing/da_retriever_test.go | 403 +++++++++++++++++- .../syncing/syncer_forced_inclusion_test.go | 27 +- core/sequencer/dummy.go | 5 + core/sequencer/sequencing.go | 3 + sequencers/based/based.go | 2 + sequencers/single/sequencer.go | 2 +- test/mocks/sequencer.go | 44 ++ 9 files changed, 609 insertions(+), 71 deletions(-) diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 9657e33202..a3c29a1131 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -380,6 +380,9 @@ func (e *Executor) produceBlock() error { return fmt.Errorf("failed to apply block: %w", err) } + // set the DA 
height in the sequencer + newState.DAHeight = e.sequencer.GetDAHeight() + // signing the header is done after applying the block // as for signing, the state of the block may be required by the signature payload provider. signature, err := e.signHeader(header.Header) @@ -440,10 +443,6 @@ func (e *Executor) produceBlock() error { // retrieveBatch gets the next batch of transactions from the sequencer func (e *Executor) retrieveBatch(ctx context.Context) (*BatchData, error) { - - // Update sequencer's DA height for forced inclusion tracking - // e.sequencer.SetDAHeight(currentState.DAHeight) - req := coresequencer.GetNextBatchRequest{ Id: []byte(e.genesis.ChainID), MaxBytes: common.DefaultMaxBlobSize, diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 7497968583..1fed543d96 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -106,74 +106,179 @@ var ( ) // RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. -// It fetches from the daHeight for the da epoch range defined in the config. +// It only fetches when daHeight is at the start of an epoch to prevent redundant fetching. +// Returns an error if the epoch start height is not yet available on DA (caller should backoff). func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { if !r.hasForcedInclusionNs { return nil, ErrForceInclusionNotConfigured } + // Calculate deterministic epoch boundaries + epochStart, epochEnd := r.calculateEpochBoundaries(daHeight) + + // Only fetch at epoch start to prevent double fetching as DA height progresses + if daHeight != epochStart { + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). 
+ Msg("skipping forced inclusion fetch - not at epoch start") + return &common.ForcedIncludedEvent{ + StartDaHeight: daHeight, + EndDaHeight: daHeight, + Txs: [][]byte{}, + }, nil + } + event := &common.ForcedIncludedEvent{ - StartDaHeight: daHeight, + StartDaHeight: epochStart, + } + + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Uint64("epoch_num", r.calculateEpochNumber(daHeight)). + Msg("retrieving forced included transactions from DA") + + // Check if both epoch start and end are available before fetching + // This ensures we can retrieve the complete epoch in one go + epochStartResult := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochStart, r.namespaceForcedInclusionBz, defaultDATimeout) + if epochStartResult.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_start", epochStart). + Msg("epoch start height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart) } - r.logger.Debug().Uint64("da_height", daHeight).Uint64("range", r.daEpochSize).Msg("retrieving forced included transactions from DA") + epochEndResult := epochStartResult + if epochStart != epochEnd { + epochEndResult = types.RetrieveWithHelpers(ctx, r.da, r.logger, epochEnd, r.namespaceForcedInclusionBz, defaultDATimeout) + if epochEndResult.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_end", epochEnd). 
+ Msg("epoch end height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd) + } + } var currentSize int - lastProcessedHeight := daHeight + lastProcessedHeight := epochStart + + // Process epoch start + if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, epochStartResult, epochStart); err != nil { + return nil, err + } - for epochHeight := daHeight + 1; epochHeight <= daHeight+r.daEpochSize; epochHeight++ { + // Process heights between start and end (exclusive) + for epochHeight := epochStart + 1; epochHeight < epochEnd; epochHeight++ { result := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochHeight, r.namespaceForcedInclusionBz, defaultDATimeout) - // quickly break if we are too ahead. + // If any intermediate height is from future, break early if result.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_height", epochHeight). + Uint64("last_processed", lastProcessedHeight). + Msg("reached future DA height within epoch - stopping") break } - if result.Code == coreda.StatusSuccess { - if err := r.validateBlobResponse(result, epochHeight); !errors.Is(err, coreda.ErrBlobNotFound) && err != nil { - return nil, err - } - - for i, data := range result.Data { - if len(data) > common.DefaultMaxBlobSize { - r.logger.Debug(). - Uint64("da_height", epochHeight). - Int("index", i). - Uint64("blob_size", uint64(len(data))). - Msg("Following data exceeds maximum blob size. 
Skipping...") - continue - } - - // Calculate size of new data - var newDataSize int - for _, data := range result.Data { - newDataSize += len(data) - } - - // Check if adding this data would exceed max blob size - if currentSize+newDataSize > common.DefaultMaxBlobSize { - r.logger.Warn().Msg("forced inclusion data exceeds maximum blob size - reduce ForcedInclusionDAEpoch configuration if this happens often") - - // TODO(@julienrbrt): we need to keep track of which that haven't been included, so they are retried in the next epoch - - continue - } - - event.Txs = append(event.Txs, data) - currentSize += newDataSize - lastProcessedHeight = epochHeight - } + if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, result, epochHeight); err != nil { + return nil, err + } + } + // Process epoch end (only if different from start) + if epochEnd != epochStart { + if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, epochEndResult, epochEnd); err != nil { + return nil, err } } // Set the DA height range based on what we actually processed - event.StartDaHeight = daHeight + event.StartDaHeight = epochStart event.EndDaHeight = lastProcessedHeight return event, nil } +// processForcedInclusionBlobs processes blobs from a DA retrieval result and adds them to the event +func (r *daRetriever) processForcedInclusionBlobs( + event *common.ForcedIncludedEvent, + currentSize *int, + lastProcessedHeight *uint64, + result coreda.ResultRetrieve, + daHeight uint64, +) error { + if result.Code != coreda.StatusSuccess { + return nil + } + + if err := r.validateBlobResponse(result, daHeight); !errors.Is(err, coreda.ErrBlobNotFound) && err != nil { + return err + } + + for i, data := range result.Data { + if len(data) > common.DefaultMaxBlobSize { + r.logger.Debug(). + Uint64("da_height", daHeight). + Int("index", i). + Uint64("blob_size", uint64(len(data))). + Msg("Following data exceeds maximum blob size. 
Skipping...") + continue + } + + // Calculate size of this specific data item + dataSize := len(data) + + // Check if adding this data would exceed max blob size + if *currentSize+dataSize > common.DefaultMaxBlobSize { + r.logger.Warn().Msg("forced inclusion data exceeds maximum blob size - reduce ForcedInclusionDAEpoch configuration if this happens often") + + // TODO(@julienrbrt): we need to keep track of which that haven't been included, so they are retried in the next epoch + + continue + } + + event.Txs = append(event.Txs, data) + *currentSize += dataSize + *lastProcessedHeight = daHeight + } + + return nil +} + +// calculateEpochNumber returns the deterministic epoch number for a given DA height. +// Epoch 1 starts at DAStartHeight. +func (r *daRetriever) calculateEpochNumber(daHeight uint64) uint64 { + if daHeight < r.genesis.DAStartHeight { + return 0 + } + + if r.daEpochSize == 0 { + return 1 + } + + return ((daHeight - r.genesis.DAStartHeight) / r.daEpochSize) + 1 +} + +// calculateEpochBoundaries returns the start and end DA heights for the epoch +// containing the given DA height. The boundaries are inclusive. 
+func (r *daRetriever) calculateEpochBoundaries(daHeight uint64) (start, end uint64) { + if daHeight < r.genesis.DAStartHeight { + return r.genesis.DAStartHeight, r.genesis.DAStartHeight + r.daEpochSize - 1 + } + + if r.daEpochSize == 0 { + return r.genesis.DAStartHeight, r.genesis.DAStartHeight + } + + epochNum := r.calculateEpochNumber(daHeight) + start = r.genesis.DAStartHeight + (epochNum-1)*r.daEpochSize + end = r.genesis.DAStartHeight + epochNum*r.daEpochSize - 1 + + return start, end +} + // fetchBlobs retrieves blobs from the DA layer func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { // Retrieve from both namespaces diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 78202cdcb4..f27ba373c0 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -429,22 +429,25 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 5678} // Prepare forced inclusion transaction data dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 3) cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 1 // Limit to 1 iteration for test + cfg.DA.ForcedInclusionDAEpoch = 1 // Epoch size of 1 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() mockDA := testmocks.NewMockDA(t) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(5679), mock.MatchedBy(func(ns []byte) bool { + // With DAStartHeight=5678, epoch size=1, daHeight=5678 -> epoch boundaries 
are [5678, 5678] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(5678), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + // Fetch epoch start data mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin}, nil).Once() @@ -454,7 +457,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 5678) require.NoError(t, err) require.NotNil(t, result) - require.Len(t, result.Txs, 1) + require.Len(t, result.Txs, 1) // Only fetched once since start == end assert.Equal(t, dataBin, result.Txs[0]) } @@ -465,7 +468,7 @@ func TestDARetriever_FetchForcedIncludedTxs_NoNamespaceConfigured(t *testing.T) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 0} cfg := config.DefaultConfig() // Leave ForcedInclusionNamespace empty @@ -484,16 +487,18 @@ func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 9999} cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 1 // Limit to 1 iteration for test + 
cfg.DA.ForcedInclusionDAEpoch = 1 // Epoch size of 1 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() mockDA := testmocks.NewMockDA(t) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(10000), mock.MatchedBy(func(ns []byte) bool { + // With DAStartHeight=9999, epoch size=1, daHeight=9999 -> epoch boundaries are [9999, 9999] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(9999), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() @@ -512,11 +517,11 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 1000} cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 3 // Test with multiple epochs + cfg.DA.ForcedInclusionDAEpoch = 3 // Epoch size of 3 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() @@ -567,17 +572,24 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi mockDA := testmocks.NewMockDA(t) - // First epoch - should succeed - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1001), mock.MatchedBy(func(ns []byte) bool { + // With DAStartHeight=1000, epoch size=3, daHeight=1000 -> epoch boundaries are [1000, 1002] + // Check epoch start + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: 
[][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + // Check epoch end + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1002), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi3")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin1}, nil).Once() - // Second epoch - should succeed - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1002), mock.MatchedBy(func(ns []byte) bool { + // Second height in epoch - should succeed + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1001), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi2")}, Timestamp: time.Now()}, nil).Once() @@ -585,11 +597,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin2}, nil).Once() - // Third epoch - should be retrieved but cause error due to size limit - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1003), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi3")}, Timestamp: time.Now()}, nil).Once() - + // Fetch epoch end data - should be retrieved but skipped due to size limit mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin3}, nil).Once() @@ -615,3 +623,360 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi totalSizeWithThird := totalSize + len(dataBin3) assert.Greater(t, totalSizeWithThird, int(common.DefaultMaxBlobSize)) } + +func TestDARetriever_CalculateEpochNumber(t 
*testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + daHeight uint64 + expectedEpoch uint64 + }{ + { + name: "first epoch - start height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 100, + expectedEpoch: 1, + }, + { + name: "first epoch - middle", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 105, + expectedEpoch: 1, + }, + { + name: "first epoch - last height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 109, + expectedEpoch: 1, + }, + { + name: "second epoch - start", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 110, + expectedEpoch: 2, + }, + { + name: "second epoch - middle", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 115, + expectedEpoch: 2, + }, + { + name: "tenth epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 195, + expectedEpoch: 10, + }, + { + name: "before start height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 50, + expectedEpoch: 0, + }, + { + name: "zero epoch size", + daStartHeight: 100, + daEpochSize: 0, + daHeight: 200, + expectedEpoch: 1, + }, + { + name: "large epoch size", + daStartHeight: 1000, + daEpochSize: 1000, + daHeight: 2500, + expectedEpoch: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + mockDA := testmocks.NewMockDA(t) + gen := genesis.Genesis{DAStartHeight: tt.daStartHeight} + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionDAEpoch = tt.daEpochSize + + r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + epoch := r.calculateEpochNumber(tt.daHeight) + + assert.Equal(t, tt.expectedEpoch, epoch) + }) + } +} + +func TestDARetriever_CalculateEpochBoundaries(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + daHeight uint64 + expectedStart uint64 + expectedEnd 
uint64 + }{ + { + name: "first epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 105, + expectedStart: 100, + expectedEnd: 109, + }, + { + name: "second epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 110, + expectedStart: 110, + expectedEnd: 119, + }, + { + name: "third epoch - last height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 129, + expectedStart: 120, + expectedEnd: 129, + }, + { + name: "before start height returns first epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 50, + expectedStart: 100, + expectedEnd: 109, + }, + { + name: "zero epoch size", + daStartHeight: 100, + daEpochSize: 0, + daHeight: 200, + expectedStart: 100, + expectedEnd: 100, + }, + { + name: "large epoch", + daStartHeight: 1000, + daEpochSize: 1000, + daHeight: 1500, + expectedStart: 1000, + expectedEnd: 1999, + }, + { + name: "epoch boundary exact start", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 100, + expectedStart: 100, + expectedEnd: 149, + }, + { + name: "epoch boundary exact end of first epoch", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 149, + expectedStart: 100, + expectedEnd: 149, + }, + { + name: "epoch boundary exact start of second epoch", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 150, + expectedStart: 150, + expectedEnd: 199, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + mockDA := testmocks.NewMockDA(t) + gen := genesis.Genesis{DAStartHeight: tt.daStartHeight} + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionDAEpoch = tt.daEpochSize + + r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + start, end := r.calculateEpochBoundaries(tt.daHeight) + + assert.Equal(t, tt.expectedStart, start, "start height mismatch") + assert.Equal(t, tt.expectedEnd, end, "end height mismatch") + 
}) + } +} + +func TestDARetriever_RetrieveForcedIncludedTxsFromDA_NotAtEpochStart(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 100} + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 10 + + mockDA := testmocks.NewMockDA(t) + + r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + // With DAStartHeight=100, epoch size=10, daHeight=105 -> epoch boundaries are [100, 109] + // But daHeight=105 is NOT the epoch start, so it should be a no-op + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 105) + require.NoError(t, err) + require.NotNil(t, result) + require.Empty(t, result.Txs) + require.Equal(t, uint64(105), result.StartDaHeight) + require.Equal(t, uint64(105), result.EndDaHeight) +} + +func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochStartFromFuture(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 1000} + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 10 + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + mockDA := testmocks.NewMockDA(t) + // With DAStartHeight=1000, epoch size=10, daHeight=1000 -> epoch boundaries are [1000, 1009] + // Mock that height 1000 (epoch start) is from the 
future + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(nil, fmt.Errorf("%s: not yet available", coreda.ErrHeightFromFuture.Error())).Once() + + r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) + require.Error(t, err) + require.Nil(t, result) + require.True(t, errors.Is(err, coreda.ErrHeightFromFuture)) + require.Contains(t, err.Error(), "epoch start height 1000 not yet available") +} + +func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochEndFromFuture(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 1000} + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 10 + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + mockDA := testmocks.NewMockDA(t) + // With DAStartHeight=1000, epoch size=10, daHeight=1000 -> epoch boundaries are [1000, 1009] + // Epoch start is available but epoch end (1009) is from the future + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1009), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(nil, fmt.Errorf("%s: not yet available", coreda.ErrHeightFromFuture.Error())).Once() + + r := NewDARetriever(mockDA, cm, cfg, gen, 
zerolog.Nop()) + + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) + require.Error(t, err) + require.Nil(t, result) + require.True(t, errors.Is(err, coreda.ErrHeightFromFuture)) + require.Contains(t, err.Error(), "epoch end height 1009 not yet available") +} + +func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 2000} + + // Prepare forced inclusion transaction data + dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) + dataBin3, _ := makeSignedDataBytes(t, gen.ChainID, 12, addr, pub, signer, 1) + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg.DA.ForcedInclusionDAEpoch = 3 + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + mockDA := testmocks.NewMockDA(t) + + // With DAStartHeight=2000, epoch size=3, daHeight=2000 -> epoch boundaries are [2000, 2002] + // All heights available + + // Check epoch start (2000) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(2000), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + // Check epoch end (2002) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(2002), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi3")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch 
start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1}, nil).Once() + + // Fetch middle height (2001) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(2001), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi2")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin2}, nil).Once() + + // Fetch epoch end data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin3}, nil).Once() + + r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 2000) + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Txs, 3) + require.Equal(t, dataBin1, result.Txs[0]) + require.Equal(t, dataBin2, result.Txs[1]) + require.Equal(t, dataBin3, result.Txs[2]) + require.Equal(t, uint64(2000), result.StartDaHeight) + require.Equal(t, uint64(2002), result.EndDaHeight) +} diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 2ddf890197..1921a4f6ba 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -34,6 +34,7 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, + DAStartHeight: 0, } cfg := config.DefaultConfig() @@ -72,15 +73,18 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { // Create forced inclusion transaction 
blob (SignedData) in DA dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + // Fetch epoch start data mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin}, nil).Once() - // Create block data that includes the forced transaction blob as a single transaction + // Create block data that includes the forced transaction blob data := makeData(gen.ChainID, 1, 1) data.Txs[0] = types.Tx(dataBin) @@ -104,6 +108,7 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, + DAStartHeight: 0, } cfg := config.DefaultConfig() @@ -142,10 +147,13 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { // Create forced inclusion transaction blob (SignedData) in DA dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + // Fetch epoch start data 
mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin}, nil).Once() @@ -177,6 +185,7 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, + DAStartHeight: 0, } cfg := config.DefaultConfig() @@ -216,10 +225,13 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, nil).Once() + // Fetch epoch start data mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin1, dataBin2}, nil).Once() @@ -233,7 +245,7 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { currentState := s.GetLastState() currentState.DAHeight = 0 - // Verify - should fail since one forced tx is missing + // Verify - should fail since dataBin2 is missing err = s.verifyForcedInclusionTxs(currentState, data) require.Error(t, err) require.Contains(t, err.Error(), "sequencer is malicious") @@ -252,6 +264,7 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, + DAStartHeight: 0, } cfg := config.DefaultConfig() @@ -287,7 +300,9 @@ 
func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { // Mock DA to return no forced inclusion transactions namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() diff --git a/core/sequencer/dummy.go b/core/sequencer/dummy.go index 5b0e16e51e..ef614173a8 100644 --- a/core/sequencer/dummy.go +++ b/core/sequencer/dummy.go @@ -69,3 +69,8 @@ func (s *DummySequencer) VerifyBatch(ctx context.Context, req VerifyBatchRequest func (s *DummySequencer) SetDAHeight(height uint64) { // No-op for dummy sequencer } + +// GetDAHeight returns the current DA height for the sequencer +func (s *DummySequencer) GetDAHeight() uint64 { + return 0 +} diff --git a/core/sequencer/sequencing.go b/core/sequencer/sequencing.go index 6d48880dce..e97ef93dd3 100644 --- a/core/sequencer/sequencing.go +++ b/core/sequencer/sequencing.go @@ -31,6 +31,9 @@ type Sequencer interface { // SetDAHeight sets the current DA height for the sequencer // This allows the sequencer to track DA height for forced inclusion retrieval SetDAHeight(height uint64) + + // GetDAHeight returns the current DA height for the sequencer + GetDAHeight() uint64 } // Batch is a collection of transactions diff --git a/sequencers/based/based.go b/sequencers/based/based.go index 3d0fba7c32..ae64bad898 100644 --- a/sequencers/based/based.go +++ b/sequencers/based/based.go @@ -31,6 +31,8 @@ type DARetriever interface { RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, 
error) } +var _ coresequencer.Sequencer = (*BasedSequencer)(nil) + // BasedSequencer is a sequencer that only retrieves transactions from the DA layer // via the forced inclusion mechanism. It does not accept transactions from the reaper. type BasedSequencer struct { diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index df9842f0d5..ba5d92c0fd 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -36,7 +36,7 @@ type DARetriever interface { RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) } -var _ coresequencer.Sequencer = &Sequencer{} +var _ coresequencer.Sequencer = (*Sequencer)(nil) // Sequencer implements core sequencing interface type Sequencer struct { diff --git a/test/mocks/sequencer.go b/test/mocks/sequencer.go index 072d9a731f..e1ef0afb4d 100644 --- a/test/mocks/sequencer.go +++ b/test/mocks/sequencer.go @@ -38,6 +38,50 @@ func (_m *MockSequencer) EXPECT() *MockSequencer_Expecter { return &MockSequencer_Expecter{mock: &_m.Mock} } +// GetDAHeight provides a mock function for the type MockSequencer +func (_mock *MockSequencer) GetDAHeight() uint64 { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for GetDAHeight") + } + + var r0 uint64 + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + return r0 +} + +// MockSequencer_GetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDAHeight' +type MockSequencer_GetDAHeight_Call struct { + *mock.Call +} + +// GetDAHeight is a helper method to define mock.On call +func (_e *MockSequencer_Expecter) GetDAHeight() *MockSequencer_GetDAHeight_Call { + return &MockSequencer_GetDAHeight_Call{Call: _e.mock.On("GetDAHeight")} +} + +func (_c *MockSequencer_GetDAHeight_Call) Run(run func()) *MockSequencer_GetDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run() + }) + return _c +} + +func (_c *MockSequencer_GetDAHeight_Call) Return(v uint64) *MockSequencer_GetDAHeight_Call { + _c.Call.Return(v) + return _c +} + +func (_c *MockSequencer_GetDAHeight_Call) RunAndReturn(run func() uint64) *MockSequencer_GetDAHeight_Call { + _c.Call.Return(run) + return _c +} + // GetNextBatch provides a mock function for the type MockSequencer func (_mock *MockSequencer) GetNextBatch(ctx context.Context, req sequencer.GetNextBatchRequest) (*sequencer.GetNextBatchResponse, error) { ret := _mock.Called(ctx, req) From bd0a9a1d0f47a37006a2888768e5b30b025bbf27 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 6 Nov 2025 21:46:51 +0100 Subject: [PATCH 16/39] lint --- block/internal/syncing/syncer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 863783bf8e..cb3231d9a8 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -399,7 +399,7 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { case errors.Is(err, errInvalidBlock): // do not reschedule case errors.Is(err, errMaliciousProposer): - s.sendCriticalError(fmt.Errorf("Sequencer malicious. Restart the node with --node.aggregator --node.based_sequencer or keep the chain halted: %w", err)) + s.sendCriticalError(fmt.Errorf("sequencer malicious. Restart the node with --node.aggregator --node.based_sequencer or keep the chain halted: %w", err)) case errors.Is(err, errInvalidState): s.sendCriticalError(fmt.Errorf("invalid state detected (block-height %d, state-height %d) "+ "- block references do not match local state. 
Manual intervention required: %w", event.Header.Height(), From 9f03c71ed04c2b87575bac7962d93b1ec0fe2da0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 10 Nov 2025 13:09:33 +0100 Subject: [PATCH 17/39] Update adr-019-forced-inclusion-mechanism.md --- .../adr/adr-019-forced-inclusion-mechanism.md | 779 ++++++++++-------- 1 file changed, 450 insertions(+), 329 deletions(-) diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index 378dd9b17f..09127f313e 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -4,435 +4,556 @@ - 2025-03-24: Initial draft - 2025-04-23: Renumbered from ADR-018 to ADR-019 to maintain chronological order. +- 2025-11-10: Updated to reflect actual implementation ## Context -Evolve currently supports a single sequencer implementation as described in ADR-013. While this approach provides a simple and efficient solution, it introduces a single point of failure that can impact the liveness of the network. If the sequencer goes down or becomes unresponsive, the chain cannot progress. +In a single-sequencer rollup architecture, users depend entirely on the sequencer to include their transactions in blocks. This creates several problems: -To address this limitation and improve the liveness properties of applications built with Evolve, we propose implementing a forced inclusion mechanism. This mechanism will allow transactions to be included directly from the Data Availability (DA) layer when the sequencer is unresponsive, creating an "unstoppable" property for Evolve-based chains. +1. **Censorship Risk**: A malicious or coerced sequencer can selectively exclude transactions +2. **Liveness Failure**: If the sequencer goes offline, no new transactions can be processed +3. **Centralization**: Users must trust a single entity to behave honestly +4. 
**No Recourse**: Users have no alternative path to submit transactions if the sequencer refuses them -This enhancement aligns with the requirements defined in the [L2 Beat framework](https://forum.l2beat.com/t/the-stages-framework/291#p-516-stage-1-requirements-3) for Stage 1 L2s, advancing Evolve's capabilities as a robust sequencer library. +While eventual solutions like decentralized sequencer networks exist, they introduce significant complexity. We need a simpler mechanism that provides censorship resistance and liveness guarantees while maintaining the performance benefits of a single sequencer. ## Alternative Approaches ### Decentralized Sequencer -A fully decentralized sequencer could solve the liveness issue by distributing sequencing responsibilities across multiple nodes. However, this approach introduces significant complexity in terms of consensus, leader election, and coordination between nodes. It would require substantial development effort and resources, making it less suitable as an immediate solution. +A fully decentralized sequencer network would eliminate single points of failure but requires: + +- Complex consensus mechanisms +- Increased latency due to coordination +- More infrastructure and operational complexity ### Automatic Sequencer Failover -Another approach would be to implement an automatic failover mechanism where backup sequencers take over when the primary sequencer fails. While simpler than a fully decentralized solution, this approach still requires managing multiple sequencers and introduces complexity in coordination and state transfer between them. 
+Implementing automatic failover to backup sequencers when the primary goes down has significant drawbacks: -## Decision +- Requires complex monitoring and health checks +- Requires coordination between sequencers to prevent forks +- Does not solve censorship issues with a malicious sequencer -We will implement a forced inclusion mechanism for the Evolve single sequencer architecture that uses a time-based inclusion delay approach. This approach will: +## Decision -1. Track when transactions are first seen in terms of DA block time -2. Require a minimum number of DA blocks to pass before including a direct transaction -3. Let full nodes enforce inclusion within a fixed period of time window +We implement a **forced inclusion mechanism** that allows users to submit transactions directly to the Data Availability (DA) layer. This approach provides: -The mechanism will be designed to maintain backward compatibility with existing Evolve deployments while providing enhanced liveness guarantees. +1. **Censorship Resistance**: Users can always bypass the sequencer by posting to DA +2. **Verifiable Inclusion**: Full nodes verify that sequencers include all forced transactions +3. **Based Rollup Option**: A based sequencer mode for fully DA-driven transaction ordering +4. 
**Simplicity**: No complex timing mechanisms or fallback modes ### High-Level Architecture -The following diagram illustrates the high-level architecture of the forced inclusion mechanism: - -```mermaid -flowchart TB - subgraph DAL["Data Availability Layer"] - end - - subgraph SEQ["Single Sequencer"] - subgraph NO["Normal Operation"] - direction TB - process["Process user txs"] - create["Create batches"] - include["Include direct txs from DA"] - checkDelay["Check MinDADelay"] - end - end - - subgraph FN["Full Nodes"] - subgraph NormalOp["Normal Operation"] - follow["Follow sequencer produced blocks"] - validate["Validate time windows"] - validateDelay["Validate MinDADelay"] - end - - subgraph FallbackMode["Fallback Mode"] - detect["Detect sequencer down"] - scan["Scan DA for direct txs"] - createBlocks["Create deterministic blocks from direct txs"] - end - end - - SEQ -->|"Publish Batches"| DAL - DAL -->|"Direct Txs"| SEQ - DAL -->|"Direct Txs"| FN - SEQ -->|"Blocks"| FN - NormalOp <--> FallbackMode +``` +┌─────────────────────────────────────────────────────────────────┐ +│ User Actions │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ Normal Path: Forced Inclusion Path: │ +│ Submit tx to Sequencer ────► Submit tx directly to DA │ +│ (Fast) (Censorship-resistant) │ +│ │ +└──────────┬────────────────────────────────────┬─────────────────┘ + │ │ + ▼ ▼ + ┌─────────────┐ ┌──────────────────┐ + │ Sequencer │ │ DA Layer │ + │ (Mempool) │ │ (Forced Inc. NS) │ + └──────┬──────┘ └─────────┬────────┘ + │ │ + │ 1. Fetch forced inc. txs │ + │◄────────────────────────────────────┘ + │ + │ 2. Prepend forced txs to batch + │ + ▼ + ┌─────────────┐ + │ Block │ + │ Production │ + └──────┬──────┘ + │ + │ 3. Submit block to DA + │ + ▼ + ┌─────────────┐ + │ DA Layer │ + └──────┬──────┘ + │ + │ 4. Full nodes retrieve block + │ + ▼ + ┌─────────────────────┐ + │ Full Nodes │ + │ (Verification) │ + │ │ + │ 5. Verify forced │ + │ inc. 
txs are │ + │ included │ + └─────────────────────┘ ``` +### Key Components + +1. **Forced Inclusion Namespace**: A dedicated DA namespace where users can post transactions +2. **DA Retriever**: Fetches forced inclusion transactions from DA using epoch-based scanning +3. **Single Sequencer**: Enhanced to include forced transactions from DA in every batch +4. **Based Sequencer**: Alternative sequencer that ONLY retrieves transactions from DA +5. **Verification**: Full nodes validate that blocks include all forced transactions + ## Detailed Design ### User Requirements -- Developers need a mechanism to ensure their chains can progress even when the single sequencer is unavailable -- The system should maintain a deterministic and consistent state regardless of sequencer availability -- The transition between sequencer-led and forced inclusion modes should be seamless -- Transactions must be included within a fixed time window from when they are first seen -- Direct transactions must wait for a minimum number of DA blocks before inclusion +Users can submit transactions in two ways: -### Systems Affected +1. **Normal Path**: Submit to sequencer's mempool/RPC (fast, low cost) +2. **Forced Inclusion Path**: Submit directly to DA forced inclusion namespace (censorship-resistant) -The implementation of the forced inclusion mechanism will affect several components of the Evolve framework: +No additional requirements or monitoring needed from users. -1. **Single Sequencer**: Must be modified to track and include direct transactions from the DA layer within the time window and after minimum DA block delay -2. **Full Node**: Must be updated to recognize and validate blocks with forced inclusions -3. **Block Processing Logic**: Must implement the modified fork choice rule -4. **DA Client**: Must be enhanced to scan for direct transactions -5. **Transaction Validation**: Must validate both sequencer-batched and direct transactions +### Systems Affected + +1. 
**DA Layer**: New namespace for forced inclusion transactions +2. **Sequencer (Single)**: Fetches and includes forced transactions +3. **Sequencer (Based)**: New sequencer type that only uses DA transactions +4. **DA Retriever**: New component for fetching forced transactions +5. **Syncer**: Verifies forced transaction inclusion in blocks +6. **Configuration**: New fields for forced inclusion settings ### Data Structures -#### Direct Transaction Tracking +#### Forced Inclusion Event ```go -type ForcedInclusionConfig struct { - MaxInclusionDelay uint64 // Max inclusion time in DA block time units - MinDADelay uint64 // Minimum number of DA blocks before including a direct tx +type ForcedIncludedEvent struct { + Txs [][]byte // Forced inclusion transactions + StartDaHeight uint64 // Start of DA height range + EndDaHeight uint64 // End of DA height range } +``` -type DirectTransaction struct { - TxHash common.Hash - FirstSeenAt uint64 // DA block time when the tx was seen - Included bool // Whether it has been included in a block - IncludedAt uint64 // Height at which it was included -} +#### DA Retriever Interface -type DirectTxTracker struct { - txs map[common.Hash]DirectTransaction // Map of direct transactions - mu sync.RWMutex // Mutex for thread-safe access - latestSeenTime uint64 // Latest DA block time scanned - latestDAHeight uint64 // Latest DA block height +```go +type DARetriever interface { + // Retrieve forced inclusion transactions from DA at specified height + RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) } ``` -#### Sequencer Status Tracking +### APIs and Interfaces + +#### DA Retriever + +The DA Retriever component handles fetching forced inclusion transactions: ```go -type SequencerStatus struct { - IsActive bool // Whether the sequencer is considered active - LastActiveTime uint64 // Last DA block time where sequencer posted a batch - InactiveTime uint64 // Time since last sequencer activity 
+type daRetriever struct { + da coreda.DA + cache cache.Manager + genesis genesis.Genesis + logger zerolog.Logger + namespaceForcedInclusionBz []byte + hasForcedInclusionNs bool + daEpochSize uint64 } + +// RetrieveForcedIncludedTxsFromDA fetches forced inclusion transactions +// Only fetches at epoch boundaries to prevent redundant DA queries +func (r *daRetriever) RetrieveForcedIncludedTxsFromDA( + ctx context.Context, + daHeight uint64, +) (*ForcedIncludedEvent, error) ``` -### APIs and Interfaces +#### Single Sequencer Extension -#### Enhanced DA Client Interface +The single sequencer is enhanced to fetch and include forced transactions: ```go -type DAClient interface { - // Existing methods - // ... +type Sequencer struct { + // ... existing fields ... + daRetriever DARetriever + genesis genesis.Genesis + mu sync.RWMutex + daHeight uint64 +} + +func (s *Sequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { + // 1. Fetch forced inclusion transactions from DA + forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) - // New method for forced inclusion - GetDirectTransactions(ctx context.Context, fromTime, toTime uint64) ([][]byte, error) - // Note: SubmitDirectTransaction is removed as it's not a responsibility of the node + // 2. Get batch from mempool + batch, err := s.queue.Next(ctx) + + // 3. Prepend forced transactions to batch + if len(forcedEvent.Txs) > 0 { + batch.Transactions = append(forcedEvent.Txs, batch.Transactions...) 
+ } + + return &GetNextBatchResponse{Batch: batch} } ``` -#### Sequencer Interface Extensions +#### Based Sequencer + +A new sequencer implementation that ONLY retrieves transactions from DA: ```go -// New methods added to the Sequencer interface -func (s *Sequencer) ScanDALayerForDirectTxs(ctx context.Context) error -func (s *Sequencer) IncludeDirectTransactions(ctx context.Context, batch *Batch) error +type BasedSequencer struct { + daRetriever DARetriever + da coreda.DA + config config.Config + genesis genesis.Genesis + logger zerolog.Logger + mu sync.RWMutex + daHeight uint64 + txQueue [][]byte // Buffer for transactions exceeding batch size +} + +func (s *BasedSequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { + // Fetch forced inclusion transactions from DA + forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) + + // Add transactions to queue + s.txQueue = append(s.txQueue, forcedEvent.Txs...) + + // Create batch from queue respecting MaxBytes + batch := s.createBatchFromQueue(req.MaxBytes) + + return &GetNextBatchResponse{Batch: batch} +} + +// SubmitBatchTxs is a no-op for based sequencer +func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) { + // Based sequencer ignores submitted transactions + return &SubmitBatchTxsResponse{}, nil +} ``` -#### Full Node Interface Extensions +#### Syncer Verification + +Full nodes verify forced inclusion in the sync process: ```go -// New methods added to the Node interface -func (n *Node) CheckSequencerStatus(ctx context.Context) (bool, error) -func (n *Node) ProcessDirectTransactions(ctx context.Context) error -func (n *Node) ValidateBlockTimeWindow(ctx context.Context, block *types.Block) error +func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error { + // 1. 
Retrieve forced inclusion transactions from DA + forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight) + if err != nil { + return err + } + + // 2. Build map of transactions in block + blockTxMap := make(map[string]struct{}) + for _, tx := range data.Txs { + blockTxMap[string(tx)] = struct{}{} + } + + // 3. Verify all forced transactions are included + for _, forcedTx := range forcedEvent.Txs { + if _, ok := blockTxMap[string(forcedTx)]; !ok { + return errMaliciousProposer + } + } + + return nil +} ``` -### Implementation Changes - -#### Single Sequencer Node Changes - -1. **DA Layer Scanner**: - - Implement a periodic scanner that queries the DA layer for direct transactions - - Track all direct transactions in the DirectTxTracker data structure - - Update the latest seen DA block time and height after each scan - -2. **Transaction Inclusion Logic**: - - Modify the batch creation process to include direct transactions from the DA layer - - Ensure all direct transactions are included within the MaxInclusionDelay time window - - Check that transactions have waited for MinDADelay DA blocks - - Track transaction inclusion times and enforce both delay constraints - -3. **Validation Rules**: - - Implement time window validation to ensure transactions are included within MaxInclusionDelay - - Implement DA block delay validation to ensure transactions wait for MinDADelay blocks - - Track both time-based and DA block-based delays for each transaction - -4. 
**Recovery Mechanism**: - - Add logic to detect when the sequencer comes back online after downtime - - Implement state synchronization to catch up with any forced inclusions that occurred during downtime - - Resume normal operation by building on top of the canonical chain tip - -#### Sequencer Operation Flow - -The following diagram illustrates the operation flow for the sequencer with forced inclusion: - -```txt -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Sequencer Operation Flow │ -└─────────────────┬───────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 1. Process User Transactions │ │ 2. Periodic DA Layer Scanning │ -│ │ │ │ -│ - Accept transactions from users│ │ - Query DA layer for direct txs │ -│ - Validate and queue txs │ │ - Update DirectTxTracker │ -│ - Process queue based on policy │ │ - Track latest seen DA block time │ -└─────────────────┬───────────────┘ └────────────────────┬───────────────────┘ - │ │ - ▼ ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 3. Batch Creation │ │ 4. Direct Transaction Inclusion │ -│ │ │ │ -│ - Create batch of txs │◄─────┤ - Include unprocessed direct txs │ -│ - Apply ordering policy │ │ - Prioritize by first seen │ -│ - Calculate batch metadata │ │ - Mark included txs as processed │ -└─────────────────┬───────────────┘ └────────────────────────────────────────┘ - │ - ▼ -┌──────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 5. Time Window Validation │ │ 6. Block Production │ -│ │ │ │ -│ - Check transaction timestamps │ │ - Create block with batch │ -│ - Ensure within MaxInclusionDelay│─────►│ - Sign and publish block │ -│ - Track inclusion times │ │ │ -└──────────────────────────────────┘ └─────────────────┬──────────────────────┘ - │ - ▼ - ┌────────────────────────────────────────┐ - │ 7. 
DA Batch Submission │ - │ │ - │ - Submit batch to DA layer │ - │ - Track submission status │ - │ - Handle retry on failure │ - └────────────────────────────────────────┘ +### Implementation Details + +#### Epoch-Based Fetching + +To avoid excessive DA queries, the DA Retriever uses epoch-based fetching: + +- **Epoch Size**: Configurable number of DA blocks (e.g., 10) +- **Epoch Boundaries**: Deterministically calculated based on `DAStartHeight` +- **Fetch Timing**: Only fetch at epoch start to prevent duplicate fetches + +```go +// Calculate epoch boundaries +func (r *daRetriever) calculateEpochBoundaries(daHeight uint64) (start, end uint64) { + epochNum := r.calculateEpochNumber(daHeight) + start = r.genesis.DAStartHeight + (epochNum-1)*r.daEpochSize + end = r.genesis.DAStartHeight + epochNum*r.daEpochSize - 1 + return start, end +} + +// Only fetch at epoch start +if daHeight != epochStart { + return &ForcedIncludedEvent{Txs: [][]byte{}} +} + +// Fetch all heights in epoch range +for height := epochStart; height <= epochEnd; height++ { + // Fetch forced inclusion blobs from this DA height +} ``` -#### Full Node Operation Flow - -The following diagram illustrates the operation flow for full nodes with forced inclusion support: - -```txt -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Full Node Operation Flow │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 1. Normal Operation Mode │ │ 2. Sequencer Status Monitoring │ -│ │ │ │ -│ - Receive blocks from sequencer │ │ - Monitor sequencer activity on DA │ -│ - Validate time windows │◄───►│ - Track time since last sequencer batch│ -│ - Apply state transitions │ │ - Check against downtime threshold │ -└─────────────────────────────────┘ └───────────────────┬────────────────────┘ - │ - ▼ - ┌────────────────────────────────────────┐ - │ Is Sequencer Down? 
│ - │ (Based on configurable threshold) │ - └───────────┬───────────────┬────────────┘ - │ │ - │ Yes │ No - ▼ │ - ┌────────────────────────┐ │ - │ 3. Enter Fallback Mode │ │ - │ │ │ - │ - Switch to direct tx │ │ - │ processing │ │ - │ - Notify subsystems │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ │ - ┌────────────────────────┐ │ - │ 4. DA Layer Scanning │ │ - │ │ │ - │ - Scan DA for direct │ │ - │ transactions │ │ - │ - Track latest seen │ │ - │ DA block time │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ │ - ┌────────────────────────┐ │ - │ 5. Deterministic Block │ │ - │ Creation │ │ - │ │ │ - │ - Create blocks with │ │ - │ direct txs only │ │ - │ - Apply deterministic │ │ - │ ordering rules │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ 6. Block Processing and State Update │ -│ │ -│ - Execute transactions │ -│ - Update state │ -│ - Persist blocks and state │ -└─────────────────────────────────────────────────────────────────────────────────┘ +#### Height From Future Handling + +When DA height is not yet available: + +```go +if errors.Is(err, coreda.ErrHeightFromFuture) { + // Keep current DA height, return empty batch + // Retry same height on next call + return &ForcedIncludedEvent{Txs: [][]byte{}}, nil +} ``` -### Fallback Mode Transition - -The following diagram illustrates the transition between normal operation and fallback mode: - -```mermaid -sequenceDiagram - participant DA as Data Availability Layer - participant S as Sequencer - participant R as Chain - - Note over S,R: Normal Operation - DA->>S: DA Block N - S->>R: Sequencer Block N - DA->>S: DA Block N+1 - S->>R: Sequencer Block N+1 - DA->>S: DA Block N+2 - S->>R: Sequencer Block N+2 - - Note over S,R: Sequencer Down - DA->>R: DA Block N+3 (Direct Txs) - Note over R: Fallback Mode Start - R->>R: Create Block from Direct Txs - DA->>R: DA Block N+4 (Direct Txs) - R->>R: Create Block from Direct Txs - DA->>R: DA Block 
N+5 (Direct Txs) - R->>R: Create Block from Direct Txs - - Note over S,R: Sequencer Back Online - DA->>S: DA Block N+6 - S->>R: Sequencer Block N+6 - DA->>S: DA Block N+7 - S->>R: Sequencer Block N+7 - - Note over R: Timeline shows: - Note over R: 1. Normal sequencer operation - Note over R: 2. Sequencer downtime & fallback - Note over R: 3. Sequencer recovery +#### Transaction Queue Management + +The based sequencer uses a queue to handle transactions exceeding batch size: + +```go +func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *Batch { + var batch [][]byte + var totalBytes uint64 + + for i, tx := range s.txQueue { + txSize := uint64(len(tx)) + if totalBytes+txSize > maxBytes && len(batch) > 0 { + // Would exceed max bytes, stop here + s.txQueue = s.txQueue[i:] + break + } + + batch = append(batch, tx) + totalBytes += txSize + } + + return &Batch{Transactions: batch} +} ``` ### Configuration -The forced inclusion mechanism will be configurable with the following parameters: - ```go -type ForcedInclusionConfig struct { - Enabled bool // Whether forced inclusion is enabled - MaxInclusionDelay uint64 // Maximum time window for transaction inclusion - SequencerDownTime uint64 // Time after which the sequencer is considered down - MinDADelay uint64 // Minimum number of DA blocks before including a direct tx +type DAConfig struct { + // ... existing fields ... + + // Namespace for forced inclusion transactions + ForcedInclusionNamespace string + + // Number of DA blocks to scan per forced inclusion fetch + // Higher values reduce DA queries but increase latency + // Lower values increase DA queries but improve responsiveness + ForcedInclusionDAEpoch uint64 +} + +type NodeConfig struct { + // ... existing fields ... 
+ + // Run node with based sequencer (requires aggregator mode) + BasedSequencer bool } ``` +### Configuration Examples + +#### Traditional Sequencer with Forced Inclusion + +```yaml +da: + forced_inclusion_namespace: "0x0000000000000000000000000000000000000000000000000000666f72636564" + forced_inclusion_da_epoch: 10 # Scan 10 DA blocks at a time + +node: + aggregator: true + based_sequencer: false # Use traditional sequencer +``` + +#### Based Sequencer (DA-Only) + +```yaml +da: + forced_inclusion_namespace: "0x0000000000000000000000000000000000000000000000000000666f72636564" + forced_inclusion_da_epoch: 5 # Scan 5 DA blocks at a time + +node: + aggregator: true + based_sequencer: true # Use based sequencer +``` + +### Sequencer Operation Flows + +#### Single Sequencer Flow + +``` +1. Timer triggers GetNextBatch +2. Fetch forced inclusion txs from DA (via DA Retriever) + - Only at epoch boundaries + - Scan epoch range for forced transactions +3. Get batch from mempool queue +4. Prepend forced txs to batch +5. Return batch for block production +``` + +#### Based Sequencer Flow + +``` +1. Timer triggers GetNextBatch +2. Check transaction queue for buffered txs +3. If queue empty or epoch boundary: + - Fetch forced inclusion txs from DA + - Add to queue +4. Create batch from queue (respecting MaxBytes) +5. Return batch for block production +``` + +### Full Node Verification Flow + +``` +1. Receive block from DA or P2P +2. Before applying block: + a. Fetch forced inclusion txs from DA at block's DA height + b. Build map of transactions in block + c. Verify all forced txs are in block + d. If missing: reject block, flag malicious proposer +3. 
Apply block if verification passes +``` + ### Efficiency Considerations -- DA layer scanning is integrated into the core block processing pipeline for continuous monitoring -- Direct transactions are indexed by hash for quick lookups -- The sequencer status is tracked by DA block time rather than block heights -- Time-based tracking simplifies the implementation and reduces overhead -- DA block height tracking adds minimal overhead to existing block processing +1. **Epoch-Based Fetching**: Reduces DA queries by batching multiple DA heights +2. **Deterministic Epochs**: All nodes calculate same epoch boundaries +3. **Fetch at Epoch Start**: Prevents duplicate fetches as DA height progresses +4. **Transaction Queue**: Buffers excess transactions across multiple blocks +5. **Conditional Fetching**: Only when forced inclusion namespace is configured + +**DA Query Frequency**: + +- Traditional: Every `ForcedInclusionDAEpoch` DA blocks +- Based Sequencer: Every `ForcedInclusionDAEpoch` DA blocks or when queue empty +- Full Nodes: At each block height for verification ### Security Considerations -- The mechanism ensures that only valid direct transactions can be included in the chain -- Time window validation prevents delayed inclusion of transactions -- The configurable time threshold prevents premature switching to fallback mode due to temporary sequencer issues -- All transactions, whether sequencer-batched or direct, undergo the same validation rules -- MinDADelay provides protection against DA layer censorship by requiring multiple block proposers to collude -- Block-based delay prevents single block proposer censorship by ensuring transactions must be visible across multiple DA layer blocks -- The delay mechanism is inspired by the "Based Sequencing with Soft Confirmations" design from [Sovereign SDK #408](https://github.com/Sovereign-Labs/sovereign-sdk/issues/408), which uses deferred execution to prevent DA layer block proposers from censoring transactions +1. 
**Malicious Proposer Detection**: Full nodes reject blocks missing forced transactions +2. **No Timing Attacks**: Epoch boundaries are deterministic, no time-based logic +3. **Blob Size Limits**: Enforces maximum blob size to prevent DoS +4. **Graceful Degradation**: Continues operation if forced inclusion not configured +5. **Height Validation**: Handles "height from future" errors without state corruption + +**Attack Vectors**: + +- **Censorship**: Mitigated by forced inclusion verification +- **DA Spam**: Limited by DA layer's native spam protection and blob size limits +- **Block Withholding**: Full nodes can fetch and verify from DA independently ### Privacy Considerations -- Direct transactions posted to the DA layer are publicly visible, just like sequencer-batched transactions -- No additional privacy concerns are introduced beyond the existing model +1. **Public Transactions**: Forced inclusion transactions are public on DA layer +2. **Timing Analysis**: Transaction submission timing visible on DA +3. **No Metadata**: No additional metadata beyond transaction content +4. **Same Privacy Model**: Privacy properties same as normal transactions + +Users requiring privacy should use application-level encryption or privacy protocols. ### Testing Strategy -1. **Unit Tests**: - - Test individual components of the forced inclusion mechanism - - Verify time window validation logic - - Test the DA scanner functionality - - Test transaction inclusion timing constraints - - Test MinDADelay validation - -2. **Integration Tests**: - - Test the interaction between the sequencer and the DA layer - - Verify correct inclusion of direct transactions within time windows - - Test DA block delay validation - - Verify both time and block delay constraints - -3. 
**End-to-End Tests**: - - Simulate sequencer downtime and verify chain progression - - Test the transition between normal and fallback modes - - Verify the sequencer's recovery process after downtime - - Test transaction inclusion with various delay configurations - -4. **Performance Testing**: - - Measure the overhead introduced by the DA scanner - - Benchmark the system's performance in fallback mode - - Evaluate the impact of time-based tracking - - Measure the performance impact of DA block delay validation +#### Unit Tests + +1. **DA Retriever**: + - Epoch boundary calculations + - Height from future handling + - Blob size validation + - Empty epoch handling + +2. **Single Sequencer**: + - Forced transaction prepending + - DA height tracking + - Error handling + +3. **Based Sequencer**: + - Queue management + - Batch size limits + - DA-only operation + +4. **Syncer Verification**: + - All forced txs included (pass) + - Missing forced txs (fail) + - No forced txs (pass) + +#### Integration Tests + +1. **Single Sequencer Integration**: + - Submit to mempool and forced inclusion + - Verify both included in block + - Forced txs appear first + +2. **Based Sequencer Integration**: + - Submit only to DA forced inclusion + - Verify block production + - Mempool submissions ignored + +3. **Verification Flow**: + - Full node rejects block missing forced tx + - Full node accepts block with all forced txs + +#### End-to-End Tests + +1. **User Flow**: + - User submits tx to forced inclusion namespace + - Sequencer includes tx in next epoch + - Full nodes verify inclusion + +2. **Based Rollup**: + - Start network with based sequencer + - Submit transactions to DA + - Verify block production and finalization + +3. 
**Censorship Resistance**: + - Sequencer ignores specific transaction + - User submits to forced inclusion + - Transaction included in next epoch + - Attempting to exclude causes block rejection ### Breaking Changes -This enhancement introduces no breaking changes to the existing API or data structures. It extends the current functionality by implementing time-based transaction tracking and inclusion rules, along with DA block-based delay validation, without modifying the core interfaces that developers interact with. +1. **Sequencer Initialization**: Requires `DARetriever` and `Genesis` parameters +2. **Configuration**: New fields in `DAConfig` and `NodeConfig` +3. **Syncer**: New verification step in block processing + +**Migration Path**: + +- Forced inclusion is optional (enabled when namespace configured) +- Existing deployments work without configuration changes +- Can enable incrementally per network ## Status -Proposed +Accepted and Implemented ## Consequences ### Positive -- Improves the liveness guarantees of Evolve-based chains -- Provides a path for Evolve to meet Stage 1 L2 requirements per the L2 Beat framework -- Creates an "unstoppable" property for applications, enhancing their reliability -- Maintains a deterministic chain state regardless of sequencer availability -- More predictable deadlines in DA time -- Easier to reason about for users and developers -- Prevents DA layer censorship by requiring multiple block proposers to collude +1. **Censorship Resistance**: Users have guaranteed path to include transactions +2. **Verifiable**: Full nodes enforce forced inclusion, detecting malicious sequencers +3. **Simple Design**: No complex timing mechanisms or fallback modes +4. **Based Rollup Option**: Fully DA-driven transaction ordering available +5. **Optional**: Forced inclusion can be disabled for permissioned deployments +6. **Efficient**: Epoch-based fetching minimizes DA queries +7. 
**Flexible**: Configurable epoch size allows tuning latency vs efficiency ### Negative -- Adds complexity to the block processing and validation logic -- Introduces overhead from scanning the DA layer for direct transactions -- Could potentially slow block production during fallback mode -- May need careful tuning of time window parameters -- Could be affected by variations in block production rate -- Additional complexity from tracking DA block heights for delay validation +1. **Increased Latency**: Forced transactions subject to epoch boundaries +2. **DA Dependency**: Requires DA layer to support multiple namespaces +3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion +4. **Additional Complexity**: New component (DA Retriever) and verification logic +5. **Epoch Configuration**: Requires tuning `ForcedInclusionDAEpoch` per network ### Neutral -- Requires application developers to consider both sequencer-batched and direct transaction flows -- Introduces configuration options that developers need to understand and set appropriately -- Changes the mental model of how the chain progresses, from purely sequencer-driven to a hybrid approach -- Users will need to use external tools or services to submit direct transactions to the DA layer during sequencer downtime +1. **Two Sequencer Types**: Choice between single (hybrid) and based (DA-only) +2. **Privacy Model Unchanged**: Forced inclusion has same privacy as normal path +3. **Monitoring**: Operators should monitor forced inclusion namespace usage +4. 
**Documentation**: Users need guidance on when to use forced inclusion ## References From c3bac1bc55a0467b8e72de48eb061293ea17d400 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 10 Nov 2025 14:41:05 +0100 Subject: [PATCH 18/39] extract epoch calculation --- block/internal/syncing/da_retriever.go | 36 +-- block/internal/syncing/da_retriever_test.go | 197 ------------- types/epoch.go | 50 ++++ types/epoch_test.go | 292 ++++++++++++++++++++ 4 files changed, 344 insertions(+), 231 deletions(-) create mode 100644 types/epoch.go create mode 100644 types/epoch_test.go diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 1fed543d96..95a26c0ada 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -114,7 +114,7 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei } // Calculate deterministic epoch boundaries - epochStart, epochEnd := r.calculateEpochBoundaries(daHeight) + epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) // Only fetch at epoch start to prevent double fetching as DA height progresses if daHeight != epochStart { @@ -137,7 +137,7 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei Uint64("da_height", daHeight). Uint64("epoch_start", epochStart). Uint64("epoch_end", epochEnd). - Uint64("epoch_num", r.calculateEpochNumber(daHeight)). + Uint64("epoch_num", types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize)). Msg("retrieving forced included transactions from DA") // Check if both epoch start and end are available before fetching @@ -247,38 +247,6 @@ func (r *daRetriever) processForcedInclusionBlobs( return nil } -// calculateEpochNumber returns the deterministic epoch number for a given DA height. -// Epoch 1 starts at DAStartHeight. 
-func (r *daRetriever) calculateEpochNumber(daHeight uint64) uint64 { - if daHeight < r.genesis.DAStartHeight { - return 0 - } - - if r.daEpochSize == 0 { - return 1 - } - - return ((daHeight - r.genesis.DAStartHeight) / r.daEpochSize) + 1 -} - -// calculateEpochBoundaries returns the start and end DA heights for the epoch -// containing the given DA height. The boundaries are inclusive. -func (r *daRetriever) calculateEpochBoundaries(daHeight uint64) (start, end uint64) { - if daHeight < r.genesis.DAStartHeight { - return r.genesis.DAStartHeight, r.genesis.DAStartHeight + r.daEpochSize - 1 - } - - if r.daEpochSize == 0 { - return r.genesis.DAStartHeight, r.genesis.DAStartHeight - } - - epochNum := r.calculateEpochNumber(daHeight) - start = r.genesis.DAStartHeight + (epochNum-1)*r.daEpochSize - end = r.genesis.DAStartHeight + epochNum*r.daEpochSize - 1 - - return start, end -} - // fetchBlobs retrieves blobs from the DA layer func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { // Retrieve from both namespaces diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index f27ba373c0..05ff8f5b6d 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -624,203 +624,6 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi assert.Greater(t, totalSizeWithThird, int(common.DefaultMaxBlobSize)) } -func TestDARetriever_CalculateEpochNumber(t *testing.T) { - tests := []struct { - name string - daStartHeight uint64 - daEpochSize uint64 - daHeight uint64 - expectedEpoch uint64 - }{ - { - name: "first epoch - start height", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 100, - expectedEpoch: 1, - }, - { - name: "first epoch - middle", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 105, - expectedEpoch: 1, - }, - { - name: "first epoch - last height", - daStartHeight: 100, - daEpochSize: 
10, - daHeight: 109, - expectedEpoch: 1, - }, - { - name: "second epoch - start", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 110, - expectedEpoch: 2, - }, - { - name: "second epoch - middle", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 115, - expectedEpoch: 2, - }, - { - name: "tenth epoch", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 195, - expectedEpoch: 10, - }, - { - name: "before start height", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 50, - expectedEpoch: 0, - }, - { - name: "zero epoch size", - daStartHeight: 100, - daEpochSize: 0, - daHeight: 200, - expectedEpoch: 1, - }, - { - name: "large epoch size", - daStartHeight: 1000, - daEpochSize: 1000, - daHeight: 2500, - expectedEpoch: 2, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) - - mockDA := testmocks.NewMockDA(t) - gen := genesis.Genesis{DAStartHeight: tt.daStartHeight} - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionDAEpoch = tt.daEpochSize - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - epoch := r.calculateEpochNumber(tt.daHeight) - - assert.Equal(t, tt.expectedEpoch, epoch) - }) - } -} - -func TestDARetriever_CalculateEpochBoundaries(t *testing.T) { - tests := []struct { - name string - daStartHeight uint64 - daEpochSize uint64 - daHeight uint64 - expectedStart uint64 - expectedEnd uint64 - }{ - { - name: "first epoch", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 105, - expectedStart: 100, - expectedEnd: 109, - }, - { - name: "second epoch", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 110, - expectedStart: 110, - expectedEnd: 119, - }, - { - name: "third epoch - last height", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 129, - expectedStart: 120, - expectedEnd: 129, - }, - { - name: "before start height returns 
first epoch", - daStartHeight: 100, - daEpochSize: 10, - daHeight: 50, - expectedStart: 100, - expectedEnd: 109, - }, - { - name: "zero epoch size", - daStartHeight: 100, - daEpochSize: 0, - daHeight: 200, - expectedStart: 100, - expectedEnd: 100, - }, - { - name: "large epoch", - daStartHeight: 1000, - daEpochSize: 1000, - daHeight: 1500, - expectedStart: 1000, - expectedEnd: 1999, - }, - { - name: "epoch boundary exact start", - daStartHeight: 100, - daEpochSize: 50, - daHeight: 100, - expectedStart: 100, - expectedEnd: 149, - }, - { - name: "epoch boundary exact end of first epoch", - daStartHeight: 100, - daEpochSize: 50, - daHeight: 149, - expectedStart: 100, - expectedEnd: 149, - }, - { - name: "epoch boundary exact start of second epoch", - daStartHeight: 100, - daEpochSize: 50, - daHeight: 150, - expectedStart: 150, - expectedEnd: 199, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) - - mockDA := testmocks.NewMockDA(t) - gen := genesis.Genesis{DAStartHeight: tt.daStartHeight} - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionDAEpoch = tt.daEpochSize - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - start, end := r.calculateEpochBoundaries(tt.daHeight) - - assert.Equal(t, tt.expectedStart, start, "start height mismatch") - assert.Equal(t, tt.expectedEnd, end, "end height mismatch") - }) - } -} - func TestDARetriever_RetrieveForcedIncludedTxsFromDA_NotAtEpochStart(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) diff --git a/types/epoch.go b/types/epoch.go new file mode 100644 index 0000000000..5e849520e5 --- /dev/null +++ b/types/epoch.go @@ -0,0 +1,50 @@ +package types + +// CalculateEpochNumber returns the deterministic epoch number for a given DA height. +// Epoch 1 starts at daStartHeight. 
+// +// Parameters: +// - daHeight: The DA height to calculate the epoch for +// - daStartHeight: The genesis DA start height +// - daEpochSize: The number of DA blocks per epoch (0 means all blocks in epoch 1) +// +// Returns: +// - Epoch number (0 if before daStartHeight, 1+ otherwise) +func CalculateEpochNumber(daHeight, daStartHeight, daEpochSize uint64) uint64 { + if daHeight < daStartHeight { + return 0 + } + + if daEpochSize == 0 { + return 1 + } + + return ((daHeight - daStartHeight) / daEpochSize) + 1 +} + +// CalculateEpochBoundaries returns the start and end DA heights for the epoch +// containing the given DA height. The boundaries are inclusive. +// +// Parameters: +// - daHeight: The DA height to calculate boundaries for +// - daStartHeight: The genesis DA start height +// - daEpochSize: The number of DA blocks per epoch (0 means single epoch) +// +// Returns: +// - start: The first DA height in the epoch (inclusive) +// - end: The last DA height in the epoch (inclusive) +func CalculateEpochBoundaries(daHeight, daStartHeight, daEpochSize uint64) (start, end uint64) { + if daHeight < daStartHeight { + return daStartHeight, daStartHeight + daEpochSize - 1 + } + + if daEpochSize == 0 { + return daStartHeight, daStartHeight + } + + epochNum := CalculateEpochNumber(daHeight, daStartHeight, daEpochSize) + start = daStartHeight + (epochNum-1)*daEpochSize + end = daStartHeight + epochNum*daEpochSize - 1 + + return start, end +} diff --git a/types/epoch_test.go b/types/epoch_test.go new file mode 100644 index 0000000000..6c202b8956 --- /dev/null +++ b/types/epoch_test.go @@ -0,0 +1,292 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCalculateEpochNumber(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + daHeight uint64 + expectedEpoch uint64 + }{ + { + name: "first epoch - start height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 100, + expectedEpoch: 1, 
+ }, + { + name: "first epoch - middle", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 105, + expectedEpoch: 1, + }, + { + name: "first epoch - last height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 109, + expectedEpoch: 1, + }, + { + name: "second epoch - start", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 110, + expectedEpoch: 2, + }, + { + name: "second epoch - middle", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 115, + expectedEpoch: 2, + }, + { + name: "tenth epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 195, + expectedEpoch: 10, + }, + { + name: "before start height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 50, + expectedEpoch: 0, + }, + { + name: "zero epoch size", + daStartHeight: 100, + daEpochSize: 0, + daHeight: 200, + expectedEpoch: 1, + }, + { + name: "large epoch size", + daStartHeight: 1000, + daEpochSize: 1000, + daHeight: 2500, + expectedEpoch: 2, + }, + { + name: "start height zero", + daStartHeight: 0, + daEpochSize: 5, + daHeight: 10, + expectedEpoch: 3, + }, + { + name: "epoch size one", + daStartHeight: 100, + daEpochSize: 1, + daHeight: 105, + expectedEpoch: 6, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + epoch := CalculateEpochNumber(tt.daHeight, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, tt.expectedEpoch, epoch) + }) + } +} + +func TestCalculateEpochBoundaries(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + daHeight uint64 + expectedStart uint64 + expectedEnd uint64 + }{ + { + name: "first epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 105, + expectedStart: 100, + expectedEnd: 109, + }, + { + name: "second epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 110, + expectedStart: 110, + expectedEnd: 119, + }, + { + name: "third epoch - last height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 129, + expectedStart: 120, + expectedEnd: 129, + }, + { + 
name: "before start height returns first epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 50, + expectedStart: 100, + expectedEnd: 109, + }, + { + name: "zero epoch size", + daStartHeight: 100, + daEpochSize: 0, + daHeight: 200, + expectedStart: 100, + expectedEnd: 100, + }, + { + name: "large epoch", + daStartHeight: 1000, + daEpochSize: 1000, + daHeight: 1500, + expectedStart: 1000, + expectedEnd: 1999, + }, + { + name: "epoch boundary exact start", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 100, + expectedStart: 100, + expectedEnd: 149, + }, + { + name: "epoch boundary exact end of first epoch", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 149, + expectedStart: 100, + expectedEnd: 149, + }, + { + name: "epoch boundary exact start of second epoch", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 150, + expectedStart: 150, + expectedEnd: 199, + }, + { + name: "start height zero", + daStartHeight: 0, + daEpochSize: 5, + daHeight: 10, + expectedStart: 10, + expectedEnd: 14, + }, + { + name: "epoch size one", + daStartHeight: 100, + daEpochSize: 1, + daHeight: 105, + expectedStart: 105, + expectedEnd: 105, + }, + { + name: "very large numbers", + daStartHeight: 1000000, + daEpochSize: 100000, + daHeight: 5500000, + expectedStart: 5500000, + expectedEnd: 5599999, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + start, end := CalculateEpochBoundaries(tt.daHeight, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, tt.expectedStart, start, "start height mismatch") + assert.Equal(t, tt.expectedEnd, end, "end height mismatch") + }) + } +} + +func TestEpochConsistency(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + }{ + { + name: "standard epoch", + daStartHeight: 100, + daEpochSize: 10, + }, + { + name: "large epoch", + daStartHeight: 1000, + daEpochSize: 1000, + }, + { + name: "small epoch", + daStartHeight: 0, + daEpochSize: 1, + }, + } + + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + // Test that all heights in an epoch return the same epoch number + // and boundaries + for epoch := uint64(1); epoch <= 10; epoch++ { + // Calculate expected boundaries for this epoch + expectedStart := tt.daStartHeight + (epoch-1)*tt.daEpochSize + expectedEnd := tt.daStartHeight + epoch*tt.daEpochSize - 1 + + // Test every height in the epoch + for h := expectedStart; h <= expectedEnd; h++ { + epochNum := CalculateEpochNumber(h, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, epoch, epochNum, "height %d should be in epoch %d", h, epoch) + + start, end := CalculateEpochBoundaries(h, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, expectedStart, start, "height %d should have start %d", h, expectedStart) + assert.Equal(t, expectedEnd, end, "height %d should have end %d", h, expectedEnd) + } + } + }) + } +} + +func TestEpochBoundaryTransitions(t *testing.T) { + daStartHeight := uint64(100) + daEpochSize := uint64(10) + + // Test that epoch boundaries are correctly calculated at transitions + transitions := []struct { + height uint64 + expectedEpoch uint64 + expectedStart uint64 + expectedEnd uint64 + }{ + {100, 1, 100, 109}, // First height of epoch 1 + {109, 1, 100, 109}, // Last height of epoch 1 + {110, 2, 110, 119}, // First height of epoch 2 + {119, 2, 110, 119}, // Last height of epoch 2 + {120, 3, 120, 129}, // First height of epoch 3 + } + + for _, tr := range transitions { + epoch := CalculateEpochNumber(tr.height, daStartHeight, daEpochSize) + assert.Equal(t, tr.expectedEpoch, epoch, "height %d epoch mismatch", tr.height) + + start, end := CalculateEpochBoundaries(tr.height, daStartHeight, daEpochSize) + assert.Equal(t, tr.expectedStart, start, "height %d start mismatch", tr.height) + assert.Equal(t, tr.expectedEnd, end, "height %d end mismatch", tr.height) + } +} From 3c4132d481b677a3dc72498b3a0524e6629592a5 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 10 Nov 2025 14:51:05 +0100 
Subject: [PATCH 19/39] move da epoch from config to genesis --- apps/evm/single/cmd/run.go | 2 +- apps/grpc/single/cmd/run.go | 2 +- apps/testapp/cmd/run.go | 2 +- block/internal/syncing/da_retriever.go | 4 +- block/internal/syncing/da_retriever_test.go | 23 +++---- .../syncing/syncer_forced_inclusion_test.go | 48 +++++++-------- .../adr/adr-019-forced-inclusion-mechanism.md | 60 ++++++++++++------- pkg/config/config.go | 4 -- pkg/config/defaults.go | 1 - pkg/genesis/genesis.go | 26 ++++---- sequencers/based/README.md | 11 +++- 11 files changed, 101 insertions(+), 82 deletions(-) diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index df35bbbcd0..e18326fd5f 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -128,7 +128,7 @@ func createSequencer( logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). - Uint64("da_epoch", nodeConfig.DA.ForcedInclusionDAEpoch). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). Msg("based sequencer initialized") return basedSeq, nil diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index 2ac93dec0e..491d364bf6 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -137,7 +137,7 @@ func createSequencer( logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). - Uint64("da_epoch", nodeConfig.DA.ForcedInclusionDAEpoch). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). Msg("based sequencer initialized") return basedSeq, nil diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index 14b9cd3fa5..719383c9df 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -135,7 +135,7 @@ func createSequencer( logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). - Uint64("da_epoch", nodeConfig.DA.ForcedInclusionDAEpoch). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). 
Msg("based sequencer initialized") return basedSeq, nil diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 95a26c0ada..4e1c5ae760 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -74,7 +74,7 @@ func NewDARetriever( namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), namespaceForcedInclusionBz: namespaceForcedInclusionBz, hasForcedInclusionNs: hasForcedInclusionNs, - daEpochSize: config.DA.ForcedInclusionDAEpoch, + daEpochSize: genesis.DAEpochForcedInclusion, pendingHeaders: make(map[uint64]*types.SignedHeader), pendingData: make(map[uint64]*types.Data), } @@ -232,7 +232,7 @@ func (r *daRetriever) processForcedInclusionBlobs( // Check if adding this data would exceed max blob size if *currentSize+dataSize > common.DefaultMaxBlobSize { - r.logger.Warn().Msg("forced inclusion data exceeds maximum blob size - reduce ForcedInclusionDAEpoch configuration if this happens often") + r.logger.Warn().Msg("forced inclusion data exceeds maximum blob size - reduce DAEpochForcedInclusion if this happens often") // TODO(@julienrbrt): we need to keep track of which txs haven't been included, so they are retried in the next epoch diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 05ff8f5b6d..c048e8467b 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -429,14 +429,13 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 5678} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 5678, DAEpochForcedInclusion: 1} // Prepare forced 
inclusion transaction data dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 3) cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 1 // Epoch size of 1 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() @@ -468,7 +467,7 @@ func TestDARetriever_FetchForcedIncludedTxs_NoNamespaceConfigured(t *testing.T) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 0} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 0, DAEpochForcedInclusion: 1} cfg := config.DefaultConfig() // Leave ForcedInclusionNamespace empty @@ -487,11 +486,10 @@ func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 9999} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 9999, DAEpochForcedInclusion: 1} cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 1 // Epoch size of 1 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() @@ -517,11 +515,10 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 1000} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: 
time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 1000, DAEpochForcedInclusion: 3} cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 3 // Epoch size of 3 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() @@ -631,11 +628,10 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_NotAtEpochStart(t *testing. require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 100} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 100, DAEpochForcedInclusion: 10} cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 10 mockDA := testmocks.NewMockDA(t) @@ -658,11 +654,10 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochStartFromFuture(t *tes require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 1000} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 100, DAEpochForcedInclusion: 10} cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 10 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() @@ -689,11 +684,10 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochEndFromFuture(t *testi require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 
1000} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 100, DAEpochForcedInclusion: 10} cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 10 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() @@ -724,7 +718,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 2000} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 2000, DAEpochForcedInclusion: 3} // Prepare forced inclusion transaction data dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) @@ -733,7 +727,6 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 3 namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 1921a4f6ba..2a6a9a8631 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -30,16 +30,16 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 0, + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + 
ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, } cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 1 mockExec := testmocks.NewMockExecutor(t) mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). @@ -104,16 +104,16 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 0, + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, } cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 1 mockExec := testmocks.NewMockExecutor(t) mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). @@ -181,16 +181,16 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 0, + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, } cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 1 mockExec := testmocks.NewMockExecutor(t) mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
@@ -260,16 +260,16 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { addr, _, _ := buildSyncTestSigner(t) gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 0, + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, } cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - cfg.DA.ForcedInclusionDAEpoch = 1 mockExec := testmocks.NewMockExecutor(t) mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index 09127f313e..daacf78bad 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -332,16 +332,23 @@ func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *Batch { ### Configuration ```go +type Genesis struct { + ChainID string + StartTime time.Time + InitialHeight uint64 + ProposerAddress []byte + DAStartHeight uint64 + // Number of DA blocks to scan per forced inclusion fetch + // Higher values reduce DA queries but increase latency + // Lower values increase DA queries but improve responsiveness + DAEpochForcedInclusion uint64 +} + type DAConfig struct { // ... existing fields ... 
 // Namespace for forced inclusion transactions
 ForcedInclusionNamespace string
-
- // Number of DA blocks to scan per forced inclusion fetch
- // Higher values reduce DA queries but increase latency
- // Lower values increase DA queries but improve responsiveness
- ForcedInclusionDAEpoch uint64
}

type NodeConfig struct {
@@ -357,25 +364,37 @@ type NodeConfig struct {

#### Traditional Sequencer with Forced Inclusion

```yaml
-da:
- forced_inclusion_namespace: "0x0000000000000000000000000000000000000000000000000000666f72636564"
- forced_inclusion_da_epoch: 10 # Scan 10 DA blocks at a time
+# genesis.json
+{
+ "chain_id": "my-rollup",
+ "da_epoch_forced_inclusion": 10 # Scan 10 DA blocks at a time
+}

-node:
- aggregator: true
- based_sequencer: false # Use traditional sequencer
+# config.toml
+[da]
+forced_inclusion_namespace = "0x0000000000000000000000000000000000000000000000000000666f72636564"
+
+[node]
+aggregator = true
+based_sequencer = false # Use traditional sequencer
```

#### Based Sequencer (DA-Only)

```yaml
-da:
- forced_inclusion_namespace: "0x0000000000000000000000000000000000000000000000000000666f72636564"
- forced_inclusion_da_epoch: 5 # Scan 5 DA blocks at a time
+# genesis.json
+{
+ "chain_id": "my-rollup",
+ "da_epoch_forced_inclusion": 5 # Scan 5 DA blocks at a time
+}
+
+# config.toml
+[da]
+forced_inclusion_namespace = "0x0000000000000000000000000000000000000000000000000000666f72636564"

-node:
- aggregator: true
- based_sequencer: true # Use based sequencer
+[node]
+aggregator = true
+based_sequencer = true # Use based sequencer
```

### Sequencer Operation Flows
@@ -426,8 +445,8 @@ node:

**DA Query Frequency**:

-- Traditional: Every `ForcedInclusionDAEpoch` DA blocks
-- Based Sequencer: Every `ForcedInclusionDAEpoch` DA blocks or when queue empty
+- Traditional: Every `DAEpochForcedInclusion` DA blocks
+- Based Sequencer: Every `DAEpochForcedInclusion` DA blocks or when queue empty
 - Full Nodes: At each block height for verification

### Security
Considerations @@ -546,7 +565,7 @@ Accepted and Implemented 2. **DA Dependency**: Requires DA layer to support multiple namespaces 3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion 4. **Additional Complexity**: New component (DA Retriever) and verification logic -5. **Epoch Configuration**: Requires tuning `ForcedInclusionDAEpoch` per network +5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) ### Neutral @@ -554,6 +573,7 @@ Accepted and Implemented 2. **Privacy Model Unchanged**: Forced inclusion has same privacy as normal path 3. **Monitoring**: Operators should monitor forced inclusion namespace usage 4. **Documentation**: Users need guidance on when to use forced inclusion +5. **Genesis Parameter**: `DAEpochForcedInclusion` is a consensus parameter fixed at genesis ## References diff --git a/pkg/config/config.go b/pkg/config/config.go index d089929bbe..3782e9390c 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -72,8 +72,6 @@ const ( FlagDADataNamespace = FlagPrefixEvnode + "da.data_namespace" // FlagDAForcedInclusionNamespace is a flag for specifying the DA forced inclusion namespace ID FlagDAForcedInclusionNamespace = FlagPrefixEvnode + "da.forced_inclusion_namespace" - // FlagDAForcedInclusionDAEpoch is a flag for specifying the DA forced inclusion DA epoch - FlagDAForcedInclusionDAEpoch = FlagPrefixEvnode + "da.forced_inclusion_da_epoch" // FlagDASubmitOptions is a flag for data availability submit options FlagDASubmitOptions = FlagPrefixEvnode + "da.submit_options" // FlagDAMempoolTTL is a flag for specifying the DA mempool TTL @@ -171,7 +169,6 @@ type DAConfig struct { Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. 
When a DataNamespace is provided, only the header is sent to this namespace."` DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. Use this to speed-up light clients."` ForcedInclusionNamespace string `mapstructure:"forced_inclusion_namespace" yaml:"forced_inclusion_namespace" comment:"Namespace ID for forced inclusion transactions on the DA layer."` - ForcedInclusionDAEpoch uint64 `mapstructure:"forced_inclusion_da_epoch" yaml:"forced_inclusion_da_epoch" comment:"DA epoch for forced inclusion transactions on the DA layer."` BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. 
Higher values provide more resilience but can delay error reporting."` @@ -348,7 +345,6 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().String(FlagDANamespace, def.DA.Namespace, "DA namespace for header (or blob) submissions") cmd.Flags().String(FlagDADataNamespace, def.DA.DataNamespace, "DA namespace for data submissions") cmd.Flags().String(FlagDAForcedInclusionNamespace, def.DA.ForcedInclusionNamespace, "DA namespace for forced inclusion transactions") - cmd.Flags().Uint64(FlagDAForcedInclusionDAEpoch, def.DA.ForcedInclusionDAEpoch, "DA epoch for forced inclusion transactions (i.e: how many DA blocks processed to include transactions)") cmd.Flags().String(FlagDASubmitOptions, def.DA.SubmitOptions, "DA submit options") cmd.Flags().Uint64(FlagDAMempoolTTL, def.DA.MempoolTTL, "number of DA blocks until transaction is dropped from the mempool") cmd.Flags().Int(FlagDAMaxSubmitAttempts, def.DA.MaxSubmitAttempts, "maximum number of attempts to submit data to the DA layer before giving up") diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index 305a2eeb89..996513b37c 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -79,7 +79,6 @@ func DefaultConfig() Config { Namespace: randString(10), DataNamespace: "", ForcedInclusionNamespace: "", - ForcedInclusionDAEpoch: 50, }, Instrumentation: DefaultInstrumentationConfig(), Log: LogConfig{ diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index 1fae146431..65cbed1737 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -11,11 +11,12 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. // The app state or other fields are not included here. 
type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` - DAStartHeight uint64 `json:"da_start_height"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` + DAStartHeight uint64 `json:"da_start_height"` + DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"` } // NewGenesis creates a new Genesis instance. @@ -26,11 +27,12 @@ func NewGenesis( proposerAddress []byte, ) Genesis { genesis := Genesis{ - ChainID: chainID, - StartTime: startTime, - InitialHeight: initialHeight, - ProposerAddress: proposerAddress, - DAStartHeight: 0, + ChainID: chainID, + StartTime: startTime, + InitialHeight: initialHeight, + ProposerAddress: proposerAddress, + DAStartHeight: 0, + DAEpochForcedInclusion: 50, // Default epoch size } return genesis @@ -54,5 +56,9 @@ func (g Genesis) Validate() error { return fmt.Errorf("proposer_address cannot be nil") } + if g.DAEpochForcedInclusion < 1 { + return fmt.Errorf("da_epoch_forced_inclusion must be at least 1, got %d", g.DAEpochForcedInclusion) + } + return nil } diff --git a/sequencers/based/README.md b/sequencers/based/README.md index d833768d81..c93fd513f1 100644 --- a/sequencers/based/README.md +++ b/sequencers/based/README.md @@ -64,10 +64,15 @@ type Sequencer interface { ## Configuration -The based sequencer uses the following configuration from `config.Config`: +The based sequencer uses the following configuration: + +From `config.Config`: - `DA.ForcedInclusionNamespace`: Namespace for forced inclusion transactions -- `DA.ForcedInclusionDAEpoch`: Number of DA blocks to scan per fetch + +From `genesis.Genesis`: + +- `DAEpochForcedInclusion`: Number of DA blocks to scan per fetch (consensus parameter) If `ForcedInclusionNamespace` is not configured, the sequencer 
returns empty batches. @@ -76,7 +81,7 @@ If `ForcedInclusionNamespace` is not configured, the sequencer returns empty bat - **Batching**: Transactions are batched to reduce DA queries - **Queue**: In-memory queue prevents repeated DA fetches - **Mutex Protection**: Thread-safe but may block on concurrent access -- **DA Epoch**: Configure `ForcedInclusionDAEpoch` to balance freshness vs. efficiency +- **DA Epoch**: Set `DAEpochForcedInclusion` in genesis to balance freshness vs. efficiency ## Comparison to Traditional Sequencer From ecc55ce1b840645b53256ea464798449d73e189b Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 10 Nov 2025 16:53:21 +0100 Subject: [PATCH 20/39] fixes --- block/components_test.go | 3 ++ .../internal/executing/executor_lazy_test.go | 8 ++++ .../internal/executing/executor_logic_test.go | 6 +++ .../executing/executor_restart_test.go | 14 +++++- block/internal/syncing/da_retriever_test.go | 24 ++++++---- pkg/genesis/genesis_test.go | 45 ++++++++++--------- pkg/genesis/io_test.go | 45 ++++++++++--------- 7 files changed, 96 insertions(+), 49 deletions(-) diff --git a/block/components_test.go b/block/components_test.go index 60a1df6a22..5fc66a5dcc 100644 --- a/block/components_test.go +++ b/block/components_test.go @@ -203,6 +203,9 @@ func TestExecutor_RealExecutionClientFailure_StopsNode(t *testing.T) { mockExec.On("InitChain", mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return([]byte("state-root"), uint64(1024), nil).Once() + // Mock SetDAHeight to be called during initialization + mockSeq.On("SetDAHeight", uint64(0)).Return().Once() + // Mock GetNextBatch to return empty batch mockSeq.On("GetNextBatch", mock.Anything, mock.Anything). 
Return(&coresequencer.GetNextBatchResponse{ diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index b72f0a856b..a11cf6a1c2 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -73,6 +73,7 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -91,6 +92,8 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + // Direct call to produceBlock should work (this is what lazy timer does) err = exec.produceBlock() require.NoError(t, err) @@ -113,6 +116,8 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), []byte("new_root_1")). Return([]byte("new_root_2"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec.produceBlock() require.NoError(t, err) @@ -183,6 +188,7 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). 
Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -201,6 +207,8 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec.produceBlock() require.NoError(t, err) diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 9aa79d0c43..6029186e86 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -95,6 +95,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() // initialize state (creates genesis block in store and sets state) require.NoError(t, exec.initializeState()) @@ -113,6 +114,8 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + // produce one block err = exec.produceBlock() require.NoError(t, err) @@ -180,6 +183,7 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). 
Return([]byte("i0"), uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -196,6 +200,8 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), []byte("i0")). Return([]byte("i1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + require.NoError(t, exec.produceBlock()) h1, err := memStore.Height(context.Background()) require.NoError(t, err) diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index 3f0e8b500c..14daccddcc 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -73,6 +73,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { initStateRoot := []byte("init_root") mockExec1.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq1.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec1.initializeState()) // Set up context for first executor @@ -92,6 +93,8 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). 
Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq1.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec1.produceBlock() require.NoError(t, err) @@ -189,6 +192,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { require.NoError(t, err) // Initialize state for second executor (should load existing state) + mockSeq2.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec2.initializeState()) // Set up context for second executor @@ -206,7 +210,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { mockExec2.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), currentState2.AppHash). Return([]byte("new_root_2"), uint64(1024), nil).Once() - // Note: mockSeq2 should NOT receive any calls because pending block should be used + mockSeq2.EXPECT().GetDAHeight().Return(uint64(0)).Once() + + // Note: mockSeq2 should NOT receive GetNextBatch calls because pending block should be used err = exec2.produceBlock() require.NoError(t, err) @@ -289,6 +295,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { initStateRoot := []byte("init_root") mockExec1.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq1.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec1.initializeState()) exec1.ctx, exec1.cancel = context.WithCancel(context.Background()) @@ -307,6 +314,8 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). 
Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq1.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec1.produceBlock() require.NoError(t, err) @@ -338,6 +347,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { ) require.NoError(t, err) + mockSeq2.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec2.initializeState()) exec2.ctx, exec2.cancel = context.WithCancel(context.Background()) defer exec2.cancel() @@ -360,6 +370,8 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { mockExec2.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), []byte("new_root_1")). Return([]byte("new_root_2"), uint64(1024), nil).Once() + mockSeq2.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec2.produceBlock() require.NoError(t, err) diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index c048e8467b..87701dfa40 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -29,7 +29,11 @@ import ( // makeSignedDataBytes builds SignedData containing the provided Data and returns its binary encoding func makeSignedDataBytes(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer, txs int) ([]byte, *types.SignedData) { - d := &types.Data{Metadata: &types.Metadata{ChainID: chainID, Height: height, Time: uint64(time.Now().UnixNano())}} + return makeSignedDataBytesWithTime(t, chainID, height, proposer, pub, signer, txs, uint64(time.Now().UnixNano())) +} + +func makeSignedDataBytesWithTime(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer, txs int, timestamp uint64) ([]byte, *types.SignedData) { + d := &types.Data{Metadata: &types.Metadata{ChainID: chainID, Height: height, Time: timestamp}} if txs > 0 { d.Txs = make(types.Txs, txs) for i := 0; i < txs; i++ { @@ -522,11 +526,14 @@ func 
TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + // Use fixed timestamp for deterministic test data + fixedTime := uint64(1234567890) + // Create signed data blobs that will exceed DefaultMaxBlobSize when accumulated // DefaultMaxBlobSize is 1.5MB = 1,572,864 bytes // Each 700KB tx becomes ~719KB blob, so 2 blobs = ~1.44MB (fits), 3 blobs = ~2.16MB (exceeds) d1 := &types.Data{ - Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 10, Time: uint64(time.Now().UnixNano())}, + Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 10, Time: fixedTime}, Txs: make(types.Txs, 1), } d1.Txs[0] = make([]byte, 700*1024) // 700KB transaction @@ -540,7 +547,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi require.NoError(t, err) d2 := &types.Data{ - Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 11, Time: uint64(time.Now().UnixNano())}, + Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 11, Time: fixedTime}, Txs: make(types.Txs, 1), } d2.Txs[0] = make([]byte, 700*1024) // 700KB transaction @@ -554,7 +561,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi require.NoError(t, err) d3 := &types.Data{ - Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 12, Time: uint64(time.Now().UnixNano())}, + Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 12, Time: fixedTime}, Txs: make(types.Txs, 1), } d3.Txs[0] = make([]byte, 700*1024) // 700KB transaction @@ -720,10 +727,11 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 2000, DAEpochForcedInclusion: 3} - // Prepare forced inclusion transaction data - dataBin1, _ := makeSignedDataBytes(t, 
gen.ChainID, 10, addr, pub, signer, 2) - dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) - dataBin3, _ := makeSignedDataBytes(t, gen.ChainID, 12, addr, pub, signer, 1) + // Prepare forced inclusion transaction data with fixed timestamp + fixedTime := uint64(1234567890) + dataBin1, _ := makeSignedDataBytesWithTime(t, gen.ChainID, 10, addr, pub, signer, 2, fixedTime) + dataBin2, _ := makeSignedDataBytesWithTime(t, gen.ChainID, 11, addr, pub, signer, 1, fixedTime) + dataBin3, _ := makeSignedDataBytesWithTime(t, gen.ChainID, 12, addr, pub, signer, 1, fixedTime) cfg := config.DefaultConfig() cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index a5c1d280db..da3cc14b1f 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -72,50 +72,55 @@ func TestGenesis_Validate(t *testing.T) { { name: "valid genesis - chain ID can contain any character", genesis: Genesis{ - ChainID: "test@chain#123!", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "test@chain#123!", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "invalid - empty chain_id", genesis: Genesis{ - ChainID: "", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - zero initial height", genesis: Genesis{ - ChainID: "test-chain", - StartTime: validTime, - InitialHeight: 0, - ProposerAddress: []byte("proposer"), + ChainID: "test-chain", + StartTime: validTime, + InitialHeight: 0, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - zero time DA start height", genesis: Genesis{ - ChainID: "test-chain", - StartTime: 
time.Time{}, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "test-chain", + StartTime: time.Time{}, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - nil proposer address", genesis: Genesis{ - ChainID: "test-chain", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: nil, + ChainID: "test-chain", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: nil, + DAEpochForcedInclusion: 1, }, wantErr: true, }, diff --git a/pkg/genesis/io_test.go b/pkg/genesis/io_test.go index fb6f223070..7c8b882a6f 100644 --- a/pkg/genesis/io_test.go +++ b/pkg/genesis/io_test.go @@ -30,40 +30,44 @@ func TestLoadAndSaveGenesis(t *testing.T) { { name: "valid genesis", genesis: Genesis{ - ChainID: "test-chain-1", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain-1", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "valid genesis - minimal", genesis: Genesis{ - ChainID: "test-chain-2", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain-2", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "invalid genesis - empty chain ID", genesis: Genesis{ - ChainID: "", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid genesis - zero initial height", genesis: Genesis{ - ChainID: "test-chain", - InitialHeight: 0, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain", + InitialHeight: 0, + StartTime: validTime, + 
ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, @@ -177,10 +181,11 @@ func TestSaveGenesis_InvalidPath(t *testing.T) { } genesis := Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: time.Now().UTC(), - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now().UTC(), + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, } err := genesis.Save(tc.path) From e192c821d463175a84ee3d9456212ea74fdf747b Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 10 Nov 2025 16:58:31 +0100 Subject: [PATCH 21/39] fixes --- block/internal/syncing/da_retriever_test.go | 36 ++++++++++++--------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 87701dfa40..d66d19640a 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -577,35 +577,38 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi mockDA := testmocks.NewMockDA(t) // With DAStartHeight=1000, epoch size=3, daHeight=1000 -> epoch boundaries are [1000, 1002] + // RetrieveWithHelpers calls in order: start (1000), end (1002), then intermediate (1001) + // Check epoch start mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + // Fetch epoch start data (height 1000) + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1}, nil).Once() + // Check epoch end mockDA.EXPECT().GetIDs(mock.Anything, uint64(1002), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) 
})).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi3")}, Timestamp: time.Now()}, nil).Once() - // Fetch epoch start data + // Fetch epoch end data (height 1002) - should be retrieved but skipped due to size limit mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin1}, nil).Once() + })).Return([][]byte{dataBin3}, nil).Once() - // Second height in epoch - should succeed + // Check intermediate height in epoch (height 1001) mockDA.EXPECT().GetIDs(mock.Anything, uint64(1001), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi2")}, Timestamp: time.Now()}, nil).Once() + // Fetch intermediate height data (height 1001) mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin2}, nil).Once() - // Fetch epoch end data - should be retrieved but skipped due to size limit - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin3}, nil).Once() - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) @@ -741,37 +744,38 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) mockDA := testmocks.NewMockDA(t) // With DAStartHeight=2000, epoch size=3, daHeight=2000 -> epoch boundaries are [2000, 2002] - // All heights available + // RetrieveWithHelpers calls in order: start (2000), end (2002), then intermediate (2001) // Check epoch start (2000) mockDA.EXPECT().GetIDs(mock.Anything, uint64(2000), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, 
Timestamp: time.Now()}, nil).Once() + // Fetch epoch start data (height 2000) + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1}, nil).Once() + // Check epoch end (2002) mockDA.EXPECT().GetIDs(mock.Anything, uint64(2002), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi3")}, Timestamp: time.Now()}, nil).Once() - // Fetch epoch start data + // Fetch epoch end data (height 2002) mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin1}, nil).Once() + })).Return([][]byte{dataBin3}, nil).Once() // Fetch middle height (2001) mockDA.EXPECT().GetIDs(mock.Anything, uint64(2001), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi2")}, Timestamp: time.Now()}, nil).Once() + // Fetch intermediate height data (height 2001) mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return([][]byte{dataBin2}, nil).Once() - // Fetch epoch end data - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin3}, nil).Once() - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 2000) From cdd8afae242d104c8e0e6a9bb293b32ceba1fdd8 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 10 Nov 2025 17:28:31 +0100 Subject: [PATCH 22/39] cleanup --- docs/adr/adr-019-forced-inclusion-mechanism.md | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git 
a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index daacf78bad..d86e9d4b54 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -445,9 +445,7 @@ based_sequencer = true # Use based sequencer **DA Query Frequency**: -- Traditional: Every `DAEpochForcedInclusion` DA blocks -- Based Sequencer: Every `DAEpochForcedInclusion` DA blocks or when queue empty -- Full Nodes: At each block height for verification +Every `DAEpochForcedInclusion` DA blocks ### Security Considerations @@ -463,15 +461,6 @@ based_sequencer = true # Use based sequencer - **DA Spam**: Limited by DA layer's native spam protection and blob size limits - **Block Withholding**: Full nodes can fetch and verify from DA independently -### Privacy Considerations - -1. **Public Transactions**: Forced inclusion transactions are public on DA layer -2. **Timing Analysis**: Transaction submission timing visible on DA -3. **No Metadata**: No additional metadata beyond transaction content -4. **Same Privacy Model**: Privacy properties same as normal transactions - -Users requiring privacy should use application-level encryption or privacy protocols. 
- ### Testing Strategy #### Unit Tests From 272027d0e3cd61e72f72197d8dcaadcba0a1709d Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 11 Nov 2025 15:43:03 +0100 Subject: [PATCH 23/39] update executor for based sequencer flow --- block/internal/executing/executor.go | 60 +++++++++++++++++++--------- 1 file changed, 42 insertions(+), 18 deletions(-) diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index a3c29a1131..856932ce24 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "golang.org/x/sync/errgroup" @@ -67,6 +68,8 @@ type Executor struct { // - State transitions and validation // - P2P broadcasting of produced blocks // - DA submission of headers and data +// +// When BasedSequencer is enabled, signer can be nil as blocks are not signed. func NewExecutor( store store.Store, exec coreexecutor.Executor, @@ -82,17 +85,20 @@ func NewExecutor( options common.BlockOptions, errorCh chan<- error, ) (*Executor, error) { - if signer == nil { - return nil, errors.New("signer cannot be nil") - } + // For based sequencer, signer is optional as blocks are not signed + if !config.Node.BasedSequencer { + if signer == nil { + return nil, errors.New("signer cannot be nil") + } - addr, err := signer.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get address: %w", err) - } + addr, err := signer.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get address: %w", err) + } - if !bytes.Equal(addr, genesis.ProposerAddress) { - return nil, common.ErrNotProposer + if !bytes.Equal(addr, genesis.ProposerAddress) { + return nil, common.ErrNotProposer + } } return &Executor{ @@ -385,6 +391,7 @@ func (e *Executor) produceBlock() error { // signing the header is done after applying the block // as for signing, the state of the block may be 
required by the signature payload provider. + // For based sequencer, this will return an empty signature signature, err := e.signHeader(header.Header) if err != nil { return fmt.Errorf("failed to sign header: %w", err) @@ -500,16 +507,28 @@ func (e *Executor) createBlock(ctx context.Context, height uint64, batchData *Ba lastSignature = *lastSignaturePtr } - // Get signer info - pubKey, err := e.signer.GetPublic() - if err != nil { - return nil, nil, fmt.Errorf("failed to get public key: %w", err) - } + // Get signer info and validator hash + var pubKey crypto.PubKey + var validatorHash types.Hash - // Get validator hash - validatorHash, err := e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) - if err != nil { - return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + if e.signer != nil { + var err error + pubKey, err = e.signer.GetPublic() + if err != nil { + return nil, nil, fmt.Errorf("failed to get public key: %w", err) + } + + validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) + if err != nil { + return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + } + } else { + // For based sequencer without signer, use nil pubkey and compute validator hash + var err error + validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, nil) + if err != nil { + return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + } } // Create header @@ -591,6 +610,11 @@ func (e *Executor) applyBlock(ctx context.Context, header types.Header, data *ty // signHeader signs the block header func (e *Executor) signHeader(header types.Header) (types.Signature, error) { + // For based sequencer, return empty signature as there is no signer + if e.signer == nil { + return types.Signature{}, nil + } + bz, err := e.options.AggregatorNodeSignatureBytesProvider(&header) if err != nil { return nil, fmt.Errorf("failed to get signature payload: %w", err) From 
47bfd38a95400413088bd485a46212895bf45f51 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 12 Nov 2025 16:04:21 +0100 Subject: [PATCH 24/39] add size filtering in da retriever --- block/internal/syncing/da_retriever.go | 171 ++++++++++++++++++++++--- 1 file changed, 154 insertions(+), 17 deletions(-) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 4e1c5ae760..b7b75fa23c 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -22,6 +22,13 @@ import ( // defaultDATimeout is the default timeout for DA retrieval operations const defaultDATimeout = 10 * time.Second +// pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch +// and needs to be retried in future epochs. +type pendingForcedInclusionTx struct { + Data []byte // The transaction data + OriginalHeight uint64 // Original DA height where this transaction was found +} + // DARetriever defines the interface for retrieving events from the DA layer type DARetriever interface { RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) @@ -47,6 +54,10 @@ type daRetriever struct { // on restart, will be refetch as da height is updated by syncer pendingHeaders map[uint64]*types.SignedHeader pendingData map[uint64]*types.Data + + // Forced inclusion transactions that couldn't fit in the current epoch + // and need to be retried in future epochs. + pendingForcedInclusionTxs []pendingForcedInclusionTx } // NewDARetriever creates a new DA retriever @@ -77,6 +88,7 @@ func NewDARetriever( daEpochSize: genesis.DAEpochForcedInclusion, pendingHeaders: make(map[uint64]*types.SignedHeader), pendingData: make(map[uint64]*types.Data), + pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0), } } @@ -106,8 +118,18 @@ var ( ) // RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. 
-// It only fetches when daHeight is at the start of an epoch to prevent redundant fetching. -// Returns an error if the epoch start height is not yet available on DA (caller should backoff). +// +// Behavior: +// - At epoch boundaries (when daHeight == epochStart): fetches new forced-inclusion transactions +// from the DA layer for the entire epoch range, processes them, and returns all that fit within +// the max blob size limit. Transactions that don't fit are stored in the pending queue for retry. +// - Outside epoch boundaries (when daHeight != epochStart): returns any pending transactions from +// the queue that were deferred from previous epochs. +// - Pending transactions are kept in-memory only and will be lost on node restart. +// +// Returns: +// - ForcedIncludedEvent with transactions that should be included in the next block (may be empty) +// - Error if forced inclusion is not configured or DA layer is unavailable func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { if !r.hasForcedInclusionNs { return nil, ErrForceInclusionNotConfigured @@ -116,19 +138,42 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei // Calculate deterministic epoch boundaries epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) - // Only fetch at epoch start to prevent double fetching as DA height progresses + // If we're not at epoch start, return pending transactions only (if any) if daHeight != epochStart { r.logger.Debug(). Uint64("da_height", daHeight). Uint64("epoch_start", epochStart). - Msg("skipping forced inclusion fetch - not at epoch start") - return &common.ForcedIncludedEvent{ + Int("pending_count", len(r.pendingForcedInclusionTxs)). 
+ Msg("not at epoch start - returning pending transactions only") + + event := &common.ForcedIncludedEvent{ StartDaHeight: daHeight, EndDaHeight: daHeight, Txs: [][]byte{}, - }, nil + } + + // Return pending txs if any exist + if len(r.pendingForcedInclusionTxs) > 0 { + pendingTxs, indicesToRemove, _ := r.processPendingForcedInclusionTxs() + event.Txs = pendingTxs + + // Remove successfully included pending transactions + if len(indicesToRemove) > 0 { + r.removePendingForcedInclusionTxs(indicesToRemove) + r.logger.Debug(). + Int("included_count", len(indicesToRemove)). + Int("remaining_count", len(r.pendingForcedInclusionTxs)). + Msg("included pending forced inclusion transactions") + } + } + + return event, nil } + // We're at epoch start - fetch new transactions from DA + + currentEpochNumber := types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + event := &common.ForcedIncludedEvent{ StartDaHeight: epochStart, } @@ -137,7 +182,7 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei Uint64("da_height", daHeight). Uint64("epoch_start", epochStart). Uint64("epoch_end", epochEnd). - Uint64("epoch_num", types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize)). + Uint64("epoch_num", currentEpochNumber). Msg("retrieving forced included transactions from DA") // Check if both epoch start and end are available before fetching @@ -161,11 +206,24 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei } } - var currentSize int lastProcessedHeight := epochStart + newPendingTxs := []pendingForcedInclusionTx{} + + // Prepend pending transactions from previous epochs at the start of this epoch + pendingTxs, indicesToRemove, currentSize := r.processPendingForcedInclusionTxs() + event.Txs = pendingTxs + + // Remove successfully included pending transactions + if len(indicesToRemove) > 0 { + r.removePendingForcedInclusionTxs(indicesToRemove) + r.logger.Debug(). 
+ Int("included_count", len(indicesToRemove)). + Int("remaining_count", len(r.pendingForcedInclusionTxs)). + Msg("included pending forced inclusion transactions") + } // Process epoch start - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, epochStartResult, epochStart); err != nil { + if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, epochStartResult, epochStart); err != nil { return nil, err } @@ -182,18 +240,27 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei break } - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, result, epochHeight); err != nil { + if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, result, epochHeight); err != nil { return nil, err } } // Process epoch end (only if different from start) if epochEnd != epochStart { - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, epochEndResult, epochEnd); err != nil { + if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, epochEndResult, epochEnd); err != nil { return nil, err } } + // Store any new pending transactions that couldn't fit in this epoch + if len(newPendingTxs) > 0 { + r.pendingForcedInclusionTxs = append(r.pendingForcedInclusionTxs, newPendingTxs...) + r.logger.Info(). + Int("new_pending_count", len(newPendingTxs)). + Int("total_pending_count", len(r.pendingForcedInclusionTxs)). 
+ Msg("stored pending forced inclusion transactions for next epoch") + } + // Set the DA height range based on what we actually processed event.StartDaHeight = epochStart event.EndDaHeight = lastProcessedHeight @@ -201,11 +268,13 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei return event, nil } -// processForcedInclusionBlobs processes blobs from a DA retrieval result and adds them to the event +// processForcedInclusionBlobs processes forced inclusion blobs from a single DA height. +// It accumulates transactions that fit within maxBlobSize and stores excess in newPendingTxs. func (r *daRetriever) processForcedInclusionBlobs( event *common.ForcedIncludedEvent, currentSize *int, lastProcessedHeight *uint64, + newPendingTxs *[]pendingForcedInclusionTx, result coreda.ResultRetrieve, daHeight uint64, ) error { @@ -230,15 +299,33 @@ func (r *daRetriever) processForcedInclusionBlobs( // Calculate size of this specific data item dataSize := len(data) - // Check if adding this data would exceed max blob size - if *currentSize+dataSize > common.DefaultMaxBlobSize { - r.logger.Warn().Msg("forced inclusion data exceeds maximum blob size - reduce DAEpochForcedInclusion if always often") - - // TODO(@julienrbrt): we need to keep track of which that haven't been included, so they are retried in the next epoch + // Check if individual blob exceeds max size + if dataSize > int(common.DefaultMaxBlobSize) { + r.logger.Warn(). + Uint64("da_height", daHeight). + Int("blob_size", dataSize). + Float64("max_size", common.DefaultMaxBlobSize). + Msg("forced inclusion blob exceeds maximum size - skipping") + return fmt.Errorf("%w: blob size %d exceeds maximum %f", ErrForcedInclusionDataTooLarge, dataSize, common.DefaultMaxBlobSize) + } + // Check if adding this blob would exceed the current epoch's max size + if *currentSize+dataSize > int(common.DefaultMaxBlobSize) { + r.logger.Debug(). + Uint64("da_height", daHeight). 
+ Int("current_size", *currentSize). + Int("blob_size", dataSize). + Msg("blob would exceed max size for this epoch - deferring to pending queue") + + // Store for next epoch + *newPendingTxs = append(*newPendingTxs, pendingForcedInclusionTx{ + Data: data, + OriginalHeight: daHeight, + }) continue } + // Include this transaction event.Txs = append(event.Txs, data) *currentSize += dataSize *lastProcessedHeight = daHeight @@ -513,3 +600,53 @@ func createEmptyDataForHeader(ctx context.Context, header *types.SignedHeader) * }, } } + +// processPendingForcedInclusionTxs processes pending transactions and returns those that fit within the max blob size. +// Returns the transactions to include, the indices of transactions to remove, and the total size used. +func (r *daRetriever) processPendingForcedInclusionTxs() ([][]byte, []int, int) { + var ( + currentSize int + txs [][]byte + indicesToRemove []int + ) + + for i, pendingTx := range r.pendingForcedInclusionTxs { + dataSize := len(pendingTx.Data) + if currentSize+dataSize > int(common.DefaultMaxBlobSize) { + r.logger.Debug(). + Int("current_size", currentSize). + Int("data_size", dataSize). + Msg("pending transaction would exceed max blob size, will retry later") + break + } + + txs = append(txs, pendingTx.Data) + currentSize += dataSize + indicesToRemove = append(indicesToRemove, i) + } + + return txs, indicesToRemove, currentSize +} + +// removePendingForcedInclusionTxs removes pending transactions at the specified indices. +// Indices must be sorted in ascending order. 
+func (r *daRetriever) removePendingForcedInclusionTxs(indices []int) { + if len(indices) == 0 { + return + } + + // Create a new slice without the removed elements + newPending := make([]pendingForcedInclusionTx, 0, len(r.pendingForcedInclusionTxs)-len(indices)) + removeMap := make(map[int]bool, len(indices)) + for _, idx := range indices { + removeMap[idx] = true + } + + for i, tx := range r.pendingForcedInclusionTxs { + if !removeMap[i] { + newPending = append(newPending, tx) + } + } + + r.pendingForcedInclusionTxs = newPending +} From cb3135d8dd3ff2df486fc2b5506b694803c9b9ba Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 12 Nov 2025 17:22:46 +0100 Subject: [PATCH 25/39] use checksum as key --- block/internal/syncing/syncer.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 51120770a2..71625504ee 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -3,6 +3,8 @@ package syncing import ( "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" "fmt" "sync" @@ -672,6 +674,12 @@ func (s *Syncer) validateBlock(currState types.State, data *types.Data, header * var errMaliciousProposer = errors.New("malicious proposer detected") +// hashTx returns a hex-encoded SHA256 hash of the transaction. +func hashTx(tx []byte) string { + hash := sha256.Sum256(tx) + return hex.EncodeToString(hash[:]) +} + // verifyForcedInclusionTxs verifies that all forced inclusion transactions from DA are included in the block func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { if s.daRetriever == nil { @@ -697,13 +705,13 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. 
blockTxMap := make(map[string]struct{}) for _, tx := range data.Txs { - blockTxMap[string(tx)] = struct{}{} + blockTxMap[hashTx(tx)] = struct{}{} } // Check if all forced inclusion transactions are present in the block var missingTxs [][]byte for _, forcedTx := range forcedIncludedTxsEvent.Txs { - if _, ok := blockTxMap[string(forcedTx)]; !ok { + if _, ok := blockTxMap[hashTx(forcedTx)]; !ok { missingTxs = append(missingTxs, forcedTx) } } From 2e3472f4b94a828eccb1a65f07e4cc6dc9ab539b Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 13 Nov 2025 07:45:49 +0100 Subject: [PATCH 26/39] extract errors --- block/internal/common/errors.go | 3 +++ block/internal/syncing/da_retriever.go | 12 ++---------- block/internal/syncing/syncer.go | 2 +- sequencers/based/based.go | 7 ++----- sequencers/single/sequencer.go | 5 +---- 5 files changed, 9 insertions(+), 20 deletions(-) diff --git a/block/internal/common/errors.go b/block/internal/common/errors.go index 5ae3218639..5ae797daab 100644 --- a/block/internal/common/errors.go +++ b/block/internal/common/errors.go @@ -20,4 +20,7 @@ var ( // ErrOversizedItem is an unrecoverable error indicating a single item exceeds DA blob size limit ErrOversizedItem = errors.New("single item exceeds DA blob size limit") + + // ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. + ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") ) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index b7b75fa23c..fe34892d2c 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -109,14 +109,6 @@ func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co return r.processBlobs(ctx, blobsResp.Data, daHeight), nil } -var ( - // ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. 
- ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") - - // ErrForcedInclusionDataTooLarge is returned when forced inclusion data exceeds the maximum blob size. - ErrForcedInclusionDataTooLarge = errors.New("forced inclusion data exceeds maximum blob size limit") -) - // RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. // // Behavior: @@ -132,7 +124,7 @@ var ( // - Error if forced inclusion is not configured or DA layer is unavailable func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { if !r.hasForcedInclusionNs { - return nil, ErrForceInclusionNotConfigured + return nil, common.ErrForceInclusionNotConfigured } // Calculate deterministic epoch boundaries @@ -306,7 +298,7 @@ func (r *daRetriever) processForcedInclusionBlobs( Int("blob_size", dataSize). Float64("max_size", common.DefaultMaxBlobSize). Msg("forced inclusion blob exceeds maximum size - skipping") - return fmt.Errorf("%w: blob size %d exceeds maximum %f", ErrForcedInclusionDataTooLarge, dataSize, common.DefaultMaxBlobSize) + return fmt.Errorf("blob size %d exceeds maximum %f", dataSize, common.DefaultMaxBlobSize) } // Check if adding this blob would exceed the current epoch's max size diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 71625504ee..ddd039e63a 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -689,7 +689,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. 
// Retrieve forced inclusion transactions from DA forcedIncludedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight) if err != nil { - if errors.Is(err, ErrForceInclusionNotConfigured) { + if errors.Is(err, common.ErrForceInclusionNotConfigured) { s.logger.Debug().Msg("forced inclusion namespace not configured, skipping verification") return nil } diff --git a/sequencers/based/based.go b/sequencers/based/based.go index ae64bad898..7f7c9980c3 100644 --- a/sequencers/based/based.go +++ b/sequencers/based/based.go @@ -3,21 +3,18 @@ package based import ( "context" "errors" - "strings" "sync" "time" "github.com/rs/zerolog" + "github.com/evstack/ev-node/block/internal/common" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" ) -// ErrForceInclusionNotConfigured is returned when forced inclusion is not configured -var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") - // ForcedInclusionEvent represents forced inclusion transactions retrieved from DA type ForcedInclusionEvent = struct { Txs [][]byte @@ -100,7 +97,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get forcedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) if err != nil { - if strings.Contains(err.Error(), ErrForceInclusionNotConfigured.Error()) { + if errors.Is(err, common.ErrForceInclusionNotConfigured) { s.logger.Error().Msg("forced inclusion not configured, returning empty batch") return &coresequencer.GetNextBatchResponse{ Batch: &coresequencer.Batch{Transactions: nil}, diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index ba5d92c0fd..3e0f67eac2 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -5,7 +5,6 @@ import ( "context" "errors" "fmt" - "strings" "sync" "time" @@ 
-20,8 +19,6 @@ import ( var ( // ErrInvalidId is returned when the chain id is invalid ErrInvalidId = errors.New("invalid chain id") - // ErrForceInclusionNotConfigured is returned when forced inclusion is not configured - ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") ) // ForcedInclusionEvent represents forced inclusion transactions retrieved from DA @@ -141,7 +138,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB forcedEvent, err := c.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, currentDAHeight) if err != nil { // If forced inclusion is not configured, continue without forced txs - if !strings.Contains(err.Error(), ErrForceInclusionNotConfigured.Error()) { + if !errors.Is(err, common.ErrForceInclusionNotConfigured) { // If we get a height from future error, keep the current DA height and return batch // We'll retry the same height on the next call until DA produces that block if errors.Is(err, coreda.ErrHeightFromFuture) { From ef8849ae59f6df0527a94c30664c1e9244347065 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 13 Nov 2025 08:55:47 +0100 Subject: [PATCH 27/39] merge fixes --- apps/evm/single/go.mod | 6 +----- block/public.go | 4 ++++ go.mod | 2 -- sequencers/based/based.go | 4 ++-- sequencers/single/sequencer.go | 39 +++++++++++++++++----------------- 5 files changed, 27 insertions(+), 28 deletions(-) diff --git a/apps/evm/single/go.mod b/apps/evm/single/go.mod index 6c1ac61609..b80eaf4ef3 100644 --- a/apps/evm/single/go.mod +++ b/apps/evm/single/go.mod @@ -7,7 +7,7 @@ replace github.com/celestiaorg/go-header => github.com/julienrbrt/go-header v0.0 replace ( github.com/evstack/ev-node => ../../../ github.com/evstack/ev-node/core => ../../../core - github.com/evstack/ev-node/sequencers/single => ../../../sequencers/single + github.com/evstack/ev-node/da => ../../../da ) require ( @@ -195,7 +195,3 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 
v1.4.1 // indirect ) - -replace github.com/evstack/ev-node/core => ../../../core - -replace github.com/evstack/ev-node/da => ../../../da diff --git a/block/public.go b/block/public.go index f8e09531c9..678cf6e647 100644 --- a/block/public.go +++ b/block/public.go @@ -53,3 +53,7 @@ func NewDARetriever( logger, ), nil } + +// ErrForceInclusionNotConfigured is returned when force inclusion is not configured. +// It is exported because sequencers need to check for this error. +var ErrForceInclusionNotConfigured = common.ErrForceInclusionNotConfigured diff --git a/go.mod b/go.mod index 190d2d3f42..0e386175cc 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,6 @@ go 1.24.6 retract v0.12.0 // Published by accident -replace github.com/evstack/ev-node/core => ./core - require ( connectrpc.com/connect v1.19.1 connectrpc.com/grpcreflect v1.3.0 diff --git a/sequencers/based/based.go b/sequencers/based/based.go index 7f7c9980c3..6ed094477a 100644 --- a/sequencers/based/based.go +++ b/sequencers/based/based.go @@ -8,7 +8,7 @@ import ( "github.com/rs/zerolog" - "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" @@ -97,7 +97,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get forcedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) if err != nil { - if errors.Is(err, common.ErrForceInclusionNotConfigured) { + if errors.Is(err, block.ErrForceInclusionNotConfigured) { s.logger.Error().Msg("forced inclusion not configured, returning empty batch") return &coresequencer.GetNextBatchResponse{ Batch: &coresequencer.Batch{Transactions: nil}, diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index 3e0f67eac2..d8839ca195 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -11,6 +11,7 @@ import ( ds 
"github.com/ipfs/go-datastore" "github.com/rs/zerolog" + "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/genesis" @@ -137,27 +138,27 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB forcedEvent, err := c.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, currentDAHeight) if err != nil { - // If forced inclusion is not configured, continue without forced txs - if !errors.Is(err, common.ErrForceInclusionNotConfigured) { - // If we get a height from future error, keep the current DA height and return batch - // We'll retry the same height on the next call until DA produces that block - if errors.Is(err, coreda.ErrHeightFromFuture) { - c.logger.Debug(). - Uint64("da_height", currentDAHeight). - Msg("DA height from future, waiting for DA to produce block") - - batch, err := c.queue.Next(ctx) - if err != nil { - return nil, err - } - - return &coresequencer.GetNextBatchResponse{ - Batch: batch, - Timestamp: time.Now(), - BatchData: req.LastBatchData, - }, nil + // If we get a height from future error, keep the current DA height and return batch + // We'll retry the same height on the next call until DA produces that block + if errors.Is(err, coreda.ErrHeightFromFuture) { + c.logger.Debug(). + Uint64("da_height", currentDAHeight). 
+ Msg("DA height from future, waiting for DA to produce block") + + batch, err := c.queue.Next(ctx) + if err != nil { + return nil, err } + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } + + // If forced inclusion is not configured, continue without forced txs + if !errors.Is(err, block.ErrForceInclusionNotConfigured) { c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") // Continue without forced txs on other errors } From 27c1390b1696a516cc9c9929b4ce510228a36a8b Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 13 Nov 2025 10:31:19 +0100 Subject: [PATCH 28/39] fix unit tests --- pkg/config/config_test.go | 6 +++++- sequencers/based/based_test.go | 3 ++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index a48cd79fe3..bfbf11329f 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -51,9 +51,11 @@ func TestAddFlags(t *testing.T) { // Test specific flags assertFlagValue(t, flags, FlagDBPath, DefaultConfig().DBPath) + assertFlagValue(t, flags, FlagClearCache, DefaultConfig().ClearCache) // Node flags assertFlagValue(t, flags, FlagAggregator, DefaultConfig().Node.Aggregator) + assertFlagValue(t, flags, FlagBasedSequencer, DefaultConfig().Node.BasedSequencer) assertFlagValue(t, flags, FlagLight, DefaultConfig().Node.Light) assertFlagValue(t, flags, FlagBlockTime, DefaultConfig().Node.BlockTime.Duration) assertFlagValue(t, flags, FlagTrustedHash, DefaultConfig().Node.TrustedHash) @@ -93,6 +95,7 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, persistentFlags, FlagLogLevel, DefaultConfig().Log.Level) assertFlagValue(t, persistentFlags, FlagLogFormat, "text") assertFlagValue(t, persistentFlags, FlagLogTrace, false) + assertFlagValue(t, persistentFlags, FlagRootDir, DefaultRootDirWithName("test")) // Signer flags 
assertFlagValue(t, flags, FlagSignerPassphraseFile, "") @@ -101,9 +104,10 @@ func TestAddFlags(t *testing.T) { // RPC flags assertFlagValue(t, flags, FlagRPCAddress, DefaultConfig().RPC.Address) + assertFlagValue(t, flags, FlagRPCEnableDAVisualization, DefaultConfig().RPC.EnableDAVisualization) // Count the number of flags we're explicitly checking - expectedFlagCount := 46 // Update this number if you add more flag checks above + expectedFlagCount := 44 // Update this number if you add more flag checks above // Get the actual number of flags (both regular and persistent) actualFlagCount := 0 diff --git a/sequencers/based/based_test.go b/sequencers/based/based_test.go index dc4a1b7287..ef952b2524 100644 --- a/sequencers/based/based_test.go +++ b/sequencers/based/based_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" @@ -227,7 +228,7 @@ func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { // Mock retriever to return not configured error mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(nil, errors.New("forced inclusion namespace not configured")).Once() + Return(nil, block.ErrForceInclusionNotConfigured).Once() req := coresequencer.GetNextBatchRequest{ Id: []byte("test-chain"), From 6ba40595ec78c0d96b96e2e837a0d8316676bf1d Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 13 Nov 2025 10:51:19 +0100 Subject: [PATCH 29/39] remove nil check --- sequencers/single/sequencer.go | 97 ++++++++--------- sequencers/single/sequencer_test.go | 159 ++++++++++++++++++++-------- 2 files changed, 155 insertions(+), 101 deletions(-) diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index d8839ca195..1dbc145238 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -5,7 +5,7 @@ import ( "context" "errors" "fmt" - "sync" + "sync/atomic" "time" ds "github.com/ipfs/go-datastore" @@ -54,8 +54,7 @@ type Sequencer struct { // Forced inclusion support daRetriever DARetriever genesis genesis.Genesis - mu sync.RWMutex - daHeight uint64 + daHeight atomic.Uint64 } // NewSequencer creates a new Single Sequencer @@ -82,8 +81,8 @@ func NewSequencer( proposer: proposer, daRetriever: daRetriever, genesis: gen, - daHeight: gen.DAStartHeight, } + s.SetDAHeight(gen.DAStartHeight) // will be overridden by the executor loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -131,55 +130,49 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB // Retrieve forced inclusion transactions if DARetriever is configured var forcedTxs [][]byte - if c.daRetriever != nil { - c.mu.Lock() - currentDAHeight := c.daHeight - c.mu.Unlock() + currentDAHeight := c.daHeight.Load() - forcedEvent, err := c.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, currentDAHeight) - if err != nil { - // If we get a height from future error, keep the current DA height and return batch - // We'll retry the same height on the next call until DA produces that block - if 
errors.Is(err, coreda.ErrHeightFromFuture) { - c.logger.Debug(). - Uint64("da_height", currentDAHeight). - Msg("DA height from future, waiting for DA to produce block") - - batch, err := c.queue.Next(ctx) - if err != nil { - return nil, err - } - - return &coresequencer.GetNextBatchResponse{ - Batch: batch, - Timestamp: time.Now(), - BatchData: req.LastBatchData, - }, nil + forcedEvent, err := c.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, currentDAHeight) + if err != nil { + // If we get a height from future error, keep the current DA height and return batch + // We'll retry the same height on the next call until DA produces that block + if errors.Is(err, coreda.ErrHeightFromFuture) { + c.logger.Debug(). + Uint64("da_height", currentDAHeight). + Msg("DA height from future, waiting for DA to produce block") + + batch, err := c.queue.Next(ctx) + if err != nil { + return nil, err } - // If forced inclusion is not configured, continue without forced txs - if !errors.Is(err, block.ErrForceInclusionNotConfigured) { - c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") - // Continue without forced txs on other errors - } - } else { - forcedTxs = forcedEvent.Txs - - // Update DA height based on the retrieved event - c.mu.Lock() - if forcedEvent.EndDaHeight > c.daHeight { - c.daHeight = forcedEvent.EndDaHeight - } else if forcedEvent.StartDaHeight > c.daHeight { - c.daHeight = forcedEvent.StartDaHeight - } - c.mu.Unlock() + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } - c.logger.Info(). - Int("tx_count", len(forcedEvent.Txs)). - Uint64("da_height_start", forcedEvent.StartDaHeight). - Uint64("da_height_end", forcedEvent.EndDaHeight). 
- Msg("retrieved forced inclusion transactions from DA") + // If forced inclusion is not configured, continue without forced txs + if !errors.Is(err, block.ErrForceInclusionNotConfigured) { + c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + // Continue without forced txs on other errors + } + } else { + forcedTxs = forcedEvent.Txs + + // Update DA height based on the retrieved event + if forcedEvent.EndDaHeight > currentDAHeight { + c.SetDAHeight(forcedEvent.EndDaHeight) + } else if forcedEvent.StartDaHeight > currentDAHeight { + c.SetDAHeight(forcedEvent.StartDaHeight) } + + c.logger.Info(). + Int("tx_count", len(forcedEvent.Txs)). + Uint64("da_height_start", forcedEvent.StartDaHeight). + Uint64("da_height_end", forcedEvent.EndDaHeight). + Msg("retrieved forced inclusion transactions from DA") } batch, err := c.queue.Next(ctx) @@ -250,15 +243,11 @@ func (c *Sequencer) isValid(Id []byte) bool { // SetDAHeight sets the current DA height for the sequencer // This should be called when the sequencer needs to sync to a specific DA height func (c *Sequencer) SetDAHeight(height uint64) { - c.mu.Lock() - defer c.mu.Unlock() - c.daHeight = height + c.daHeight.Store(height) c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") } // GetDAHeight returns the current DA height func (c *Sequencer) GetDAHeight() uint64 { - c.mu.RLock() - defer c.mu.RUnlock() - return c.daHeight + return c.daHeight.Load() } diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 257088b633..a73c8c0ba3 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -13,12 +13,26 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/genesis" damocks 
"github.com/evstack/ev-node/test/mocks" ) +// MockDARetriever is a mock implementation of DARetriever for testing +type MockDARetriever struct { + mock.Mock +} + +func (m *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { + args := m.Called(ctx, daHeight) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*ForcedInclusionEvent), args.Error(1) +} + func TestNewSequencer(t *testing.T) { // Create a new sequencer with mock DA client dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) @@ -27,7 +41,10 @@ func TestNewSequencer(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, nil, genesis.Genesis{}) + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -60,7 +77,10 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, nil, genesis.Genesis{}) + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -113,7 +133,10 @@ func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, nil, genesis.Genesis{}) + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) require.NoError(t, err, "Failed to create sequencer") defer func() { err := db.Close() @@ -153,10 +176,14 @@ func TestSequencer_GetNextBatch_NoLastBatch(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test - Id: []byte("test"), + logger: logger, + queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test + Id: []byte("test"), + daRetriever: mockRetriever, } defer func() { err := db.Close() @@ -189,10 +216,14 @@ func TestSequencer_GetNextBatch_Success(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test - Id: []byte("test"), + logger: logger, + queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test + Id: []byte("test"), + daRetriever: mockRetriever, } defer func() { err := db.Close() @@ -248,13 +279,17 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Proposer Mode", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: true, - da: mockDA, - queue: NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test + logger: logger, + Id: Id, + proposer: true, + da: mockDA, + queue: NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test + daRetriever: mockRetriever, } res, err := seq.VerifyBatch(context.Background(), coresequencer.VerifyBatchRequest{Id: seq.Id, BatchData: batchData}) @@ -270,12 +305,16 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Valid Proofs", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "valid_proofs_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "valid_proofs_queue", 0), + daRetriever: mockRetriever, } mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -291,12 +330,16 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid Proof", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "invalid_proof_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "invalid_proof_queue", 0), + daRetriever: mockRetriever, } mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -312,12 +355,16 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("GetProofs Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "getproofs_err_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "getproofs_err_queue", 0), + daRetriever: mockRetriever, } expectedErr := errors.New("get proofs failed") @@ -334,12 +381,16 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Validate Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "validate_err_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "validate_err_queue", 0), + daRetriever: mockRetriever, } expectedErr := errors.New("validate failed") @@ -356,13 +407,17 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid ID", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "invalid_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "invalid_queue", 0), + daRetriever: mockRetriever, } invalidId := []byte("invalid") @@ -386,7 +441,10 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false, 1000, nil, genesis.Genesis{}) + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -524,16 +582,20 @@ func TestSequencer_QueueLimit_Integration(t *testing.T) { defer db.Close() mockDA := &damocks.MockDA{} + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() // Create a sequencer with a small queue limit for testing logger := zerolog.Nop() seq := &Sequencer{ - logger: logger, - da: mockDA, - batchTime: time.Second, - Id: []byte("test"), - queue: NewBatchQueue(db, "test_queue", 2), // Very small limit for testing - proposer: true, + logger: logger, + da: mockDA, + batchTime: time.Second, + Id: []byte("test"), + queue: NewBatchQueue(db, "test_queue", 2), // Very small limit for testing + proposer: true, + daRetriever: mockRetriever, } ctx := context.Background() @@ -642,6 +704,9 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { // Create sequencer with small queue size to trigger throttling quickly queueSize := 3 // Small for testing logger := zerolog.Nop() + mockRetriever := new(MockDARetriever) + mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq, err := NewSequencer( context.Background(), logger, @@ -652,7 +717,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { nil, // metrics true, // proposer queueSize, - nil, // daRetriever + mockRetriever, // daRetriever genesis.Genesis{}, // genesis ) require.NoError(t, err) From d89fed8a569141c1d458c26fa014cbedd9bae82e Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 13 Nov 2025 11:36:41 +0100 Subject: [PATCH 30/39] split manager in two --- block/internal/cache/manager.go | 81 +++++++++++++++--- block/internal/reaping/reaper.go | 4 +- block/internal/reaping/reaper_test.go | 13 +-- block/internal/syncing/da_retriever.go | 4 +- block/internal/syncing/da_retriever_test.go | 83 +++++-------------- block/internal/syncing/p2p_handler.go | 4 +- block/internal/syncing/p2p_handler_test.go | 12 +-- block/internal/syncing/syncer.go | 4 +- block/internal/syncing/syncer_backoff_test.go | 3 +- .../internal/syncing/syncer_benchmark_test.go | 5 +- 
.../syncing/syncer_forced_inclusion_test.go | 15 ++-- block/internal/syncing/syncer_test.go | 16 ++-- block/public.go | 2 +- .../adr/adr-019-forced-inclusion-mechanism.md | 2 +- 14 files changed, 131 insertions(+), 117 deletions(-) diff --git a/block/internal/cache/manager.go b/block/internal/cache/manager.go index d8f4a6bf8c..04fe242049 100644 --- a/block/internal/cache/manager.go +++ b/block/internal/cache/manager.go @@ -41,8 +41,8 @@ func registerGobTypes() { }) } -// Manager provides centralized cache management for both executing and syncing components -type Manager interface { +// CacheManager provides centralized cache management for both executing and syncing components +type CacheManager interface { // Header operations IsHeaderSeen(hash string) bool SetHeaderSeen(hash string, blockHeight uint64) @@ -61,14 +61,6 @@ type Manager interface { SetTxSeen(hash string) CleanupOldTxs(olderThan time.Duration) int - // Pending operations - GetPendingHeaders(ctx context.Context) ([]*types.SignedHeader, error) - GetPendingData(ctx context.Context) ([]*types.SignedData, error) - SetLastSubmittedHeaderHeight(ctx context.Context, height uint64) - SetLastSubmittedDataHeight(ctx context.Context, height uint64) - NumPendingHeaders() uint64 - NumPendingData() uint64 - // Pending events syncing coordination GetNextPendingEvent(blockHeight uint64) *common.DAHeightEvent SetPendingEvent(blockHeight uint64, event *common.DAHeightEvent) @@ -82,6 +74,22 @@ type Manager interface { DeleteHeight(blockHeight uint64) } +// PendingManager provides operations for managing pending headers and data +type PendingManager interface { + GetPendingHeaders(ctx context.Context) ([]*types.SignedHeader, error) + GetPendingData(ctx context.Context) ([]*types.SignedData, error) + SetLastSubmittedHeaderHeight(ctx context.Context, height uint64) + SetLastSubmittedDataHeight(ctx context.Context, height uint64) + NumPendingHeaders() uint64 + NumPendingData() uint64 +} + +// Manager provides centralized 
cache management for both executing and syncing components +type Manager interface { + CacheManager + PendingManager +} + var _ Manager = (*implementation)(nil) // implementation provides the concrete implementation of cache Manager @@ -97,6 +105,59 @@ type implementation struct { logger zerolog.Logger } +// NewPendingManager creates a new pending manager instance +func NewPendingManager(store store.Store, logger zerolog.Logger) (PendingManager, error) { + pendingHeaders, err := NewPendingHeaders(store, logger) + if err != nil { + return nil, fmt.Errorf("failed to create pending headers: %w", err) + } + + pendingData, err := NewPendingData(store, logger) + if err != nil { + return nil, fmt.Errorf("failed to create pending data: %w", err) + } + + return &implementation{ + pendingHeaders: pendingHeaders, + pendingData: pendingData, + logger: logger, + }, nil +} + +// NewCacheManager creates a new cache manager instance +func NewCacheManager(cfg config.Config, logger zerolog.Logger) (CacheManager, error) { + // Initialize caches + headerCache := NewCache[types.SignedHeader]() + dataCache := NewCache[types.Data]() + txCache := NewCache[struct{}]() + pendingEventsCache := NewCache[common.DAHeightEvent]() + + registerGobTypes() + impl := &implementation{ + headerCache: headerCache, + dataCache: dataCache, + txCache: txCache, + txTimestamps: new(sync.Map), + pendingEventsCache: pendingEventsCache, + config: cfg, + logger: logger, + } + + if cfg.ClearCache { + // Clear the cache from disk + if err := impl.ClearFromDisk(); err != nil { + logger.Warn().Err(err).Msg("failed to clear cache from disk, starting with empty cache") + } + } else { + // Load existing cache from disk + if err := impl.LoadFromDisk(); err != nil { + logger.Warn().Err(err).Msg("failed to load cache from disk, starting with empty cache") + } + } + + return impl, nil +} + // NewManager creates a new cache manager instance func NewManager(cfg config.Config, store store.Store, logger zerolog.Logger) 
(Manager, error) { // Initialize caches diff --git a/block/internal/reaping/reaper.go b/block/internal/reaping/reaper.go index 64388b2ce0..62ff5dfba1 100644 --- a/block/internal/reaping/reaper.go +++ b/block/internal/reaping/reaper.go @@ -34,7 +34,7 @@ type Reaper struct { sequencer coresequencer.Sequencer chainID string interval time.Duration - cache cache.Manager + cache cache.CacheManager executor *executing.Executor // shared components @@ -53,7 +53,7 @@ func NewReaper( genesis genesis.Genesis, logger zerolog.Logger, executor *executing.Executor, - cache cache.Manager, + cache cache.CacheManager, scrapeInterval time.Duration, ) (*Reaper, error) { if executor == nil { diff --git a/block/internal/reaping/reaper_test.go b/block/internal/reaping/reaper_test.go index d9dc701276..fac03bdd5d 100644 --- a/block/internal/reaping/reaper_test.go +++ b/block/internal/reaping/reaper_test.go @@ -65,28 +65,21 @@ func newTestExecutor(t *testing.T) *executing.Executor { } // helper to create a cache manager for tests -func newTestCache(t *testing.T) cache.Manager { +func newTestCache(t *testing.T) cache.CacheManager { t.Helper() - // Create a mock store for the cache manager - storeMock := testmocks.NewMockStore(t) - storeMock.EXPECT().GetMetadata(mock.Anything, "last-submitted-header-height").Return(nil, ds.ErrNotFound).Maybe() - storeMock.EXPECT().GetMetadata(mock.Anything, "last-submitted-data-height").Return(nil, ds.ErrNotFound).Maybe() - storeMock.EXPECT().Height(mock.Anything).Return(uint64(0), nil).Maybe() - storeMock.EXPECT().SetMetadata(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() - cfg := config.Config{ RootDir: t.TempDir(), ClearCache: true, } - cacheManager, err := cache.NewManager(cfg, storeMock, zerolog.Nop()) + cacheManager, err := cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) return cacheManager } // reaper with mocks and cache manager -func newTestReaper(t *testing.T, chainID string, execMock *testmocks.MockExecutor, 
seqMock *testmocks.MockSequencer, e *executing.Executor, cm cache.Manager) *Reaper { +func newTestReaper(t *testing.T, chainID string, execMock *testmocks.MockExecutor, seqMock *testmocks.MockSequencer, e *executing.Executor, cm cache.CacheManager) *Reaper { t.Helper() r, err := NewReaper(execMock, seqMock, genesis.Genesis{ChainID: chainID}, zerolog.Nop(), e, cm, 100*time.Millisecond) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index fe34892d2c..e52f8a4ce5 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -38,7 +38,7 @@ type DARetriever interface { // daRetriever handles DA retrieval operations for syncing type daRetriever struct { da coreda.DA - cache cache.Manager + cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger @@ -63,7 +63,7 @@ type daRetriever struct { // NewDARetriever creates a new DA retriever func NewDARetriever( da coreda.DA, - cache cache.Manager, + cache cache.CacheManager, config config.Config, genesis genesis.Genesis, logger zerolog.Logger, diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index d66d19640a..d4cd2b2dbc 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -8,8 +8,6 @@ import ( "testing" "time" - "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" @@ -22,7 +20,6 @@ import ( "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" - "github.com/evstack/ev-node/pkg/store" testmocks "github.com/evstack/ev-node/test/mocks" "github.com/evstack/ev-node/types" ) @@ -53,9 +50,7 @@ func makeSignedDataBytesWithTime(t *testing.T, chainID string, height uint64, pr } func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) 
{ - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) assert.NoError(t, err) mockDA := testmocks.NewMockDA(t) @@ -70,9 +65,7 @@ func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) { } func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) assert.NoError(t, err) mockDA := testmocks.NewMockDA(t) @@ -88,9 +81,7 @@ func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { } func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) mockDA := testmocks.NewMockDA(t) @@ -106,9 +97,7 @@ func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { } func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) mockDA := testmocks.NewMockDA(t) @@ -138,9 +127,7 @@ func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { } func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) mockDA := testmocks.NewMockDA(t) @@ -161,9 
+148,7 @@ func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { } func TestDARetriever_ProcessBlobs_HeaderAndData_Success(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -190,9 +175,7 @@ func TestDARetriever_ProcessBlobs_HeaderAndData_Success(t *testing.T) { } func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -218,9 +201,7 @@ func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) { } func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -243,9 +224,7 @@ func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { } func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) goodAddr, pub, signer := buildSyncTestSigner(t) @@ -273,9 +252,7 @@ func TestDARetriever_validateBlobResponse(t *testing.T) { } func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { - ds := 
dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -314,9 +291,7 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { } func TestDARetriever_ProcessBlobs_CrossDAHeightMatching(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -350,9 +325,7 @@ func TestDARetriever_ProcessBlobs_CrossDAHeightMatching(t *testing.T) { } func TestDARetriever_ProcessBlobs_MultipleHeadersCrossDAHeightMatching(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -427,9 +400,7 @@ func Test_isEmptyDataExpected(t *testing.T) { } func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -465,9 +436,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { } func TestDARetriever_FetchForcedIncludedTxs_NoNamespaceConfigured(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := 
cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) @@ -484,9 +453,7 @@ func TestDARetriever_FetchForcedIncludedTxs_NoNamespaceConfigured(t *testing.T) } func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) @@ -513,9 +480,7 @@ func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { } func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -632,9 +597,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi } func TestDARetriever_RetrieveForcedIncludedTxsFromDA_NotAtEpochStart(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) @@ -658,9 +621,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_NotAtEpochStart(t *testing. 
} func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochStartFromFuture(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) @@ -688,9 +649,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochStartFromFuture(t *tes } func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochEndFromFuture(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) @@ -722,9 +681,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochEndFromFuture(t *testi } func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 4c69f164ba..3410d495fc 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -29,7 +29,7 @@ type p2pHandler interface { type P2PHandler struct { headerStore goheader.Store[*types.SignedHeader] dataStore goheader.Store[*types.Data] - cache cache.Manager + cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger @@ -40,7 +40,7 @@ type P2PHandler struct { func NewP2PHandler( headerStore goheader.Store[*types.SignedHeader], dataStore goheader.Store[*types.Data], - cache cache.Manager, + cache cache.CacheManager, 
genesis genesis.Genesis, logger zerolog.Logger, ) *P2PHandler { diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index bd9c4178af..dfab41faae 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" @@ -19,7 +18,6 @@ import ( "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/signer/noop" - storemocks "github.com/evstack/ev-node/test/mocks" extmocks "github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" ) @@ -61,7 +59,7 @@ type P2PTestData struct { Handler *P2PHandler HeaderStore *extmocks.MockStore[*types.SignedHeader] DataStore *extmocks.MockStore[*types.Data] - Cache cache.Manager + Cache cache.CacheManager Genesis genesis.Genesis ProposerAddr []byte ProposerPub crypto.PubKey @@ -78,17 +76,11 @@ func setupP2P(t *testing.T) *P2PTestData { headerStoreMock := extmocks.NewMockStore[*types.SignedHeader](t) dataStoreMock := extmocks.NewMockStore[*types.Data](t) - storeMock := storemocks.NewMockStore(t) - storeMock.EXPECT().GetMetadata(mock.Anything, "last-submitted-header-height").Return(nil, ds.ErrNotFound).Maybe() - storeMock.EXPECT().GetMetadata(mock.Anything, "last-submitted-data-height").Return(nil, ds.ErrNotFound).Maybe() - storeMock.EXPECT().Height(mock.Anything).Return(uint64(0), nil).Maybe() - storeMock.EXPECT().SetMetadata(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() - cfg := config.Config{ RootDir: t.TempDir(), ClearCache: true, } - cacheManager, err := cache.NewManager(cfg, storeMock, zerolog.Nop()) + cacheManager, err := cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err, "failed to create cache manager") handler := NewP2PHandler(headerStoreMock, 
dataStoreMock, cacheManager, gen, zerolog.Nop()) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index ddd039e63a..6fbe8735d4 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -34,7 +34,7 @@ type Syncer struct { da coreda.DA // Shared components - cache cache.Manager + cache cache.CacheManager metrics *common.Metrics // Configuration @@ -77,7 +77,7 @@ func NewSyncer( store store.Store, exec coreexecutor.Executor, da coreda.DA, - cache cache.Manager, + cache cache.CacheManager, metrics *common.Metrics, config config.Config, genesis genesis.Genesis, diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index f89b64fbda..65f2586966 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -325,7 +325,8 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 65f4202612..26b674b4b2 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -70,7 +70,7 @@ func BenchmarkSyncerIO(b *testing.B) { type benchFixture struct { s *Syncer st store.Store - cm cache.Manager + cm cache.CacheManager cancel context.CancelFunc } @@ -80,7 +80,8 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), 
zerolog.Nop()) require.NoError(b, err) addr, pub, signer := buildSyncTestSigner(b) diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 2a6a9a8631..d4f12815d0 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -25,7 +25,8 @@ import ( func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -99,7 +100,8 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -176,7 +178,8 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -255,7 +258,8 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := 
cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) @@ -320,7 +324,8 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index fcc9707934..0bbac7363a 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -103,7 +103,8 @@ func makeData(chainID string, height uint64, txs int) *types.Data { func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -151,7 +152,8 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -205,7 +207,8 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { func TestSequentialBlockSync(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), 
zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -294,7 +297,8 @@ func TestSyncer_sendNonBlockingSignal(t *testing.T) { func TestSyncer_processPendingEvents(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) // current height 1 @@ -340,7 +344,7 @@ func TestSyncLoopPersistState(t *testing.T) { cfg.RootDir = t.TempDir() cfg.ClearCache = true - cacheMgr, err := cache.NewManager(cfg, st, zerolog.Nop()) + cacheMgr, err := cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) const myDAHeightOffset = uint64(1) @@ -452,7 +456,7 @@ func TestSyncLoopPersistState(t *testing.T) { require.Nil(t, event, "event at height %d should have been removed", blockHeight) } // and when new instance is up on restart - cacheMgr, err = cache.NewManager(cfg, st, zerolog.Nop()) + cacheMgr, err = cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) require.NoError(t, cacheMgr.LoadFromDisk()) diff --git a/block/public.go b/block/public.go index 678cf6e647..ce777da6e0 100644 --- a/block/public.go +++ b/block/public.go @@ -40,7 +40,7 @@ func NewDARetriever( genesis genesis.Genesis, logger zerolog.Logger, ) (syncing.DARetriever, error) { - cacheManager, err := cache.NewManager(config, nil /* pending data not used */, logger) + cacheManager, err := cache.NewCacheManager(config, logger) if err != nil { return nil, fmt.Errorf("failed to create cache manager: %w", err) } diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index d86e9d4b54..ddc30b16fe 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -152,7 +152,7 @@ The DA Retriever component handles fetching forced inclusion transactions: ```go type daRetriever 
struct { da coreda.DA - cache cache.Manager + cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger namespaceForcedInclusionBz []byte From 18ce1ce23cde59495f4c82b51fd9e5730a900847 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 13 Nov 2025 11:39:25 +0100 Subject: [PATCH 31/39] fix epochs calculation --- types/epoch.go | 8 ++++---- types/epoch_test.go | 8 ++++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/types/epoch.go b/types/epoch.go index 5e849520e5..75d43e8048 100644 --- a/types/epoch.go +++ b/types/epoch.go @@ -34,14 +34,14 @@ func CalculateEpochNumber(daHeight, daStartHeight, daEpochSize uint64) uint64 { // - start: The first DA height in the epoch (inclusive) // - end: The last DA height in the epoch (inclusive) func CalculateEpochBoundaries(daHeight, daStartHeight, daEpochSize uint64) (start, end uint64) { - if daHeight < daStartHeight { - return daStartHeight, daStartHeight + daEpochSize - 1 - } - if daEpochSize == 0 { return daStartHeight, daStartHeight } + if daHeight < daStartHeight { + return daStartHeight, daStartHeight + daEpochSize - 1 + } + epochNum := CalculateEpochNumber(daHeight, daStartHeight, daEpochSize) start = daStartHeight + (epochNum-1)*daEpochSize end = daStartHeight + epochNum*daEpochSize - 1 diff --git a/types/epoch_test.go b/types/epoch_test.go index 6c202b8956..295395d7b7 100644 --- a/types/epoch_test.go +++ b/types/epoch_test.go @@ -142,6 +142,14 @@ func TestCalculateEpochBoundaries(t *testing.T) { expectedStart: 100, expectedEnd: 109, }, + { + name: "before start height with zero epoch size", + daStartHeight: 2, + daEpochSize: 0, + daHeight: 1, + expectedStart: 2, + expectedEnd: 2, + }, { name: "zero epoch size", daStartHeight: 100, From 0d790efaf5b7c048bb1a1675c734dd766cb311a3 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 13 Nov 2025 12:04:05 +0100 Subject: [PATCH 32/39] disable force inclusion in prod --- pkg/config/config.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff 
--git a/pkg/config/config.go index f5cc6e0170..51f7affa6d 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -266,6 +266,14 @@ func (c *Config) Validate() error { } } + if len(c.DA.GetForcedInclusionNamespace()) > 0 { + // if err := validateNamespace(c.DA.GetForcedInclusionNamespace()); err != nil { + // return fmt.Errorf("could not validate forced inclusion namespace (%s): %w", c.DA.GetForcedInclusionNamespace(), err) + // } + return fmt.Errorf("forced inclusion is not yet live") + + } + // Validate lazy mode configuration if c.Node.LazyMode && c.Node.LazyBlockInterval.Duration <= c.Node.BlockTime.Duration { return fmt.Errorf("LazyBlockInterval (%v) must be greater than BlockTime (%v) in lazy mode", From b39f28cb3c625e3d3295260fa51f2bba31e0cbd1 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 13 Nov 2025 15:51:12 +0100 Subject: [PATCH 33/39] add changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 30b04aea76..b5e0bd7970 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Enhanced health check system with separate liveness (`/health/live`) and readiness (`/health/ready`) HTTP endpoints. Readiness endpoint includes P2P listening check and aggregator block production rate validation (5x block time threshold). ([#2800](https://github.com/evstack/ev-node/pull/2800)) +- Implement forced inclusion and based sequencing ([#2797](https://github.com/evstack/ev-node/pull/2797)) + This change requires adding a `da_epoch_forced_inclusion` field to the `genesis.json` file. + To enable this feature, set the forced inclusion namespace in `evnode.yaml`. 
### Changed From 9896bc650393a3460dc853296445b54fb564122c Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 14 Nov 2025 17:50:01 +0100 Subject: [PATCH 34/39] feat: optimize force inclusion fetching --- block/internal/syncing/da_retriever.go | 359 +++++++++++++++++++- block/internal/syncing/da_retriever_mock.go | 40 +++ block/internal/syncing/da_retriever_test.go | 28 +- block/internal/syncing/syncer.go | 8 + da/internal/mocks/da.go | 120 ------- sequencers/based/based.go | 120 ++++--- sequencers/based/based_test.go | 36 +- sequencers/single/sequencer.go | 4 + sequencers/single/sequencer_test.go | 17 + test/mocks/da.go | 120 ------- 10 files changed, 531 insertions(+), 321 deletions(-) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index e52f8a4ce5..a8def34578 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -5,6 +5,8 @@ import ( "context" "errors" "fmt" + "sync" + "sync/atomic" "time" "github.com/rs/zerolog" @@ -22,6 +24,18 @@ import ( // defaultDATimeout is the default timeout for DA retrieval operations const defaultDATimeout = 10 * time.Second +// defaultEpochLag is the default number of blocks to lag behind DA height when fetching forced inclusion txs +const defaultEpochLag = 10 + +// defaultMinEpochWindow is the minimum window size for epoch lag calculation +const defaultMinEpochWindow = 5 + +// defaultMaxEpochWindow is the maximum window size for epoch lag calculation +const defaultMaxEpochWindow = 100 + +// defaultFetchInterval is the interval between async fetch attempts +const defaultFetchInterval = 2 * time.Second + // pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch // and needs to be retried in future epochs. 
type pendingForcedInclusionTx struct { @@ -29,10 +43,95 @@ type pendingForcedInclusionTx struct { OriginalHeight uint64 // Original DA height where this transaction was found } +// epochCache stores fetched forced inclusion events by epoch start height +type epochCache struct { + events atomic.Pointer[map[uint64]*common.ForcedIncludedEvent] + fetchTimes atomic.Pointer[[]time.Duration] + maxSamples int +} + +func newEpochCache(maxSamples int) *epochCache { + c := &epochCache{ + maxSamples: maxSamples, + } + initialEvents := make(map[uint64]*common.ForcedIncludedEvent) + c.events.Store(&initialEvents) + initialTimes := make([]time.Duration, 0, maxSamples) + c.fetchTimes.Store(&initialTimes) + return c +} + +func (c *epochCache) get(epochStart uint64) (*common.ForcedIncludedEvent, bool) { + events := c.events.Load() + event, ok := (*events)[epochStart] + return event, ok +} + +func (c *epochCache) set(epochStart uint64, event *common.ForcedIncludedEvent) { + for { + oldEventsPtr := c.events.Load() + oldEvents := *oldEventsPtr + newEvents := make(map[uint64]*common.ForcedIncludedEvent, len(oldEvents)+1) + for k, v := range oldEvents { + newEvents[k] = v + } + newEvents[epochStart] = event + if c.events.CompareAndSwap(oldEventsPtr, &newEvents) { + return + } + } +} + +func (c *epochCache) recordFetchTime(duration time.Duration) { + for { + oldTimesPtr := c.fetchTimes.Load() + oldTimes := *oldTimesPtr + newTimes := make([]time.Duration, 0, c.maxSamples) + newTimes = append(newTimes, oldTimes...) 
+ newTimes = append(newTimes, duration) + if len(newTimes) > c.maxSamples { + newTimes = newTimes[1:] + } + if c.fetchTimes.CompareAndSwap(oldTimesPtr, &newTimes) { + return + } + } +} + +func (c *epochCache) averageFetchTime() time.Duration { + timesPtr := c.fetchTimes.Load() + times := *timesPtr + if len(times) == 0 { + return 0 + } + var sum time.Duration + for _, d := range times { + sum += d + } + return sum / time.Duration(len(times)) +} + +func (c *epochCache) cleanup(beforeEpoch uint64) { + for { + oldEventsPtr := c.events.Load() + oldEvents := *oldEventsPtr + newEvents := make(map[uint64]*common.ForcedIncludedEvent) + for epoch, event := range oldEvents { + if epoch >= beforeEpoch { + newEvents[epoch] = event + } + } + if c.events.CompareAndSwap(oldEventsPtr, &newEvents) { + return + } + } +} + // DARetriever defines the interface for retrieving events from the DA layer type DARetriever interface { RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) + SetDAHeight(height uint64) } // daRetriever handles DA retrieval operations for syncing @@ -58,6 +157,13 @@ type daRetriever struct { // Forced inclusion transactions that couldn't fit in the current epoch // and need to be retried in future epochs. 
pendingForcedInclusionTxs []pendingForcedInclusionTx + + // Async forced inclusion fetching + epochCache *epochCache + fetcherCtx context.Context + fetcherCancel context.CancelFunc + fetcherWg sync.WaitGroup + currentDAHeight atomic.Uint64 } // NewDARetriever creates a new DA retriever @@ -76,7 +182,9 @@ func NewDARetriever( namespaceForcedInclusionBz = coreda.NamespaceFromString(forcedInclusionNs).Bytes() } - return &daRetriever{ + ctx, cancel := context.WithCancel(context.Background()) + + r := &daRetriever{ da: da, cache: cache, genesis: genesis, @@ -89,24 +197,167 @@ func NewDARetriever( pendingHeaders: make(map[uint64]*types.SignedHeader), pendingData: make(map[uint64]*types.Data), pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0), + epochCache: newEpochCache(10), // Keep last 10 fetch times for averaging + fetcherCtx: ctx, + fetcherCancel: cancel, + } + r.currentDAHeight.Store(genesis.DAStartHeight) + + // Start background fetcher if forced inclusion is configured + if hasForcedInclusionNs { + r.fetcherWg.Add(1) + go r.backgroundFetcher() + } + + return r +} + +// SetDAHeight updates the current DA height for async fetching +func (r *daRetriever) SetDAHeight(height uint64) { + for { + current := r.currentDAHeight.Load() + if height <= current { + return + } + if r.currentDAHeight.CompareAndSwap(current, height) { + return + } } } -// RetrieveFromDA retrieves blocks from the specified DA height and returns height events -func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { - r.logger.Debug().Uint64("da_height", daHeight).Msg("retrieving from DA") - blobsResp, err := r.fetchBlobs(ctx, daHeight) - if err != nil { - return nil, err +// GetDAHeight returns the current DA height +func (r *daRetriever) GetDAHeight() uint64 { + return r.currentDAHeight.Load() +} + +// calculateAdaptiveEpochWindow calculates the epoch lag window based on average fetch time +func (r *daRetriever) 
calculateAdaptiveEpochWindow() uint64 { + avgFetchTime := r.epochCache.averageFetchTime() + if avgFetchTime == 0 { + return defaultEpochLag } - // Check for context cancellation upfront - if err := ctx.Err(); err != nil { - return nil, err + // Scale window based on fetch time: faster fetches = smaller window + // If fetch takes 1 second, window = 5 + // If fetch takes 5 seconds, window = 25 + // If fetch takes 10 seconds, window = 50 + window := uint64(avgFetchTime.Seconds() * 5) + + if window < defaultMinEpochWindow { + window = defaultMinEpochWindow + } + if window > defaultMaxEpochWindow { + window = defaultMaxEpochWindow } - r.logger.Debug().Int("blobs", len(blobsResp.Data)).Uint64("da_height", daHeight).Msg("retrieved blob data") - return r.processBlobs(ctx, blobsResp.Data, daHeight), nil + return window +} + +// backgroundFetcher continuously fetches forced inclusion transactions ahead of time +func (r *daRetriever) backgroundFetcher() { + defer r.fetcherWg.Done() + + ticker := time.NewTicker(defaultFetchInterval) + defer ticker.Stop() + + r.logger.Info().Msg("started background forced inclusion fetcher") + + for { + select { + case <-r.fetcherCtx.Done(): + r.logger.Info().Msg("stopped background forced inclusion fetcher") + return + case <-ticker.C: + r.fetchNextEpoch() + } + } +} + +// fetchNextEpoch fetches the next epoch that should be available based on current DA height and lag +func (r *daRetriever) fetchNextEpoch() { + currentHeight := r.GetDAHeight() + if currentHeight == 0 { + return + } + + window := r.calculateAdaptiveEpochWindow() + + // Calculate which epoch the sequencer will need soon (lagging behind current height) + // We want to prefetch this epoch before it's actually requested + var targetHeight uint64 + if currentHeight > window { + targetHeight = currentHeight - window + } else { + targetHeight = r.genesis.DAStartHeight + } + + // Calculate epoch boundaries for the target height + epochStart, epochEnd := 
types.CalculateEpochBoundaries(targetHeight, r.genesis.DAStartHeight, r.daEpochSize) + + // Check if we already have this epoch cached + if _, exists := r.epochCache.get(epochStart); exists { + // Already cached, try to fetch the next epoch ahead + nextEpochStart := epochEnd + 1 + nextEpochStart, nextEpochEnd := types.CalculateEpochBoundaries(nextEpochStart, r.genesis.DAStartHeight, r.daEpochSize) + + // Only prefetch next epoch if we're not too far ahead + if nextEpochEnd <= currentHeight { + if _, exists := r.epochCache.get(nextEpochStart); !exists { + epochStart = nextEpochStart + epochEnd = nextEpochEnd + } else { + // Both current and next epoch are cached + return + } + } else { + // Current epoch cached and next epoch is too far ahead + return + } + } + + r.logger.Debug(). + Uint64("current_height", currentHeight). + Uint64("target_height", targetHeight). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Uint64("window", window). + Msg("fetching epoch in background") + + startTime := time.Now() + event, err := r.fetchEpochSync(r.fetcherCtx, epochStart, epochEnd) + fetchDuration := time.Since(startTime) + + if err != nil { + // Don't log errors for heights that are from the future - this is expected + if !errors.Is(err, coreda.ErrHeightFromFuture) { + r.logger.Debug(). + Err(err). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Msg("failed to fetch epoch in background") + } + return + } + + // Cache the result + r.epochCache.set(epochStart, event) + r.epochCache.recordFetchTime(fetchDuration) + + r.logger.Info(). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Int("tx_count", len(event.Txs)). + Dur("fetch_duration", fetchDuration). 
+ Msg("cached epoch in background") + + // Cleanup old epochs (keep last 5 epochs) + if epochStart >= r.genesis.DAStartHeight+r.daEpochSize*5 { + cleanupBefore := epochStart - r.daEpochSize*5 + if cleanupBefore < r.genesis.DAStartHeight { + cleanupBefore = r.genesis.DAStartHeight + } + r.epochCache.cleanup(cleanupBefore) + } } // RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. @@ -127,6 +378,9 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei return nil, common.ErrForceInclusionNotConfigured } + // Update our tracking of DA height + r.SetDAHeight(daHeight) + // Calculate deterministic epoch boundaries epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) @@ -162,23 +416,69 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei return event, nil } - // We're at epoch start - fetch new transactions from DA + // We're at epoch start - check cache first + if cachedEvent, exists := r.epochCache.get(epochStart); exists { + r.logger.Info(). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Int("tx_count", len(cachedEvent.Txs)). + Msg("using cached forced inclusion transactions") + + // Create a copy with pending txs prepended + event := &common.ForcedIncludedEvent{ + StartDaHeight: cachedEvent.StartDaHeight, + EndDaHeight: cachedEvent.EndDaHeight, + Txs: make([][]byte, 0, len(cachedEvent.Txs)), + } + + // Prepend pending transactions + if len(r.pendingForcedInclusionTxs) > 0 { + pendingTxs, indicesToRemove, _ := r.processPendingForcedInclusionTxs() + event.Txs = append(event.Txs, pendingTxs...) + + if len(indicesToRemove) > 0 { + r.removePendingForcedInclusionTxs(indicesToRemove) + } + } + + event.Txs = append(event.Txs, cachedEvent.Txs...) + return event, nil + } + + // Not in cache - fetch synchronously (fallback) + r.logger.Debug(). + Uint64("epoch_start", epochStart). 
+ Uint64("epoch_end", epochEnd). + Msg("epoch not in cache, fetching synchronously") - currentEpochNumber := types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + startTime := time.Now() + event, err := r.fetchEpochSync(ctx, epochStart, epochEnd) + if err != nil { + return nil, err + } + + // Record fetch time and cache the result + r.epochCache.recordFetchTime(time.Since(startTime)) + r.epochCache.set(epochStart, event) + + return event, nil +} + +// fetchEpochSync fetches an epoch synchronously (used by both background fetcher and fallback) +func (r *daRetriever) fetchEpochSync(ctx context.Context, epochStart, epochEnd uint64) (*common.ForcedIncludedEvent, error) { + currentEpochNumber := types.CalculateEpochNumber(epochStart, r.genesis.DAStartHeight, r.daEpochSize) event := &common.ForcedIncludedEvent{ StartDaHeight: epochStart, } r.logger.Debug(). - Uint64("da_height", daHeight). Uint64("epoch_start", epochStart). Uint64("epoch_end", epochEnd). Uint64("epoch_num", currentEpochNumber). - Msg("retrieving forced included transactions from DA") + Msg("fetching forced included transactions from DA") // Check if both epoch start and end are available before fetching - // This ensures we can retrieve the complete epoch in one go epochStartResult := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochStart, r.namespaceForcedInclusionBz, defaultDATimeout) if epochStartResult.Code == coreda.StatusHeightFromFuture { r.logger.Debug(). 
@@ -260,6 +560,31 @@ func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHei return event, nil } +// Stop stops the background fetcher +func (r *daRetriever) Stop() { + if r.fetcherCancel != nil { + r.fetcherCancel() + r.fetcherWg.Wait() + } +} + +// RetrieveFromDA retrieves blocks from the specified DA height and returns height events +func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { + r.logger.Debug().Uint64("da_height", daHeight).Msg("retrieving from DA") + blobsResp, err := r.fetchBlobs(ctx, daHeight) + if err != nil { + return nil, err + } + + // Check for context cancellation upfront + if err := ctx.Err(); err != nil { + return nil, err + } + + r.logger.Debug().Int("blobs", len(blobsResp.Data)).Uint64("da_height", daHeight).Msg("retrieved blob data") + return r.processBlobs(ctx, blobsResp.Data, daHeight), nil +} + // processForcedInclusionBlobs processes forced inclusion blobs from a single DA height. // It accumulates transactions that fit within maxBlobSize and stores excess in newPendingTxs. 
func (r *daRetriever) processForcedInclusionBlobs( diff --git a/block/internal/syncing/da_retriever_mock.go b/block/internal/syncing/da_retriever_mock.go index 505987aee6..1dc328b2a7 100644 --- a/block/internal/syncing/da_retriever_mock.go +++ b/block/internal/syncing/da_retriever_mock.go @@ -173,3 +173,43 @@ func (_c *MockDARetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context _c.Call.Return(run) return _c } + +// SetDAHeight provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) SetDAHeight(height uint64) { + _mock.Called(height) + return +} + +// MockDARetriever_SetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDAHeight' +type MockDARetriever_SetDAHeight_Call struct { + *mock.Call +} + +// SetDAHeight is a helper method to define mock.On call +// - height uint64 +func (_e *MockDARetriever_Expecter) SetDAHeight(height interface{}) *MockDARetriever_SetDAHeight_Call { + return &MockDARetriever_SetDAHeight_Call{Call: _e.mock.On("SetDAHeight", height)} +} + +func (_c *MockDARetriever_SetDAHeight_Call) Run(run func(height uint64)) *MockDARetriever_SetDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockDARetriever_SetDAHeight_Call) Return() *MockDARetriever_SetDAHeight_Call { + _c.Call.Return() + return _c +} + +func (_c *MockDARetriever_SetDAHeight_Call) RunAndReturn(run func(height uint64)) *MockDARetriever_SetDAHeight_Call { + _c.Run(run) + return _c +} diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index d4cd2b2dbc..3ad97a6cc6 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -427,6 +427,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { })).Return([][]byte{dataBin}, nil).Once() r := 
NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + t.Cleanup(func() { r.Stop() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 5678) require.NoError(t, err) @@ -446,8 +447,9 @@ func TestDARetriever_FetchForcedIncludedTxs_NoNamespaceConfigured(t *testing.T) // Leave ForcedInclusionNamespace empty r := NewDARetriever(nil, cm, cfg, gen, zerolog.Nop()) + t.Cleanup(func() { r.Stop() }) - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1234) + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 100) require.Error(t, err) require.Nil(t, result) } @@ -472,6 +474,7 @@ func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + t.Cleanup(func() { r.Stop() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 9999) require.NoError(t, err) @@ -575,6 +578,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi })).Return([][]byte{dataBin2}, nil).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + t.Cleanup(func() { r.Stop() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) @@ -609,6 +613,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_NotAtEpochStart(t *testing. 
mockDA := testmocks.NewMockDA(t) r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + t.Cleanup(func() { r.Stop() }) // With DAStartHeight=100, epoch size=10, daHeight=105 -> epoch boundaries are [100, 109] // But daHeight=105 is NOT the epoch start, so it should be a no-op @@ -635,17 +640,18 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochStartFromFuture(t *tes mockDA := testmocks.NewMockDA(t) // With DAStartHeight=1000, epoch size=10, daHeight=1000 -> epoch boundaries are [1000, 1009] // Mock that height 1000 (epoch start) is from the future - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { + mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(nil, fmt.Errorf("%s: not yet available", coreda.ErrHeightFromFuture.Error())).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + t.Cleanup(func() { r.Stop() }) - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 100) require.Error(t, err) require.Nil(t, result) require.True(t, errors.Is(err, coreda.ErrHeightFromFuture)) - require.Contains(t, err.Error(), "epoch start height 1000 not yet available") + require.Contains(t, err.Error(), "epoch start height 100 not yet available") } func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochEndFromFuture(t *testing.T) { @@ -661,23 +667,24 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochEndFromFuture(t *testi namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() mockDA := testmocks.NewMockDA(t) - // With DAStartHeight=1000, epoch size=10, daHeight=1000 -> epoch boundaries are [1000, 1009] - // Epoch start is available but epoch end (1009) is from the future - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { + 
// With DAStartHeight=100, epoch size=10, daHeight=100 -> epoch boundaries are [100, 109] + // Mock that height 100 (epoch start) is available, but height 109 (epoch end) is from the future + mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1009), mock.MatchedBy(func(ns []byte) bool { + mockDA.EXPECT().GetIDs(mock.Anything, uint64(109), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(nil, fmt.Errorf("%s: not yet available", coreda.ErrHeightFromFuture.Error())).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + t.Cleanup(func() { r.Stop() }) - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) + result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 100) require.Error(t, err) require.Nil(t, result) require.True(t, errors.Is(err, coreda.ErrHeightFromFuture)) - require.Contains(t, err.Error(), "epoch end height 1009 not yet available") + require.Contains(t, err.Error(), "epoch end height 109 not yet available") } func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) { @@ -734,6 +741,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) })).Return([][]byte{dataBin2}, nil).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + t.Cleanup(func() { r.Stop() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 2000) require.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 6fbe8735d4..9119d156b1 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -145,6 +145,14 @@ func (s *Syncer) Stop() error { } s.cancelP2PWait(0) s.wg.Wait() + + // Stop the DA retriever's background 
fetcher + if s.daRetriever != nil { + if dr, ok := s.daRetriever.(interface{ Stop() }); ok { + dr.Stop() + } + } + s.logger.Info().Msg("syncer stopped") return nil } diff --git a/da/internal/mocks/da.go b/da/internal/mocks/da.go index 37539d5480..bb3ad63391 100644 --- a/da/internal/mocks/da.go +++ b/da/internal/mocks/da.go @@ -112,126 +112,6 @@ func (_c *MockDA_Commit_Call) RunAndReturn(run func(ctx context.Context, blobs [ return _c } -// GasMultiplier provides a mock function for the type MockDA -func (_mock *MockDA) GasMultiplier(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasMultiplier") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasMultiplier_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasMultiplier' -type MockDA_GasMultiplier_Call struct { - *mock.Call -} - -// GasMultiplier is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasMultiplier(ctx interface{}) *MockDA_GasMultiplier_Call { - return &MockDA_GasMultiplier_Call{Call: _e.mock.On("GasMultiplier", ctx)} -} - -func (_c *MockDA_GasMultiplier_Call) Run(run func(ctx context.Context)) *MockDA_GasMultiplier_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) Return(f float64, err error) *MockDA_GasMultiplier_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c 
*MockDA_GasMultiplier_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasMultiplier_Call { - _c.Call.Return(run) - return _c -} - -// GasPrice provides a mock function for the type MockDA -func (_mock *MockDA) GasPrice(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasPrice") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPrice' -type MockDA_GasPrice_Call struct { - *mock.Call -} - -// GasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasPrice(ctx interface{}) *MockDA_GasPrice_Call { - return &MockDA_GasPrice_Call{Call: _e.mock.On("GasPrice", ctx)} -} - -func (_c *MockDA_GasPrice_Call) Run(run func(ctx context.Context)) *MockDA_GasPrice_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasPrice_Call) Return(f float64, err error) *MockDA_GasPrice_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasPrice_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasPrice_Call { - _c.Call.Return(run) - return _c -} - // Get provides a mock function for the type MockDA func (_mock *MockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { ret := _mock.Called(ctx, ids, namespace) diff --git 
a/sequencers/based/based.go b/sequencers/based/based.go index 6ed094477a..225f702b51 100644 --- a/sequencers/based/based.go +++ b/sequencers/based/based.go @@ -3,7 +3,7 @@ package based import ( "context" "errors" - "sync" + "sync/atomic" "time" "github.com/rs/zerolog" @@ -26,6 +26,7 @@ type ForcedInclusionEvent = struct { // This interface is intentionally generic to allow different implementations type DARetriever interface { RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) + SetDAHeight(height uint64) } var _ coresequencer.Sequencer = (*BasedSequencer)(nil) @@ -39,9 +40,8 @@ type BasedSequencer struct { genesis genesis.Genesis logger zerolog.Logger - mu sync.RWMutex - daHeight uint64 - txQueue [][]byte + daHeight atomic.Uint64 + txQueue atomic.Pointer[[][]byte] } // NewBasedSequencer creates a new based sequencer instance @@ -52,15 +52,17 @@ func NewBasedSequencer( genesis genesis.Genesis, logger zerolog.Logger, ) *BasedSequencer { - return &BasedSequencer{ + s := &BasedSequencer{ daRetriever: daRetriever, da: da, config: config, genesis: genesis, logger: logger.With().Str("component", "based_sequencer").Logger(), - daHeight: genesis.DAStartHeight, - txQueue: make([][]byte, 0), } + s.daHeight.Store(genesis.DAStartHeight) + initialQueue := make([][]byte, 0) + s.txQueue.Store(&initialQueue) + return s } // SubmitBatchTxs does nothing for a based sequencer as it only pulls from DA @@ -73,16 +75,17 @@ func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.S // GetNextBatch retrieves the next batch of transactions from the DA layer // It fetches forced inclusion transactions and returns them as the next batch func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() - // If we have transactions in the queue, return them first - if len(s.txQueue) > 0 { + queuePtr := 
s.txQueue.Load() + queue := *queuePtr + if len(queue) > 0 { batch := s.createBatchFromQueue(req.MaxBytes) if len(batch.Transactions) > 0 { + queuePtr := s.txQueue.Load() + queue := *queuePtr s.logger.Debug(). Int("tx_count", len(batch.Transactions)). - Int("remaining", len(s.txQueue)). + Int("remaining", len(queue)). Msg("returning batch from queue") return &coresequencer.GetNextBatchResponse{ Batch: batch, @@ -93,9 +96,10 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get } // Fetch forced inclusion transactions from DA - s.logger.Debug().Uint64("da_height", s.daHeight).Msg("fetching forced inclusion transactions from DA") + currentHeight := s.daHeight.Load() + s.logger.Debug().Uint64("da_height", currentHeight).Msg("fetching forced inclusion transactions from DA") - forcedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) + forcedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, currentHeight) if err != nil { if errors.Is(err, block.ErrForceInclusionNotConfigured) { s.logger.Error().Msg("forced inclusion not configured, returning empty batch") @@ -110,7 +114,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get // We'll retry the same height on the next call until DA produces that block if errors.Is(err, coreda.ErrHeightFromFuture) { s.logger.Debug(). - Uint64("da_height", s.daHeight). + Uint64("da_height", currentHeight). 
Msg("DA height from future, waiting for DA to produce block") return &coresequencer.GetNextBatchResponse{ Batch: &coresequencer.Batch{Transactions: nil}, @@ -119,19 +123,33 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get }, nil } - s.logger.Error().Err(err).Uint64("da_height", s.daHeight).Msg("failed to retrieve forced inclusion transactions") + s.logger.Error().Err(err).Uint64("da_height", currentHeight).Msg("failed to retrieve forced inclusion transactions") return nil, err } // Update DA height based on the retrieved event - if forcedTxsEvent.EndDaHeight > s.daHeight { - s.daHeight = forcedTxsEvent.EndDaHeight - } else if forcedTxsEvent.StartDaHeight > s.daHeight { - s.daHeight = forcedTxsEvent.StartDaHeight + for { + current := s.daHeight.Load() + newHeight := current + if forcedTxsEvent.EndDaHeight > current { + newHeight = forcedTxsEvent.EndDaHeight + } else if forcedTxsEvent.StartDaHeight > current { + newHeight = forcedTxsEvent.StartDaHeight + } + if newHeight == current || s.daHeight.CompareAndSwap(current, newHeight) { + break + } } // Add transactions to queue - s.txQueue = append(s.txQueue, forcedTxsEvent.Txs...) + for { + oldQueuePtr := s.txQueue.Load() + oldQueue := *oldQueuePtr + newQueue := append(oldQueue, forcedTxsEvent.Txs...) + if s.txQueue.CompareAndSwap(oldQueuePtr, &newQueue) { + break + } + } s.logger.Info(). Int("tx_count", len(forcedTxsEvent.Txs)). 
@@ -150,31 +168,40 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get // createBatchFromQueue creates a batch from the transaction queue respecting MaxBytes func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Batch { - if len(s.txQueue) == 0 { - return &coresequencer.Batch{Transactions: nil} - } - - var batch [][]byte - var totalBytes uint64 - - for i, tx := range s.txQueue { - txSize := uint64(len(tx)) - if totalBytes+txSize > maxBytes && len(batch) > 0 { - // Would exceed max bytes, stop here - s.txQueue = s.txQueue[i:] - break + for { + queuePtr := s.txQueue.Load() + queue := *queuePtr + if len(queue) == 0 { + return &coresequencer.Batch{Transactions: nil} } - batch = append(batch, tx) - totalBytes += txSize + var batch [][]byte + var totalBytes uint64 + var remaining [][]byte + + for i, tx := range queue { + txSize := uint64(len(tx)) + if totalBytes+txSize > maxBytes && len(batch) > 0 { + // Would exceed max bytes, stop here + remaining = queue[i:] + break + } + + batch = append(batch, tx) + totalBytes += txSize + + // If this is the last transaction, clear the queue + if i == len(queue)-1 { + remaining = nil + } + } - // If this is the last transaction, clear the queue - if i == len(s.txQueue)-1 { - s.txQueue = s.txQueue[:0] + // Try to update queue atomically + if s.txQueue.CompareAndSwap(queuePtr, &remaining) { + return &coresequencer.Batch{Transactions: batch} } + // If CAS failed, retry with new queue state } - - return &coresequencer.Batch{Transactions: batch} } // VerifyBatch verifies a batch of transactions @@ -188,15 +215,14 @@ func (s *BasedSequencer) VerifyBatch(ctx context.Context, req coresequencer.Veri // SetDAHeight sets the current DA height for the sequencer // This should be called when the sequencer needs to sync to a specific DA height func (s *BasedSequencer) SetDAHeight(height uint64) { - s.mu.Lock() - defer s.mu.Unlock() - s.daHeight = height + s.daHeight.Store(height) + if 
s.daRetriever != nil { + s.daRetriever.SetDAHeight(height) + } s.logger.Debug().Uint64("da_height", height).Msg("DA height updated") } // GetDAHeight returns the current DA height func (s *BasedSequencer) GetDAHeight() uint64 { - s.mu.RLock() - defer s.mu.RUnlock() - return s.daHeight + return s.daHeight.Load() } diff --git a/sequencers/based/based_test.go b/sequencers/based/based_test.go index ef952b2524..1468498ea4 100644 --- a/sequencers/based/based_test.go +++ b/sequencers/based/based_test.go @@ -31,6 +31,10 @@ func (m *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, d return args.Get(0).(*ForcedInclusionEvent), args.Error(1) } +func (m *MockDARetriever) SetDAHeight(height uint64) { + m.Called(height) +} + // MockDA is a mock implementation of DA for testing type MockDA struct { mock.Mock @@ -104,6 +108,7 @@ func (m *MockDA) GasMultiplier(ctx context.Context) (float64, error) { func TestNewBasedSequencer(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ @@ -114,12 +119,14 @@ func TestNewBasedSequencer(t *testing.T) { seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) require.NotNil(t, seq) - assert.Equal(t, uint64(100), seq.daHeight) - assert.Equal(t, 0, len(seq.txQueue)) + assert.Equal(t, uint64(100), seq.GetDAHeight()) + queuePtr := seq.txQueue.Load() + assert.Equal(t, 0, len(*queuePtr)) } func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ChainID: "test-chain"} @@ -139,11 +146,13 @@ func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { require.NotNil(t, resp) // Queue should still be empty - assert.Equal(t, 0, len(seq.txQueue)) + queuePtr := seq.txQueue.Load() + assert.Equal(t, 0, 
len(*queuePtr)) } func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ @@ -183,6 +192,7 @@ func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ @@ -229,6 +239,7 @@ func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { // Mock retriever to return not configured error mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). Return(nil, block.ErrForceInclusionNotConfigured).Once() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() req := coresequencer.GetNextBatchRequest{ Id: []byte("test-chain"), @@ -258,6 +269,7 @@ func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { // Mock retriever to return height from future error mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
Return(nil, coreda.ErrHeightFromFuture).Once() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() req := coresequencer.GetNextBatchRequest{ Id: []byte("test-chain"), @@ -278,6 +290,7 @@ func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ @@ -313,7 +326,8 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { assert.Equal(t, 2, len(resp.Batch.Transactions)) // Third transaction should still be in queue - assert.Equal(t, 1, len(seq.txQueue)) + queuePtr := seq.txQueue.Load() + assert.Equal(t, 1, len(*queuePtr)) // Next request should return the remaining transaction req2 := coresequencer.GetNextBatchRequest{ @@ -326,13 +340,15 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { require.NotNil(t, resp2) require.NotNil(t, resp2.Batch) assert.Equal(t, 1, len(resp2.Batch.Transactions)) - assert.Equal(t, 0, len(seq.txQueue)) + queuePtr = seq.txQueue.Load() + assert.Equal(t, 0, len(*queuePtr)) mockRetriever.AssertExpectations(t) } func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ @@ -343,7 +359,8 @@ func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) // Pre-populate the queue - seq.txQueue = [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + initialQueue := [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + seq.txQueue.Store(&initialQueue) req := coresequencer.GetNextBatchRequest{ Id: []byte("test-chain"), @@ -358,7 +375,8 @@ func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { 
assert.Equal(t, 2, len(resp.Batch.Transactions)) assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0]) assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) - assert.Equal(t, 0, len(seq.txQueue)) + queuePtr := seq.txQueue.Load() + assert.Equal(t, 0, len(*queuePtr)) // No expectations on retriever since it shouldn't be called mockRetriever.AssertExpectations(t) @@ -366,6 +384,7 @@ func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { func TestBasedSequencer_VerifyBatch(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ChainID: "test-chain"} @@ -385,6 +404,7 @@ func TestBasedSequencer_VerifyBatch(t *testing.T) { func TestBasedSequencer_SetDAHeight(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ @@ -402,6 +422,7 @@ func TestBasedSequencer_SetDAHeight(t *testing.T) { func TestBasedSequencer_ConcurrentAccess(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ @@ -455,6 +476,7 @@ func TestBasedSequencer_ConcurrentAccess(t *testing.T) { func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { mockRetriever := new(MockDARetriever) + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() mockDA := new(MockDA) cfg := config.DefaultConfig() gen := genesis.Genesis{ diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index 1dbc145238..3dab543c8f 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -32,6 +32,7 @@ type ForcedInclusionEvent = struct { // DARetriever defines the interface for retrieving forced inclusion transactions from DA type 
DARetriever interface { RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) + SetDAHeight(height uint64) } var _ coresequencer.Sequencer = (*Sequencer)(nil) @@ -244,6 +245,9 @@ func (c *Sequencer) isValid(Id []byte) bool { // This should be called when the sequencer needs to sync to a specific DA height func (c *Sequencer) SetDAHeight(height uint64) { c.daHeight.Store(height) + if c.daRetriever != nil { + c.daRetriever.SetDAHeight(height) + } c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") } diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index a73c8c0ba3..9388694c59 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -33,6 +33,10 @@ func (m *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, d return args.Get(0).(*ForcedInclusionEvent), args.Error(1) } +func (m *MockDARetriever) SetDAHeight(height uint64) { + m.Called(height) +} + func TestNewSequencer(t *testing.T) { // Create a new sequencer with mock DA client dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) @@ -44,6 +48,7 @@ func TestNewSequencer(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) @@ -80,6 +85,7 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) @@ -136,6 +142,7 @@ func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) require.NoError(t, err, "Failed to create sequencer") defer func() { @@ -179,6 +186,7 @@ func TestSequencer_GetNextBatch_NoLastBatch(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ logger: logger, queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test @@ -219,6 +227,7 @@ func TestSequencer_GetNextBatch_Success(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ logger: logger, queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test @@ -282,6 +291,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ logger: logger, @@ -308,6 +318,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ logger: logger, Id: Id, @@ -333,6 +344,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ logger: logger, Id: Id, @@ -358,6 +370,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ logger: logger, Id: Id, @@ -384,6 +397,8 @@ func TestSequencer_VerifyBatch(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ logger: logger, Id: Id, @@ -444,6 +459,7 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) @@ -707,6 +723,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { mockRetriever := new(MockDARetriever) mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq, err := NewSequencer( context.Background(), logger, diff --git a/test/mocks/da.go b/test/mocks/da.go index 37539d5480..bb3ad63391 100644 --- a/test/mocks/da.go +++ b/test/mocks/da.go @@ -112,126 +112,6 @@ func (_c *MockDA_Commit_Call) RunAndReturn(run func(ctx context.Context, blobs [ return _c } -// GasMultiplier provides a mock function for the type MockDA -func (_mock *MockDA) GasMultiplier(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasMultiplier") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasMultiplier_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasMultiplier' -type MockDA_GasMultiplier_Call struct { - *mock.Call -} - -// GasMultiplier is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasMultiplier(ctx interface{}) 
*MockDA_GasMultiplier_Call { - return &MockDA_GasMultiplier_Call{Call: _e.mock.On("GasMultiplier", ctx)} -} - -func (_c *MockDA_GasMultiplier_Call) Run(run func(ctx context.Context)) *MockDA_GasMultiplier_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) Return(f float64, err error) *MockDA_GasMultiplier_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasMultiplier_Call { - _c.Call.Return(run) - return _c -} - -// GasPrice provides a mock function for the type MockDA -func (_mock *MockDA) GasPrice(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasPrice") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPrice' -type MockDA_GasPrice_Call struct { - *mock.Call -} - -// GasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasPrice(ctx interface{}) *MockDA_GasPrice_Call { - return &MockDA_GasPrice_Call{Call: _e.mock.On("GasPrice", ctx)} -} - -func (_c *MockDA_GasPrice_Call) Run(run func(ctx context.Context)) *MockDA_GasPrice_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) 
- }) - return _c -} - -func (_c *MockDA_GasPrice_Call) Return(f float64, err error) *MockDA_GasPrice_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasPrice_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasPrice_Call { - _c.Call.Return(run) - return _c -} - // Get provides a mock function for the type MockDA func (_mock *MockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { ret := _mock.Called(ctx, ids, namespace) From d60b5e7d9c681e326024c1a062b8e763283b8eea Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 14 Nov 2025 17:53:43 +0100 Subject: [PATCH 35/39] improve api --- block/internal/syncing/da_retriever.go | 3 +- block/internal/syncing/da_retriever_mock.go | 33 +++++++++++++++++++++ block/internal/syncing/da_retriever_test.go | 16 +++++----- block/internal/syncing/syncer.go | 7 +---- 4 files changed, 44 insertions(+), 15 deletions(-) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index a8def34578..03007c1a73 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -132,6 +132,7 @@ type DARetriever interface { RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) SetDAHeight(height uint64) + StopBackgroundFetcher() } // daRetriever handles DA retrieval operations for syncing @@ -561,7 +562,7 @@ func (r *daRetriever) fetchEpochSync(ctx context.Context, epochStart, epochEnd u } // Stop stops the background fetcher -func (r *daRetriever) Stop() { +func (r *daRetriever) StopBackgroundFetcher() { if r.fetcherCancel != nil { r.fetcherCancel() r.fetcherWg.Wait() diff --git a/block/internal/syncing/da_retriever_mock.go b/block/internal/syncing/da_retriever_mock.go index 1dc328b2a7..74037637da 100644 --- a/block/internal/syncing/da_retriever_mock.go +++ 
b/block/internal/syncing/da_retriever_mock.go @@ -213,3 +213,36 @@ func (_c *MockDARetriever_SetDAHeight_Call) RunAndReturn(run func(height uint64) _c.Run(run) return _c } + +// StopBackgroundFetcher provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) StopBackgroundFetcher() { + _mock.Called() + return +} + +// MockDARetriever_StopBackgroundFetcher_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StopBackgroundFetcher' +type MockDARetriever_StopBackgroundFetcher_Call struct { + *mock.Call +} + +// StopBackgroundFetcher is a helper method to define mock.On call +func (_e *MockDARetriever_Expecter) StopBackgroundFetcher() *MockDARetriever_StopBackgroundFetcher_Call { + return &MockDARetriever_StopBackgroundFetcher_Call{Call: _e.mock.On("StopBackgroundFetcher")} +} + +func (_c *MockDARetriever_StopBackgroundFetcher_Call) Run(run func()) *MockDARetriever_StopBackgroundFetcher_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockDARetriever_StopBackgroundFetcher_Call) Return() *MockDARetriever_StopBackgroundFetcher_Call { + _c.Call.Return() + return _c +} + +func (_c *MockDARetriever_StopBackgroundFetcher_Call) RunAndReturn(run func()) *MockDARetriever_StopBackgroundFetcher_Call { + _c.Run(run) + return _c +} diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 3ad97a6cc6..74a90dc256 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -427,7 +427,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { })).Return([][]byte{dataBin}, nil).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - t.Cleanup(func() { r.Stop() }) + t.Cleanup(func() { r.StopBackgroundFetcher() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 5678) require.NoError(t, err) @@ -447,7 +447,7 @@ func 
TestDARetriever_FetchForcedIncludedTxs_NoNamespaceConfigured(t *testing.T) // Leave ForcedInclusionNamespace empty r := NewDARetriever(nil, cm, cfg, gen, zerolog.Nop()) - t.Cleanup(func() { r.Stop() }) + t.Cleanup(func() { r.StopBackgroundFetcher() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 100) require.Error(t, err) @@ -474,7 +474,7 @@ func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - t.Cleanup(func() { r.Stop() }) + t.Cleanup(func() { r.StopBackgroundFetcher() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 9999) require.NoError(t, err) @@ -578,7 +578,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testi })).Return([][]byte{dataBin2}, nil).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - t.Cleanup(func() { r.Stop() }) + t.Cleanup(func() { r.StopBackgroundFetcher() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) @@ -613,7 +613,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_NotAtEpochStart(t *testing. 
mockDA := testmocks.NewMockDA(t) r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - t.Cleanup(func() { r.Stop() }) + t.Cleanup(func() { r.StopBackgroundFetcher() }) // With DAStartHeight=100, epoch size=10, daHeight=105 -> epoch boundaries are [100, 109] // But daHeight=105 is NOT the epoch start, so it should be a no-op @@ -645,7 +645,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochStartFromFuture(t *tes })).Return(nil, fmt.Errorf("%s: not yet available", coreda.ErrHeightFromFuture.Error())).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - t.Cleanup(func() { r.Stop() }) + t.Cleanup(func() { r.StopBackgroundFetcher() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 100) require.Error(t, err) @@ -678,7 +678,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochEndFromFuture(t *testi })).Return(nil, fmt.Errorf("%s: not yet available", coreda.ErrHeightFromFuture.Error())).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - t.Cleanup(func() { r.Stop() }) + t.Cleanup(func() { r.StopBackgroundFetcher() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 100) require.Error(t, err) @@ -741,7 +741,7 @@ func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) })).Return([][]byte{dataBin2}, nil).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - t.Cleanup(func() { r.Stop() }) + t.Cleanup(func() { r.StopBackgroundFetcher() }) result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 2000) require.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 9119d156b1..0adc9f9b72 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -146,12 +146,7 @@ func (s *Syncer) Stop() error { s.cancelP2PWait(0) s.wg.Wait() - // Stop the DA retriever's background fetcher - if s.daRetriever != nil { - if dr, ok := s.daRetriever.(interface{ Stop() }); ok { - 
dr.Stop() - } - } + s.daRetriever.StopBackgroundFetcher() s.logger.Info().Msg("syncer stopped") return nil From 60ad091e74e82ef0ea4511071a186a135abd9657 Mon Sep 17 00:00:00 2001 From: julienrbrt Date: Mon, 17 Nov 2025 15:22:56 +0100 Subject: [PATCH 36/39] refactor(block): extract da logic into da client and fi retriever (#2852) Extract basic da logic into da client and fi retriever. --- apps/evm/single/cmd/run.go | 14 +- apps/evm/single/go.sum | 2 + apps/grpc/single/cmd/run.go | 14 +- apps/grpc/single/go.sum | 2 + apps/testapp/cmd/run.go | 17 +- apps/testapp/go.sum | 2 + block/components.go | 10 +- block/internal/common/errors.go | 3 - block/internal/common/event.go | 7 - block/internal/da/client.go | 299 +++++++++ block/internal/da/client_test.go | 525 +++++++++++++++ .../internal/da/forced_inclusion_retriever.go | 177 +++++ .../da/forced_inclusion_retriever_test.go | 344 ++++++++++ block/internal/submitting/da_submitter.go | 21 +- .../da_submitter_integration_test.go | 9 +- .../submitting/da_submitter_mocks_test.go | 10 +- .../internal/submitting/da_submitter_test.go | 19 +- block/internal/submitting/submitter_test.go | 27 +- block/internal/syncing/da_retriever.go | 334 +--------- block/internal/syncing/da_retriever_mock.go | 68 -- block/internal/syncing/da_retriever_test.go | 437 ++----------- block/internal/syncing/syncer.go | 19 +- .../syncing/syncer_forced_inclusion_test.go | 61 +- block/internal/syncing/syncer_test.go | 2 - block/public.go | 61 +- da/internal/mocks/da.go | 120 ---- go.mod | 2 + go.sum | 2 + pkg/cmd/run_node.go | 2 - sequencers/based/based_test.go | 483 -------------- sequencers/based/{based.go => sequencer.go} | 127 ++-- sequencers/based/sequencer_test.go | 606 ++++++++++++++++++ sequencers/common/size_validation.go | 27 + sequencers/common/size_validation_test.go | 141 ++++ sequencers/single/queue.go | 20 + sequencers/single/queue_test.go | 154 +++++ sequencers/single/sequencer.go | 233 +++++-- sequencers/single/sequencer_test.go | 332 
++++++++-- test/mocks/da.go | 120 ---- types/CLAUDE.md | 11 +- types/da.go | 212 ------ types/da_test.go | 298 --------- 42 files changed, 3085 insertions(+), 2289 deletions(-) create mode 100644 block/internal/da/client.go create mode 100644 block/internal/da/client_test.go create mode 100644 block/internal/da/forced_inclusion_retriever.go create mode 100644 block/internal/da/forced_inclusion_retriever_test.go delete mode 100644 sequencers/based/based_test.go rename sequencers/based/{based.go => sequencer.go} (59%) create mode 100644 sequencers/based/sequencer_test.go create mode 100644 sequencers/common/size_validation.go create mode 100644 sequencers/common/size_validation_test.go delete mode 100644 types/da.go delete mode 100644 types/da_test.go diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 033ac7798e..092cea1f15 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -27,6 +27,7 @@ import ( "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -57,7 +58,7 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -113,10 +114,8 @@ func createSequencer( nodeConfig config.Config, genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { - daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } + daClient := 
block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) if nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA @@ -124,7 +123,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq := based.NewBasedSequencer(daRetriever, da, nodeConfig, genesis, logger) + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). @@ -149,7 +148,7 @@ func createSequencer( singleMetrics, nodeConfig.Node.Aggregator, 1000, - daRetriever, + fiRetriever, genesis, ) if err != nil { @@ -157,7 +156,6 @@ func createSequencer( } logger.Info(). - Bool("forced_inclusion_enabled", daRetriever != nil). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). Msg("single sequencer initialized") diff --git a/apps/evm/single/go.sum b/apps/evm/single/go.sum index f8f9a34349..66243b255c 100644 --- a/apps/evm/single/go.sum +++ b/apps/evm/single/go.sum @@ -760,6 +760,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git 
a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index b82f61eceb..c1c5ba7e85 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -24,6 +24,7 @@ import ( "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -59,7 +60,7 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") // Create DA client - daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -122,10 +123,8 @@ func createSequencer( nodeConfig config.Config, genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { - daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) if nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA @@ -133,7 +132,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq := based.NewBasedSequencer(daRetriever, da, nodeConfig, genesis, logger) + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
@@ -158,7 +157,7 @@ func createSequencer( singleMetrics, nodeConfig.Node.Aggregator, 1000, - daRetriever, + fiRetriever, genesis, ) if err != nil { @@ -166,7 +165,6 @@ func createSequencer( } logger.Info(). - Bool("forced_inclusion_enabled", daRetriever != nil). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). Msg("single sequencer initialized") diff --git a/apps/grpc/single/go.sum b/apps/grpc/single/go.sum index eabd0c4654..6dc9d5d9e1 100644 --- a/apps/grpc/single/go.sum +++ b/apps/grpc/single/go.sum @@ -654,6 +654,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index 49852e6454..dd3440b864 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -18,11 +18,11 @@ import ( "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" - genesispkg "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) 
@@ -58,7 +58,7 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, cmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -85,7 +85,7 @@ var RunCmd = &cobra.Command{ } genesisPath := filepath.Join(filepath.Dir(nodeConfig.ConfigPath()), "genesis.json") - genesis, err := genesispkg.LoadGenesis(genesisPath) + genesis, err := genesis.LoadGenesis(genesisPath) if err != nil { return fmt.Errorf("failed to load genesis: %w", err) } @@ -120,10 +120,8 @@ func createSequencer( nodeConfig config.Config, genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { - daRetriever, err := block.NewDARetriever(da, nodeConfig, genesis, logger) - if err != nil { - return nil, fmt.Errorf("failed to create DA retriever: %w", err) - } + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) if nodeConfig.Node.BasedSequencer { // Based sequencer mode - fetch transactions only from DA @@ -131,7 +129,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq := based.NewBasedSequencer(daRetriever, da, nodeConfig, genesis, logger) + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) logger.Info(). Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). @@ -156,7 +154,7 @@ func createSequencer( singleMetrics, nodeConfig.Node.Aggregator, 1000, - daRetriever, + fiRetriever, genesis, ) if err != nil { @@ -164,7 +162,6 @@ func createSequencer( } logger.Info(). - Bool("forced_inclusion_enabled", daRetriever != nil). 
Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). Msg("single sequencer initialized") diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index eabd0c4654..6dc9d5d9e1 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -654,6 +654,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/block/components.go b/block/components.go index 9e9e6af425..fee98db9fd 100644 --- a/block/components.go +++ b/block/components.go @@ -162,8 +162,9 @@ func NewSyncComponents( errorCh, ) - // Create DA submitter for sync nodes (no signer, only DA inclusion processing) - daSubmitter := submitting.NewDASubmitter(da, config, genesis, blockOpts, metrics, logger) + // Create DA client and submitter for sync nodes (no signer, only DA inclusion processing) + daClient := NewDAClient(da, config, logger) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) submitter := submitting.NewSubmitter( store, exec, @@ -252,8 +253,9 @@ func NewAggregatorComponents( }, nil } - // Create DA submitter for aggregator nodes (with signer for submission) - daSubmitter := submitting.NewDASubmitter(da, config, genesis, blockOpts, metrics, logger) + // Create DA client and 
submitter for aggregator nodes (with signer for submission) + daClient := NewDAClient(da, config, logger) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) submitter := submitting.NewSubmitter( store, exec, diff --git a/block/internal/common/errors.go b/block/internal/common/errors.go index 5ae797daab..5ae3218639 100644 --- a/block/internal/common/errors.go +++ b/block/internal/common/errors.go @@ -20,7 +20,4 @@ var ( // ErrOversizedItem is an unrecoverable error indicating a single item exceeds DA blob size limit ErrOversizedItem = errors.New("single item exceeds DA blob size limit") - - // ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. - ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") ) diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 1117683a51..f1b4295c73 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -23,10 +23,3 @@ type DAHeightEvent = struct { // Source indicates where this event originated from (DA or P2P) Source EventSource } - -// ForcedIncluded represents a forced inclusion event for caching -type ForcedIncludedEvent = struct { - Txs [][]byte - StartDaHeight uint64 - EndDaHeight uint64 -} diff --git a/block/internal/da/client.go b/block/internal/da/client.go new file mode 100644 index 0000000000..07bd7d3af2 --- /dev/null +++ b/block/internal/da/client.go @@ -0,0 +1,299 @@ +package da + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/rs/zerolog" + + coreda "github.com/evstack/ev-node/core/da" +) + +// Client is the interface representing the DA client. 
+type Client interface { + Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) coreda.ResultSubmit + Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve + RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve + RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve + RetrieveForcedInclusion(ctx context.Context, height uint64) coreda.ResultRetrieve + + GetHeaderNamespace() []byte + GetDataNamespace() []byte + GetForcedInclusionNamespace() []byte + HasForcedInclusionNamespace() bool + GetDA() coreda.DA +} + +// client provides a reusable wrapper around the core DA interface +// with common configuration for namespace handling and timeouts. +type client struct { + da coreda.DA + logger zerolog.Logger + defaultTimeout time.Duration + namespaceBz []byte + namespaceDataBz []byte + namespaceForcedInclusionBz []byte + hasForcedInclusionNs bool +} + +// Config contains configuration for the DA client. +type Config struct { + DA coreda.DA + Logger zerolog.Logger + DefaultTimeout time.Duration + Namespace string + DataNamespace string + ForcedInclusionNamespace string +} + +// NewClient creates a new DA client with pre-calculated namespace bytes. 
+func NewClient(cfg Config) *client { + if cfg.DefaultTimeout == 0 { + cfg.DefaultTimeout = 30 * time.Second + } + + hasForcedInclusionNs := cfg.ForcedInclusionNamespace != "" + var namespaceForcedInclusionBz []byte + if hasForcedInclusionNs { + namespaceForcedInclusionBz = coreda.NamespaceFromString(cfg.ForcedInclusionNamespace).Bytes() + } + + return &client{ + da: cfg.DA, + logger: cfg.Logger.With().Str("component", "da_client").Logger(), + defaultTimeout: cfg.DefaultTimeout, + namespaceBz: coreda.NamespaceFromString(cfg.Namespace).Bytes(), + namespaceDataBz: coreda.NamespaceFromString(cfg.DataNamespace).Bytes(), + namespaceForcedInclusionBz: namespaceForcedInclusionBz, + hasForcedInclusionNs: hasForcedInclusionNs, + } +} + +// Submit submits blobs to the DA layer with the specified options. +func (c *client) Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) coreda.ResultSubmit { + ids, err := c.da.SubmitWithOptions(ctx, data, gasPrice, namespace, options) + + // calculate blob size + var blobSize uint64 + for _, blob := range data { + blobSize += uint64(len(blob)) + } + + // Handle errors returned by Submit + if err != nil { + if errors.Is(err, context.Canceled) { + c.logger.Debug().Msg("DA submission canceled due to context cancellation") + return coreda.ResultSubmit{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusContextCanceled, + Message: "submission canceled", + IDs: ids, + BlobSize: blobSize, + }, + } + } + status := coreda.StatusError + switch { + case errors.Is(err, coreda.ErrTxTimedOut): + status = coreda.StatusNotIncludedInBlock + case errors.Is(err, coreda.ErrTxAlreadyInMempool): + status = coreda.StatusAlreadyInMempool + case errors.Is(err, coreda.ErrTxIncorrectAccountSequence): + status = coreda.StatusIncorrectAccountSequence + case errors.Is(err, coreda.ErrBlobSizeOverLimit): + status = coreda.StatusTooBig + case errors.Is(err, coreda.ErrContextDeadline): + status = coreda.StatusContextDeadline + 
} + + // Use debug level for StatusTooBig as it gets handled later in submitToDA through recursive splitting + if status == coreda.StatusTooBig { + c.logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } else { + c.logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } + return coreda.ResultSubmit{ + BaseResult: coreda.BaseResult{ + Code: status, + Message: "failed to submit blobs: " + err.Error(), + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: 0, + Timestamp: time.Now(), + BlobSize: blobSize, + }, + } + } + + if len(ids) == 0 && len(data) > 0 { + c.logger.Warn().Msg("DA submission returned no IDs for non-empty input data") + return coreda.ResultSubmit{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "failed to submit blobs: no IDs returned despite non-empty input", + }, + } + } + + // Get height from the first ID + var height uint64 + if len(ids) > 0 { + height, _, err = coreda.SplitID(ids[0]) + if err != nil { + c.logger.Error().Err(err).Msg("failed to split ID") + } + } + + c.logger.Debug().Int("num_ids", len(ids)).Msg("DA submission successful") + return coreda.ResultSubmit{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: height, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +// Retrieve retrieves blobs from the DA layer at the specified height and namespace. +func (c *client) Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve { + // 1. 
Get IDs + getIDsCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) + defer cancel() + idsResult, err := c.da.GetIDs(getIDsCtx, height, namespace) + if err != nil { + // Handle specific "not found" error + if strings.Contains(err.Error(), coreda.ErrBlobNotFound.Error()) { + c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusNotFound, + Message: coreda.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { + c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusHeightFromFuture, + Message: coreda.ErrHeightFromFuture.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + // Handle other errors during GetIDs + c.logger.Error().Uint64("height", height).Err(err).Msg("Failed to get IDs") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: fmt.Sprintf("failed to get IDs: %s", err.Error()), + Height: height, + Timestamp: time.Now(), + }, + } + } + + // This check should technically be redundant if GetIDs correctly returns ErrBlobNotFound + if idsResult == nil || len(idsResult.IDs) == 0 { + c.logger.Debug().Uint64("height", height).Msg("No IDs found at height") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusNotFound, + Message: coreda.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + // 2. 
Get Blobs using the retrieved IDs in batches + batchSize := 100 + blobs := make([][]byte, 0, len(idsResult.IDs)) + for i := 0; i < len(idsResult.IDs); i += batchSize { + end := min(i+batchSize, len(idsResult.IDs)) + + getBlobsCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) + batchBlobs, err := c.da.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) + cancel() + if err != nil { + // Handle errors during Get + c.logger.Error().Uint64("height", height).Int("num_ids", len(idsResult.IDs)).Err(err).Msg("Failed to get blobs") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: fmt.Sprintf("failed to get blobs for batch %d-%d: %s", i, end-1, err.Error()), + Height: height, + Timestamp: time.Now(), + }, + } + } + blobs = append(blobs, batchBlobs...) + } + // Success + c.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + Height: height, + IDs: idsResult.IDs, + Timestamp: idsResult.Timestamp, + }, + Data: blobs, + } +} + +// RetrieveHeaders retrieves blobs from the header namespace at the specified height. +func (c *client) RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve { + return c.Retrieve(ctx, height, c.namespaceBz) +} + +// RetrieveData retrieves blobs from the data namespace at the specified height. +func (c *client) RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve { + return c.Retrieve(ctx, height, c.namespaceDataBz) +} + +// RetrieveForcedInclusion retrieves blobs from the forced inclusion namespace at the specified height. 
+func (c *client) RetrieveForcedInclusion(ctx context.Context, height uint64) coreda.ResultRetrieve { + if !c.hasForcedInclusionNs { + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "forced inclusion namespace not configured", + }, + } + } + return c.Retrieve(ctx, height, c.namespaceForcedInclusionBz) +} + +// GetHeaderNamespace returns the header namespace bytes. +func (c *client) GetHeaderNamespace() []byte { + return c.namespaceBz +} + +// GetDataNamespace returns the data namespace bytes. +func (c *client) GetDataNamespace() []byte { + return c.namespaceDataBz +} + +// GetForcedInclusionNamespace returns the forced inclusion namespace bytes. +func (c *client) GetForcedInclusionNamespace() []byte { + return c.namespaceForcedInclusionBz +} + +// HasForcedInclusionNamespace returns whether forced inclusion namespace is configured. +func (c *client) HasForcedInclusionNamespace() bool { + return c.hasForcedInclusionNs +} + +// GetDA returns the underlying DA interface for advanced usage. 
+func (c *client) GetDA() coreda.DA { + return c.da +} diff --git a/block/internal/da/client_test.go b/block/internal/da/client_test.go new file mode 100644 index 0000000000..7bc7e972a6 --- /dev/null +++ b/block/internal/da/client_test.go @@ -0,0 +1,525 @@ +package da + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "gotest.tools/v3/assert" + + coreda "github.com/evstack/ev-node/core/da" +) + +// mockDA is a simple mock implementation of coreda.DA for testing +type mockDA struct { + submitFunc func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte) ([]coreda.ID, error) + submitWithOptions func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) + getIDsFunc func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) + getFunc func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) +} + +func (m *mockDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte) ([]coreda.ID, error) { + if m.submitFunc != nil { + return m.submitFunc(ctx, blobs, gasPrice, namespace) + } + return nil, nil +} + +func (m *mockDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) { + if m.submitWithOptions != nil { + return m.submitWithOptions(ctx, blobs, gasPrice, namespace, options) + } + return nil, nil +} + +func (m *mockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + if m.getIDsFunc != nil { + return m.getIDsFunc(ctx, height, namespace) + } + return nil, errors.New("not implemented") +} + +func (m *mockDA) Get(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + if m.getFunc != nil { + return m.getFunc(ctx, ids, namespace) + } + return nil, errors.New("not implemented") +} + +func (m *mockDA) GetProofs(ctx 
context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Proof, error) { + return nil, errors.New("not implemented") +} + +func (m *mockDA) Commit(ctx context.Context, blobs []coreda.Blob, namespace []byte) ([]coreda.Commitment, error) { + return nil, errors.New("not implemented") +} + +func (m *mockDA) Validate(ctx context.Context, ids []coreda.ID, proofs []coreda.Proof, namespace []byte) ([]bool, error) { + return nil, errors.New("not implemented") +} + +func TestNewClient(t *testing.T) { + tests := []struct { + name string + cfg Config + }{ + { + name: "with all namespaces", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }, + }, + { + name: "without forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-data-ns", + }, + }, + { + name: "with default timeout", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := NewClient(tt.cfg) + assert.Assert(t, client != nil) + assert.Assert(t, client.da != nil) + assert.Assert(t, len(client.namespaceBz) > 0) + assert.Assert(t, len(client.namespaceDataBz) > 0) + + if tt.cfg.ForcedInclusionNamespace != "" { + assert.Assert(t, client.hasForcedInclusionNs) + assert.Assert(t, len(client.namespaceForcedInclusionBz) > 0) + } else { + assert.Assert(t, !client.hasForcedInclusionNs) + } + + expectedTimeout := tt.cfg.DefaultTimeout + if expectedTimeout == 0 { + expectedTimeout = 30 * time.Second + } + assert.Equal(t, client.defaultTimeout, expectedTimeout) + }) + } +} + +func TestClient_HasForcedInclusionNamespace(t *testing.T) { + tests := []struct { + name string + cfg Config + expected bool + }{ + { + name: "with forced 
inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }, + expected: true, + }, + { + name: "without forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := NewClient(tt.cfg) + assert.Equal(t, client.HasForcedInclusionNamespace(), tt.expected) + }) + } +} + +func TestClient_GetNamespaces(t *testing.T) { + cfg := Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-header", + DataNamespace: "test-data", + ForcedInclusionNamespace: "test-fi", + } + + client := NewClient(cfg) + + headerNs := client.GetHeaderNamespace() + assert.Assert(t, len(headerNs) > 0) + + dataNs := client.GetDataNamespace() + assert.Assert(t, len(dataNs) > 0) + + fiNs := client.GetForcedInclusionNamespace() + assert.Assert(t, len(fiNs) > 0) + + // Namespaces should be different + assert.Assert(t, string(headerNs) != string(dataNs)) + assert.Assert(t, string(headerNs) != string(fiNs)) + assert.Assert(t, string(dataNs) != string(fiNs)) +} + +func TestClient_RetrieveForcedInclusion_NotConfigured(t *testing.T) { + cfg := Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + } + + client := NewClient(cfg) + ctx := context.Background() + + result := client.RetrieveForcedInclusion(ctx, 100) + assert.Equal(t, result.Code, coreda.StatusError) + assert.Assert(t, result.Message != "") +} + +func TestClient_GetDA(t *testing.T) { + mockDAInstance := &mockDA{} + cfg := Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + } + + client := NewClient(cfg) + da := client.GetDA() + assert.Equal(t, da, mockDAInstance) +} + +func TestClient_Submit(t *testing.T) { + 
logger := zerolog.Nop() + + testCases := []struct { + name string + data [][]byte + gasPrice float64 + options []byte + submitErr error + submitIDs [][]byte + expectedCode coreda.StatusCode + expectedErrMsg string + expectedIDs [][]byte + expectedCount uint64 + }{ + { + name: "successful submission", + data: [][]byte{[]byte("blob1"), []byte("blob2")}, + gasPrice: 1.0, + options: []byte("opts"), + submitIDs: [][]byte{[]byte("id1"), []byte("id2")}, + expectedCode: coreda.StatusSuccess, + expectedIDs: [][]byte{[]byte("id1"), []byte("id2")}, + expectedCount: 2, + }, + { + name: "context canceled error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: context.Canceled, + expectedCode: coreda.StatusContextCanceled, + expectedErrMsg: "submission canceled", + }, + { + name: "tx timed out error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrTxTimedOut, + expectedCode: coreda.StatusNotIncludedInBlock, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxTimedOut.Error(), + }, + { + name: "tx already in mempool error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrTxAlreadyInMempool, + expectedCode: coreda.StatusAlreadyInMempool, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxAlreadyInMempool.Error(), + }, + { + name: "incorrect account sequence error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrTxIncorrectAccountSequence, + expectedCode: coreda.StatusIncorrectAccountSequence, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxIncorrectAccountSequence.Error(), + }, + { + name: "blob size over limit error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrBlobSizeOverLimit, + expectedCode: coreda.StatusTooBig, + expectedErrMsg: "failed to submit blobs: " + 
coreda.ErrBlobSizeOverLimit.Error(), + }, + { + name: "context deadline error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrContextDeadline, + expectedCode: coreda.StatusContextDeadline, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrContextDeadline.Error(), + }, + { + name: "generic submission error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: errors.New("some generic error"), + expectedCode: coreda.StatusError, + expectedErrMsg: "failed to submit blobs: some generic error", + }, + { + name: "no IDs returned for non-empty data", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitIDs: [][]byte{}, + expectedCode: coreda.StatusError, + expectedErrMsg: "failed to submit blobs: no IDs returned despite non-empty input", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockDAInstance := &mockDA{ + submitWithOptions: func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) { + return tc.submitIDs, tc.submitErr + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: logger, + Namespace: "test-namespace", + DataNamespace: "test-data-namespace", + }) + + encodedNamespace := coreda.NamespaceFromString("test-namespace") + result := client.Submit(context.Background(), tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options) + + assert.Equal(t, tc.expectedCode, result.Code) + if tc.expectedErrMsg != "" { + assert.Assert(t, result.Message != "") + } + if tc.expectedIDs != nil { + assert.Equal(t, len(tc.expectedIDs), len(result.IDs)) + } + if tc.expectedCount != 0 { + assert.Equal(t, tc.expectedCount, result.SubmittedCount) + } + }) + } +} + +func TestClient_Retrieve(t *testing.T) { + logger := zerolog.Nop() + dataLayerHeight := uint64(100) + mockIDs := [][]byte{[]byte("id1"), []byte("id2")} + mockBlobs := 
[][]byte{[]byte("blobA"), []byte("blobB")} + mockTimestamp := time.Now() + + testCases := []struct { + name string + getIDsResult *coreda.GetIDsResult + getIDsErr error + getBlobsErr error + expectedCode coreda.StatusCode + expectedErrMsg string + expectedIDs [][]byte + expectedData [][]byte + expectedHeight uint64 + }{ + { + name: "successful retrieval", + getIDsResult: &coreda.GetIDsResult{ + IDs: mockIDs, + Timestamp: mockTimestamp, + }, + expectedCode: coreda.StatusSuccess, + expectedIDs: mockIDs, + expectedData: mockBlobs, + expectedHeight: dataLayerHeight, + }, + { + name: "blob not found error during GetIDs", + getIDsErr: coreda.ErrBlobNotFound, + expectedCode: coreda.StatusNotFound, + expectedErrMsg: coreda.ErrBlobNotFound.Error(), + expectedHeight: dataLayerHeight, + }, + { + name: "height from future error during GetIDs", + getIDsErr: coreda.ErrHeightFromFuture, + expectedCode: coreda.StatusHeightFromFuture, + expectedErrMsg: coreda.ErrHeightFromFuture.Error(), + expectedHeight: dataLayerHeight, + }, + { + name: "generic error during GetIDs", + getIDsErr: errors.New("failed to connect to DA"), + expectedCode: coreda.StatusError, + expectedErrMsg: "failed to get IDs: failed to connect to DA", + expectedHeight: dataLayerHeight, + }, + { + name: "GetIDs returns nil result", + getIDsResult: nil, + expectedCode: coreda.StatusNotFound, + expectedErrMsg: coreda.ErrBlobNotFound.Error(), + expectedHeight: dataLayerHeight, + }, + { + name: "GetIDs returns empty IDs", + getIDsResult: &coreda.GetIDsResult{ + IDs: [][]byte{}, + Timestamp: mockTimestamp, + }, + expectedCode: coreda.StatusNotFound, + expectedErrMsg: coreda.ErrBlobNotFound.Error(), + expectedHeight: dataLayerHeight, + }, + { + name: "error during Get (blobs retrieval)", + getIDsResult: &coreda.GetIDsResult{ + IDs: mockIDs, + Timestamp: mockTimestamp, + }, + getBlobsErr: errors.New("network error during blob retrieval"), + expectedCode: coreda.StatusError, + expectedErrMsg: "failed to get blobs for batch 
0-1: network error during blob retrieval", + expectedHeight: dataLayerHeight, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return tc.getIDsResult, tc.getIDsErr + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + if tc.getBlobsErr != nil { + return nil, tc.getBlobsErr + } + return mockBlobs, nil + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: logger, + Namespace: "test-namespace", + DataNamespace: "test-data-namespace", + DefaultTimeout: 5 * time.Second, + }) + + encodedNamespace := coreda.NamespaceFromString("test-namespace") + result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) + + assert.Equal(t, tc.expectedCode, result.Code) + assert.Equal(t, tc.expectedHeight, result.Height) + if tc.expectedErrMsg != "" { + assert.Assert(t, result.Message != "") + } + if tc.expectedIDs != nil { + assert.Equal(t, len(tc.expectedIDs), len(result.IDs)) + } + if tc.expectedData != nil { + assert.Equal(t, len(tc.expectedData), len(result.Data)) + } + }) + } +} + +func TestClient_Retrieve_Timeout(t *testing.T) { + logger := zerolog.Nop() + dataLayerHeight := uint64(100) + encodedNamespace := coreda.NamespaceFromString("test-namespace") + + t.Run("timeout during GetIDs", func(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + <-ctx.Done() // Wait for context cancellation + return nil, context.DeadlineExceeded + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: logger, + Namespace: "test-namespace", + DataNamespace: "test-data-namespace", + DefaultTimeout: 1 * time.Millisecond, + }) + + result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) + + 
assert.Equal(t, coreda.StatusError, result.Code) + assert.Assert(t, result.Message != "") + }) + + t.Run("timeout during Get", func(t *testing.T) { + mockIDs := [][]byte{[]byte("id1")} + mockTimestamp := time.Now() + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return &coreda.GetIDsResult{ + IDs: mockIDs, + Timestamp: mockTimestamp, + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + <-ctx.Done() // Wait for context cancellation + return nil, context.DeadlineExceeded + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: logger, + Namespace: "test-namespace", + DataNamespace: "test-data-namespace", + DefaultTimeout: 1 * time.Millisecond, + }) + + result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) + + assert.Equal(t, coreda.StatusError, result.Code) + assert.Assert(t, result.Message != "") + }) +} diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go new file mode 100644 index 0000000000..5f50473386 --- /dev/null +++ b/block/internal/da/forced_inclusion_retriever.go @@ -0,0 +1,177 @@ +package da + +import ( + "context" + "errors" + "fmt" + + "github.com/rs/zerolog" + + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/types" +) + +// ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. +var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") + +// ForcedInclusionRetriever handles retrieval of forced inclusion transactions from DA. +type ForcedInclusionRetriever struct { + client Client + genesis genesis.Genesis + logger zerolog.Logger + daEpochSize uint64 +} + +// ForcedInclusionEvent contains forced inclusion transactions retrieved from DA. 
type ForcedInclusionEvent struct {
	// StartDaHeight is the first DA height covered by this event (epoch start,
	// or the queried height itself when not at an epoch boundary).
	StartDaHeight uint64
	// EndDaHeight is the last DA height actually processed; may be lower than
	// the epoch end if an intermediate height was not yet available on DA.
	EndDaHeight uint64
	// Txs holds the raw forced-inclusion blobs, one entry per non-empty blob,
	// in DA-height order.
	Txs [][]byte
}

// NewForcedInclusionRetriever creates a new forced inclusion retriever.
// The epoch size is taken from genesis.DAEpochForcedInclusion; the logger is
// tagged with the "forced_inclusion_retriever" component.
func NewForcedInclusionRetriever(
	client Client,
	genesis genesis.Genesis,
	logger zerolog.Logger,
) *ForcedInclusionRetriever {
	return &ForcedInclusionRetriever{
		client:      client,
		genesis:     genesis,
		logger:      logger.With().Str("component", "forced_inclusion_retriever").Logger(),
		daEpochSize: genesis.DAEpochForcedInclusion,
	}
}

// RetrieveForcedIncludedTxs retrieves forced inclusion transactions at the given DA height.
// It respects epoch boundaries and only fetches at epoch start.
//
// Behavior:
//   - Returns ErrForceInclusionNotConfigured when the client has no forced
//     inclusion namespace.
//   - When daHeight is not an epoch start, returns an empty event spanning
//     only daHeight (no DA calls are made).
//   - At epoch start, fetches the epoch start and epoch end heights first;
//     if either is not yet available, returns an error wrapping
//     coreda.ErrHeightFromFuture so callers can back off and retry.
//   - Intermediate heights are then fetched in order; if one of them is not
//     yet available the walk stops early, but the already-fetched epoch end
//     result is still processed afterwards, so EndDaHeight reflects the last
//     height actually folded into the event.
func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) {
	if !r.client.HasForcedInclusionNamespace() {
		return nil, ErrForceInclusionNotConfigured
	}

	epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize)

	if daHeight != epochStart {
		r.logger.Debug().
			Uint64("da_height", daHeight).
			Uint64("epoch_start", epochStart).
			Msg("not at epoch start - returning empty transactions")

		return &ForcedInclusionEvent{
			StartDaHeight: daHeight,
			EndDaHeight:   daHeight,
			Txs:           [][]byte{},
		}, nil
	}

	// We're at epoch start - fetch transactions from DA
	currentEpochNumber := types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize)

	event := &ForcedInclusionEvent{
		StartDaHeight: epochStart,
		Txs:           [][]byte{},
	}

	r.logger.Debug().
		Uint64("da_height", daHeight).
		Uint64("epoch_start", epochStart).
		Uint64("epoch_end", epochEnd).
		Uint64("epoch_num", currentEpochNumber).
		Msg("retrieving forced included transactions from DA")

	// Probe the epoch start first; if it is still in the future there is
	// nothing to process yet and the caller must back off.
	epochStartResult := r.client.RetrieveForcedInclusion(ctx, epochStart)
	if epochStartResult.Code == coreda.StatusHeightFromFuture {
		r.logger.Debug().
			Uint64("epoch_start", epochStart).
			Msg("epoch start height not yet available on DA - backoff required")
		return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart)
	}

	// For multi-height epochs, also probe the epoch end up front so an
	// incomplete epoch is rejected before any blobs are accumulated.
	epochEndResult := epochStartResult
	if epochStart != epochEnd {
		epochEndResult = r.client.RetrieveForcedInclusion(ctx, epochEnd)
		if epochEndResult.Code == coreda.StatusHeightFromFuture {
			r.logger.Debug().
				Uint64("epoch_end", epochEnd).
				Msg("epoch end height not yet available on DA - backoff required")
			return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd)
		}
	}

	lastProcessedHeight := epochStart

	if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochStartResult, epochStart); err != nil {
		return nil, err
	}

	// Process heights between start and end (exclusive)
	for epochHeight := epochStart + 1; epochHeight < epochEnd; epochHeight++ {
		result := r.client.RetrieveForcedInclusion(ctx, epochHeight)

		// If any intermediate height is from future, break early
		// NOTE(review): the prefetched epoch-end result is still processed
		// below even after this break, so Txs can skip intermediate heights
		// while including epoch-end blobs — confirm this ordering is intended.
		if result.Code == coreda.StatusHeightFromFuture {
			r.logger.Debug().
				Uint64("epoch_height", epochHeight).
				Uint64("last_processed", lastProcessedHeight).
				Msg("reached future DA height within epoch - stopping")
			break
		}

		if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, result, epochHeight); err != nil {
			return nil, err
		}
	}

	// Process epoch end (only if different from start)
	if epochEnd != epochStart {
		if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochEndResult, epochEnd); err != nil {
			return nil, err
		}
	}

	event.EndDaHeight = lastProcessedHeight

	r.logger.Info().
		Uint64("epoch_start", epochStart).
		Uint64("epoch_end", lastProcessedHeight).
		Int("tx_count", len(event.Txs)).
		Msg("retrieved forced inclusion transactions")

	return event, nil
}

// processForcedInclusionBlobs processes blobs from a single DA height for forced inclusion.
+func (r *ForcedInclusionRetriever) processForcedInclusionBlobs( + event *ForcedInclusionEvent, + lastProcessedHeight *uint64, + result coreda.ResultRetrieve, + height uint64, +) error { + if result.Code == coreda.StatusNotFound { + r.logger.Debug().Uint64("height", height).Msg("no forced inclusion blobs at height") + *lastProcessedHeight = height + return nil + } + + if result.Code != coreda.StatusSuccess { + return fmt.Errorf("failed to retrieve forced inclusion blobs at height %d: %s", height, result.Message) + } + + // Process each blob as a transaction + for _, blob := range result.Data { + if len(blob) > 0 { + event.Txs = append(event.Txs, blob) + } + } + + *lastProcessedHeight = height + + r.logger.Debug(). + Uint64("height", height). + Int("blob_count", len(result.Data)). + Msg("processed forced inclusion blobs") + + return nil +} diff --git a/block/internal/da/forced_inclusion_retriever_test.go b/block/internal/da/forced_inclusion_retriever_test.go new file mode 100644 index 0000000000..e586125730 --- /dev/null +++ b/block/internal/da/forced_inclusion_retriever_test.go @@ -0,0 +1,344 @@ +package da + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "gotest.tools/v3/assert" + + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/genesis" +) + +func TestNewForcedInclusionRetriever(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + assert.Assert(t, retriever != nil) + assert.Equal(t, retriever.daEpochSize, uint64(10)) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoNamespace(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", 
+ DataNamespace: "test-data-ns", + // No forced inclusion namespace + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.Assert(t, err != nil) + assert.ErrorContains(t, err, "not configured") +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NotAtEpochStart(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + // Height 105 is not an epoch start (100, 110, 120, etc. are epoch starts) + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 105) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(105)) + assert.Equal(t, event.EndDaHeight, uint64(105)) + assert.Equal(t, len(event.Txs), 0) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartSuccess(t *testing.T) { + testBlobs := [][]byte{ + []byte("tx1"), + []byte("tx2"), + []byte("tx3"), + } + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return &coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, + Timestamp: time.Now(), + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + return testBlobs, nil + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + 
DAStartHeight: 100, + DAEpochForcedInclusion: 1, // Single height epoch + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + // Height 100 is an epoch start + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(100)) + assert.Equal(t, event.EndDaHeight, uint64(100)) + assert.Equal(t, len(event.Txs), len(testBlobs)) + assert.DeepEqual(t, event.Txs[0], testBlobs[0]) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailable(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return nil, coreda.ErrHeightFromFuture + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.Assert(t, err != nil) + assert.ErrorContains(t, err, "not yet available") +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoBlobsAtHeight(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return nil, coreda.ErrBlobNotFound + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 1, // Single height epoch + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := 
context.Background() + + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, len(event.Txs), 0) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t *testing.T) { + callCount := 0 + testBlobsByHeight := map[uint64][][]byte{ + 100: {[]byte("tx1"), []byte("tx2")}, + 101: {[]byte("tx3")}, + 102: {[]byte("tx4"), []byte("tx5"), []byte("tx6")}, + } + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + callCount++ + blobs, exists := testBlobsByHeight[height] + if !exists { + return nil, coreda.ErrBlobNotFound + } + ids := make([]coreda.ID, len(blobs)) + for i := range blobs { + ids[i] = []byte("id") + } + return &coreda.GetIDsResult{ + IDs: ids, + Timestamp: time.Now(), + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + // Return blobs based on current call count + switch callCount { + case 1: + return testBlobsByHeight[100], nil + case 2: + return testBlobsByHeight[101], nil + case 3: + return testBlobsByHeight[102], nil + default: + return nil, errors.New("unexpected call") + } + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 3, // Epoch: 100-102 + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(100)) + assert.Equal(t, event.EndDaHeight, uint64(102)) + + // Should have collected all txs from all heights + expectedTxCount := len(testBlobsByHeight[100]) + 
len(testBlobsByHeight[101]) + len(testBlobsByHeight[102]) + assert.Equal(t, len(event.Txs), expectedTxCount) +} + +func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + + tests := []struct { + name string + result coreda.ResultRetrieve + height uint64 + expectedTxCount int + expectedLastHeight uint64 + expectError bool + }{ + { + name: "success with blobs", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + }, + Data: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + height: 100, + expectedTxCount: 2, + expectedLastHeight: 100, + expectError: false, + }, + { + name: "not found", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusNotFound, + }, + }, + height: 100, + expectedTxCount: 0, + expectedLastHeight: 100, + expectError: false, + }, + { + name: "error status", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "test error", + }, + }, + height: 100, + expectError: true, + }, + { + name: "empty blobs are skipped", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + }, + Data: [][]byte{[]byte("tx1"), {}, []byte("tx2")}, + }, + height: 100, + expectedTxCount: 2, + expectedLastHeight: 100, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &ForcedInclusionEvent{ + Txs: [][]byte{}, + } + lastHeight := uint64(0) + + err := retriever.processForcedInclusionBlobs(event, &lastHeight, tt.result, tt.height) + + if tt.expectError { + assert.Assert(t, err != nil) + } else { + 
assert.NilError(t, err) + assert.Equal(t, len(event.Txs), tt.expectedTxCount) + assert.Equal(t, lastHeight, tt.expectedLastHeight) + } + }) + } +} diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 5a8fabc167..8cf741dcd9 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -12,6 +12,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" pkgda "github.com/evstack/ev-node/pkg/da" @@ -94,24 +95,20 @@ func clamp(v, min, max time.Duration) time.Duration { // DASubmitter handles DA submission operations type DASubmitter struct { - da coreda.DA + client da.Client config config.Config genesis genesis.Genesis options common.BlockOptions logger zerolog.Logger metrics *common.Metrics - // calculate namespaces bytes once and reuse them - namespaceBz []byte - namespaceDataBz []byte - // address selector for multi-account support addressSelector pkgda.AddressSelector } // NewDASubmitter creates a new DA submitter func NewDASubmitter( - da coreda.DA, + client da.Client, config config.Config, genesis genesis.Genesis, options common.BlockOptions, @@ -122,7 +119,7 @@ func NewDASubmitter( if config.RPC.EnableDAVisualization { visualizerLogger := logger.With().Str("component", "da_visualization").Logger() - server.SetDAVisualizationServer(server.NewDAVisualizationServer(da, visualizerLogger, config.Node.Aggregator)) + server.SetDAVisualizationServer(server.NewDAVisualizationServer(client.GetDA(), visualizerLogger, config.Node.Aggregator)) } // Use NoOp metrics if nil to avoid nil checks throughout the code @@ -142,14 +139,12 @@ func NewDASubmitter( } return &DASubmitter{ - da: da, + client: client, config: config, genesis: genesis, options: options, metrics: metrics, logger: 
daSubmitterLogger, - namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), addressSelector: addressSelector, } } @@ -199,7 +194,7 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, cache cache.Manager) er } }, "header", - s.namespaceBz, + s.client.GetHeaderNamespace(), []byte(s.config.DA.SubmitOptions), func() uint64 { return cache.NumPendingHeaders() }, ) @@ -242,7 +237,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, cache cache.Manager, signe } }, "data", - s.namespaceDataBz, + s.client.GetDataNamespace(), []byte(s.config.DA.SubmitOptions), func() uint64 { return cache.NumPendingData() }, ) @@ -411,7 +406,7 @@ func submitToDA[T any]( // Perform submission start := time.Now() - res := types.SubmitWithHelpers(submitCtx, s.da, s.logger, marshaled, -1, namespace, mergedOptions) + res := s.client.Submit(submitCtx, marshaled, -1, namespace, mergedOptions) s.logger.Debug().Int("attempts", rs.Attempt).Dur("elapsed", time.Since(start)).Uint64("code", uint64(res.Code)).Msg("got SubmitWithHelpers response from celestia") // Record submission result for observability diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index 421340e11d..5b768e1a51 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -15,6 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -86,7 +87,13 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) // Create DA submitter - daSubmitter 
:= NewDASubmitter(dummyDA, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: dummyDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + daSubmitter := NewDASubmitter(daClient, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) // Submit headers and data require.NoError(t, daSubmitter.SubmitHeaders(context.Background(), cm)) diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index d914e6db61..b215b0cf2f 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -25,10 +26,17 @@ func newTestSubmitter(mockDA *mocks.MockDA, override func(*config.Config)) *DASu cfg.DA.MaxSubmitAttempts = 3 cfg.DA.SubmitOptions = "opts" cfg.DA.Namespace = "ns" + cfg.DA.DataNamespace = "ns-data" if override != nil { override(&cfg) } - return NewDASubmitter(mockDA, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + return NewDASubmitter(daClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop()) } // marshal helper for simple items diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index c657d8185b..214ab98db4 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -15,6 
+15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -51,8 +52,14 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage } // Create DA submitter + daClient := da.NewClient(da.Config{ + DA: dummyDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) daSubmitter := NewDASubmitter( - dummyDA, + daClient, cfg, gen, common.DefaultBlockOptions(), @@ -80,7 +87,7 @@ func TestDASubmitter_NewDASubmitter(t *testing.T) { submitter, _, _, _, _ := setupDASubmitterTest(t) assert.NotNil(t, submitter) - assert.NotNil(t, submitter.da) + assert.NotNil(t, submitter.client) assert.NotNil(t, submitter.config) assert.NotNil(t, submitter.genesis) } @@ -95,8 +102,14 @@ func TestNewDASubmitterSetsVisualizerWhenEnabled(t *testing.T) { dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + daClient := da.NewClient(da.Config{ + DA: dummyDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) NewDASubmitter( - dummyDA, + daClient, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index c13d8a1df7..33350ae268 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -18,6 +18,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/rpc/server" @@ -158,8 +159,16 @@ func TestSubmitter_setSequencerHeightToDAHeight(t *testing.T) { mockStore := 
testmocks.NewMockStore(t) cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" metrics := common.NopMetrics() - daSub := NewDASubmitter(nil, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), metrics, zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: nil, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(mockStore, nil, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) s.ctx = ctx @@ -238,7 +247,13 @@ func TestSubmitter_processDAInclusionLoop_advances(t *testing.T) { exec.On("SetFinal", mock.Anything, uint64(1)).Return(nil).Once() exec.On("SetFinal", mock.Anything, uint64(2)).Return(nil).Once() - daSub := NewDASubmitter(nil, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), metrics, zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: nil, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // prepare two consecutive blocks in store with DA included in cache @@ -423,7 +438,13 @@ func TestSubmitter_CacheClearedOnHeightInclusion(t *testing.T) { exec.On("SetFinal", mock.Anything, uint64(1)).Return(nil).Once() exec.On("SetFinal", mock.Anything, uint64(2)).Return(nil).Once() - daSub := NewDASubmitter(nil, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), metrics, zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: nil, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(st, exec, cm, 
metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // Create test blocks diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index e52f8a4ce5..c87750b0f5 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -5,90 +5,51 @@ import ( "context" "errors" "fmt" - "time" "github.com/rs/zerolog" "google.golang.org/protobuf/proto" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) -// defaultDATimeout is the default timeout for DA retrieval operations -const defaultDATimeout = 10 * time.Second - -// pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch -// and needs to be retried in future epochs. 
-type pendingForcedInclusionTx struct { - Data []byte // The transaction data - OriginalHeight uint64 // Original DA height where this transaction was found -} - // DARetriever defines the interface for retrieving events from the DA layer type DARetriever interface { RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) - RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) } // daRetriever handles DA retrieval operations for syncing type daRetriever struct { - da coreda.DA + client da.Client cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger - // calculate namespaces bytes once and reuse them - namespaceBz []byte - namespaceDataBz []byte - namespaceForcedInclusionBz []byte - - hasForcedInclusionNs bool - daEpochSize uint64 - // transient cache, only full event need to be passed to the syncer // on restart, will be refetch as da height is updated by syncer pendingHeaders map[uint64]*types.SignedHeader pendingData map[uint64]*types.Data - - // Forced inclusion transactions that couldn't fit in the current epoch - // and need to be retried in future epochs. 
- pendingForcedInclusionTxs []pendingForcedInclusionTx } // NewDARetriever creates a new DA retriever func NewDARetriever( - da coreda.DA, + client da.Client, cache cache.CacheManager, - config config.Config, genesis genesis.Genesis, logger zerolog.Logger, ) *daRetriever { - forcedInclusionNs := config.DA.GetForcedInclusionNamespace() - hasForcedInclusionNs := forcedInclusionNs != "" - - var namespaceForcedInclusionBz []byte - if hasForcedInclusionNs { - namespaceForcedInclusionBz = coreda.NamespaceFromString(forcedInclusionNs).Bytes() - } - return &daRetriever{ - da: da, - cache: cache, - genesis: genesis, - logger: logger.With().Str("component", "da_retriever").Logger(), - namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), - namespaceForcedInclusionBz: namespaceForcedInclusionBz, - hasForcedInclusionNs: hasForcedInclusionNs, - daEpochSize: genesis.DAEpochForcedInclusion, - pendingHeaders: make(map[uint64]*types.SignedHeader), - pendingData: make(map[uint64]*types.Data), - pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0), + client: client, + cache: cache, + genesis: genesis, + logger: logger.With().Str("component", "da_retriever").Logger(), + pendingHeaders: make(map[uint64]*types.SignedHeader), + pendingData: make(map[uint64]*types.Data), } } @@ -109,234 +70,17 @@ func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co return r.processBlobs(ctx, blobsResp.Data, daHeight), nil } -// RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. -// -// Behavior: -// - At epoch boundaries (when daHeight == epochStart): fetches new forced-inclusion transactions -// from the DA layer for the entire epoch range, processes them, and returns all that fit within -// the max blob size limit. Transactions that don't fit are stored in the pending queue for retry. 
-// - Outside epoch boundaries (when daHeight != epochStart): returns any pending transactions from -// the queue that were deferred from previous epochs. -// - Pending transactions are kept in-memory only and will be lost on node restart. -// -// Returns: -// - ForcedIncludedEvent with transactions that should be included in the next block (may be empty) -// - Error if forced inclusion is not configured or DA layer is unavailable -func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { - if !r.hasForcedInclusionNs { - return nil, common.ErrForceInclusionNotConfigured - } - - // Calculate deterministic epoch boundaries - epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) - - // If we're not at epoch start, return pending transactions only (if any) - if daHeight != epochStart { - r.logger.Debug(). - Uint64("da_height", daHeight). - Uint64("epoch_start", epochStart). - Int("pending_count", len(r.pendingForcedInclusionTxs)). - Msg("not at epoch start - returning pending transactions only") - - event := &common.ForcedIncludedEvent{ - StartDaHeight: daHeight, - EndDaHeight: daHeight, - Txs: [][]byte{}, - } - - // Return pending txs if any exist - if len(r.pendingForcedInclusionTxs) > 0 { - pendingTxs, indicesToRemove, _ := r.processPendingForcedInclusionTxs() - event.Txs = pendingTxs - - // Remove successfully included pending transactions - if len(indicesToRemove) > 0 { - r.removePendingForcedInclusionTxs(indicesToRemove) - r.logger.Debug(). - Int("included_count", len(indicesToRemove)). - Int("remaining_count", len(r.pendingForcedInclusionTxs)). 
- Msg("included pending forced inclusion transactions") - } - } - - return event, nil - } - - // We're at epoch start - fetch new transactions from DA - - currentEpochNumber := types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize) - - event := &common.ForcedIncludedEvent{ - StartDaHeight: epochStart, - } - - r.logger.Debug(). - Uint64("da_height", daHeight). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", epochEnd). - Uint64("epoch_num", currentEpochNumber). - Msg("retrieving forced included transactions from DA") - - // Check if both epoch start and end are available before fetching - // This ensures we can retrieve the complete epoch in one go - epochStartResult := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochStart, r.namespaceForcedInclusionBz, defaultDATimeout) - if epochStartResult.Code == coreda.StatusHeightFromFuture { - r.logger.Debug(). - Uint64("epoch_start", epochStart). - Msg("epoch start height not yet available on DA - backoff required") - return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart) - } - - epochEndResult := epochStartResult - if epochStart != epochEnd { - epochEndResult = types.RetrieveWithHelpers(ctx, r.da, r.logger, epochEnd, r.namespaceForcedInclusionBz, defaultDATimeout) - if epochEndResult.Code == coreda.StatusHeightFromFuture { - r.logger.Debug(). - Uint64("epoch_end", epochEnd). 
- Msg("epoch end height not yet available on DA - backoff required") - return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd) - } - } - - lastProcessedHeight := epochStart - newPendingTxs := []pendingForcedInclusionTx{} - - // Prepend pending transactions from previous epochs at the start of this epoch - pendingTxs, indicesToRemove, currentSize := r.processPendingForcedInclusionTxs() - event.Txs = pendingTxs - - // Remove successfully included pending transactions - if len(indicesToRemove) > 0 { - r.removePendingForcedInclusionTxs(indicesToRemove) - r.logger.Debug(). - Int("included_count", len(indicesToRemove)). - Int("remaining_count", len(r.pendingForcedInclusionTxs)). - Msg("included pending forced inclusion transactions") - } - - // Process epoch start - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, epochStartResult, epochStart); err != nil { - return nil, err - } - - // Process heights between start and end (exclusive) - for epochHeight := epochStart + 1; epochHeight < epochEnd; epochHeight++ { - result := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochHeight, r.namespaceForcedInclusionBz, defaultDATimeout) - - // If any intermediate height is from future, break early - if result.Code == coreda.StatusHeightFromFuture { - r.logger.Debug(). - Uint64("epoch_height", epochHeight). - Uint64("last_processed", lastProcessedHeight). 
- Msg("reached future DA height within epoch - stopping") - break - } - - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, result, epochHeight); err != nil { - return nil, err - } - } - - // Process epoch end (only if different from start) - if epochEnd != epochStart { - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, epochEndResult, epochEnd); err != nil { - return nil, err - } - } - - // Store any new pending transactions that couldn't fit in this epoch - if len(newPendingTxs) > 0 { - r.pendingForcedInclusionTxs = append(r.pendingForcedInclusionTxs, newPendingTxs...) - r.logger.Info(). - Int("new_pending_count", len(newPendingTxs)). - Int("total_pending_count", len(r.pendingForcedInclusionTxs)). - Msg("stored pending forced inclusion transactions for next epoch") - } - - // Set the DA height range based on what we actually processed - event.StartDaHeight = epochStart - event.EndDaHeight = lastProcessedHeight - - return event, nil -} - -// processForcedInclusionBlobs processes forced inclusion blobs from a single DA height. -// It accumulates transactions that fit within maxBlobSize and stores excess in newPendingTxs. -func (r *daRetriever) processForcedInclusionBlobs( - event *common.ForcedIncludedEvent, - currentSize *int, - lastProcessedHeight *uint64, - newPendingTxs *[]pendingForcedInclusionTx, - result coreda.ResultRetrieve, - daHeight uint64, -) error { - if result.Code != coreda.StatusSuccess { - return nil - } - - if err := r.validateBlobResponse(result, daHeight); !errors.Is(err, coreda.ErrBlobNotFound) && err != nil { - return err - } - - for i, data := range result.Data { - if len(data) > common.DefaultMaxBlobSize { - r.logger.Debug(). - Uint64("da_height", daHeight). - Int("index", i). - Uint64("blob_size", uint64(len(data))). - Msg("Following data exceeds maximum blob size. 
Skipping...") - continue - } - - // Calculate size of this specific data item - dataSize := len(data) - - // Check if individual blob exceeds max size - if dataSize > int(common.DefaultMaxBlobSize) { - r.logger.Warn(). - Uint64("da_height", daHeight). - Int("blob_size", dataSize). - Float64("max_size", common.DefaultMaxBlobSize). - Msg("forced inclusion blob exceeds maximum size - skipping") - return fmt.Errorf("blob size %d exceeds maximum %f", dataSize, common.DefaultMaxBlobSize) - } - - // Check if adding this blob would exceed the current epoch's max size - if *currentSize+dataSize > int(common.DefaultMaxBlobSize) { - r.logger.Debug(). - Uint64("da_height", daHeight). - Int("current_size", *currentSize). - Int("blob_size", dataSize). - Msg("blob would exceed max size for this epoch - deferring to pending queue") - - // Store for next epoch - *newPendingTxs = append(*newPendingTxs, pendingForcedInclusionTx{ - Data: data, - OriginalHeight: daHeight, - }) - continue - } - - // Include this transaction - event.Txs = append(event.Txs, data) - *currentSize += dataSize - *lastProcessedHeight = daHeight - } - - return nil -} - -// fetchBlobs retrieves blobs from the DA layer +// fetchBlobs retrieves blobs from both header and data namespaces func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { - // Retrieve from both namespaces - headerRes := types.RetrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceBz, defaultDATimeout) + // Retrieve from both namespaces using the DA client + headerRes := r.client.RetrieveHeaders(ctx, daHeight) // If namespaces are the same, return header result - if bytes.Equal(r.namespaceBz, r.namespaceDataBz) { + if bytes.Equal(r.client.GetHeaderNamespace(), r.client.GetDataNamespace()) { return headerRes, r.validateBlobResponse(headerRes, daHeight) } - dataRes := types.RetrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceDataBz, defaultDATimeout) + dataRes := 
r.client.RetrieveData(ctx, daHeight) // Validate responses headerErr := r.validateBlobResponse(headerRes, daHeight) @@ -592,53 +336,3 @@ func createEmptyDataForHeader(ctx context.Context, header *types.SignedHeader) * }, } } - -// processPendingForcedInclusionTxs processes pending transactions and returns those that fit within the max blob size. -// Returns the transactions to include, the indices of transactions to remove, and the total size used. -func (r *daRetriever) processPendingForcedInclusionTxs() ([][]byte, []int, int) { - var ( - currentSize int - txs [][]byte - indicesToRemove []int - ) - - for i, pendingTx := range r.pendingForcedInclusionTxs { - dataSize := len(pendingTx.Data) - if currentSize+dataSize > int(common.DefaultMaxBlobSize) { - r.logger.Debug(). - Int("current_size", currentSize). - Int("data_size", dataSize). - Msg("pending transaction would exceed max blob size, will retry later") - break - } - - txs = append(txs, pendingTx.Data) - currentSize += dataSize - indicesToRemove = append(indicesToRemove, i) - } - - return txs, indicesToRemove, currentSize -} - -// removePendingForcedInclusionTxs removes pending transactions at the specified indices. -// Indices must be sorted in ascending order. 
-func (r *daRetriever) removePendingForcedInclusionTxs(indices []int) { - if len(indices) == 0 { - return - } - - // Create a new slice without the removed elements - newPending := make([]pendingForcedInclusionTx, 0, len(r.pendingForcedInclusionTxs)-len(indices)) - removeMap := make(map[int]bool, len(indices)) - for _, idx := range indices { - removeMap[idx] = true - } - - for i, tx := range r.pendingForcedInclusionTxs { - if !removeMap[i] { - newPending = append(newPending, tx) - } - } - - r.pendingForcedInclusionTxs = newPending -} diff --git a/block/internal/syncing/da_retriever_mock.go b/block/internal/syncing/da_retriever_mock.go index 505987aee6..32e901bc1e 100644 --- a/block/internal/syncing/da_retriever_mock.go +++ b/block/internal/syncing/da_retriever_mock.go @@ -38,74 +38,6 @@ func (_m *MockDARetriever) EXPECT() *MockDARetriever_Expecter { return &MockDARetriever_Expecter{mock: &_m.Mock} } -// RetrieveForcedIncludedTxsFromDA provides a mock function for the type MockDARetriever -func (_mock *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error) { - ret := _mock.Called(ctx, daHeight) - - if len(ret) == 0 { - panic("no return value specified for RetrieveForcedIncludedTxsFromDA") - } - - var r0 *common.ForcedIncludedEvent - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (*common.ForcedIncludedEvent, error)); ok { - return returnFunc(ctx, daHeight) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) *common.ForcedIncludedEvent); ok { - r0 = returnFunc(ctx, daHeight) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*common.ForcedIncludedEvent) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = returnFunc(ctx, daHeight) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'RetrieveForcedIncludedTxsFromDA' -type MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call struct { - *mock.Call -} - -// RetrieveForcedIncludedTxsFromDA is a helper method to define mock.On call -// - ctx context.Context -// - daHeight uint64 -func (_e *MockDARetriever_Expecter) RetrieveForcedIncludedTxsFromDA(ctx interface{}, daHeight interface{}) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { - return &MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call{Call: _e.mock.On("RetrieveForcedIncludedTxsFromDA", ctx, daHeight)} -} - -func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 uint64 - if args[1] != nil { - arg1 = args[1].(uint64) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) Return(v *common.ForcedIncludedEvent, err error) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { - _c.Call.Return(v, err) - return _c -} - -func (_c *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) (*common.ForcedIncludedEvent, error)) *MockDARetriever_RetrieveForcedIncludedTxsFromDA_Call { - _c.Call.Return(run) - return _c -} - // RetrieveFromDA provides a mock function for the type MockDARetriever func (_mock *MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { ret := _mock.Called(ctx, daHeight) diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index d4cd2b2dbc..04ba66e423 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -16,6 +16,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" 
"github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -24,6 +25,29 @@ import ( "github.com/evstack/ev-node/types" ) +// newTestDARetriever creates a DA retriever for testing with the given DA implementation +func newTestDARetriever(t *testing.T, mockDA coreda.DA, cfg config.Config, gen genesis.Genesis) *daRetriever { + t.Helper() + if cfg.DA.Namespace == "" { + cfg.DA.Namespace = "test-ns" + } + if cfg.DA.DataNamespace == "" { + cfg.DA.DataNamespace = "test-data-ns" + } + + cm, err := cache.NewCacheManager(cfg, zerolog.Nop()) + require.NoError(t, err) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + + return NewDARetriever(daClient, cm, gen, zerolog.Nop()) +} + // makeSignedDataBytes builds SignedData containing the provided Data and returns its binary encoding func makeSignedDataBytes(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer, txs int) ([]byte, *types.SignedData) { return makeSignedDataBytesWithTime(t, chainID, height, proposer, pub, signer, txs, uint64(time.Now().UnixNano())) @@ -39,57 +63,45 @@ func makeSignedDataBytesWithTime(t *testing.T, chainID string, height uint64, pr } // For DA SignedData, sign the Data payload bytes (matches DA submission logic) - payload, err := d.MarshalBinary() - require.NoError(t, err) - sig, err := signer.Sign(payload) - require.NoError(t, err) + payload, _ := d.MarshalBinary() + sig, _ := signer.Sign(payload) sd := &types.SignedData{Data: *d, Signature: sig, Signer: types.Signer{PubKey: pub, Address: proposer}} - bin, err := sd.MarshalBinary() - require.NoError(t, err) + bin, _ := sd.MarshalBinary() return bin, sd } func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) { - cm, err := 
cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - assert.NoError(t, err) - mockDA := testmocks.NewMockDA(t) mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). Return(nil, errors.New("just invalid")).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) assert.Error(t, err) assert.Len(t, events, 0) } func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - assert.NoError(t, err) - mockDA := testmocks.NewMockDA(t) // GetIDs returns ErrBlobNotFound -> helper maps to StatusNotFound mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). Return(nil, fmt.Errorf("%s: whatever", coreda.ErrBlobNotFound.Error())).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) assert.True(t, errors.Is(err, coreda.ErrBlobNotFound)) assert.Len(t, events, 0) } func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - mockDA := testmocks.NewMockDA(t) // GetIDs returns ErrHeightFromFuture -> helper maps to StatusHeightFromFuture, fetchBlobs returns error mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). 
Return(nil, fmt.Errorf("%s: later", coreda.ErrHeightFromFuture.Error())).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, derr := r.RetrieveFromDA(context.Background(), 1000) assert.Error(t, derr) assert.True(t, errors.Is(derr, coreda.ErrHeightFromFuture)) @@ -97,8 +109,7 @@ func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { } func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) + t.Skip("Skipping flaky timeout test - timing is now controlled by DA client") mockDA := testmocks.NewMockDA(t) @@ -109,7 +120,7 @@ func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { }). Return(nil, context.DeadlineExceeded).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) start := time.Now() events, err := r.RetrieveFromDA(context.Background(), 42) @@ -122,13 +133,12 @@ func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { assert.Len(t, events, 0) // Verify timeout occurred approximately at expected time (with some tolerance) - assert.Greater(t, duration, 9*time.Second, "should timeout after approximately 10 seconds") - assert.Less(t, duration, 12*time.Second, "should not take much longer than timeout") + // DA client has a 30-second default timeout + assert.Greater(t, duration, 29*time.Second, "should timeout after approximately 30 seconds") + assert.Less(t, duration, 35*time.Second, "should not take much longer than timeout") } func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) mockDA := testmocks.NewMockDA(t) @@ -136,7 +146,7 @@ func 
TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). Return(nil, context.DeadlineExceeded).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) @@ -148,13 +158,11 @@ func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { } func TestDARetriever_ProcessBlobs_HeaderAndData_Success(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) dataBin, data := makeSignedDataBytes(t, gen.ChainID, 2, addr, pub, signer, 2) hdrBin, _ := makeSignedHeaderBytes(t, gen.ChainID, 2, addr, pub, signer, nil, &data.Data, nil) @@ -175,12 +183,10 @@ func TestDARetriever_ProcessBlobs_HeaderAndData_Success(t *testing.T) { } func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Header with no data hash present should trigger empty data creation (per current logic) hb, _ := makeSignedHeaderBytes(t, gen.ChainID, 3, addr, pub, signer, nil, nil, nil) @@ -201,12 +207,10 @@ func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) { 
} func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) hb, sh := makeSignedHeaderBytes(t, gen.ChainID, 5, addr, pub, signer, nil, nil, nil) gotH := r.tryDecodeHeader(hb, 123) @@ -224,13 +228,11 @@ func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { } func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) goodAddr, pub, signer := buildSyncTestSigner(t) badAddr := []byte("not-the-proposer") gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: badAddr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Signed data is made by goodAddr; retriever expects badAddr -> should be rejected db, _ := makeSignedDataBytes(t, gen.ChainID, 7, goodAddr, pub, signer, 1) @@ -252,8 +254,6 @@ func TestDARetriever_validateBlobResponse(t *testing.T) { } func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} @@ -281,7 +281,7 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, 
namespaceDataBz) })). Return([][]byte{dataBin}, nil).Once() - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, cfg, gen) events, derr := r.RetrieveFromDA(context.Background(), 1234) require.NoError(t, derr) @@ -291,13 +291,11 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { } func TestDARetriever_ProcessBlobs_CrossDAHeightMatching(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Create header and data for the same block height but from different DA heights dataBin, data := makeSignedDataBytes(t, gen.ChainID, 5, addr, pub, signer, 2) @@ -325,13 +323,11 @@ func TestDARetriever_ProcessBlobs_CrossDAHeightMatching(t *testing.T) { } func TestDARetriever_ProcessBlobs_MultipleHeadersCrossDAHeightMatching(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Create multiple headers and data for different block heights data3Bin, data3 := makeSignedDataBytes(t, gen.ChainID, 3, addr, pub, signer, 1) @@ -398,350 +394,3 @@ func Test_isEmptyDataExpected(t *testing.T) { h.DataHash = common.DataHashForEmptyTxs assert.True(t, isEmptyDataExpected(h)) } - -func TestDARetriever_RetrieveForcedIncludedTxsFromDA_Success(t *testing.T) { - cm, err := 
cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - - addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 5678, DAEpochForcedInclusion: 1} - - // Prepare forced inclusion transaction data - dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 3) - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() - - mockDA := testmocks.NewMockDA(t) - // With DAStartHeight=5678, epoch size=1, daHeight=5678 -> epoch boundaries are [5678, 5678] - // Check epoch start only (end check is skipped when same as start) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(5678), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() - - // Fetch epoch start data - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin}, nil).Once() - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 5678) - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Txs, 1) // Only fetched once since start == end - assert.Equal(t, dataBin, result.Txs[0]) -} - -func TestDARetriever_FetchForcedIncludedTxs_NoNamespaceConfigured(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - - addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 0, 
DAEpochForcedInclusion: 1} - - cfg := config.DefaultConfig() - // Leave ForcedInclusionNamespace empty - - r := NewDARetriever(nil, cm, cfg, gen, zerolog.Nop()) - - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1234) - require.Error(t, err) - require.Nil(t, result) -} - -func TestDARetriever_FetchForcedIncludedTxs_NotFound(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - - addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 9999, DAEpochForcedInclusion: 1} - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() - - mockDA := testmocks.NewMockDA(t) - // With DAStartHeight=9999, epoch size=1, daHeight=9999 -> epoch boundaries are [9999, 9999] - // Check epoch start only (end check is skipped when same as start) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(9999), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 9999) - require.NoError(t, err) - require.NotNil(t, result) - require.Empty(t, result.Txs) -} - -func TestDARetriever_RetrieveForcedIncludedTxsFromDA_ExceedsMaxBlobSize(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - - addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 1000, DAEpochForcedInclusion: 3} - - cfg := config.DefaultConfig() - 
cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() - - // Use fixed timestamp for deterministic test data - fixedTime := uint64(1234567890) - - // Create signed data blobs that will exceed DefaultMaxBlobSize when accumulated - // DefaultMaxBlobSize is 1.5MB = 1,572,864 bytes - // Each 700KB tx becomes ~719KB blob, so 2 blobs = ~1.44MB (fits), 3 blobs = ~2.16MB (exceeds) - d1 := &types.Data{ - Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 10, Time: fixedTime}, - Txs: make(types.Txs, 1), - } - d1.Txs[0] = make([]byte, 700*1024) // 700KB transaction - - payload1, err := d1.MarshalBinary() - require.NoError(t, err) - sig1, err := signer.Sign(payload1) - require.NoError(t, err) - sd1 := &types.SignedData{Data: *d1, Signature: sig1, Signer: types.Signer{PubKey: pub, Address: addr}} - dataBin1, err := sd1.MarshalBinary() - require.NoError(t, err) - - d2 := &types.Data{ - Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 11, Time: fixedTime}, - Txs: make(types.Txs, 1), - } - d2.Txs[0] = make([]byte, 700*1024) // 700KB transaction - - payload2, err := d2.MarshalBinary() - require.NoError(t, err) - sig2, err := signer.Sign(payload2) - require.NoError(t, err) - sd2 := &types.SignedData{Data: *d2, Signature: sig2, Signer: types.Signer{PubKey: pub, Address: addr}} - dataBin2, err := sd2.MarshalBinary() - require.NoError(t, err) - - d3 := &types.Data{ - Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 12, Time: fixedTime}, - Txs: make(types.Txs, 1), - } - d3.Txs[0] = make([]byte, 700*1024) // 700KB transaction - - payload3, err := d3.MarshalBinary() - require.NoError(t, err) - sig3, err := signer.Sign(payload3) - require.NoError(t, err) - sd3 := &types.SignedData{Data: *d3, Signature: sig3, Signer: types.Signer{PubKey: pub, Address: addr}} - dataBin3, err := sd3.MarshalBinary() - require.NoError(t, err) - - mockDA := testmocks.NewMockDA(t) - - // 
With DAStartHeight=1000, epoch size=3, daHeight=1000 -> epoch boundaries are [1000, 1002] - // RetrieveWithHelpers calls in order: start (1000), end (1002), then intermediate (1001) - - // Check epoch start - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() - - // Fetch epoch start data (height 1000) - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin1}, nil).Once() - - // Check epoch end - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1002), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi3")}, Timestamp: time.Now()}, nil).Once() - - // Fetch epoch end data (height 1002) - should be retrieved but skipped due to size limit - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin3}, nil).Once() - - // Check intermediate height in epoch (height 1001) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1001), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi2")}, Timestamp: time.Now()}, nil).Once() - - // Fetch intermediate height data (height 1001) - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin2}, nil).Once() - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) - - // Should succeed but skip the third blob due to size limit (using continue) - 
require.NoError(t, err) - require.NotNil(t, result) - - // Should only have 2 transactions, third is skipped due to size - require.Len(t, result.Txs, 2) - assert.Equal(t, dataBin1, result.Txs[0]) - assert.Equal(t, dataBin2, result.Txs[1]) - - // Verify total size is within limits - totalSize := len(dataBin1) + len(dataBin2) - assert.LessOrEqual(t, totalSize, int(common.DefaultMaxBlobSize)) - - // Verify that adding the third would have exceeded the limit - totalSizeWithThird := totalSize + len(dataBin3) - assert.Greater(t, totalSizeWithThird, int(common.DefaultMaxBlobSize)) -} - -func TestDARetriever_RetrieveForcedIncludedTxsFromDA_NotAtEpochStart(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - - addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 100, DAEpochForcedInclusion: 10} - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - mockDA := testmocks.NewMockDA(t) - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - - // With DAStartHeight=100, epoch size=10, daHeight=105 -> epoch boundaries are [100, 109] - // But daHeight=105 is NOT the epoch start, so it should be a no-op - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 105) - require.NoError(t, err) - require.NotNil(t, result) - require.Empty(t, result.Txs) - require.Equal(t, uint64(105), result.StartDaHeight) - require.Equal(t, uint64(105), result.EndDaHeight) -} - -func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochStartFromFuture(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - - addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 100, 
DAEpochForcedInclusion: 10} - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() - - mockDA := testmocks.NewMockDA(t) - // With DAStartHeight=1000, epoch size=10, daHeight=1000 -> epoch boundaries are [1000, 1009] - // Mock that height 1000 (epoch start) is from the future - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(nil, fmt.Errorf("%s: not yet available", coreda.ErrHeightFromFuture.Error())).Once() - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) - require.Error(t, err) - require.Nil(t, result) - require.True(t, errors.Is(err, coreda.ErrHeightFromFuture)) - require.Contains(t, err.Error(), "epoch start height 1000 not yet available") -} - -func TestDARetriever_RetrieveForcedIncludedTxsFromDA_EpochEndFromFuture(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - - addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 100, DAEpochForcedInclusion: 10} - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() - - mockDA := testmocks.NewMockDA(t) - // With DAStartHeight=1000, epoch size=10, daHeight=1000 -> epoch boundaries are [1000, 1009] - // Epoch start is available but epoch end (1009) is from the future - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1000), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, 
Timestamp: time.Now()}, nil).Once() - - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1009), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(nil, fmt.Errorf("%s: not yet available", coreda.ErrHeightFromFuture.Error())).Once() - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 1000) - require.Error(t, err) - require.Nil(t, result) - require.True(t, errors.Is(err, coreda.ErrHeightFromFuture)) - require.Contains(t, err.Error(), "epoch end height 1009 not yet available") -} - -func TestDARetriever_RetrieveForcedIncludedTxsFromDA_CompleteEpoch(t *testing.T) { - cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) - require.NoError(t, err) - - addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: 2000, DAEpochForcedInclusion: 3} - - // Prepare forced inclusion transaction data with fixed timestamp - fixedTime := uint64(1234567890) - dataBin1, _ := makeSignedDataBytesWithTime(t, gen.ChainID, 10, addr, pub, signer, 2, fixedTime) - dataBin2, _ := makeSignedDataBytesWithTime(t, gen.ChainID, 11, addr, pub, signer, 1, fixedTime) - dataBin3, _ := makeSignedDataBytesWithTime(t, gen.ChainID, 12, addr, pub, signer, 1, fixedTime) - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() - - mockDA := testmocks.NewMockDA(t) - - // With DAStartHeight=2000, epoch size=3, daHeight=2000 -> epoch boundaries are [2000, 2002] - // RetrieveWithHelpers calls in order: start (2000), end (2002), then intermediate (2001) - - // Check epoch start (2000) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(2000), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, 
namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() - - // Fetch epoch start data (height 2000) - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin1}, nil).Once() - - // Check epoch end (2002) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(2002), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi3")}, Timestamp: time.Now()}, nil).Once() - - // Fetch epoch end data (height 2002) - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin3}, nil).Once() - - // Fetch middle height (2001) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(2001), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi2")}, Timestamp: time.Now()}, nil).Once() - - // Fetch intermediate height data (height 2001) - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin2}, nil).Once() - - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) - - result, err := r.RetrieveForcedIncludedTxsFromDA(context.Background(), 2000) - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Txs, 3) - require.Equal(t, dataBin1, result.Txs[0]) - require.Equal(t, dataBin2, result.Txs[1]) - require.Equal(t, dataBin3, result.Txs[2]) - require.Equal(t, uint64(2000), result.StartDaHeight) - require.Equal(t, uint64(2002), result.EndDaHeight) -} diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 6fbe8735d4..57854817db 100644 --- 
a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -20,6 +20,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" @@ -58,6 +59,7 @@ type Syncer struct { // Handlers daRetriever DARetriever + fiRetriever *da.ForcedInclusionRetriever p2pHandler p2pHandler // Logging @@ -116,7 +118,16 @@ func (s *Syncer) Start(ctx context.Context) error { } // Initialize handlers - s.daRetriever = NewDARetriever(s.da, s.cache, s.config, s.genesis, s.logger) + daClient := da.NewClient(da.Config{ + DA: s.da, + Logger: s.logger, + DefaultTimeout: 30 * time.Second, + Namespace: s.config.DA.GetNamespace(), + DataNamespace: s.config.DA.GetDataNamespace(), + ForcedInclusionNamespace: s.config.DA.GetForcedInclusionNamespace(), + }) + s.daRetriever = NewDARetriever(daClient, s.cache, s.genesis, s.logger) + s.fiRetriever = da.NewForcedInclusionRetriever(daClient, s.genesis, s.logger) s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") @@ -682,14 +693,14 @@ func hashTx(tx []byte) string { // verifyForcedInclusionTxs verifies that all forced inclusion transactions from DA are included in the block func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { - if s.daRetriever == nil { + if s.fiRetriever == nil { return nil } // Retrieve forced inclusion transactions from DA - forcedIncludedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight) + forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(s.ctx, currentState.DAHeight) if err != nil { - if 
errors.Is(err, common.ErrForceInclusionNotConfigured) { + if errors.Is(err, da.ErrForceInclusionNotConfigured) { s.logger.Debug().Msg("forced inclusion namespace not configured, skipping verification") return nil } diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index d4f12815d0..f1f855f911 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -14,6 +14,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -47,7 +48,16 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { Return([]byte("app0"), uint64(1024), nil).Once() mockDA := testmocks.NewMockDA(t) - daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) s := NewSyncer( st, @@ -64,6 +74,7 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { make(chan error, 1), ) s.daRetriever = daRetriever + s.fiRetriever = fiRetriever require.NoError(t, s.initializeState()) s.ctx = context.Background() @@ -122,7 +133,16 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { Return([]byte("app0"), uint64(1024), nil).Once() mockDA := testmocks.NewMockDA(t) - daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: 
cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) s := NewSyncer( st, @@ -139,6 +159,7 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { make(chan error, 1), ) s.daRetriever = daRetriever + s.fiRetriever = fiRetriever require.NoError(t, s.initializeState()) s.ctx = context.Background() @@ -200,7 +221,16 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { Return([]byte("app0"), uint64(1024), nil).Once() mockDA := testmocks.NewMockDA(t) - daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) s := NewSyncer( st, @@ -217,6 +247,7 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { make(chan error, 1), ) s.daRetriever = daRetriever + s.fiRetriever = fiRetriever require.NoError(t, s.initializeState()) s.ctx = context.Background() @@ -280,7 +311,16 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { Return([]byte("app0"), uint64(1024), nil).Once() mockDA := testmocks.NewMockDA(t) - daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) s := 
NewSyncer( st, @@ -297,6 +337,7 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { make(chan error, 1), ) s.daRetriever = daRetriever + s.fiRetriever = fiRetriever require.NoError(t, s.initializeState()) s.ctx = context.Background() @@ -344,7 +385,16 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { Return([]byte("app0"), uint64(1024), nil).Once() mockDA := testmocks.NewMockDA(t) - daRetriever := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + // No ForcedInclusionNamespace - not configured + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) s := NewSyncer( st, @@ -361,6 +411,7 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { make(chan error, 1), ) s.daRetriever = daRetriever + s.fiRetriever = fiRetriever require.NoError(t, s.initializeState()) s.ctx = context.Background() diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 0bbac7363a..baa9b3b138 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -412,7 +412,6 @@ func TestSyncLoopPersistState(t *testing.T) { DaHeight: daHeight, }} daRtrMock.On("RetrieveFromDA", mock.Anything, daHeight).Return(evts, nil) - daRtrMock.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything).Return(&common.ForcedIncludedEvent{Txs: [][]byte{}}, nil).Maybe() prevHeaderHash = sigHeader.Hash() hasher := sha512.New() hasher.Write(prevAppHash) @@ -485,7 +484,6 @@ func TestSyncLoopPersistState(t *testing.T) { p2pHndlMock.On("SetProcessedHeight", mock.Anything).Return().Maybe() syncerInst2.daRetriever, syncerInst2.p2pHandler = daRtrMock, p2pHndlMock - daRtrMock.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, 
mock.Anything).Return(&common.ForcedIncludedEvent{Txs: [][]byte{}}, nil).Maybe() daRtrMock.On("RetrieveFromDA", mock.Anything, mock.Anything). Run(func(arg mock.Arguments) { cancel() diff --git a/block/public.go b/block/public.go index ce777da6e0..c06ad6ea55 100644 --- a/block/public.go +++ b/block/public.go @@ -1,11 +1,11 @@ package block import ( - "fmt" + "context" + "time" - "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/syncing" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -33,27 +33,42 @@ func NopMetrics() *Metrics { return common.NopMetrics() } -// NewDaRetriver creates a new DA retriever instance. -func NewDARetriever( - da coreda.DA, +// ErrForceInclusionNotConfigured is returned when force inclusion is not configured. +// It is exported because sequencers need to check for this error. +var ErrForceInclusionNotConfigured = da.ErrForceInclusionNotConfigured + +// DAClient is the interface representing the DA client for public use. 
+type DAClient = da.Client + +// ForcedInclusionEvent represents forced inclusion transactions retrieved from DA +type ForcedInclusionEvent = da.ForcedInclusionEvent + +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*da.ForcedInclusionEvent, error) +} + +// NewDAClient creates a new DA client with configuration +func NewDAClient( + daLayer coreda.DA, config config.Config, - genesis genesis.Genesis, logger zerolog.Logger, -) (syncing.DARetriever, error) { - cacheManager, err := cache.NewCacheManager(config, logger) - if err != nil { - return nil, fmt.Errorf("failed to create cache manager: %w", err) - } - - return syncing.NewDARetriever( - da, - cacheManager, - config, - genesis, - logger, - ), nil +) DAClient { + return da.NewClient(da.Config{ + DA: daLayer, + Logger: logger, + DefaultTimeout: 10 * time.Second, + Namespace: config.DA.GetNamespace(), + DataNamespace: config.DA.GetDataNamespace(), + ForcedInclusionNamespace: config.DA.GetForcedInclusionNamespace(), + }) } -// ErrForceInclusionNotConfigured is returned when force inclusion is not configured. -// It is exported because sequencers needs to check for this error. 
-var ErrForceInclusionNotConfigured = common.ErrForceInclusionNotConfigured +// NewForcedInclusionRetriever creates a new forced inclusion retriever +func NewForcedInclusionRetriever( + client DAClient, + genesis genesis.Genesis, + logger zerolog.Logger, +) ForcedInclusionRetriever { + return da.NewForcedInclusionRetriever(client, genesis, logger) +} diff --git a/da/internal/mocks/da.go b/da/internal/mocks/da.go index 37539d5480..bb3ad63391 100644 --- a/da/internal/mocks/da.go +++ b/da/internal/mocks/da.go @@ -112,126 +112,6 @@ func (_c *MockDA_Commit_Call) RunAndReturn(run func(ctx context.Context, blobs [ return _c } -// GasMultiplier provides a mock function for the type MockDA -func (_mock *MockDA) GasMultiplier(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasMultiplier") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasMultiplier_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasMultiplier' -type MockDA_GasMultiplier_Call struct { - *mock.Call -} - -// GasMultiplier is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasMultiplier(ctx interface{}) *MockDA_GasMultiplier_Call { - return &MockDA_GasMultiplier_Call{Call: _e.mock.On("GasMultiplier", ctx)} -} - -func (_c *MockDA_GasMultiplier_Call) Run(run func(ctx context.Context)) *MockDA_GasMultiplier_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, 
- ) - }) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) Return(f float64, err error) *MockDA_GasMultiplier_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasMultiplier_Call { - _c.Call.Return(run) - return _c -} - -// GasPrice provides a mock function for the type MockDA -func (_mock *MockDA) GasPrice(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasPrice") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPrice' -type MockDA_GasPrice_Call struct { - *mock.Call -} - -// GasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasPrice(ctx interface{}) *MockDA_GasPrice_Call { - return &MockDA_GasPrice_Call{Call: _e.mock.On("GasPrice", ctx)} -} - -func (_c *MockDA_GasPrice_Call) Run(run func(ctx context.Context)) *MockDA_GasPrice_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasPrice_Call) Return(f float64, err error) *MockDA_GasPrice_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasPrice_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasPrice_Call { - _c.Call.Return(run) - return _c -} - // Get provides a mock function for the type 
MockDA func (_mock *MockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { ret := _mock.Called(ctx, ids, namespace) diff --git a/go.mod b/go.mod index 0e386175cc..49467466e2 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,7 @@ require ( golang.org/x/net v0.46.0 golang.org/x/sync v0.17.0 google.golang.org/protobuf v1.36.10 + gotest.tools/v3 v3.5.2 ) require ( @@ -53,6 +54,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/flatbuffers v24.12.23+incompatible // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect diff --git a/go.sum b/go.sum index 2c1c2db026..ac49443213 100644 --- a/go.sum +++ b/go.sum @@ -650,6 +650,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index d22627baa6..4dbc876879 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -26,8 +26,6 @@ import ( "github.com/evstack/ev-node/pkg/signer/file" ) -const DefaultMaxBlobSize = 1.5 * 1024 * 1024 // 
1.5MB - // ParseConfig is an helpers that loads the node configuration and validates it. func ParseConfig(cmd *cobra.Command) (rollconf.Config, error) { nodeConfig, err := rollconf.Load(cmd) diff --git a/sequencers/based/based_test.go b/sequencers/based/based_test.go deleted file mode 100644 index ef952b2524..0000000000 --- a/sequencers/based/based_test.go +++ /dev/null @@ -1,483 +0,0 @@ -package based - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/evstack/ev-node/block" - coreda "github.com/evstack/ev-node/core/da" - coresequencer "github.com/evstack/ev-node/core/sequencer" - "github.com/evstack/ev-node/pkg/config" - "github.com/evstack/ev-node/pkg/genesis" -) - -// MockDARetriever is a mock implementation of DARetriever for testing -type MockDARetriever struct { - mock.Mock -} - -func (m *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { - args := m.Called(ctx, daHeight) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*ForcedInclusionEvent), args.Error(1) -} - -// MockDA is a mock implementation of DA for testing -type MockDA struct { - mock.Mock -} - -func (m *MockDA) Submit(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte) ([][]byte, error) { - args := m.Called(ctx, blobs, gasPrice, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) SubmitWithOptions(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte, options []byte) ([][]byte, error) { - args := m.Called(ctx, blobs, gasPrice, namespace, options) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) GetIDs(ctx context.Context, height uint64, 
namespace []byte) (*coreda.GetIDsResult, error) { - args := m.Called(ctx, height, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*coreda.GetIDsResult), args.Error(1) -} - -func (m *MockDA) Get(ctx context.Context, ids [][]byte, namespace []byte) ([][]byte, error) { - args := m.Called(ctx, ids, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) GetProofs(ctx context.Context, ids [][]byte, namespace []byte) ([]coreda.Proof, error) { - args := m.Called(ctx, ids, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([]coreda.Proof), args.Error(1) -} - -func (m *MockDA) Validate(ctx context.Context, ids [][]byte, proofs []coreda.Proof, namespace []byte) ([]bool, error) { - args := m.Called(ctx, ids, proofs, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([]bool), args.Error(1) -} - -func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ([][]byte, error) { - args := m.Called(ctx, blobs, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) GasPrice(ctx context.Context) (float64, error) { - args := m.Called(ctx) - return args.Get(0).(float64), args.Error(1) -} - -func (m *MockDA) GasMultiplier(ctx context.Context) (float64, error) { - args := m.Called(ctx) - return args.Get(0).(float64), args.Error(1) -} - -func TestNewBasedSequencer(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - require.NotNil(t, seq) - assert.Equal(t, uint64(100), seq.daHeight) - assert.Equal(t, 0, len(seq.txQueue)) -} - -func TestBasedSequencer_SubmitBatchTxs(t 
*testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ChainID: "test-chain"} - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Submit should succeed but be ignored - req := coresequencer.SubmitBatchTxsRequest{ - Id: []byte("test-chain"), - Batch: &coresequencer.Batch{ - Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, - }, - } - - resp, err := seq.SubmitBatchTxs(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - - // Queue should still be empty - assert.Equal(t, 0, len(seq.txQueue)) -} - -func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return forced inclusion transactions - forcedTxs := &ForcedInclusionEvent{ - Txs: [][]byte{[]byte("forced_tx1"), []byte("forced_tx2")}, - StartDaHeight: 101, - EndDaHeight: 105, - } - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(forcedTxs, nil).Once() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 2, len(resp.Batch.Transactions)) - assert.Equal(t, []byte("forced_tx1"), resp.Batch.Transactions[0]) - assert.Equal(t, []byte("forced_tx2"), resp.Batch.Transactions[1]) - - // DA height should be updated - assert.Equal(t, uint64(105), seq.GetDAHeight()) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return empty transactions - emptyEvent := &ForcedInclusionEvent{ - Txs: [][]byte{}, - StartDaHeight: 100, - EndDaHeight: 100, - } - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). - Return(emptyEvent, nil).Once() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 0, len(resp.Batch.Transactions)) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return not configured error - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(nil, block.ErrForceInclusionNotConfigured).Once() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Nil(t, resp.Batch.Transactions) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return height from future error - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). - Return(nil, coreda.ErrHeightFromFuture).Once() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Nil(t, resp.Batch.Transactions) - - // DA height should NOT increment on ErrHeightFromFuture - we wait for DA to catch up - assert.Equal(t, uint64(100), seq.GetDAHeight()) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Create transactions that will exceed max bytes - tx1 := make([]byte, 50) - tx2 := make([]byte, 50) - tx3 := make([]byte, 50) - - forcedTxs := &ForcedInclusionEvent{ - Txs: [][]byte{tx1, tx2, tx3}, - StartDaHeight: 101, - EndDaHeight: 105, - } - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", 
mock.Anything, uint64(100)). - Return(forcedTxs, nil).Once() - - // Request with max bytes that only fits 2 transactions - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 100, // Only fits 2 transactions - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 2, len(resp.Batch.Transactions)) - - // Third transaction should still be in queue - assert.Equal(t, 1, len(seq.txQueue)) - - // Next request should return the remaining transaction - req2 := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 100, - } - - resp2, err := seq.GetNextBatch(context.Background(), req2) - require.NoError(t, err) - require.NotNil(t, resp2) - require.NotNil(t, resp2.Batch) - assert.Equal(t, 1, len(resp2.Batch.Transactions)) - assert.Equal(t, 0, len(seq.txQueue)) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Pre-populate the queue - seq.txQueue = [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - // Should return from queue without calling retriever - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 2, len(resp.Batch.Transactions)) - assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0]) - assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) - assert.Equal(t, 0, len(seq.txQueue)) - - // No expectations on retriever since it shouldn't be called - 
mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_VerifyBatch(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ChainID: "test-chain"} - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - req := coresequencer.VerifyBatchRequest{ - Id: []byte("test-chain"), - BatchData: [][]byte{[]byte("tx1")}, - } - - resp, err := seq.VerifyBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - assert.True(t, resp.Status) -} - -func TestBasedSequencer_SetDAHeight(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - assert.Equal(t, uint64(100), seq.GetDAHeight()) - - seq.SetDAHeight(200) - assert.Equal(t, uint64(200), seq.GetDAHeight()) -} - -func TestBasedSequencer_ConcurrentAccess(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return transactions - forcedTxs := &ForcedInclusionEvent{ - Txs: [][]byte{[]byte("tx1")}, - StartDaHeight: 101, - EndDaHeight: 105, - } - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
- Return(forcedTxs, nil).Maybe() - - // Test concurrent access - done := make(chan bool, 3) - - // Concurrent GetNextBatch calls - go func() { - req := coresequencer.GetNextBatchRequest{Id: []byte("test-chain"), MaxBytes: 1000} - _, _ = seq.GetNextBatch(context.Background(), req) - done <- true - }() - - // Concurrent SetDAHeight calls - go func() { - seq.SetDAHeight(200) - done <- true - }() - - // Concurrent GetDAHeight calls - go func() { - _ = seq.GetDAHeight() - done <- true - }() - - // Wait for all goroutines - timeout := time.After(5 * time.Second) - for i := 0; i < 3; i++ { - select { - case <-done: - case <-timeout: - t.Fatal("test timed out") - } - } -} - -func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return an unexpected error - expectedErr := errors.New("unexpected DA error") - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(nil, expectedErr).Once() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.Error(t, err) - assert.Nil(t, resp) - assert.Equal(t, expectedErr, err) - - mockRetriever.AssertExpectations(t) -} diff --git a/sequencers/based/based.go b/sequencers/based/sequencer.go similarity index 59% rename from sequencers/based/based.go rename to sequencers/based/sequencer.go index 6ed094477a..7636292007 100644 --- a/sequencers/based/based.go +++ b/sequencers/based/sequencer.go @@ -3,7 +3,7 @@ package based import ( "context" "errors" - "sync" + "sync/atomic" "time" "github.com/rs/zerolog" @@ -13,19 +13,12 @@ import ( coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" + seqcommon "github.com/evstack/ev-node/sequencers/common" ) -// ForcedInclusionEvent represents forced inclusion transactions retrieved from DA -type ForcedInclusionEvent = struct { - Txs [][]byte - StartDaHeight uint64 - EndDaHeight uint64 -} - -// DARetriever defines the interface for retrieving forced inclusion transactions from DA -// This interface is intentionally generic to allow different implementations -type DARetriever interface { - RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) } var _ coresequencer.Sequencer = (*BasedSequencer)(nil) @@ -33,34 +26,35 @@ var _ coresequencer.Sequencer = (*BasedSequencer)(nil) // BasedSequencer is a sequencer that only retrieves transactions from the DA layer // via the forced inclusion mechanism. It does not accept transactions from the reaper. 
type BasedSequencer struct { - daRetriever DARetriever + fiRetriever ForcedInclusionRetriever da coreda.DA config config.Config genesis genesis.Genesis logger zerolog.Logger - mu sync.RWMutex - daHeight uint64 + daHeight atomic.Uint64 txQueue [][]byte } // NewBasedSequencer creates a new based sequencer instance func NewBasedSequencer( - daRetriever DARetriever, + fiRetriever ForcedInclusionRetriever, da coreda.DA, config config.Config, genesis genesis.Genesis, logger zerolog.Logger, ) *BasedSequencer { - return &BasedSequencer{ - daRetriever: daRetriever, + bs := &BasedSequencer{ + fiRetriever: fiRetriever, da: da, config: config, genesis: genesis, logger: logger.With().Str("component", "based_sequencer").Logger(), - daHeight: genesis.DAStartHeight, txQueue: make([][]byte, 0), } + bs.SetDAHeight(genesis.DAStartHeight) // will be overridden by the executor + + return bs } // SubmitBatchTxs does nothing for a based sequencer as it only pulls from DA @@ -73,30 +67,13 @@ func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.S // GetNextBatch retrieves the next batch of transactions from the DA layer // It fetches forced inclusion transactions and returns them as the next batch func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() - - // If we have transactions in the queue, return them first - if len(s.txQueue) > 0 { - batch := s.createBatchFromQueue(req.MaxBytes) - if len(batch.Transactions) > 0 { - s.logger.Debug(). - Int("tx_count", len(batch.Transactions)). - Int("remaining", len(s.txQueue)). 
- Msg("returning batch from queue") - return &coresequencer.GetNextBatchResponse{ - Batch: batch, - Timestamp: time.Now(), - BatchData: req.LastBatchData, - }, nil - } - } + currentDAHeight := s.daHeight.Load() - // Fetch forced inclusion transactions from DA - s.logger.Debug().Uint64("da_height", s.daHeight).Msg("fetching forced inclusion transactions from DA") + s.logger.Debug().Uint64("da_height", currentDAHeight).Msg("fetching forced inclusion transactions from DA") - forcedTxsEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) + forcedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) if err != nil { + // Check if forced inclusion is not configured if errors.Is(err, block.ErrForceInclusionNotConfigured) { s.logger.Error().Msg("forced inclusion not configured, returning empty batch") return &coresequencer.GetNextBatchResponse{ @@ -104,40 +81,49 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get Timestamp: time.Now(), BatchData: req.LastBatchData, }, nil - } - - // If we get a height from future error, keep the current DA height and return batch - // We'll retry the same height on the next call until DA produces that block - if errors.Is(err, coreda.ErrHeightFromFuture) { + } else if errors.Is(err, coreda.ErrHeightFromFuture) { + // If we get a height from future error, keep the current DA height and return batch + // We'll retry the same height on the next call until DA produces that block s.logger.Debug(). - Uint64("da_height", s.daHeight). + Uint64("da_height", currentDAHeight). 
Msg("DA height from future, waiting for DA to produce block") - return &coresequencer.GetNextBatchResponse{ - Batch: &coresequencer.Batch{Transactions: nil}, - Timestamp: time.Now(), - BatchData: req.LastBatchData, - }, nil + } else { + s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + return nil, err } - - s.logger.Error().Err(err).Uint64("da_height", s.daHeight).Msg("failed to retrieve forced inclusion transactions") - return nil, err } // Update DA height based on the retrieved event - if forcedTxsEvent.EndDaHeight > s.daHeight { - s.daHeight = forcedTxsEvent.EndDaHeight - } else if forcedTxsEvent.StartDaHeight > s.daHeight { - s.daHeight = forcedTxsEvent.StartDaHeight + if forcedTxsEvent.EndDaHeight > currentDAHeight { + s.SetDAHeight(forcedTxsEvent.EndDaHeight) + } else if forcedTxsEvent.StartDaHeight > currentDAHeight { + s.SetDAHeight(forcedTxsEvent.StartDaHeight) } - // Add transactions to queue - s.txQueue = append(s.txQueue, forcedTxsEvent.Txs...) + // Add forced inclusion transactions to the queue with validation + validTxs := 0 + skippedTxs := 0 + for _, tx := range forcedTxsEvent.Txs { + // Validate blob size against absolute maximum + if !seqcommon.ValidateBlobSize(tx) { + s.logger.Warn(). + Uint64("da_height", forcedTxsEvent.StartDaHeight). + Int("blob_size", len(tx)). + Msg("forced inclusion blob exceeds absolute maximum size - skipping") + skippedTxs++ + continue + } + s.txQueue = append(s.txQueue, tx) + validTxs++ + } s.logger.Info(). - Int("tx_count", len(forcedTxsEvent.Txs)). + Int("valid_tx_count", validTxs). + Int("skipped_tx_count", skippedTxs). + Int("queue_size", len(s.txQueue)). Uint64("da_height_start", forcedTxsEvent.StartDaHeight). Uint64("da_height_end", forcedTxsEvent.EndDaHeight). 
- Msg("retrieved forced inclusion transactions from DA") + Msg("processed forced inclusion transactions from DA") batch := s.createBatchFromQueue(req.MaxBytes) @@ -159,7 +145,8 @@ func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Ba for i, tx := range s.txQueue { txSize := uint64(len(tx)) - if totalBytes+txSize > maxBytes && len(batch) > 0 { + // Always respect maxBytes, even for the first transaction + if totalBytes+txSize > maxBytes { // Would exceed max bytes, stop here s.txQueue = s.txQueue[i:] break @@ -187,16 +174,12 @@ func (s *BasedSequencer) VerifyBatch(ctx context.Context, req coresequencer.Veri // SetDAHeight sets the current DA height for the sequencer // This should be called when the sequencer needs to sync to a specific DA height -func (s *BasedSequencer) SetDAHeight(height uint64) { - s.mu.Lock() - defer s.mu.Unlock() - s.daHeight = height - s.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +func (c *BasedSequencer) SetDAHeight(height uint64) { + c.daHeight.Store(height) + c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") } // GetDAHeight returns the current DA height -func (s *BasedSequencer) GetDAHeight() uint64 { - s.mu.RLock() - defer s.mu.RUnlock() - return s.daHeight +func (c *BasedSequencer) GetDAHeight() uint64 { + return c.daHeight.Load() } diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go new file mode 100644 index 0000000000..57866bcaf6 --- /dev/null +++ b/sequencers/based/sequencer_test.go @@ -0,0 +1,606 @@ +package based + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block" + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + 
"github.com/evstack/ev-node/pkg/genesis" +) + +// MockDA is a mock implementation of DA for testing +type MockDA struct { + mock.Mock +} + +func (m *MockDA) Submit(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) SubmitWithOptions(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte, options []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace, options) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + args := m.Called(ctx, height, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*coreda.GetIDsResult), args.Error(1) +} + +func (m *MockDA) Get(ctx context.Context, ids [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetProofs(ctx context.Context, ids [][]byte, namespace []byte) ([]coreda.Proof, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]coreda.Proof), args.Error(1) +} + +func (m *MockDA) Validate(ctx context.Context, ids [][]byte, proofs []coreda.Proof, namespace []byte) ([]bool, error) { + args := m.Called(ctx, ids, proofs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]bool), args.Error(1) +} + +func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + 
return args.Get(0).([][]byte), args.Error(1) +} + +func TestNewBasedSequencer(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + require.NotNil(t, seq) + assert.Equal(t, uint64(100), seq.daHeight.Load()) + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAEpochForcedInclusion: 10, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Submit should succeed but be ignored + req := coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + } + + resp, err := seq.SubmitBatchTxs(context.Background(), req) + + require.NoError(t, err) + require.NotNil(t, resp) + // Transactions should not be added to queue for based sequencer + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { + testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")} + + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, + 
Timestamp: time.Now(), + }, nil) + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("tx2"), resp.Batch.Transactions[1]) + + // DA height should be updated + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + 
require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + // Create config without forced inclusion namespace + cfgNoFI := config.DefaultConfig() + cfgNoFI.DA.ForcedInclusionNamespace = "" + daClient := block.NewDAClient(mockDA, cfgNoFI, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfgNoFI, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) +} + +func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrHeightFromFuture) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + // DA height should remain the 
same + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { + testBlobs := [][]byte{ + make([]byte, 50), // 50 bytes + make([]byte, 60), // 60 bytes + make([]byte, 100), // 100 bytes + } + + mockDA := new(MockDA) + // First call returns forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil).Once() + + // Subsequent calls should return no new forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with max 100 bytes - should get first 2 txs (50 + 60 = 110, but logic allows if batch has content) + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 100, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + // Should get first tx (50 bytes), second tx would exceed limit (50+60=110 > 100) + assert.Equal(t, 1, len(resp.Batch.Transactions)) + assert.Equal(t, 2, len(seq.txQueue)) // 2 remaining in queue + + // Second call should get next tx from queue + resp2, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp2) + 
require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions)) + assert.Equal(t, 1, len(seq.txQueue)) // 1 remaining in queue + + // Third call with larger maxBytes to get the 100-byte tx + req3 := coresequencer.GetNextBatchRequest{ + MaxBytes: 200, + LastBatchData: nil, + } + resp3, err := seq.GetNextBatch(context.Background(), req3) + require.NoError(t, err) + require.NotNil(t, resp3) + require.NotNil(t, resp3.Batch) + assert.Equal(t, 1, len(resp3.Batch.Transactions)) + assert.Equal(t, 0, len(seq.txQueue)) // Queue should be empty + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, mock.Anything, mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Pre-populate the queue + seq.txQueue = [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) + + // Queue should be empty now + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { + mockDA := new(MockDA) + + // First call: 
return a forced tx that will be added to queue + forcedTx := make([]byte, 150) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx}, nil).Once() + + // Second call: no new forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with maxBytes = 100 + // Forced tx (150 bytes) is added to queue, but batch will be empty since it exceeds maxBytes + req1 := coresequencer.GetNextBatchRequest{ + MaxBytes: 100, + LastBatchData: nil, + } + + resp1, err := seq.GetNextBatch(context.Background(), req1) + require.NoError(t, err) + require.NotNil(t, resp1) + require.NotNil(t, resp1.Batch) + assert.Equal(t, 0, len(resp1.Batch.Transactions), "Should have no txs as forced tx exceeds maxBytes") + + // Verify forced tx is in queue + assert.Equal(t, 1, len(seq.txQueue), "Forced tx should be in queue") + + // Second call with larger maxBytes = 200 + // Should process tx from queue + req2 := coresequencer.GetNextBatchRequest{ + MaxBytes: 200, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include tx from queue") + assert.Equal(t, 150, 
len(resp2.Batch.Transactions[0])) + + // Queue should now be empty + assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T) { + mockDA := new(MockDA) + + // Return forced txs where combined they exceed maxBytes + forcedTx1 := make([]byte, 100) + forcedTx2 := make([]byte, 80) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx1, forcedTx2}, nil).Once() + + // Second call + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with maxBytes = 120 + // Should get only first forced tx (100 bytes), second stays in queue + req1 := coresequencer.GetNextBatchRequest{ + MaxBytes: 120, + LastBatchData: nil, + } + + resp1, err := seq.GetNextBatch(context.Background(), req1) + require.NoError(t, err) + require.NotNil(t, resp1) + require.NotNil(t, resp1.Batch) + assert.Equal(t, 1, len(resp1.Batch.Transactions), "Should only include first forced tx") + assert.Equal(t, 100, len(resp1.Batch.Transactions[0])) + + // Verify second tx is still in queue + assert.Equal(t, 1, len(seq.txQueue), "Second tx should be in queue") + + // Second call - should get the second tx from queue + req2 := 
coresequencer.GetNextBatchRequest{ + MaxBytes: 120, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include second tx from queue") + assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) + + // Queue should now be empty + assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_VerifyBatch(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.VerifyBatchRequest{ + Id: []byte("test-chain"), + BatchData: [][]byte{[]byte("tx1")}, + } + + resp, err := seq.VerifyBatch(context.Background(), req) + require.NoError(t, err) + assert.True(t, resp.Status) +} + +func TestBasedSequencer_SetDAHeight(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + seq.SetDAHeight(200) + assert.Equal(t, uint64(200), seq.GetDAHeight()) +} + +func 
TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, errors.New("DA connection error")) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + _, err := seq.GetNextBatch(context.Background(), req) + require.Error(t, err) + + mockDA.AssertExpectations(t) +} diff --git a/sequencers/common/size_validation.go b/sequencers/common/size_validation.go new file mode 100644 index 0000000000..1032f5299f --- /dev/null +++ b/sequencers/common/size_validation.go @@ -0,0 +1,27 @@ +package common + +// TODO(@julienrbrt): technically we may need to check for block gas as well + +const ( + // AbsoluteMaxBlobSize is the absolute maximum size for a single blob (DA layer limit). + // Blobs exceeding this size are invalid and should be rejected permanently. + AbsoluteMaxBlobSize = 1.5 * 1024 * 1024 // 1.5MB +) + +// ValidateBlobSize checks if a single blob exceeds the absolute maximum allowed size. +// This checks against the DA layer limit, not the per-batch limit. +// Returns true if the blob is within the absolute size limit, false otherwise. +func ValidateBlobSize(blob []byte) bool { + return uint64(len(blob)) <= AbsoluteMaxBlobSize +} + +// WouldExceedCumulativeSize checks if adding a blob would exceed the cumulative size limit for a batch. +// Returns true if adding the blob would exceed the limit, false otherwise. 
+func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool { + return uint64(currentSize)+uint64(blobSize) > maxBytes +} + +// GetBlobSize returns the size of a blob in bytes. +func GetBlobSize(blob []byte) int { + return len(blob) +} diff --git a/sequencers/common/size_validation_test.go b/sequencers/common/size_validation_test.go new file mode 100644 index 0000000000..103c66d8be --- /dev/null +++ b/sequencers/common/size_validation_test.go @@ -0,0 +1,141 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateBlobSize(t *testing.T) { + tests := []struct { + name string + blobSize int + want bool + }{ + { + name: "empty blob", + blobSize: 0, + want: true, + }, + { + name: "small blob", + blobSize: 100, + want: true, + }, + { + name: "exactly at limit", + blobSize: int(AbsoluteMaxBlobSize), + want: true, + }, + { + name: "one byte over limit", + blobSize: int(AbsoluteMaxBlobSize) + 1, + want: false, + }, + { + name: "far exceeds limit", + blobSize: int(AbsoluteMaxBlobSize) * 2, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := make([]byte, tt.blobSize) + got := ValidateBlobSize(blob) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestWouldExceedCumulativeSize(t *testing.T) { + tests := []struct { + name string + currentSize int + blobSize int + maxBytes uint64 + want bool + }{ + { + name: "empty batch, small blob", + currentSize: 0, + blobSize: 50, + maxBytes: 100, + want: false, + }, + { + name: "would fit exactly", + currentSize: 50, + blobSize: 50, + maxBytes: 100, + want: false, + }, + { + name: "would exceed by one byte", + currentSize: 50, + blobSize: 51, + maxBytes: 100, + want: true, + }, + { + name: "far exceeds", + currentSize: 80, + blobSize: 100, + maxBytes: 100, + want: true, + }, + { + name: "zero max bytes", + currentSize: 0, + blobSize: 1, + maxBytes: 0, + want: true, + }, + { + name: "current already at limit", + 
currentSize: 100, + blobSize: 1, + maxBytes: 100, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := WouldExceedCumulativeSize(tt.currentSize, tt.blobSize, tt.maxBytes) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestGetBlobSize(t *testing.T) { + tests := []struct { + name string + blobSize int + want int + }{ + { + name: "empty blob", + blobSize: 0, + want: 0, + }, + { + name: "small blob", + blobSize: 42, + want: 42, + }, + { + name: "large blob", + blobSize: 1024 * 1024, + want: 1024 * 1024, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := make([]byte, tt.blobSize) + got := GetBlobSize(blob) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/sequencers/single/queue.go b/sequencers/single/queue.go index dd69c26a2c..d992535ead 100644 --- a/sequencers/single/queue.go +++ b/sequencers/single/queue.go @@ -83,6 +83,26 @@ func (bq *BatchQueue) AddBatch(ctx context.Context, batch coresequencer.Batch) e return nil } +// Prepend adds a batch to the front of the queue (before head position). +// This is used to return transactions that couldn't fit in the current batch. +// The batch is NOT persisted to the DB since these are transactions that were +// already in the queue or were just processed. +func (bq *BatchQueue) Prepend(ctx context.Context, batch coresequencer.Batch) error { + bq.mu.Lock() + defer bq.mu.Unlock() + + // If we have room before head, use it + if bq.head > 0 { + bq.head-- + bq.queue[bq.head] = batch + } else { + // Need to expand the queue at the front + bq.queue = append([]coresequencer.Batch{batch}, bq.queue...) 
+ } + + return nil +} + // Next extracts a batch of transactions from the queue and marks it as processed in the WAL func (bq *BatchQueue) Next(ctx context.Context) (*coresequencer.Batch, error) { bq.mu.Lock() diff --git a/sequencers/single/queue_test.go b/sequencers/single/queue_test.go index 0ede59a90e..b7665ee67f 100644 --- a/sequencers/single/queue_test.go +++ b/sequencers/single/queue_test.go @@ -12,6 +12,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" dssync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -567,3 +568,156 @@ func TestBatchQueue_QueueLimit_Concurrency(t *testing.T) { t.Logf("Successfully added %d batches, rejected %d due to queue being full", addedCount, errorCount) } + +func TestBatchQueue_Prepend(t *testing.T) { + ctx := context.Background() + db := ds.NewMapDatastore() + + t.Run("prepend to empty queue", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-empty", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + batch := coresequencer.Batch{ + Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, + } + + err = queue.Prepend(ctx, batch) + require.NoError(t, err) + + assert.Equal(t, 1, queue.Size()) + + // Next should return the prepended batch + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 2, len(nextBatch.Transactions)) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + }) + + t.Run("prepend to queue with items", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-with-items", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add some batches first + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch2) + require.NoError(t, err) + + 
assert.Equal(t, 2, queue.Size()) + + // Prepend a batch + prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}} + err = queue.Prepend(ctx, prependedBatch) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Next should return the prepended batch first + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 1, len(nextBatch.Transactions)) + assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0]) + + // Then the original batches + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0]) + }) + + t.Run("prepend after consuming some items", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-after-consume", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add batches + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}} + batch3 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx3")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch2) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch3) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Consume first batch + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + assert.Equal(t, 2, queue.Size()) + + // Prepend - should reuse the head position + prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}} + err = queue.Prepend(ctx, prependedBatch) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Should get prepended, then tx2, then tx3 + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0]) + + 
nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx3"), nextBatch.Transactions[0]) + + assert.Equal(t, 0, queue.Size()) + }) + + t.Run("multiple prepends", func(t *testing.T) { + queue := NewBatchQueue(db, "test-multiple-prepends", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add a batch + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + + // Prepend multiple batches + prepend1 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend1")}} + prepend2 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend2")}} + prepend3 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend3")}} + + err = queue.Prepend(ctx, prepend1) + require.NoError(t, err) + err = queue.Prepend(ctx, prepend2) + require.NoError(t, err) + err = queue.Prepend(ctx, prepend3) + require.NoError(t, err) + + assert.Equal(t, 4, queue.Size()) + + // Should get in reverse order of prepending (LIFO for prepended items) + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend3"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend2"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend1"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + }) +} diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index 1dbc145238..e97d7a157e 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -15,6 +15,7 @@ import ( coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" 
"github.com/evstack/ev-node/pkg/genesis" + seqcommon "github.com/evstack/ev-node/sequencers/common" ) var ( @@ -22,16 +23,15 @@ var ( ErrInvalidId = errors.New("invalid chain id") ) -// ForcedInclusionEvent represents forced inclusion transactions retrieved from DA -type ForcedInclusionEvent = struct { - Txs [][]byte - StartDaHeight uint64 - EndDaHeight uint64 +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) } -// DARetriever defines the interface for retrieving forced inclusion transactions from DA -type DARetriever interface { - RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) +// pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch +type pendingForcedInclusionTx struct { + Data []byte + OriginalHeight uint64 } var _ coresequencer.Sequencer = (*Sequencer)(nil) @@ -52,9 +52,10 @@ type Sequencer struct { metrics *Metrics // Forced inclusion support - daRetriever DARetriever - genesis genesis.Genesis - daHeight atomic.Uint64 + fiRetriever ForcedInclusionRetriever + genesis genesis.Genesis + daHeight atomic.Uint64 + pendingForcedInclusionTxs []pendingForcedInclusionTx } // NewSequencer creates a new Single Sequencer @@ -68,19 +69,20 @@ func NewSequencer( metrics *Metrics, proposer bool, maxQueueSize int, - daRetriever DARetriever, + fiRetriever ForcedInclusionRetriever, gen genesis.Genesis, ) (*Sequencer, error) { s := &Sequencer{ - logger: logger, - da: da, - batchTime: batchTime, - Id: id, - queue: NewBatchQueue(db, "batches", maxQueueSize), - metrics: metrics, - proposer: proposer, - daRetriever: daRetriever, - genesis: gen, + logger: logger, + da: da, + batchTime: batchTime, + Id: id, + queue: NewBatchQueue(db, "batches", maxQueueSize), + metrics: metrics, + 
proposer: proposer, + fiRetriever: fiRetriever, + genesis: gen, + pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0), } s.SetDAHeight(gen.DAStartHeight) // will be overridden by the executor @@ -128,51 +130,46 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB return nil, ErrInvalidId } - // Retrieve forced inclusion transactions if DARetriever is configured - var forcedTxs [][]byte currentDAHeight := c.daHeight.Load() - forcedEvent, err := c.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, currentDAHeight) + forcedEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) if err != nil { - // If we get a height from future error, keep the current DA height and return batch - // We'll retry the same height on the next call until DA produces that block + // Continue without forced txs. Add logging for clarity. + if errors.Is(err, coreda.ErrHeightFromFuture) { c.logger.Debug(). Uint64("da_height", currentDAHeight). Msg("DA height from future, waiting for DA to produce block") - - batch, err := c.queue.Next(ctx) - if err != nil { - return nil, err - } - - return &coresequencer.GetNextBatchResponse{ - Batch: batch, - Timestamp: time.Now(), - BatchData: req.LastBatchData, - }, nil - } - - // If forced inclusion is not configured, continue without forced txs - if !errors.Is(err, block.ErrForceInclusionNotConfigured) { + } else if !errors.Is(err, block.ErrForceInclusionNotConfigured) { c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") - // Continue without forced txs on other errors } - } else { - forcedTxs = forcedEvent.Txs - - // Update DA height based on the retrieved event - if forcedEvent.EndDaHeight > currentDAHeight { - c.SetDAHeight(forcedEvent.EndDaHeight) - } else if forcedEvent.StartDaHeight > currentDAHeight { - c.SetDAHeight(forcedEvent.StartDaHeight) + + // Still create an empty forced inclusion event + forcedEvent = 
&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: currentDAHeight, + EndDaHeight: currentDAHeight, } + } - c.logger.Info(). - Int("tx_count", len(forcedEvent.Txs)). - Uint64("da_height_start", forcedEvent.StartDaHeight). - Uint64("da_height_end", forcedEvent.EndDaHeight). - Msg("retrieved forced inclusion transactions from DA") + // Always try to process forced inclusion transactions (including pending from previous epochs) + forcedTxs := c.processForcedInclusionTxs(forcedEvent, req.MaxBytes) + if forcedEvent.EndDaHeight > currentDAHeight { + c.SetDAHeight(forcedEvent.EndDaHeight) + } else if forcedEvent.StartDaHeight > currentDAHeight { + c.SetDAHeight(forcedEvent.StartDaHeight) + } + + c.logger.Debug(). + Int("tx_count", len(forcedTxs)). + Uint64("da_height_start", forcedEvent.StartDaHeight). + Uint64("da_height_end", forcedEvent.EndDaHeight). + Msg("retrieved forced inclusion transactions from DA") + + // Calculate size used by forced inclusion transactions + forcedTxsSize := 0 + for _, tx := range forcedTxs { + forcedTxsSize += len(tx) } batch, err := c.queue.Next(ctx) @@ -181,12 +178,43 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB } // Prepend forced inclusion transactions to the batch + // and ensure total size doesn't exceed maxBytes if len(forcedTxs) > 0 { - batch.Transactions = append(forcedTxs, batch.Transactions...) + // Trim batch transactions to fit within maxBytes + remainingBytes := int(req.MaxBytes) - forcedTxsSize + trimmedBatchTxs := make([][]byte, 0, len(batch.Transactions)) + currentBatchSize := 0 + + for i, tx := range batch.Transactions { + txSize := len(tx) + if currentBatchSize+txSize > remainingBytes { + // Would exceed limit, return remaining txs to the front of the queue + excludedBatch := coresequencer.Batch{Transactions: batch.Transactions[i:]} + if err := c.queue.Prepend(ctx, excludedBatch); err != nil { + c.logger.Error().Err(err). 
+ Int("excluded_count", len(batch.Transactions)-i). + Msg("failed to prepend excluded transactions back to queue") + } else { + c.logger.Debug(). + Int("excluded_count", len(batch.Transactions)-i). + Msg("returned excluded batch transactions to front of queue") + } + break + } + trimmedBatchTxs = append(trimmedBatchTxs, tx) + currentBatchSize += txSize + } + + batch.Transactions = append(forcedTxs, trimmedBatchTxs...) + c.logger.Debug(). Int("forced_tx_count", len(forcedTxs)). + Int("forced_txs_size", forcedTxsSize). + Int("batch_tx_count", len(trimmedBatchTxs)). + Int("batch_size", currentBatchSize). Int("total_tx_count", len(batch.Transactions)). - Msg("prepended forced inclusion transactions to batch") + Int("total_size", forcedTxsSize+currentBatchSize). + Msg("combined forced inclusion and batch transactions") } return &coresequencer.GetNextBatchResponse{ @@ -251,3 +279,94 @@ func (c *Sequencer) SetDAHeight(height uint64) { func (c *Sequencer) GetDAHeight() uint64 { return c.daHeight.Load() } + +// processForcedInclusionTxs processes forced inclusion transactions with size validation and pending queue management +func (c *Sequencer) processForcedInclusionTxs(event *block.ForcedInclusionEvent, maxBytes uint64) [][]byte { + currentSize := 0 + var newPendingTxs []pendingForcedInclusionTx + var validatedTxs [][]byte + + // First, process any pending transactions from previous epochs + for _, pendingTx := range c.pendingForcedInclusionTxs { + txSize := seqcommon.GetBlobSize(pendingTx.Data) + + if !seqcommon.ValidateBlobSize(pendingTx.Data) { + c.logger.Warn(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("blob_size", txSize). + Msg("pending forced inclusion blob exceeds absolute maximum size - skipping") + continue + } + + if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + c.logger.Debug(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("current_size", currentSize). + Int("blob_size", txSize). 
+ Msg("pending blob would exceed max size for this epoch - deferring again") + newPendingTxs = append(newPendingTxs, pendingTx) + continue + } + + validatedTxs = append(validatedTxs, pendingTx.Data) + currentSize += txSize + + c.logger.Debug(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("blob_size", txSize). + Int("current_size", currentSize). + Msg("processed pending forced inclusion transaction") + } + + // Now process new transactions from this epoch + for _, tx := range event.Txs { + txSize := seqcommon.GetBlobSize(tx) + + if !seqcommon.ValidateBlobSize(tx) { + c.logger.Warn(). + Uint64("da_height", event.StartDaHeight). + Int("blob_size", txSize). + Msg("forced inclusion blob exceeds absolute maximum size - skipping") + continue + } + + if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + c.logger.Debug(). + Uint64("da_height", event.StartDaHeight). + Int("current_size", currentSize). + Int("blob_size", txSize). + Msg("blob would exceed max size for this epoch - deferring to pending queue") + + // Store for next call + newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ + Data: tx, + OriginalHeight: event.StartDaHeight, + }) + continue + } + + validatedTxs = append(validatedTxs, tx) + currentSize += txSize + + c.logger.Debug(). + Int("blob_size", txSize). + Int("current_size", currentSize). + Msg("processed forced inclusion transaction") + } + + // Update pending queue + c.pendingForcedInclusionTxs = newPendingTxs + if len(newPendingTxs) > 0 { + c.logger.Info(). + Int("new_pending_count", len(newPendingTxs)). + Msg("stored pending forced inclusion transactions for next epoch") + } + + c.logger.Info(). + Int("processed_tx_count", len(validatedTxs)). + Int("pending_tx_count", len(newPendingTxs)). + Int("current_size", currentSize). 
+		Msg("completed processing forced inclusion transactions")
+
+	return validatedTxs
+}
diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go
index a73c8c0ba3..e80347115d 100644
--- a/sequencers/single/sequencer_test.go
+++ b/sequencers/single/sequencer_test.go
@@ -20,17 +20,17 @@ import (
 	damocks "github.com/evstack/ev-node/test/mocks"
 )
 
-// MockDARetriever is a mock implementation of DARetriever for testing
-type MockDARetriever struct {
+// MockForcedInclusionRetriever is a mock implementation of ForcedInclusionRetriever for testing
+type MockForcedInclusionRetriever struct {
 	mock.Mock
 }
 
-func (m *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) {
+func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) {
 	args := m.Called(ctx, daHeight)
 	if args.Get(0) == nil {
 		return nil, args.Error(1)
 	}
-	return args.Get(0).(*ForcedInclusionEvent), args.Error(1)
+	return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1)
 }
 
 func TestNewSequencer(t *testing.T) {
@@ -41,8 +41,8 @@ func TestNewSequencer(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
 	defer cancel()
 	logger := zerolog.Nop()
-	mockRetriever := new(MockDARetriever)
-	mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything).
+	mockRetriever := new(MockForcedInclusionRetriever)
+	mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { @@ -77,8 +77,8 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { @@ -133,8 +133,8 @@ func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) require.NoError(t, err, "Failed to create sequencer") @@ -176,14 +176,14 @@ func TestSequencer_GetNextBatch_NoLastBatch(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ logger: logger, queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test Id: []byte("test"), - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } defer func() { err := db.Close() @@ -216,14 +216,14 @@ func TestSequencer_GetNextBatch_Success(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ logger: logger, queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test Id: []byte("test"), - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } defer func() { err := db.Close() @@ -279,8 +279,8 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Proposer Mode", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ @@ -289,7 +289,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { proposer: true, da: mockDA, queue: NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } res, err := seq.VerifyBatch(context.Background(), coresequencer.VerifyBatchRequest{Id: seq.Id, BatchData: batchData}) @@ -305,8 +305,8 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Valid Proofs", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ logger: logger, @@ -314,7 +314,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { proposer: false, da: mockDA, queue: NewBatchQueue(db, "valid_proofs_queue", 0), - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -330,8 +330,8 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid Proof", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ logger: logger, @@ -339,7 +339,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { proposer: false, da: mockDA, queue: NewBatchQueue(db, "invalid_proof_queue", 0), - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -355,8 +355,8 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("GetProofs Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ logger: logger, @@ -364,7 +364,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { proposer: false, da: mockDA, queue: NewBatchQueue(db, "getproofs_err_queue", 0), - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } expectedErr := errors.New("get proofs failed") @@ -381,8 +381,8 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Validate Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ logger: logger, @@ -390,7 +390,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { proposer: false, da: mockDA, queue: NewBatchQueue(db, "validate_err_queue", 0), - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } expectedErr := errors.New("validate failed") @@ -407,8 +407,8 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid ID", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ @@ -417,7 +417,7 @@ func TestSequencer_VerifyBatch(t *testing.T) { proposer: false, da: mockDA, queue: NewBatchQueue(db, "invalid_queue", 0), - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } invalidId := []byte("invalid") @@ -441,8 +441,8 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { @@ -490,6 +490,254 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { mockDA.AssertExpectations(t) } +func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + // Create in-memory datastore + db := ds.NewMapDatastore() + + // Create mock forced inclusion retriever with txs that are 50 bytes each + mockFI := &MockForcedInclusionRetriever{} + forcedTx1 := make([]byte, 50) + forcedTx2 := make([]byte, 60) + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{forcedTx1, forcedTx2}, // Total 110 bytes + StartDaHeight: 100, + EndDaHeight: 100, + }, nil) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Submit batch txs that are 40 bytes each + batchTx1 := make([]byte, 40) + batchTx2 := make([]byte, 40) + batchTx3 := make([]byte, 40) + + submitReq := coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{batchTx1, batchTx2, batchTx3}, // Total 120 bytes + }, + } + + _, err = seq.SubmitBatchTxs(ctx, submitReq) + require.NoError(t, err) + + // Request batch with maxBytes = 150 + // Forced inclusion: 110 bytes (50 + 60) + // Batch txs: 120 bytes (40 + 40 + 40) + // Combined would be 230 bytes, exceeds 150 + // Should return forced txs + only 1 batch tx (110 + 40 = 150) + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 150, + LastBatchData: nil, + } + + resp, err := 
seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + + // Should have forced txs (2) + partial batch txs + // Total size should not exceed 150 bytes + totalSize := 0 + for _, tx := range resp.Batch.Transactions { + totalSize += len(tx) + } + assert.LessOrEqual(t, totalSize, 150, "Total batch size should not exceed maxBytes") + + // First 2 txs should be forced inclusion txs + assert.GreaterOrEqual(t, len(resp.Batch.Transactions), 2, "Should have at least forced inclusion txs") + assert.Equal(t, forcedTx1, resp.Batch.Transactions[0]) + assert.Equal(t, forcedTx2, resp.Batch.Transactions[1]) + + mockFI.AssertExpectations(t) +} + +func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + db := ds.NewMapDatastore() + + // Create forced inclusion txs where combined they exceed maxBytes + mockFI := &MockForcedInclusionRetriever{} + forcedTx1 := make([]byte, 100) + forcedTx2 := make([]byte, 80) // This would be deferred + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{forcedTx1, forcedTx2}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + // Second call should process pending tx + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Request batch with maxBytes = 120 + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 120, + LastBatchData: nil, + } + + // First call - should get only first forced tx (100 bytes) + resp, err := 
seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + assert.Equal(t, 1, len(resp.Batch.Transactions), "Should only include first forced tx") + assert.Equal(t, 100, len(resp.Batch.Transactions[0])) + + // Verify pending tx is stored + assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Second tx should be pending") + + // Second call - should get the pending forced tx + resp2, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx") + assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) + + // Pending queue should now be empty + assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty") + + mockFI.AssertExpectations(t) +} + +func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + db := ds.NewMapDatastore() + + mockFI := &MockForcedInclusionRetriever{} + + // First call returns a large forced tx that gets deferred + largeForcedTx := make([]byte, 150) + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{largeForcedTx}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + // Second call returns no new forced txs, but pending should still be processed + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Submit a batch tx + batchTx := make([]byte, 50) + submitReq := coresequencer.SubmitBatchTxsRequest{ + Id: 
[]byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{batchTx}, + }, + } + _, err = seq.SubmitBatchTxs(ctx, submitReq) + require.NoError(t, err) + + // First call with maxBytes = 100 + // Large forced tx (150 bytes) won't fit, gets deferred + // Batch tx (50 bytes) should be returned + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 100, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + assert.Equal(t, 1, len(resp.Batch.Transactions), "Should have batch tx only") + assert.Equal(t, 50, len(resp.Batch.Transactions[0])) + + // Verify pending forced tx is stored + assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Large forced tx should be pending") + + // Second call with larger maxBytes = 200 + // Should process pending forced tx first + getReq2 := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 200, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(ctx, getReq2) + require.NoError(t, err) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx") + assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) + + // Pending queue should now be empty + assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty") + + mockFI.AssertExpectations(t) +} + // TestSequencer_RecordMetrics tests the RecordMetrics method to ensure it properly updates metrics. func TestSequencer_RecordMetrics(t *testing.T) { t.Run("With Metrics", func(t *testing.T) { @@ -582,8 +830,8 @@ func TestSequencer_QueueLimit_Integration(t *testing.T) { defer db.Close() mockDA := &damocks.MockDA{} - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
+ mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() // Create a sequencer with a small queue limit for testing @@ -595,7 +843,7 @@ func TestSequencer_QueueLimit_Integration(t *testing.T) { Id: []byte("test"), queue: NewBatchQueue(db, "test_queue", 2), // Very small limit for testing proposer: true, - daRetriever: mockRetriever, + fiRetriever: mockRetriever, } ctx := context.Background() @@ -704,8 +952,8 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { // Create sequencer with small queue size to trigger throttling quickly queueSize := 3 // Small for testing logger := zerolog.Nop() - mockRetriever := new(MockDARetriever) - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq, err := NewSequencer( context.Background(), @@ -717,7 +965,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { nil, // metrics true, // proposer queueSize, - mockRetriever, // daRetriever + mockRetriever, // fiRetriever genesis.Genesis{}, // genesis ) require.NoError(t, err) diff --git a/test/mocks/da.go b/test/mocks/da.go index 37539d5480..bb3ad63391 100644 --- a/test/mocks/da.go +++ b/test/mocks/da.go @@ -112,126 +112,6 @@ func (_c *MockDA_Commit_Call) RunAndReturn(run func(ctx context.Context, blobs [ return _c } -// GasMultiplier provides a mock function for the type MockDA -func (_mock *MockDA) GasMultiplier(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasMultiplier") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasMultiplier_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasMultiplier' -type MockDA_GasMultiplier_Call struct { - *mock.Call -} - -// GasMultiplier is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasMultiplier(ctx interface{}) *MockDA_GasMultiplier_Call { - return &MockDA_GasMultiplier_Call{Call: _e.mock.On("GasMultiplier", ctx)} -} - -func (_c *MockDA_GasMultiplier_Call) Run(run func(ctx context.Context)) *MockDA_GasMultiplier_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - 
return _c -} - -func (_c *MockDA_GasMultiplier_Call) Return(f float64, err error) *MockDA_GasMultiplier_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasMultiplier_Call { - _c.Call.Return(run) - return _c -} - -// GasPrice provides a mock function for the type MockDA -func (_mock *MockDA) GasPrice(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasPrice") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPrice' -type MockDA_GasPrice_Call struct { - *mock.Call -} - -// GasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasPrice(ctx interface{}) *MockDA_GasPrice_Call { - return &MockDA_GasPrice_Call{Call: _e.mock.On("GasPrice", ctx)} -} - -func (_c *MockDA_GasPrice_Call) Run(run func(ctx context.Context)) *MockDA_GasPrice_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasPrice_Call) Return(f float64, err error) *MockDA_GasPrice_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasPrice_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasPrice_Call { - _c.Call.Return(run) - return _c -} - // Get provides a mock function for the type MockDA func 
(_mock *MockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { ret := _mock.Called(ctx, ids, namespace) diff --git a/types/CLAUDE.md b/types/CLAUDE.md index 9cd5496e56..aafdd289a2 100644 --- a/types/CLAUDE.md +++ b/types/CLAUDE.md @@ -77,17 +77,16 @@ The types package defines the core data structures and types used throughout ev- - Signature verification - Identity validation -### DA Integration (`da.go`, `da_test.go`) +### DA Integration -- **Purpose**: Data Availability layer helpers -- **Key Functions**: - - `SubmitWithHelpers`: DA submission with error handling +- **Purpose**: Data Availability layer helpers moved to `block/internal/da` package +- **See**: `block/internal/da/client.go` for DA submission and retrieval logic - **Key Features**: - - Error mapping to status codes + - Error mapping to status codes (in DA Client) - Namespace support - Gas price configuration - Submission options handling -- **Status Codes**: +- **Status Codes** (defined in `core/da`): - `StatusContextCanceled`: Submission canceled - `StatusNotIncludedInBlock`: Transaction timeout - `StatusAlreadyInMempool`: Duplicate transaction diff --git a/types/da.go b/types/da.go deleted file mode 100644 index e0d58710d9..0000000000 --- a/types/da.go +++ /dev/null @@ -1,212 +0,0 @@ -package types - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/rs/zerolog" - - coreda "github.com/evstack/ev-node/core/da" -) - -// SubmitWithHelpers performs blob submission using the underlying DA layer, -// handling error mapping to produce a ResultSubmit. -// It assumes blob size filtering is handled within the DA implementation's Submit. -// It mimics the logic previously found in da.DAClient.Submit. 
-func SubmitWithHelpers( - ctx context.Context, - da coreda.DA, // Use the core DA interface - logger zerolog.Logger, - data [][]byte, - gasPrice float64, - namespace []byte, - options []byte, -) coreda.ResultSubmit { // Return core ResultSubmit type - ids, err := da.SubmitWithOptions(ctx, data, gasPrice, namespace, options) - - // calculate blob size - var blobSize uint64 - for _, blob := range data { - blobSize += uint64(len(blob)) - } - - // Handle errors returned by Submit - if err != nil { - if errors.Is(err, context.Canceled) { - logger.Debug().Msg("DA submission canceled via helper due to context cancellation") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusContextCanceled, - Message: "submission canceled", - IDs: ids, - BlobSize: blobSize, - }, - } - } - status := coreda.StatusError - switch { - case errors.Is(err, coreda.ErrTxTimedOut): - status = coreda.StatusNotIncludedInBlock - case errors.Is(err, coreda.ErrTxAlreadyInMempool): - status = coreda.StatusAlreadyInMempool - case errors.Is(err, coreda.ErrTxIncorrectAccountSequence): - status = coreda.StatusIncorrectAccountSequence - case errors.Is(err, coreda.ErrBlobSizeOverLimit): - status = coreda.StatusTooBig - case errors.Is(err, coreda.ErrContextDeadline): - status = coreda.StatusContextDeadline - } - - // Use debug level for StatusTooBig as it gets handled later in submitToDA through recursive splitting - if status == coreda.StatusTooBig { - logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed via helper") - } else { - logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed via helper") - } - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: status, - Message: "failed to submit blobs: " + err.Error(), - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: 0, - Timestamp: time.Now(), - BlobSize: blobSize, - }, - } - } - - if len(ids) == 0 && len(data) > 0 { - logger.Warn().Msg("DA 
submission via helper returned no IDs for non-empty input data") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - } - - // Get height from the first ID - var height uint64 - if len(ids) > 0 { - height, _, err = coreda.SplitID(ids[0]) - if err != nil { - logger.Error().Err(err).Msg("failed to split ID") - } - } - - logger.Debug().Int("num_ids", len(ids)).Msg("DA submission successful via helper") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: height, - BlobSize: blobSize, - Timestamp: time.Now(), - }, - } -} - -// RetrieveWithHelpers performs blob retrieval using the underlying DA layer, -// handling error mapping to produce a ResultRetrieve. -// It mimics the logic previously found in da.DAClient.Retrieve. -// requestTimeout defines the timeout for the each retrieval request. -func RetrieveWithHelpers( - ctx context.Context, - da coreda.DA, - logger zerolog.Logger, - dataLayerHeight uint64, - namespace []byte, - requestTimeout time.Duration, -) coreda.ResultRetrieve { - // 1. 
Get IDs - getIDsCtx, cancel := context.WithTimeout(ctx, requestTimeout) - defer cancel() - idsResult, err := da.GetIDs(getIDsCtx, dataLayerHeight, namespace) - if err != nil { - // Handle specific "not found" error - if strings.Contains(err.Error(), coreda.ErrBlobNotFound.Error()) { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusHeightFromFuture, - Message: coreda.ErrHeightFromFuture.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - // Handle other errors during GetIDs - logger.Error().Uint64("height", dataLayerHeight).Err(err).Msg("Retrieve helper: Failed to get IDs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: fmt.Sprintf("failed to get IDs: %s", err.Error()), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - - // This check should technically be redundant if GetIDs correctly returns ErrBlobNotFound - if idsResult == nil || len(idsResult.IDs) == 0 { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: No IDs found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - // 2. 
Get Blobs using the retrieved IDs in batches - batchSize := 100 - blobs := make([][]byte, 0, len(idsResult.IDs)) - for i := 0; i < len(idsResult.IDs); i += batchSize { - end := min(i+batchSize, len(idsResult.IDs)) - - getBlobsCtx, cancel := context.WithTimeout(ctx, requestTimeout) - batchBlobs, err := da.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) - cancel() - if err != nil { - // Handle errors during Get - logger.Error().Uint64("height", dataLayerHeight).Int("num_ids", len(idsResult.IDs)).Err(err).Msg("Retrieve helper: Failed to get blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: fmt.Sprintf("failed to get blobs for batch %d-%d: %s", i, end-1, err.Error()), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - blobs = append(blobs, batchBlobs...) - } - // Success - logger.Debug().Uint64("height", dataLayerHeight).Int("num_blobs", len(blobs)).Msg("Retrieve helper: Successfully retrieved blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, - Height: dataLayerHeight, - IDs: idsResult.IDs, - Timestamp: idsResult.Timestamp, - }, - Data: blobs, - } -} diff --git a/types/da_test.go b/types/da_test.go deleted file mode 100644 index 4a111499dc..0000000000 --- a/types/da_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package types_test - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - - coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/test/mocks" - "github.com/evstack/ev-node/types" -) - -func TestSubmitWithHelpers(t *testing.T) { - logger := zerolog.Nop() - - testCases := []struct { - name string - data [][]byte - gasPrice float64 - options []byte - submitErr error - submitIDs [][]byte - expectedCode coreda.StatusCode - expectedErrMsg string - expectedIDs [][]byte - expectedCount uint64 - }{ - { - name: 
"successful submission", - data: [][]byte{[]byte("blob1"), []byte("blob2")}, - gasPrice: 1.0, - options: []byte("opts"), - submitIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCode: coreda.StatusSuccess, - expectedIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCount: 2, - }, - { - name: "context canceled error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: context.Canceled, - expectedCode: coreda.StatusContextCanceled, - expectedErrMsg: "submission canceled", - }, - { - name: "tx timed out error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxTimedOut, - expectedCode: coreda.StatusNotIncludedInBlock, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxTimedOut.Error(), - }, - { - name: "tx already in mempool error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxAlreadyInMempool, - expectedCode: coreda.StatusAlreadyInMempool, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxAlreadyInMempool.Error(), - }, - { - name: "incorrect account sequence error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxIncorrectAccountSequence, - expectedCode: coreda.StatusIncorrectAccountSequence, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxIncorrectAccountSequence.Error(), - }, - { - name: "blob size over limit error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrBlobSizeOverLimit, - expectedCode: coreda.StatusTooBig, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrBlobSizeOverLimit.Error(), - }, - { - name: "context deadline error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrContextDeadline, - expectedCode: coreda.StatusContextDeadline, - expectedErrMsg: "failed to submit blobs: " + 
coreda.ErrContextDeadline.Error(), - }, - { - name: "generic submission error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: errors.New("some generic error"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to submit blobs: some generic error", - }, - { - name: "no IDs returned for non-empty data", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitIDs: [][]byte{}, - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - mockDA := mocks.NewMockDA(t) - encodedNamespace := coreda.NamespaceFromString("test-namespace") - - mockDA.On("SubmitWithOptions", mock.Anything, tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options).Return(tc.submitIDs, tc.submitErr) - - result := types.SubmitWithHelpers(context.Background(), mockDA, logger, tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options) - - assert.Equal(t, tc.expectedCode, result.Code) - if tc.expectedErrMsg != "" { - assert.Contains(t, result.Message, tc.expectedErrMsg) - } - if tc.expectedIDs != nil { - assert.Equal(t, tc.expectedIDs, result.IDs) - } - if tc.expectedCount != 0 { - assert.Equal(t, tc.expectedCount, result.SubmittedCount) - } - mockDA.AssertExpectations(t) - }) - } -} - -func TestRetrieveWithHelpers(t *testing.T) { - logger := zerolog.Nop() - dataLayerHeight := uint64(100) - mockIDs := [][]byte{[]byte("id1"), []byte("id2")} - mockBlobs := [][]byte{[]byte("blobA"), []byte("blobB")} - mockTimestamp := time.Now() - - testCases := []struct { - name string - getIDsResult *coreda.GetIDsResult - getIDsErr error - getBlobsErr error - expectedCode coreda.StatusCode - expectedErrMsg string - expectedIDs [][]byte - expectedData [][]byte - expectedHeight uint64 - }{ - { - name: "successful retrieval", - getIDsResult: &coreda.GetIDsResult{ - IDs: mockIDs, - 
Timestamp: mockTimestamp, - }, - expectedCode: coreda.StatusSuccess, - expectedIDs: mockIDs, - expectedData: mockBlobs, - expectedHeight: dataLayerHeight, - }, - { - name: "blob not found error during GetIDs", - getIDsErr: coreda.ErrBlobNotFound, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "height from future error during GetIDs", - getIDsErr: coreda.ErrHeightFromFuture, - expectedCode: coreda.StatusHeightFromFuture, - expectedErrMsg: coreda.ErrHeightFromFuture.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "generic error during GetIDs", - getIDsErr: errors.New("failed to connect to DA"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to get IDs: failed to connect to DA", - expectedHeight: dataLayerHeight, - }, - { - name: "GetIDs returns nil result", - getIDsResult: nil, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "GetIDs returns empty IDs", - getIDsResult: &coreda.GetIDsResult{ - IDs: [][]byte{}, - Timestamp: mockTimestamp, - }, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "error during Get (blobs retrieval)", - getIDsResult: &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, - getBlobsErr: errors.New("network error during blob retrieval"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to get blobs for batch 0-1: network error during blob retrieval", - expectedHeight: dataLayerHeight, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - mockDA := mocks.NewMockDA(t) - encodedNamespace := coreda.NamespaceFromString("test-namespace") - - mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Return(tc.getIDsResult, tc.getIDsErr) - - if tc.getIDsErr == nil && tc.getIDsResult 
!= nil && len(tc.getIDsResult.IDs) > 0 { - mockDA.On("Get", mock.Anything, tc.getIDsResult.IDs, mock.Anything).Return(mockBlobs, tc.getBlobsErr) - } - - result := types.RetrieveWithHelpers(context.Background(), mockDA, logger, dataLayerHeight, encodedNamespace.Bytes(), 5*time.Second) - - assert.Equal(t, tc.expectedCode, result.Code) - assert.Equal(t, tc.expectedHeight, result.Height) - if tc.expectedErrMsg != "" { - assert.Contains(t, result.Message, tc.expectedErrMsg) - } - if tc.expectedIDs != nil { - assert.Equal(t, tc.expectedIDs, result.IDs) - } - if tc.expectedData != nil { - assert.Equal(t, tc.expectedData, result.Data) - } - mockDA.AssertExpectations(t) - }) - } -} - -func TestRetrieveWithHelpers_Timeout(t *testing.T) { - logger := zerolog.Nop() - dataLayerHeight := uint64(100) - encodedNamespace := coreda.NamespaceFromString("test-namespace") - - t.Run("timeout during GetIDs", func(t *testing.T) { - mockDA := mocks.NewMockDA(t) - - // Mock GetIDs to block until context is cancelled - mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Run(func(args mock.Arguments) { - ctx := args.Get(0).(context.Context) - <-ctx.Done() // Wait for context cancellation - }).Return(nil, context.DeadlineExceeded) - - // Use a very short timeout to ensure it triggers - result := types.RetrieveWithHelpers(context.Background(), mockDA, logger, dataLayerHeight, encodedNamespace.Bytes(), 1*time.Millisecond) - - assert.Equal(t, coreda.StatusError, result.Code) - assert.Contains(t, result.Message, "failed to get IDs") - assert.Contains(t, result.Message, "context deadline exceeded") - mockDA.AssertExpectations(t) - }) - - t.Run("timeout during Get", func(t *testing.T) { - mockDA := mocks.NewMockDA(t) - mockIDs := [][]byte{[]byte("id1")} - mockTimestamp := time.Now() - - // Mock GetIDs to succeed - mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Return(&coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, nil) - - // Mock Get to 
block until context is cancelled - mockDA.On("Get", mock.Anything, mockIDs, mock.Anything).Run(func(args mock.Arguments) { - ctx := args.Get(0).(context.Context) - <-ctx.Done() // Wait for context cancellation - }).Return(nil, context.DeadlineExceeded) - - // Use a very short timeout to ensure it triggers - result := types.RetrieveWithHelpers(context.Background(), mockDA, logger, dataLayerHeight, encodedNamespace.Bytes(), 1*time.Millisecond) - - assert.Equal(t, coreda.StatusError, result.Code) - assert.Contains(t, result.Message, "failed to get blobs for batch") - assert.Contains(t, result.Message, "context deadline exceeded") - mockDA.AssertExpectations(t) - }) -} From 4f6ea3fd8709896195854a09e0094a1ec1cb7836 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 17 Nov 2025 15:24:13 +0100 Subject: [PATCH 37/39] remove file --- sequencers/based/README.md | 95 -------------------------------------- 1 file changed, 95 deletions(-) delete mode 100644 sequencers/based/README.md diff --git a/sequencers/based/README.md b/sequencers/based/README.md deleted file mode 100644 index c93fd513f1..0000000000 --- a/sequencers/based/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# Based Sequencer - -## Overview - -The Based Sequencer is a sequencer implementation that retrieves transactions exclusively from the Data Availability (DA) layer via the forced inclusion mechanism. Unlike traditional sequencers that accept transactions from mempools or external sources, the based sequencer only processes transactions that have been posted to the DA layer's forced inclusion namespace. - -## What is a Based Sequencer? - -A "based" sequencer (also known as "based rollup") is a rollup architecture where transaction ordering is derived entirely from the base layer (DA layer) rather than from a centralized sequencer. 
This provides several benefits: - -- **Censorship Resistance**: Users can submit transactions directly to DA, bypassing the sequencer -- **Decentralization**: No single entity controls transaction ordering -- **Liveness**: The rollup continues operating as long as the DA layer is available -- **Trustless**: Users don't need to trust the sequencer to include their transactions - -## Architecture - -``` -┌─────────────────────────────────────────────────────────────┐ -│ Based Sequencer │ -│ │ -│ ┌────────────────┐ ┌─────────────────┐ │ -│ │ Transaction │ │ DA Retriever │ │ -│ │ Queue │◄────────│ (Interface) │ │ -│ └────────────────┘ └─────────────────┘ │ -│ │ │ │ -│ │ │ │ -│ ▼ ▼ │ -│ ┌────────────────┐ ┌─────────────────┐ │ -│ │ GetNextBatch │ │ Fetch Forced │ │ -│ │ (Method) │────────►│ Inclusion Txs │ │ -│ └────────────────┘ └─────────────────┘ │ -│ │ │ -└──────────────────────────────────────┼───────────────────────┘ - │ - ▼ - ┌─────────────────┐ - │ DA Layer │ - │ (Forced Inc. │ - │ Namespace) │ - └─────────────────┘ -``` - -## Features - -- **DA-Only Transaction Source**: Fetches transactions exclusively from DA forced inclusion namespace -- **Batch Size Management**: Respects MaxBytes limits when creating batches -- **Transaction Queue**: Buffers transactions when they exceed batch size limits -- **DA Height Tracking**: Maintains synchronization with DA layer height -- **Concurrent-Safe**: Thread-safe operations with mutex protection -- **Automatic Height Management**: Handles "height from future" errors gracefully - -## Interface Compliance - -The Based Sequencer implements the `core/sequencer.Sequencer` interface: - -```go -type Sequencer interface { - SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) - GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) - VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) -} -``` - -## Configuration - -The 
based sequencer uses the following configuration: - -From `config.Config`: - -- `DA.ForcedInclusionNamespace`: Namespace for forced inclusion transactions - -From `genesis.Genesis`: - -- `DAEpochForcedInclusion`: Number of DA blocks to scan per fetch (consensus parameter) - -If `ForcedInclusionNamespace` is not configured, the sequencer returns empty batches. - -## Performance Considerations - -- **Batching**: Transactions are batched to reduce DA queries -- **Queue**: In-memory queue prevents repeated DA fetches -- **Mutex Protection**: Thread-safe but may block on concurrent access -- **DA Epoch**: Set `DAEpochForcedInclusion` in genesis to balance freshness vs. efficiency - -## Comparison to Traditional Sequencer - -| Feature | Traditional Sequencer | Based Sequencer | -| --------------------- | --------------------- | --------------- | -| Transaction Source | Mempool, RPC | DA Layer Only | -| Censorship Resistance | Low | High | -| Centralization | High | Low | -| Latency | Low | Higher | -| MEV Opportunity | High | Low | -| Trust Requirements | High | Low | From c65c330ac05524886510822755cd24264a3efede Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 17 Nov 2025 15:27:52 +0100 Subject: [PATCH 38/39] update adr with ai --- .../adr/adr-019-forced-inclusion-mechanism.md | 184 +++++++++++++++--- 1 file changed, 158 insertions(+), 26 deletions(-) diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index ddc30b16fe..51e1be63cf 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -175,26 +175,86 @@ The single sequencer is enhanced to fetch and include forced transactions: ```go type Sequencer struct { // ... existing fields ... 
- daRetriever DARetriever - genesis genesis.Genesis - mu sync.RWMutex - daHeight uint64 + fiRetriever ForcedInclusionRetriever + genesis genesis.Genesis + daHeight atomic.Uint64 + pendingForcedInclusionTxs []pendingForcedInclusionTx + queue *BatchQueue +} + +type pendingForcedInclusionTx struct { + Data []byte + OriginalHeight uint64 } func (s *Sequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { // 1. Fetch forced inclusion transactions from DA - forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) + forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight.Load()) + + // 2. Process forced txs with size validation and pending queue + forcedTxs := s.processForcedInclusionTxs(forcedEvent, req.MaxBytes) - // 2. Get batch from mempool + // 3. Get batch from mempool queue batch, err := s.queue.Next(ctx) - // 3. Prepend forced transactions to batch - if len(forcedEvent.Txs) > 0 { - batch.Transactions = append(forcedEvent.Txs, batch.Transactions...) + // 4. Prepend forced txs and trim batch to fit MaxBytes + if len(forcedTxs) > 0 { + forcedTxsSize := calculateSize(forcedTxs) + remainingBytes := req.MaxBytes - forcedTxsSize + + // Trim batch transactions to fit + trimmedBatchTxs := trimToSize(batch.Transactions, remainingBytes) + + // Return excluded txs to front of queue + if len(trimmedBatchTxs) < len(batch.Transactions) { + excludedBatch := batch.Transactions[len(trimmedBatchTxs):] + s.queue.Prepend(ctx, Batch{Transactions: excludedBatch}) + } + + batch.Transactions = append(forcedTxs, trimmedBatchTxs...) 
} return &GetNextBatchResponse{Batch: batch} } + +// processForcedInclusionTxs validates and queues forced txs +func (s *Sequencer) processForcedInclusionTxs(event *ForcedInclusionEvent, maxBytes uint64) [][]byte { + var validatedTxs [][]byte + var newPendingTxs []pendingForcedInclusionTx + currentSize := 0 + + // Process pending txs from previous epochs first + for _, pendingTx := range s.pendingForcedInclusionTxs { + if !ValidateBlobSize(pendingTx.Data) { + continue // Skip blobs exceeding absolute DA limit + } + if WouldExceedCumulativeSize(currentSize, len(pendingTx.Data), maxBytes) { + newPendingTxs = append(newPendingTxs, pendingTx) + continue + } + validatedTxs = append(validatedTxs, pendingTx.Data) + currentSize += len(pendingTx.Data) + } + + // Process new txs from this epoch + for _, tx := range event.Txs { + if !ValidateBlobSize(tx) { + continue // Skip blobs exceeding absolute DA limit + } + if WouldExceedCumulativeSize(currentSize, len(tx), maxBytes) { + newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ + Data: tx, + OriginalHeight: event.StartDaHeight, + }) + continue + } + validatedTxs = append(validatedTxs, tx) + currentSize += len(tx) + } + + s.pendingForcedInclusionTxs = newPendingTxs + return validatedTxs +} ``` #### Based Sequencer @@ -203,7 +263,7 @@ A new sequencer implementation that ONLY retrieves transactions from DA: ```go type BasedSequencer struct { - daRetriever DARetriever + fiRetriever ForcedInclusionRetriever da coreda.DA config config.Config genesis genesis.Genesis @@ -214,11 +274,20 @@ type BasedSequencer struct { } func (s *BasedSequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { - // Fetch forced inclusion transactions from DA - forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(ctx, s.daHeight) - // Add transactions to queue - s.txQueue = append(s.txQueue, forcedEvent.Txs...) 
+ + // Always fetch forced inclusion transactions from DA + forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight) + if err != nil && !errors.Is(err, ErrHeightFromFuture) { + return nil, err + } + + // Validate and add transactions to queue + for _, tx := range forcedEvent.Txs { + if ValidateBlobSize(tx) { + s.txQueue = append(s.txQueue, tx) + } + } // Create batch from queue respecting MaxBytes batch := s.createBatchFromQueue(req.MaxBytes) @@ -304,9 +373,37 @@ if errors.Is(err, coreda.ErrHeightFromFuture) { } ``` +#### Size Validation and Max Bytes Handling + +Both sequencers enforce strict size limits to prevent DoS and ensure batches never exceed the DA layer's limits: + +```go +// Size validation utilities +const AbsoluteMaxBlobSize = 1.5 * 1024 * 1024 // 1.5MB DA layer limit + +// ValidateBlobSize checks against absolute DA layer limit +func ValidateBlobSize(blob []byte) bool { + return uint64(len(blob)) <= AbsoluteMaxBlobSize +} + +// WouldExceedCumulativeSize checks against per-batch limit +func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool { + return uint64(currentSize)+uint64(blobSize) > maxBytes +} +``` + +**Key Behaviors**: + +- **Absolute validation**: Blobs exceeding 1.5MB are permanently rejected +- **Batch size limits**: `req.MaxBytes` is NEVER exceeded in any batch +- **Transaction preservation**: + - Single sequencer: Trimmed batch txs returned to queue via `Prepend()` + - Based sequencer: Excess txs remain in `txQueue` for next batch + - Forced txs that don't fit go to `pendingForcedInclusionTxs` (single) or stay in `txQueue` (based) + #### Transaction Queue Management -The based sequencer uses a queue to handle transactions exceeding batch size: +The based sequencer uses a simplified queue to handle transactions: ```go func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *Batch { @@ -315,20 +412,28 @@ func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *Batch { for i, tx 
:= range s.txQueue { txSize := uint64(len(tx)) - if totalBytes+txSize > maxBytes && len(batch) > 0 { - // Would exceed max bytes, stop here + // Always respect maxBytes, even for first transaction + if totalBytes+txSize > maxBytes { + // Would exceed max bytes, keep remaining in queue s.txQueue = s.txQueue[i:] break } batch = append(batch, tx) totalBytes += txSize + + // Clear queue if we processed everything + if i == len(s.txQueue)-1 { + s.txQueue = s.txQueue[:0] + } } return &Batch{Transactions: batch} } ``` +**Note**: The based sequencer is simpler than the single sequencer - it doesn't need a separate pending queue because `txQueue` naturally handles all transaction buffering. + ### Configuration ```go @@ -442,6 +547,10 @@ based_sequencer = true # Use based sequencer 3. **Fetch at Epoch Start**: Prevents duplicate fetches as DA height progresses 4. **Transaction Queue**: Buffers excess transactions across multiple blocks 5. **Conditional Fetching**: Only when forced inclusion namespace is configured +6. **Size Pre-validation**: Invalid blobs rejected early, before batch construction +7. **Efficient Queue Operations**: + - Single sequencer: `Prepend()` reuses space before head position + - Based sequencer: Simple slice operations for queue management **DA Query Frequency**: @@ -451,15 +560,20 @@ Every `DAEpochForcedInclusion` DA blocks 1. **Malicious Proposer Detection**: Full nodes reject blocks missing forced transactions 2. **No Timing Attacks**: Epoch boundaries are deterministic, no time-based logic -3. **Blob Size Limits**: Enforces maximum blob size to prevent DoS +3. **Blob Size Limits**: Two-tier size validation prevents DoS + - Absolute limit (1.5MB): Blobs exceeding this are permanently rejected + - Batch limit (`MaxBytes`): Ensures no batch exceeds DA submission limits 4. **Graceful Degradation**: Continues operation if forced inclusion not configured 5. **Height Validation**: Handles "height from future" errors without state corruption +6. 
**Transaction Preservation**: No valid transactions are lost due to size constraints +7. **Strict MaxBytes Enforcement**: Batches NEVER exceed `req.MaxBytes`, preventing DA layer rejections **Attack Vectors**: - **Censorship**: Mitigated by forced inclusion verification -- **DA Spam**: Limited by DA layer's native spam protection and blob size limits +- **DA Spam**: Limited by DA layer's native spam protection and two-tier blob size limits - **Block Withholding**: Full nodes can fetch and verify from DA independently +- **Oversized Batches**: Prevented by strict size validation at multiple levels ### Testing Strategy @@ -471,17 +585,32 @@ Every `DAEpochForcedInclusion` DA blocks - Blob size validation - Empty epoch handling -2. **Single Sequencer**: - - Forced transaction prepending +2. **Size Validation**: + - Individual blob size validation (absolute limit) + - Cumulative size checking (batch limit) + - Edge cases (empty blobs, exact limits, exceeding limits) + +3. **Single Sequencer**: + - Forced transaction prepending with size constraints + - Batch trimming when forced + batch exceeds MaxBytes + - Trimmed transactions returned to queue via Prepend + - Pending forced inclusion queue management - DA height tracking - Error handling -3. **Based Sequencer**: - - Queue management - - Batch size limits +4. **BatchQueue**: + - Prepend operation (empty queue, with items, after consuming) + - Multiple prepends (LIFO ordering) + - Space reuse before head position + +5. **Based Sequencer**: + - Queue management with size validation + - Batch size limits strictly enforced + - Transaction buffering across batches - DA-only operation + - Always checking for new forced txs -4. **Syncer Verification**: +6. **Syncer Verification**: - All forced txs included (pass) - Missing forced txs (fail) - No forced txs (pass) @@ -543,10 +672,13 @@ Accepted and Implemented 1. **Censorship Resistance**: Users have guaranteed path to include transactions 2. 
**Verifiable**: Full nodes enforce forced inclusion, detecting malicious sequencers 3. **Simple Design**: No complex timing mechanisms or fallback modes -4. **Based Rollup Option**: Fully DA-driven transaction ordering available +4. **Based Rollup Option**: Fully DA-driven transaction ordering available (simplified implementation) 5. **Optional**: Forced inclusion can be disabled for permissioned deployments 6. **Efficient**: Epoch-based fetching minimizes DA queries 7. **Flexible**: Configurable epoch size allows tuning latency vs efficiency +8. **Robust Size Handling**: Two-tier size validation prevents DoS and DA rejections +9. **Transaction Preservation**: All valid transactions are preserved in queues, nothing is lost +10. **Strict MaxBytes Compliance**: Batches never exceed limits, preventing DA submission failures ### Negative From 096d70ef0c2230f1a717bb21f72a8239dff32c47 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 17 Nov 2025 15:50:45 +0100 Subject: [PATCH 39/39] rebase --- .../internal/da/forced_inclusion_retriever.go | 298 +++++++- .../da/forced_inclusion_retriever_test.go | 8 + block/internal/syncing/da_retriever.go | 662 +----------------- block/internal/syncing/syncer.go | 2 +- .../syncing/syncer_forced_inclusion_test.go | 5 + block/public.go | 3 + sequencers/based/based_test.go | 505 ------------- sequencers/based/sequencer.go | 3 +- sequencers/based/sequencer_test.go | 29 +- sequencers/single/sequencer_test.go | 5 - 10 files changed, 336 insertions(+), 1184 deletions(-) delete mode 100644 sequencers/based/based_test.go diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go index 5f50473386..8b0375634c 100644 --- a/block/internal/da/forced_inclusion_retriever.go +++ b/block/internal/da/forced_inclusion_retriever.go @@ -4,6 +4,9 @@ import ( "context" "errors" "fmt" + "sync" + "sync/atomic" + "time" "github.com/rs/zerolog" @@ -12,15 +15,120 @@ import ( "github.com/evstack/ev-node/types" 
) +const ( + // defaultEpochLag is the default number of blocks to lag behind DA height when fetching forced inclusion txs + defaultEpochLag = 10 + + // defaultMinEpochWindow is the minimum window size for epoch lag calculation + defaultMinEpochWindow = 5 + + // defaultMaxEpochWindow is the maximum window size for epoch lag calculation + defaultMaxEpochWindow = 100 + + // defaultFetchInterval is the interval between async fetch attempts + defaultFetchInterval = 2 * time.Second +) + // ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") +// epochCache stores fetched forced inclusion events by epoch start height +type epochCache struct { + events atomic.Pointer[map[uint64]*ForcedInclusionEvent] + fetchTimes atomic.Pointer[[]time.Duration] + maxSamples int +} + +func newEpochCache(maxSamples int) *epochCache { + c := &epochCache{ + maxSamples: maxSamples, + } + initialEvents := make(map[uint64]*ForcedInclusionEvent) + c.events.Store(&initialEvents) + initialTimes := make([]time.Duration, 0, maxSamples) + c.fetchTimes.Store(&initialTimes) + return c +} + +func (c *epochCache) get(epochStart uint64) (*ForcedInclusionEvent, bool) { + events := c.events.Load() + event, ok := (*events)[epochStart] + return event, ok +} + +func (c *epochCache) set(epochStart uint64, event *ForcedInclusionEvent) { + for { + oldEventsPtr := c.events.Load() + oldEvents := *oldEventsPtr + newEvents := make(map[uint64]*ForcedInclusionEvent, len(oldEvents)+1) + for k, v := range oldEvents { + newEvents[k] = v + } + newEvents[epochStart] = event + if c.events.CompareAndSwap(oldEventsPtr, &newEvents) { + return + } + } +} + +func (c *epochCache) recordFetchTime(duration time.Duration) { + for { + oldTimesPtr := c.fetchTimes.Load() + oldTimes := *oldTimesPtr + newTimes := make([]time.Duration, 0, c.maxSamples) + newTimes = append(newTimes, oldTimes...) 
+ newTimes = append(newTimes, duration) + if len(newTimes) > c.maxSamples { + newTimes = newTimes[1:] + } + if c.fetchTimes.CompareAndSwap(oldTimesPtr, &newTimes) { + return + } + } +} + +func (c *epochCache) averageFetchTime() time.Duration { + timesPtr := c.fetchTimes.Load() + times := *timesPtr + if len(times) == 0 { + return 0 + } + var sum time.Duration + for _, d := range times { + sum += d + } + return sum / time.Duration(len(times)) +} + +func (c *epochCache) cleanup(beforeEpoch uint64) { + for { + oldEventsPtr := c.events.Load() + oldEvents := *oldEventsPtr + newEvents := make(map[uint64]*ForcedInclusionEvent) + for epoch, event := range oldEvents { + if epoch >= beforeEpoch { + newEvents[epoch] = event + } + } + if c.events.CompareAndSwap(oldEventsPtr, &newEvents) { + return + } + } +} + // ForcedInclusionRetriever handles retrieval of forced inclusion transactions from DA. type ForcedInclusionRetriever struct { client Client genesis genesis.Genesis logger zerolog.Logger daEpochSize uint64 + + // Async forced inclusion fetching + epochCache *epochCache + fetcherCtx context.Context + fetcherCancel context.CancelFunc + fetcherWg sync.WaitGroup + currentDAHeight atomic.Uint64 } // ForcedInclusionEvent contains forced inclusion transactions retrieved from DA. 
@@ -36,22 +144,173 @@ func NewForcedInclusionRetriever( genesis genesis.Genesis, logger zerolog.Logger, ) *ForcedInclusionRetriever { - return &ForcedInclusionRetriever{ - client: client, - genesis: genesis, - logger: logger.With().Str("component", "forced_inclusion_retriever").Logger(), - daEpochSize: genesis.DAEpochForcedInclusion, + ctx, cancel := context.WithCancel(context.Background()) + + r := &ForcedInclusionRetriever{ + client: client, + genesis: genesis, + logger: logger.With().Str("component", "forced_inclusion_retriever").Logger(), + daEpochSize: genesis.DAEpochForcedInclusion, + epochCache: newEpochCache(10), // Keep last 10 fetch times for averaging + fetcherCtx: ctx, + fetcherCancel: cancel, + } + r.currentDAHeight.Store(genesis.DAStartHeight) + + // Start background fetcher if forced inclusion is configured + if client.HasForcedInclusionNamespace() { + r.fetcherWg.Add(1) + go r.backgroundFetcher() + } + + return r +} + +// StopBackgroundFetcher stops the background fetcher goroutine +func (r *ForcedInclusionRetriever) StopBackgroundFetcher() { + if r.fetcherCancel != nil { + r.fetcherCancel() + } + r.fetcherWg.Wait() +} + +// SetDAHeight updates the current DA height for async fetching +func (r *ForcedInclusionRetriever) SetDAHeight(height uint64) { + for { + current := r.currentDAHeight.Load() + if height <= current { + return + } + if r.currentDAHeight.CompareAndSwap(current, height) { + return + } + } +} + +// GetDAHeight returns the current DA height +func (r *ForcedInclusionRetriever) GetDAHeight() uint64 { + return r.currentDAHeight.Load() +} + +// calculateAdaptiveEpochWindow calculates the epoch lag window based on average fetch time +func (r *ForcedInclusionRetriever) calculateAdaptiveEpochWindow() uint64 { + avgFetchTime := r.epochCache.averageFetchTime() + if avgFetchTime == 0 { + return defaultEpochLag + } + + // Scale window based on fetch time: faster fetches = smaller window + // If fetch takes 1 second, window = 5 + // If fetch takes 
5 seconds, window = 25 + // If fetch takes 10 seconds, window = 50 + window := uint64(avgFetchTime.Seconds() * 5) + + if window < defaultMinEpochWindow { + window = defaultMinEpochWindow + } + if window > defaultMaxEpochWindow { + window = defaultMaxEpochWindow + } + + return window +} + +// backgroundFetcher continuously fetches forced inclusion transactions ahead of time +func (r *ForcedInclusionRetriever) backgroundFetcher() { + defer r.fetcherWg.Done() + + ticker := time.NewTicker(defaultFetchInterval) + defer ticker.Stop() + + r.logger.Info().Msg("started background forced inclusion fetcher") + + for { + select { + case <-r.fetcherCtx.Done(): + r.logger.Info().Msg("stopped background forced inclusion fetcher") + return + case <-ticker.C: + r.fetchNextEpoch() + } + } +} + +// fetchNextEpoch fetches the next epoch that should be available based on current DA height and lag +func (r *ForcedInclusionRetriever) fetchNextEpoch() { + currentHeight := r.GetDAHeight() + if currentHeight == 0 { + return + } + + window := r.calculateAdaptiveEpochWindow() + + // Calculate which epoch the sequencer will need soon (lagging behind current height) + // We want to prefetch this epoch before it's actually requested + laggedHeight := currentHeight + if currentHeight > window { + laggedHeight = currentHeight - window + } + + epochStart, _ := types.CalculateEpochBoundaries(laggedHeight, r.genesis.DAStartHeight, r.daEpochSize) + + // Check if we already have this epoch cached + if _, ok := r.epochCache.get(epochStart); ok { + return + } + + // Fetch this epoch in the background + r.logger.Debug(). + Uint64("current_height", currentHeight). + Uint64("lagged_height", laggedHeight). + Uint64("epoch_start", epochStart). + Uint64("window", window). 
+ Msg("fetching epoch in background") + + startTime := time.Now() + ctx, cancel := context.WithTimeout(r.fetcherCtx, 30*time.Second) + defer cancel() + + event, err := r.fetchEpochSync(ctx, epochStart) + if err != nil { + r.logger.Debug().Err(err).Uint64("epoch_start", epochStart).Msg("failed to fetch epoch in background") + return + } + + // Record fetch time for adaptive window + fetchDuration := time.Since(startTime) + r.epochCache.recordFetchTime(fetchDuration) + + // Cache the event + r.epochCache.set(epochStart, event) + + r.logger.Debug(). + Uint64("epoch_start", epochStart). + Int("tx_count", len(event.Txs)). + Dur("fetch_duration", fetchDuration). + Msg("cached epoch in background") + + // Cleanup old epochs (keep last 5 epochs) + if epochStart >= r.genesis.DAStartHeight+r.daEpochSize*5 { + cleanupBefore := epochStart - r.daEpochSize*5 + if cleanupBefore < r.genesis.DAStartHeight { + cleanupBefore = r.genesis.DAStartHeight + } + r.epochCache.cleanup(cleanupBefore) } } // RetrieveForcedIncludedTxs retrieves forced inclusion transactions at the given DA height. // It respects epoch boundaries and only fetches at epoch start. +// Uses cached results from background fetcher when available. func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { if !r.client.HasForcedInclusionNamespace() { return nil, ErrForceInclusionNotConfigured } - epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + // Update our tracking of DA height + r.SetDAHeight(daHeight) + + epochStart, _ := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) if daHeight != epochStart { r.logger.Debug(). 
@@ -66,8 +325,28 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context }, nil } - // We're at epoch start - fetch transactions from DA - currentEpochNumber := types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + // Check if we have this epoch cached from background fetcher + if cachedEvent, ok := r.epochCache.get(epochStart); ok { + r.logger.Debug(). + Uint64("epoch_start", epochStart). + Int("tx_count", len(cachedEvent.Txs)). + Msg("using cached forced inclusion transactions") + return cachedEvent, nil + } + + // Not cached, fetch synchronously + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Msg("cache miss, fetching forced inclusion transactions synchronously") + + return r.fetchEpochSync(ctx, epochStart) +} + +// fetchEpochSync synchronously fetches an entire epoch's forced inclusion transactions +func (r *ForcedInclusionRetriever) fetchEpochSync(ctx context.Context, epochStart uint64) (*ForcedInclusionEvent, error) { + epochEnd := epochStart + r.daEpochSize - 1 + currentEpochNumber := types.CalculateEpochNumber(epochStart, r.genesis.DAStartHeight, r.daEpochSize) event := &ForcedInclusionEvent{ StartDaHeight: epochStart, @@ -75,11 +354,10 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context } r.logger.Debug(). - Uint64("da_height", daHeight). Uint64("epoch_start", epochStart). Uint64("epoch_end", epochEnd). Uint64("epoch_num", currentEpochNumber). 
- Msg("retrieving forced included transactions from DA") + Msg("fetching forced included transactions from DA") epochStartResult := r.client.RetrieveForcedInclusion(ctx, epochStart) if epochStartResult.Code == coreda.StatusHeightFromFuture { diff --git a/block/internal/da/forced_inclusion_retriever_test.go b/block/internal/da/forced_inclusion_retriever_test.go index e586125730..0897bc23fd 100644 --- a/block/internal/da/forced_inclusion_retriever_test.go +++ b/block/internal/da/forced_inclusion_retriever_test.go @@ -28,6 +28,7 @@ func TestNewForcedInclusionRetriever(t *testing.T) { } retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) assert.Assert(t, retriever != nil) assert.Equal(t, retriever.daEpochSize, uint64(10)) } @@ -47,6 +48,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoNamespace(t *testi } retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) ctx := context.Background() _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) @@ -69,6 +71,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NotAtEpochStart(t *t } retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) ctx := context.Background() // Height 105 is not an epoch start (100, 110, 120, etc. 
are epoch starts) @@ -113,6 +116,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartSuccess(t } retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) ctx := context.Background() // Height 100 is an epoch start @@ -146,6 +150,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailab } retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) ctx := context.Background() _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) @@ -174,6 +179,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoBlobsAtHeight(t *t } retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) ctx := context.Background() event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) @@ -235,6 +241,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t * } retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) ctx := context.Background() event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) @@ -263,6 +270,7 @@ func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { } retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) tests := []struct { name string diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 33e095931c..c87750b0f5 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -5,567 +5,51 @@ import ( "context" "errors" "fmt" - "sync" - "sync/atomic" - "time" "github.com/rs/zerolog" "google.golang.org/protobuf/proto" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + 
"github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) -// defaultDATimeout is the default timeout for DA retrieval operations -const defaultDATimeout = 10 * time.Second - -// defaultEpochLag is the default number of blocks to lag behind DA height when fetching forced inclusion txs -const defaultEpochLag = 10 - -// defaultMinEpochWindow is the minimum window size for epoch lag calculation -const defaultMinEpochWindow = 5 - -// defaultMaxEpochWindow is the maximum window size for epoch lag calculation -const defaultMaxEpochWindow = 100 - -// defaultFetchInterval is the interval between async fetch attempts -const defaultFetchInterval = 2 * time.Second - -// pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch -// and needs to be retried in future epochs. 
-type pendingForcedInclusionTx struct { - Data []byte // The transaction data - OriginalHeight uint64 // Original DA height where this transaction was found -} - -// epochCache stores fetched forced inclusion events by epoch start height -type epochCache struct { - events atomic.Pointer[map[uint64]*da.ForcedInclusionEvent] - fetchTimes atomic.Pointer[[]time.Duration] - maxSamples int -} - -func newEpochCache(maxSamples int) *epochCache { - c := &epochCache{ - maxSamples: maxSamples, - } - initialEvents := make(map[uint64]*da.ForcedInclusionEvent) - c.events.Store(&initialEvents) - initialTimes := make([]time.Duration, 0, maxSamples) - c.fetchTimes.Store(&initialTimes) - return c -} - -func (c *epochCache) get(epochStart uint64) (*da.ForcedInclusionEvent, bool) { - events := c.events.Load() - event, ok := (*events)[epochStart] - return event, ok -} - -func (c *epochCache) set(epochStart uint64, event *da.ForcedInclusionEvent) { - for { - oldEventsPtr := c.events.Load() - oldEvents := *oldEventsPtr - newEvents := make(map[uint64]*da.ForcedInclusionEvent, len(oldEvents)+1) - for k, v := range oldEvents { - newEvents[k] = v - } - newEvents[epochStart] = event - if c.events.CompareAndSwap(oldEventsPtr, &newEvents) { - return - } - } -} - -func (c *epochCache) recordFetchTime(duration time.Duration) { - for { - oldTimesPtr := c.fetchTimes.Load() - oldTimes := *oldTimesPtr - newTimes := make([]time.Duration, 0, c.maxSamples) - newTimes = append(newTimes, oldTimes...) 
- newTimes = append(newTimes, duration) - if len(newTimes) > c.maxSamples { - newTimes = newTimes[1:] - } - if c.fetchTimes.CompareAndSwap(oldTimesPtr, &newTimes) { - return - } - } -} - -func (c *epochCache) averageFetchTime() time.Duration { - timesPtr := c.fetchTimes.Load() - times := *timesPtr - if len(times) == 0 { - return 0 - } - var sum time.Duration - for _, d := range times { - sum += d - } - return sum / time.Duration(len(times)) -} - -func (c *epochCache) cleanup(beforeEpoch uint64) { - for { - oldEventsPtr := c.events.Load() - oldEvents := *oldEventsPtr - newEvents := make(map[uint64]*da.ForcedInclusionEvent) - for epoch, event := range oldEvents { - if epoch >= beforeEpoch { - newEvents[epoch] = event - } - } - if c.events.CompareAndSwap(oldEventsPtr, &newEvents) { - return - } - } -} - // DARetriever defines the interface for retrieving events from the DA layer type DARetriever interface { RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) - RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*da.ForcedInclusionEvent, error) - SetDAHeight(height uint64) - StopBackgroundFetcher() } // daRetriever handles DA retrieval operations for syncing type daRetriever struct { - da coreda.DA + client da.Client cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger - // calculate namespaces bytes once and reuse them - namespaceBz []byte - namespaceDataBz []byte - namespaceForcedInclusionBz []byte - - hasForcedInclusionNs bool - daEpochSize uint64 - // transient cache, only full event need to be passed to the syncer // on restart, will be refetch as da height is updated by syncer pendingHeaders map[uint64]*types.SignedHeader pendingData map[uint64]*types.Data - - // Forced inclusion transactions that couldn't fit in the current epoch - // and need to be retried in future epochs. 
- pendingForcedInclusionTxs []pendingForcedInclusionTx - - // Async forced inclusion fetching - epochCache *epochCache - fetcherCtx context.Context - fetcherCancel context.CancelFunc - fetcherWg sync.WaitGroup - currentDAHeight atomic.Uint64 } // NewDARetriever creates a new DA retriever func NewDARetriever( - da coreda.DA, + client da.Client, cache cache.CacheManager, - config config.Config, genesis genesis.Genesis, logger zerolog.Logger, ) *daRetriever { - forcedInclusionNs := config.DA.GetForcedInclusionNamespace() - hasForcedInclusionNs := forcedInclusionNs != "" - - var namespaceForcedInclusionBz []byte - if hasForcedInclusionNs { - namespaceForcedInclusionBz = coreda.NamespaceFromString(forcedInclusionNs).Bytes() - } - - ctx, cancel := context.WithCancel(context.Background()) - - r := &daRetriever{ - da: da, - cache: cache, - genesis: genesis, - logger: logger.With().Str("component", "da_retriever").Logger(), - namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), - namespaceForcedInclusionBz: namespaceForcedInclusionBz, - hasForcedInclusionNs: hasForcedInclusionNs, - daEpochSize: genesis.DAEpochForcedInclusion, - pendingHeaders: make(map[uint64]*types.SignedHeader), - pendingData: make(map[uint64]*types.Data), - pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0), - epochCache: newEpochCache(10), // Keep last 10 fetch times for averaging - fetcherCtx: ctx, - fetcherCancel: cancel, - } - r.currentDAHeight.Store(genesis.DAStartHeight) - - // Start background fetcher if forced inclusion is configured - if hasForcedInclusionNs { - r.fetcherWg.Add(1) - go r.backgroundFetcher() - } - - return r -} - -// SetDAHeight updates the current DA height for async fetching -func (r *daRetriever) SetDAHeight(height uint64) { - for { - current := r.currentDAHeight.Load() - if height <= current { - return - } - if r.currentDAHeight.CompareAndSwap(current, 
height) { - return - } - } -} - -// GetDAHeight returns the current DA height -func (r *daRetriever) GetDAHeight() uint64 { - return r.currentDAHeight.Load() -} - -// calculateAdaptiveEpochWindow calculates the epoch lag window based on average fetch time -func (r *daRetriever) calculateAdaptiveEpochWindow() uint64 { - avgFetchTime := r.epochCache.averageFetchTime() - if avgFetchTime == 0 { - return defaultEpochLag - } - - // Scale window based on fetch time: faster fetches = smaller window - // If fetch takes 1 second, window = 5 - // If fetch takes 5 seconds, window = 25 - // If fetch takes 10 seconds, window = 50 - window := uint64(avgFetchTime.Seconds() * 5) - - if window < defaultMinEpochWindow { - window = defaultMinEpochWindow - } - if window > defaultMaxEpochWindow { - window = defaultMaxEpochWindow - } - - return window -} - -// backgroundFetcher continuously fetches forced inclusion transactions ahead of time -func (r *daRetriever) backgroundFetcher() { - defer r.fetcherWg.Done() - - ticker := time.NewTicker(defaultFetchInterval) - defer ticker.Stop() - - r.logger.Info().Msg("started background forced inclusion fetcher") - - for { - select { - case <-r.fetcherCtx.Done(): - r.logger.Info().Msg("stopped background forced inclusion fetcher") - return - case <-ticker.C: - r.fetchNextEpoch() - } - } -} - -// fetchNextEpoch fetches the next epoch that should be available based on current DA height and lag -func (r *daRetriever) fetchNextEpoch() { - currentHeight := r.GetDAHeight() - if currentHeight == 0 { - return - } - - window := r.calculateAdaptiveEpochWindow() - - // Calculate which epoch the sequencer will need soon (lagging behind current height) - // We want to prefetch this epoch before it's actually requested - var targetHeight uint64 - if currentHeight > window { - targetHeight = currentHeight - window - } else { - targetHeight = r.genesis.DAStartHeight - } - - // Calculate epoch boundaries for the target height - epochStart, epochEnd := 
types.CalculateEpochBoundaries(targetHeight, r.genesis.DAStartHeight, r.daEpochSize) - - // Check if we already have this epoch cached - if _, exists := r.epochCache.get(epochStart); exists { - // Already cached, try to fetch the next epoch ahead - nextEpochStart := epochEnd + 1 - nextEpochStart, nextEpochEnd := types.CalculateEpochBoundaries(nextEpochStart, r.genesis.DAStartHeight, r.daEpochSize) - - // Only prefetch next epoch if we're not too far ahead - if nextEpochEnd <= currentHeight { - if _, exists := r.epochCache.get(nextEpochStart); !exists { - epochStart = nextEpochStart - epochEnd = nextEpochEnd - } else { - // Both current and next epoch are cached - return - } - } else { - // Current epoch cached and next epoch is too far ahead - return - } - } - - r.logger.Debug(). - Uint64("current_height", currentHeight). - Uint64("target_height", targetHeight). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", epochEnd). - Uint64("window", window). - Msg("fetching epoch in background") - - startTime := time.Now() - event, err := r.fetchEpochSync(r.fetcherCtx, epochStart, epochEnd) - fetchDuration := time.Since(startTime) - - if err != nil { - // Don't log errors for heights that are from the future - this is expected - if !errors.Is(err, coreda.ErrHeightFromFuture) { - r.logger.Debug(). - Err(err). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", epochEnd). - Msg("failed to fetch epoch in background") - } - return - } - - // Cache the result - r.epochCache.set(epochStart, event) - r.epochCache.recordFetchTime(fetchDuration) - - r.logger.Info(). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", epochEnd). - Int("tx_count", len(event.Txs)). - Dur("fetch_duration", fetchDuration). 
- Msg("cached epoch in background") - - // Cleanup old epochs (keep last 5 epochs) - if epochStart >= r.genesis.DAStartHeight+r.daEpochSize*5 { - cleanupBefore := epochStart - r.daEpochSize*5 - if cleanupBefore < r.genesis.DAStartHeight { - cleanupBefore = r.genesis.DAStartHeight - } - r.epochCache.cleanup(cleanupBefore) - } -} - -// RetrieveForcedIncludedTxsFromDA retrieves forced inclusion transactions from the DA layer. -// -// Behavior: -// - At epoch boundaries (when daHeight == epochStart): fetches new forced-inclusion transactions -// from the DA layer for the entire epoch range, processes them, and returns all that fit within -// the max blob size limit. Transactions that don't fit are stored in the pending queue for retry. -// - Outside epoch boundaries (when daHeight != epochStart): returns any pending transactions from -// the queue that were deferred from previous epochs. -// - Pending transactions are kept in-memory only and will be lost on node restart. -// -// Returns: -// - ForcedIncludedEvent with transactions that should be included in the next block (may be empty) -// - Error if forced inclusion is not configured or DA layer is unavailable -func (r *daRetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*da.ForcedInclusionEvent, error) { - if !r.hasForcedInclusionNs { - return nil, common.ErrForceInclusionNotConfigured - } - - // Update our tracking of DA height - r.SetDAHeight(daHeight) - - // Calculate deterministic epoch boundaries - epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) - - // If we're not at epoch start, return pending transactions only (if any) - if daHeight != epochStart { - r.logger.Debug(). - Uint64("da_height", daHeight). - Uint64("epoch_start", epochStart). - Int("pending_count", len(r.pendingForcedInclusionTxs)). 
- Msg("not at epoch start - returning pending transactions only") - - event := &da.ForcedInclusionEvent{ - StartDaHeight: daHeight, - EndDaHeight: daHeight, - Txs: [][]byte{}, - } - - // Return pending txs if any exist - if len(r.pendingForcedInclusionTxs) > 0 { - pendingTxs, indicesToRemove, _ := r.processPendingForcedInclusionTxs() - event.Txs = pendingTxs - - // Remove successfully included pending transactions - if len(indicesToRemove) > 0 { - r.removePendingForcedInclusionTxs(indicesToRemove) - r.logger.Debug(). - Int("included_count", len(indicesToRemove)). - Int("remaining_count", len(r.pendingForcedInclusionTxs)). - Msg("included pending forced inclusion transactions") - } - } - - return event, nil - } - - // We're at epoch start - check cache first - if cachedEvent, exists := r.epochCache.get(epochStart); exists { - r.logger.Info(). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", epochEnd). - Int("tx_count", len(cachedEvent.Txs)). - Msg("using cached forced inclusion transactions") - - // Create a copy with pending txs prepended - event := &da.ForcedInclusionEvent{ - StartDaHeight: cachedEvent.StartDaHeight, - EndDaHeight: cachedEvent.EndDaHeight, - Txs: make([][]byte, 0, len(cachedEvent.Txs)), - } - - // Prepend pending transactions - if len(r.pendingForcedInclusionTxs) > 0 { - pendingTxs, indicesToRemove, _ := r.processPendingForcedInclusionTxs() - event.Txs = append(event.Txs, pendingTxs...) - - if len(indicesToRemove) > 0 { - r.removePendingForcedInclusionTxs(indicesToRemove) - } - } - - event.Txs = append(event.Txs, cachedEvent.Txs...) - return event, nil - } - - // Not in cache - fetch synchronously (fallback) - r.logger.Debug(). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", epochEnd). 
- Msg("epoch not in cache, fetching synchronously") - - startTime := time.Now() - event, err := r.fetchEpochSync(ctx, epochStart, epochEnd) - if err != nil { - return nil, err - } - - // Record fetch time and cache the result - r.epochCache.recordFetchTime(time.Since(startTime)) - r.epochCache.set(epochStart, event) - - return event, nil -} - -// fetchEpochSync fetches an epoch synchronously (used by both background fetcher and fallback) -func (r *daRetriever) fetchEpochSync(ctx context.Context, epochStart, epochEnd uint64) (*da.ForcedInclusionEvent, error) { - currentEpochNumber := types.CalculateEpochNumber(epochStart, r.genesis.DAStartHeight, r.daEpochSize) - - event := &da.ForcedInclusionEvent{ - StartDaHeight: epochStart, - } - - r.logger.Debug(). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", epochEnd). - Uint64("epoch_num", currentEpochNumber). - Msg("fetching forced included transactions from DA") - - // Check if both epoch start and end are available before fetching - epochStartResult := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochStart, r.namespaceForcedInclusionBz, defaultDATimeout) - if epochStartResult.Code == coreda.StatusHeightFromFuture { - r.logger.Debug(). - Uint64("epoch_start", epochStart). - Msg("epoch start height not yet available on DA - backoff required") - return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart) - } - - epochEndResult := epochStartResult - if epochStart != epochEnd { - epochEndResult = types.RetrieveWithHelpers(ctx, r.da, r.logger, epochEnd, r.namespaceForcedInclusionBz, defaultDATimeout) - if epochEndResult.Code == coreda.StatusHeightFromFuture { - r.logger.Debug(). - Uint64("epoch_end", epochEnd). 
- Msg("epoch end height not yet available on DA - backoff required") - return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd) - } - } - - lastProcessedHeight := epochStart - newPendingTxs := []pendingForcedInclusionTx{} - - // Prepend pending transactions from previous epochs at the start of this epoch - pendingTxs, indicesToRemove, currentSize := r.processPendingForcedInclusionTxs() - event.Txs = pendingTxs - - // Remove successfully included pending transactions - if len(indicesToRemove) > 0 { - r.removePendingForcedInclusionTxs(indicesToRemove) - r.logger.Debug(). - Int("included_count", len(indicesToRemove)). - Int("remaining_count", len(r.pendingForcedInclusionTxs)). - Msg("included pending forced inclusion transactions") - } - - // Process epoch start - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, epochStartResult, epochStart); err != nil { - return nil, err - } - - // Process heights between start and end (exclusive) - for epochHeight := epochStart + 1; epochHeight < epochEnd; epochHeight++ { - result := types.RetrieveWithHelpers(ctx, r.da, r.logger, epochHeight, r.namespaceForcedInclusionBz, defaultDATimeout) - - // If any intermediate height is from future, break early - if result.Code == coreda.StatusHeightFromFuture { - r.logger.Debug(). - Uint64("epoch_height", epochHeight). - Uint64("last_processed", lastProcessedHeight). 
- Msg("reached future DA height within epoch - stopping") - break - } - - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, result, epochHeight); err != nil { - return nil, err - } - } - - // Process epoch end (only if different from start) - if epochEnd != epochStart { - if err := r.processForcedInclusionBlobs(event, ¤tSize, &lastProcessedHeight, &newPendingTxs, epochEndResult, epochEnd); err != nil { - return nil, err - } - } - - // Store any new pending transactions that couldn't fit in this epoch - if len(newPendingTxs) > 0 { - r.pendingForcedInclusionTxs = append(r.pendingForcedInclusionTxs, newPendingTxs...) - r.logger.Info(). - Int("new_pending_count", len(newPendingTxs)). - Int("total_pending_count", len(r.pendingForcedInclusionTxs)). - Msg("stored pending forced inclusion transactions for next epoch") - } - - // Set the DA height range based on what we actually processed - event.StartDaHeight = epochStart - event.EndDaHeight = lastProcessedHeight - - return event, nil -} - -// Stop stops the background fetcher -func (r *daRetriever) StopBackgroundFetcher() { - if r.fetcherCancel != nil { - r.fetcherCancel() - r.fetcherWg.Wait() + return &daRetriever{ + client: client, + cache: cache, + genesis: genesis, + logger: logger.With().Str("component", "da_retriever").Logger(), + pendingHeaders: make(map[uint64]*types.SignedHeader), + pendingData: make(map[uint64]*types.Data), } } @@ -586,83 +70,17 @@ func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co return r.processBlobs(ctx, blobsResp.Data, daHeight), nil } -// processForcedInclusionBlobs processes forced inclusion blobs from a single DA height. -// It accumulates transactions that fit within maxBlobSize and stores excess in newPendingTxs. 
-func (r *daRetriever) processForcedInclusionBlobs( - event *da.ForcedInclusionEvent, - currentSize *int, - lastProcessedHeight *uint64, - newPendingTxs *[]pendingForcedInclusionTx, - result coreda.ResultRetrieve, - daHeight uint64, -) error { - if result.Code != coreda.StatusSuccess { - return nil - } - - if err := r.validateBlobResponse(result, daHeight); !errors.Is(err, coreda.ErrBlobNotFound) && err != nil { - return err - } - - for i, data := range result.Data { - if len(data) > common.DefaultMaxBlobSize { - r.logger.Debug(). - Uint64("da_height", daHeight). - Int("index", i). - Uint64("blob_size", uint64(len(data))). - Msg("Following data exceeds maximum blob size. Skipping...") - continue - } - - // Calculate size of this specific data item - dataSize := len(data) - - // Check if individual blob exceeds max size - if dataSize > int(common.DefaultMaxBlobSize) { - r.logger.Warn(). - Uint64("da_height", daHeight). - Int("blob_size", dataSize). - Float64("max_size", common.DefaultMaxBlobSize). - Msg("forced inclusion blob exceeds maximum size - skipping") - return fmt.Errorf("blob size %d exceeds maximum %f", dataSize, common.DefaultMaxBlobSize) - } - - // Check if adding this blob would exceed the current epoch's max size - if *currentSize+dataSize > int(common.DefaultMaxBlobSize) { - r.logger.Debug(). - Uint64("da_height", daHeight). - Int("current_size", *currentSize). - Int("blob_size", dataSize). 
- Msg("blob would exceed max size for this epoch - deferring to pending queue") - - // Store for next epoch - *newPendingTxs = append(*newPendingTxs, pendingForcedInclusionTx{ - Data: data, - OriginalHeight: daHeight, - }) - continue - } - - // Include this transaction - event.Txs = append(event.Txs, data) - *currentSize += dataSize - *lastProcessedHeight = daHeight - } - - return nil -} - -// fetchBlobs retrieves blobs from the DA layer +// fetchBlobs retrieves blobs from both header and data namespaces func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { - // Retrieve from both namespaces - headerRes := types.RetrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceBz, defaultDATimeout) + // Retrieve from both namespaces using the DA client + headerRes := r.client.RetrieveHeaders(ctx, daHeight) // If namespaces are the same, return header result - if bytes.Equal(r.namespaceBz, r.namespaceDataBz) { + if bytes.Equal(r.client.GetHeaderNamespace(), r.client.GetDataNamespace()) { return headerRes, r.validateBlobResponse(headerRes, daHeight) } - dataRes := types.RetrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceDataBz, defaultDATimeout) + dataRes := r.client.RetrieveData(ctx, daHeight) // Validate responses headerErr := r.validateBlobResponse(headerRes, daHeight) @@ -918,53 +336,3 @@ func createEmptyDataForHeader(ctx context.Context, header *types.SignedHeader) * }, } } - -// processPendingForcedInclusionTxs processes pending transactions and returns those that fit within the max blob size. -// Returns the transactions to include, the indices of transactions to remove, and the total size used. 
-func (r *daRetriever) processPendingForcedInclusionTxs() ([][]byte, []int, int) { - var ( - currentSize int - txs [][]byte - indicesToRemove []int - ) - - for i, pendingTx := range r.pendingForcedInclusionTxs { - dataSize := len(pendingTx.Data) - if currentSize+dataSize > int(common.DefaultMaxBlobSize) { - r.logger.Debug(). - Int("current_size", currentSize). - Int("data_size", dataSize). - Msg("pending transaction would exceed max blob size, will retry later") - break - } - - txs = append(txs, pendingTx.Data) - currentSize += dataSize - indicesToRemove = append(indicesToRemove, i) - } - - return txs, indicesToRemove, currentSize -} - -// removePendingForcedInclusionTxs removes pending transactions at the specified indices. -// Indices must be sorted in ascending order. -func (r *daRetriever) removePendingForcedInclusionTxs(indices []int) { - if len(indices) == 0 { - return - } - - // Create a new slice without the removed elements - newPending := make([]pendingForcedInclusionTx, 0, len(r.pendingForcedInclusionTxs)-len(indices)) - removeMap := make(map[int]bool, len(indices)) - for _, idx := range indices { - removeMap[idx] = true - } - - for i, tx := range r.pendingForcedInclusionTxs { - if !removeMap[i] { - newPending = append(newPending, tx) - } - } - - r.pendingForcedInclusionTxs = newPending -} diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index eb78de2b8f..930906a014 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -157,7 +157,7 @@ func (s *Syncer) Stop() error { s.cancelP2PWait(0) s.wg.Wait() - s.daRetriever.StopBackgroundFetcher() + s.fiRetriever.StopBackgroundFetcher() s.logger.Info().Msg("syncer stopped") return nil diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index f1f855f911..9a0525c5f0 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ 
b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -58,6 +58,7 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { }) daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) s := NewSyncer( st, @@ -143,6 +144,7 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { }) daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) s := NewSyncer( st, @@ -231,6 +233,7 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { }) daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) s := NewSyncer( st, @@ -321,6 +324,7 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { }) daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) s := NewSyncer( st, @@ -395,6 +399,7 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { }) daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) s := NewSyncer( st, diff --git a/block/public.go b/block/public.go index c06ad6ea55..ef633f5004 100644 --- a/block/public.go +++ b/block/public.go @@ -46,6 +46,9 @@ type ForcedInclusionEvent = da.ForcedInclusionEvent // ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA type ForcedInclusionRetriever interface { RetrieveForcedIncludedTxs(ctx 
context.Context, daHeight uint64) (*da.ForcedInclusionEvent, error) + StopBackgroundFetcher() + SetDAHeight(height uint64) + GetDAHeight() uint64 } // NewDAClient creates a new DA client with configuration diff --git a/sequencers/based/based_test.go b/sequencers/based/based_test.go deleted file mode 100644 index 1468498ea4..0000000000 --- a/sequencers/based/based_test.go +++ /dev/null @@ -1,505 +0,0 @@ -package based - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/evstack/ev-node/block" - coreda "github.com/evstack/ev-node/core/da" - coresequencer "github.com/evstack/ev-node/core/sequencer" - "github.com/evstack/ev-node/pkg/config" - "github.com/evstack/ev-node/pkg/genesis" -) - -// MockDARetriever is a mock implementation of DARetriever for testing -type MockDARetriever struct { - mock.Mock -} - -func (m *MockDARetriever) RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { - args := m.Called(ctx, daHeight) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*ForcedInclusionEvent), args.Error(1) -} - -func (m *MockDARetriever) SetDAHeight(height uint64) { - m.Called(height) -} - -// MockDA is a mock implementation of DA for testing -type MockDA struct { - mock.Mock -} - -func (m *MockDA) Submit(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte) ([][]byte, error) { - args := m.Called(ctx, blobs, gasPrice, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) SubmitWithOptions(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte, options []byte) ([][]byte, error) { - args := m.Called(ctx, blobs, gasPrice, namespace, options) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return 
args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - args := m.Called(ctx, height, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*coreda.GetIDsResult), args.Error(1) -} - -func (m *MockDA) Get(ctx context.Context, ids [][]byte, namespace []byte) ([][]byte, error) { - args := m.Called(ctx, ids, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) GetProofs(ctx context.Context, ids [][]byte, namespace []byte) ([]coreda.Proof, error) { - args := m.Called(ctx, ids, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([]coreda.Proof), args.Error(1) -} - -func (m *MockDA) Validate(ctx context.Context, ids [][]byte, proofs []coreda.Proof, namespace []byte) ([]bool, error) { - args := m.Called(ctx, ids, proofs, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([]bool), args.Error(1) -} - -func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ([][]byte, error) { - args := m.Called(ctx, blobs, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) GasPrice(ctx context.Context) (float64, error) { - args := m.Called(ctx) - return args.Get(0).(float64), args.Error(1) -} - -func (m *MockDA) GasMultiplier(ctx context.Context) (float64, error) { - args := m.Called(ctx) - return args.Get(0).(float64), args.Error(1) -} - -func TestNewBasedSequencer(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, 
zerolog.Nop()) - - require.NotNil(t, seq) - assert.Equal(t, uint64(100), seq.GetDAHeight()) - queuePtr := seq.txQueue.Load() - assert.Equal(t, 0, len(*queuePtr)) -} - -func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ChainID: "test-chain"} - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Submit should succeed but be ignored - req := coresequencer.SubmitBatchTxsRequest{ - Id: []byte("test-chain"), - Batch: &coresequencer.Batch{ - Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, - }, - } - - resp, err := seq.SubmitBatchTxs(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - - // Queue should still be empty - queuePtr := seq.txQueue.Load() - assert.Equal(t, 0, len(*queuePtr)) -} - -func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return forced inclusion transactions - forcedTxs := &ForcedInclusionEvent{ - Txs: [][]byte{[]byte("forced_tx1"), []byte("forced_tx2")}, - StartDaHeight: 101, - EndDaHeight: 105, - } - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(forcedTxs, nil).Once() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 2, len(resp.Batch.Transactions)) - assert.Equal(t, []byte("forced_tx1"), resp.Batch.Transactions[0]) - assert.Equal(t, []byte("forced_tx2"), resp.Batch.Transactions[1]) - - // DA height should be updated - assert.Equal(t, uint64(105), seq.GetDAHeight()) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return empty transactions - emptyEvent := &ForcedInclusionEvent{ - Txs: [][]byte{}, - StartDaHeight: 100, - EndDaHeight: 100, - } - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(emptyEvent, nil).Once() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 0, len(resp.Batch.Transactions)) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return not configured error - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). - Return(nil, block.ErrForceInclusionNotConfigured).Once() - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Nil(t, resp.Batch.Transactions) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return height from future error - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(nil, coreda.ErrHeightFromFuture).Once() - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Nil(t, resp.Batch.Transactions) - - // DA height should NOT increment on ErrHeightFromFuture - we wait for DA to catch up - assert.Equal(t, uint64(100), seq.GetDAHeight()) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Create transactions that will exceed max bytes - tx1 := make([]byte, 50) - tx2 := make([]byte, 50) - tx3 := make([]byte, 50) - - forcedTxs := &ForcedInclusionEvent{ - Txs: [][]byte{tx1, tx2, tx3}, - StartDaHeight: 101, - EndDaHeight: 105, - } - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(forcedTxs, nil).Once() - - // Request with max bytes that only fits 2 transactions - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 100, // Only fits 2 transactions - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 2, len(resp.Batch.Transactions)) - - // Third transaction should still be in queue - queuePtr := seq.txQueue.Load() - assert.Equal(t, 1, len(*queuePtr)) - - // Next request should return the remaining transaction - req2 := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 100, - } - - resp2, err := seq.GetNextBatch(context.Background(), req2) - require.NoError(t, err) - require.NotNil(t, resp2) - require.NotNil(t, resp2.Batch) - assert.Equal(t, 1, len(resp2.Batch.Transactions)) - queuePtr = seq.txQueue.Load() - assert.Equal(t, 0, len(*queuePtr)) - - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Pre-populate the queue - initialQueue := [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} - seq.txQueue.Store(&initialQueue) - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - // Should return from queue without calling retriever - resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 2, len(resp.Batch.Transactions)) - assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0]) - assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) 
- queuePtr := seq.txQueue.Load() - assert.Equal(t, 0, len(*queuePtr)) - - // No expectations on retriever since it shouldn't be called - mockRetriever.AssertExpectations(t) -} - -func TestBasedSequencer_VerifyBatch(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ChainID: "test-chain"} - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - req := coresequencer.VerifyBatchRequest{ - Id: []byte("test-chain"), - BatchData: [][]byte{[]byte("tx1")}, - } - - resp, err := seq.VerifyBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - assert.True(t, resp.Status) -} - -func TestBasedSequencer_SetDAHeight(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - assert.Equal(t, uint64(100), seq.GetDAHeight()) - - seq.SetDAHeight(200) - assert.Equal(t, uint64(200), seq.GetDAHeight()) -} - -func TestBasedSequencer_ConcurrentAccess(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return transactions - forcedTxs := &ForcedInclusionEvent{ - Txs: [][]byte{[]byte("tx1")}, - StartDaHeight: 101, - EndDaHeight: 105, - } - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, mock.Anything). 
- Return(forcedTxs, nil).Maybe() - - // Test concurrent access - done := make(chan bool, 3) - - // Concurrent GetNextBatch calls - go func() { - req := coresequencer.GetNextBatchRequest{Id: []byte("test-chain"), MaxBytes: 1000} - _, _ = seq.GetNextBatch(context.Background(), req) - done <- true - }() - - // Concurrent SetDAHeight calls - go func() { - seq.SetDAHeight(200) - done <- true - }() - - // Concurrent GetDAHeight calls - go func() { - _ = seq.GetDAHeight() - done <- true - }() - - // Wait for all goroutines - timeout := time.After(5 * time.Second) - for i := 0; i < 3; i++ { - select { - case <-done: - case <-timeout: - t.Fatal("test timed out") - } - } -} - -func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { - mockRetriever := new(MockDARetriever) - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() - mockDA := new(MockDA) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - } - - seq := NewBasedSequencer(mockRetriever, mockDA, cfg, gen, zerolog.Nop()) - - // Mock retriever to return an unexpected error - expectedErr := errors.New("unexpected DA error") - mockRetriever.On("RetrieveForcedIncludedTxsFromDA", mock.Anything, uint64(100)). 
- Return(nil, expectedErr).Once() - - req := coresequencer.GetNextBatchRequest{ - Id: []byte("test-chain"), - MaxBytes: 10000, - } - - resp, err := seq.GetNextBatch(context.Background(), req) - require.Error(t, err) - assert.Nil(t, resp) - assert.Equal(t, expectedErr, err) - - mockRetriever.AssertExpectations(t) -} diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index ba20062b16..c0b8054e08 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -181,7 +181,8 @@ func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Ba for i, tx := range queue { txSize := uint64(len(tx)) - if totalBytes+txSize > maxBytes && len(batch) > 0 { + // Always respect maxBytes, even for the first transaction + if totalBytes+txSize > maxBytes { // Would exceed max bytes, stop here remaining = queue[i:] break diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index 57866bcaf6..e0a4182488 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -99,7 +99,7 @@ func TestNewBasedSequencer(t *testing.T) { require.NotNil(t, seq) assert.Equal(t, uint64(100), seq.daHeight.Load()) - assert.Equal(t, 0, len(seq.txQueue)) + assert.Equal(t, 0, len(*seq.txQueue.Load())) } func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { @@ -132,7 +132,7 @@ func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { require.NoError(t, err) require.NotNil(t, resp) // Transactions should not be added to queue for based sequencer - assert.Equal(t, 0, len(seq.txQueue)) + assert.Equal(t, 0, len(*seq.txQueue.Load())) } func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { @@ -325,7 +325,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { require.NotNil(t, resp.Batch) // Should get first tx (50 bytes), second tx would exceed limit (50+60=110 > 100) assert.Equal(t, 1, len(resp.Batch.Transactions)) - assert.Equal(t, 2, len(seq.txQueue)) // 2 remaining in 
queue + assert.Equal(t, 2, len(*seq.txQueue.Load())) // 2 remaining in queue // Second call should get next tx from queue resp2, err := seq.GetNextBatch(context.Background(), req) @@ -333,7 +333,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { require.NotNil(t, resp2) require.NotNil(t, resp2.Batch) assert.Equal(t, 1, len(resp2.Batch.Transactions)) - assert.Equal(t, 1, len(seq.txQueue)) // 1 remaining in queue + assert.Equal(t, 1, len(*seq.txQueue.Load())) // 1 remaining in queue // Third call with larger maxBytes to get the 100-byte tx req3 := coresequencer.GetNextBatchRequest{ @@ -345,7 +345,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { require.NotNil(t, resp3) require.NotNil(t, resp3.Batch) assert.Equal(t, 1, len(resp3.Batch.Transactions)) - assert.Equal(t, 0, len(seq.txQueue)) // Queue should be empty + assert.Equal(t, 0, len(*seq.txQueue.Load())) // Queue should be empty mockDA.AssertExpectations(t) } @@ -371,7 +371,8 @@ func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) // Pre-populate the queue - seq.txQueue = [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + preQueue := [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + seq.txQueue.Store(&preQueue) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -387,7 +388,7 @@ func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) // Queue should be empty now - assert.Equal(t, 0, len(seq.txQueue)) + assert.Equal(t, 0, len(*seq.txQueue.Load())) } func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { @@ -401,8 +402,7 @@ func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testin }, nil).Once() mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx}, nil).Once() - // Second call: no new forced txs - 
mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + // Second call: no new DA calls expected, should retrieve from queue gen := genesis.Genesis{ ChainID: "test-chain", @@ -434,7 +434,7 @@ func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testin assert.Equal(t, 0, len(resp1.Batch.Transactions), "Should have no txs as forced tx exceeds maxBytes") // Verify forced tx is in queue - assert.Equal(t, 1, len(seq.txQueue), "Forced tx should be in queue") + assert.Equal(t, 1, len(*seq.txQueue.Load()), "Forced tx should be in queue") // Second call with larger maxBytes = 200 // Should process tx from queue @@ -451,7 +451,7 @@ func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testin assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) // Queue should now be empty - assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + assert.Equal(t, 0, len(*seq.txQueue.Load()), "Queue should be empty") mockDA.AssertExpectations(t) } @@ -468,8 +468,7 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T }, nil).Once() mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx1, forcedTx2}, nil).Once() - // Second call - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + // Second call: no new DA calls expected, should retrieve from queue gen := genesis.Genesis{ ChainID: "test-chain", @@ -502,7 +501,7 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T assert.Equal(t, 100, len(resp1.Batch.Transactions[0])) // Verify second tx is still in queue - assert.Equal(t, 1, len(seq.txQueue), "Second tx should be in queue") + assert.Equal(t, 1, len(*seq.txQueue.Load()), "Second tx should be in queue") // Second call - should get the second tx from queue req2 := coresequencer.GetNextBatchRequest{ @@ -518,7 +517,7 @@ func 
TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) // Queue should now be empty - assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + assert.Equal(t, 0, len(*seq.txQueue.Load()), "Queue should be empty") mockDA.AssertExpectations(t) } diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index fbca671f28..f0d69a0ae1 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -33,10 +33,6 @@ func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Con return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1) } -func (m *MockDARetriever) SetDAHeight(height uint64) { - m.Called(height) -} - func TestNewSequencer(t *testing.T) { // Create a new sequencer with mock DA client dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) @@ -48,7 +44,6 @@ func TestNewSequencer(t *testing.T) { mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err)