From 56789b892a1ab05ae0f56f0f882d4f534cb5a68c Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 24 Sep 2025 10:14:03 -0700 Subject: [PATCH 01/87] add lockctx to save execution results operations --- engine/execution/state/state.go | 7 ++++--- storage/events.go | 8 +++++--- storage/operation/events.go | 13 +++++++++++-- storage/operation/results.go | 10 +++++++--- storage/operation/transaction_results.go | 11 +++++++++-- storage/results.go | 3 ++- storage/store/events.go | 13 +++++++------ storage/store/results.go | 5 +++-- storage/store/transaction_results.go | 7 ++++--- storage/transaction_results.go | 7 +++++-- 10 files changed, 57 insertions(+), 27 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 47a7accb8fc..5aa56499c12 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -436,17 +436,18 @@ func (s *state) saveExecutionResults( } }) - err = s.events.BatchStore(blockID, []flow.EventsList{result.AllEvents()}, batch) + err = s.events.BatchStore(lctx, blockID, []flow.EventsList{result.AllEvents()}, batch) if err != nil { return fmt.Errorf("cannot store events: %w", err) } - err = s.serviceEvents.BatchStore(blockID, result.AllServiceEvents(), batch) + err = s.serviceEvents.BatchStore(lctx, blockID, result.AllServiceEvents(), batch) if err != nil { return fmt.Errorf("cannot store service events: %w", err) } err = s.transactionResults.BatchStore( + lctx, blockID, result.AllTransactionResults(), batch) @@ -461,7 +462,7 @@ func (s *state) saveExecutionResults( return fmt.Errorf("could not persist execution result: %w", err) } - err = s.results.BatchIndex(blockID, executionResult.ID(), batch) + err = s.results.BatchIndex(lctx, blockID, executionResult.ID(), batch) if err != nil { return fmt.Errorf("cannot index execution result: %w", err) } diff --git a/storage/events.go b/storage/events.go index 4062acea82e..e8c73b28c34 100644 --- a/storage/events.go +++ b/storage/events.go @@ -1,6 +1,8 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) @@ -23,10 +25,10 @@ type Events interface { EventsReader // Store will store events for the given block ID - Store(blockID flow.Identifier, blockEvents []flow.EventsList) error + Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error // BatchStore will store events for the given block ID in a given batch - BatchStore(blockID flow.Identifier, events []flow.EventsList, batch ReaderBatchWriter) error + BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.EventsList, batch ReaderBatchWriter) error // BatchRemoveByBlockID removes events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. @@ -38,7 +40,7 @@ type ServiceEvents interface { // BatchStore stores service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
- BatchStore(blockID flow.Identifier, events []flow.Event, batch ReaderBatchWriter) error + BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.Event, batch ReaderBatchWriter) error // ByBlockID returns the events for the given block ID ByBlockID(blockID flow.Identifier) ([]flow.Event, error) diff --git a/storage/operation/events.go b/storage/operation/events.go index f07467fe6db..dadabd7bc89 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -1,6 +1,9 @@ package operation import ( + "fmt" + + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) @@ -9,11 +12,17 @@ func eventPrefix(prefix byte, blockID flow.Identifier, event flow.Event) []byte return MakePrefix(prefix, blockID, event.TransactionID, event.TransactionIndex, event.EventIndex) } -func InsertEvent(w storage.Writer, blockID flow.Identifier, event flow.Event) error { +func InsertEvent(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, event flow.Event) error { + if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("InsertEvent requires LockInsertOwnReceipt to be held") + } return UpsertByKey(w, eventPrefix(codeEvent, blockID, event), event) } -func InsertServiceEvent(w storage.Writer, blockID flow.Identifier, event flow.Event) error { +func InsertServiceEvent(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, event flow.Event) error { + if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("InsertServiceEvent requires LockInsertOwnReceipt to be held") + } return UpsertByKey(w, eventPrefix(codeServiceEvent, blockID, event), event) } diff --git a/storage/operation/results.go b/storage/operation/results.go index 29937653968..ddc2535f156 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -1,6 +1,9 @@ package operation import ( + "fmt" + + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) @@ -32,10 +35,11 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result // value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be // compromised as a whole. This method does not contain any safeguards to prevent such data corruption. // -// TODO: USE LOCK, we want to protect this mapping from accidental overwrites (because the key is not derived from the value via a collision-resistant hash) -// // No errors are expected during normal operation. 
-func IndexExecutionResult(w storage.Writer, blockID flow.Identifier, resultID flow.Identifier) error { +func IndexExecutionResult(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, resultID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("IndexExecutionResult requires LockInsertOwnReceipt to be held") + } return UpsertByKey(w, MakePrefix(codeIndexExecutionResultByBlock, blockID), resultID) } diff --git a/storage/operation/transaction_results.go b/storage/operation/transaction_results.go index a97197a5cde..896e0782205 100644 --- a/storage/operation/transaction_results.go +++ b/storage/operation/transaction_results.go @@ -3,15 +3,22 @@ package operation import ( "fmt" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) -func InsertTransactionResult(w storage.Writer, blockID flow.Identifier, transactionResult *flow.TransactionResult) error { +func InsertTransactionResult(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, transactionResult *flow.TransactionResult) error { + if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("InsertTransactionResult requires LockInsertOwnReceipt to be held") + } return UpsertByKey(w, MakePrefix(codeTransactionResult, blockID, transactionResult.TransactionID), transactionResult) } -func IndexTransactionResult(w storage.Writer, blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) error { +func IndexTransactionResult(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) error { + if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("IndexTransactionResult requires LockInsertOwnReceipt to be held") + } return UpsertByKey(w, MakePrefix(codeTransactionResultIndex, blockID, txIndex), transactionResult) } diff --git a/storage/results.go b/storage/results.go index a943866370e..908b9211eed 100644 --- a/storage/results.go +++ b/storage/results.go @@ -1,6 +1,7 @@ package storage import ( + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" ) @@ -28,7 +29,7 @@ type ExecutionResults interface { ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error // BatchIndex indexes an execution result by block ID in a given batch - BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch ReaderBatchWriter) error + BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, resultID flow.Identifier, batch ReaderBatchWriter) error // BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. // No errors are expected during normal operation, even if no entries are matched. 
diff --git a/storage/store/events.go b/storage/store/events.go index dc6283e8d02..2ee8cfd0f9f 100644 --- a/storage/store/events.go +++ b/storage/store/events.go @@ -3,6 +3,7 @@ package store import ( "fmt" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" @@ -40,7 +41,7 @@ func NewEvents(collector module.CacheMetrics, db storage.DB) *Events { // BatchStore stores events keyed by a blockID in provided batch // No errors are expected during normal operation, but it may return generic error // if badger fails to process request -func (e *Events) BatchStore(blockID flow.Identifier, blockEvents []flow.EventsList, batch storage.ReaderBatchWriter) error { +func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList, batch storage.ReaderBatchWriter) error { writer := batch.Writer() // pre-allocating and indexing slice is faster than appending @@ -55,7 +56,7 @@ func (e *Events) BatchStore(blockID flow.Identifier, blockEvents []flow.EventsLi for _, events := range blockEvents { for _, event := range events { - err := operation.InsertEvent(writer, blockID, event) + err := operation.InsertEvent(lctx, writer, blockID, event) if err != nil { return fmt.Errorf("cannot batch insert event: %w", err) } @@ -72,9 +73,9 @@ func (e *Events) BatchStore(blockID flow.Identifier, blockEvents []flow.EventsLi } // Store will store events for the given block ID -func (e *Events) Store(blockID flow.Identifier, blockEvents []flow.EventsList) error { +func (e *Events) Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error { return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return e.BatchStore(blockID, blockEvents, rw) + return e.BatchStore(lctx, blockID, blockEvents, rw) }) } @@ -181,10 +182,10 @@ func NewServiceEvents(collector module.CacheMetrics, db storage.DB) *ServiceEven // BatchStore stores service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
-func (e *ServiceEvents) BatchStore(blockID flow.Identifier, events []flow.Event, rw storage.ReaderBatchWriter) error { +func (e *ServiceEvents) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.Event, rw storage.ReaderBatchWriter) error { writer := rw.Writer() for _, event := range events { - err := operation.InsertServiceEvent(writer, blockID, event) + err := operation.InsertServiceEvent(lctx, writer, blockID, event) if err != nil { return fmt.Errorf("cannot batch insert service event: %w", err) } diff --git a/storage/store/results.go b/storage/store/results.go index 45c269f5a7f..2e2c008f938 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -3,6 +3,7 @@ package store import ( "fmt" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" @@ -108,8 +109,8 @@ func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storag return r.store(batch, result) } -func (r *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { - return operation.IndexExecutionResult(batch.Writer(), blockID, resultID) +func (r *ExecutionResults) BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { + return operation.IndexExecutionResult(lctx, batch.Writer(), blockID, resultID) } func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { diff --git a/storage/store/transaction_results.go b/storage/store/transaction_results.go index a870fe65ba8..e552b9d046f 100644 --- a/storage/store/transaction_results.go +++ b/storage/store/transaction_results.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "fmt" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" @@ -128,16 +129,16 @@ func NewTransactionResults(collector module.CacheMetrics, db storage.DB, transac } // BatchStore will store the transaction results for the given block ID in a batch -func (tr *TransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.ReaderBatchWriter) error { +func (tr *TransactionResults) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.ReaderBatchWriter) error { w := batch.Writer() for i, result := range transactionResults { - err := operation.InsertTransactionResult(w, blockID, &result) + err := operation.InsertTransactionResult(lctx, w, blockID, &result) if err != nil { return fmt.Errorf("cannot batch insert tx result: %w", err) } - err = operation.IndexTransactionResult(w, blockID, uint32(i), &result) + err = operation.IndexTransactionResult(lctx, w, blockID, uint32(i), &result) if err != nil { return fmt.Errorf("cannot batch index tx result: %w", err) } diff --git a/storage/transaction_results.go b/storage/transaction_results.go index 9c7ecdbe1db..7bdb7ca4aec 100644 --- a/storage/transaction_results.go +++ b/storage/transaction_results.go @@ -1,6 +1,9 @@ package storage -import "github.com/onflow/flow-go/model/flow" +import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" +) type TransactionResultsReader interface { // ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID @@ -18,7 +21,7 @@ type TransactionResults interface { 
TransactionResultsReader // BatchStore inserts a batch of transaction result into a batch - BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch ReaderBatchWriter) error + BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.TransactionResult, batch ReaderBatchWriter) error // RemoveByBlockID removes all transaction results for a block BatchRemoveByBlockID(id flow.Identifier, batch ReaderBatchWriter) error From 5f38c5cb3753ecc4bff69d7c173bc31f8a104fc7 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 24 Sep 2025 10:15:48 -0700 Subject: [PATCH 02/87] fix tests --- storage/operation/events_test.go | 6 +++++- storage/operation/stats_test.go | 5 ++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/storage/operation/events_test.go b/storage/operation/events_test.go index 272ccea1410..04be3f8d855 100644 --- a/storage/operation/events_test.go +++ b/storage/operation/events_test.go @@ -19,6 +19,7 @@ import ( // and block id and event type func TestRetrieveEventByBlockIDTxID(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() // create block ids, transaction ids and event types slices blockIDs := []flow.Identifier{flow.HashToID([]byte{0x01}), flow.HashToID([]byte{0x02})} @@ -53,9 +54,12 @@ func TestRetrieveEventByBlockIDTxID(t *testing.T) { ) // insert event into the db + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return operation.InsertEvent(rw.Writer(), b, event) + return operation.InsertEvent(lctx, rw.Writer(), b, event) }) + lctx.Release() require.Nil(t, err) // update event arrays in the maps diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go index ff05671b1c3..7c8b9c51725 100644 --- a/storage/operation/stats_test.go +++ b/storage/operation/stats_test.go @@ -20,8 +20,11 @@ func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { // insert random events b := unittest.IdentifierFixture() events := unittest.EventsFixture(30) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) + defer lctx.Release() for _, evt := range events { - err := operation.InsertEvent(rw.Writer(), b, evt) + err := operation.InsertEvent(lctx, rw.Writer(), b, evt) if err != nil { return err } From ae157d1af2748e9878bb37a039bd5655203207c3 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 24 Sep 2025 10:16:48 -0700 Subject: [PATCH 03/87] Fix import formatting in storage/operation files --- storage/operation/events.go | 1 + storage/operation/results.go | 1 + storage/operation/transaction_results.go | 1 + 3 files changed, 3 insertions(+) diff --git a/storage/operation/events.go b/storage/operation/events.go index dadabd7bc89..2d022be699a 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) diff --git a/storage/operation/results.go b/storage/operation/results.go index ddc2535f156..16943ae94d2 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) diff --git a/storage/operation/transaction_results.go 
b/storage/operation/transaction_results.go index 896e0782205..8a4a077def1 100644 --- a/storage/operation/transaction_results.go +++ b/storage/operation/transaction_results.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) From 11ebabae2820d11488bc2a8a60d11c73a6887d6b Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 24 Sep 2025 10:26:01 -0700 Subject: [PATCH 04/87] update mocks --- storage/mock/events.go | 22 ++++++++++++---------- storage/mock/execution_results.go | 12 +++++++----- storage/mock/service_events.go | 12 +++++++----- storage/mock/transaction_results.go | 12 +++++++----- 4 files changed, 33 insertions(+), 25 deletions(-) diff --git a/storage/mock/events.go b/storage/mock/events.go index a23564f1e08..8df84d3b4e0 100644 --- a/storage/mock/events.go +++ b/storage/mock/events.go @@ -3,7 +3,9 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -32,17 +34,17 @@ func (_m *Events) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.Re return r0 } -// BatchStore provides a mock function with given fields: blockID, events, batch -func (_m *Events) BatchStore(blockID flow.Identifier, events []flow.EventsList, batch storage.ReaderBatchWriter) error { - ret := _m.Called(blockID, events, batch) +// BatchStore provides a mock function with given fields: lctx, blockID, events, batch +func (_m *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.EventsList, batch storage.ReaderBatchWriter) error { + ret := _m.Called(lctx, blockID, events, batch) if len(ret) == 0 { panic("no return value specified for BatchStore") } var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.EventsList, storage.ReaderBatchWriter) error); ok { - r0 = rf(blockID, events, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, []flow.EventsList, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, blockID, events, batch) } else { r0 = ret.Error(0) } @@ -170,17 +172,17 @@ func (_m *Events) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uin return r0, r1 } -// Store provides a mock function with given fields: blockID, blockEvents -func (_m *Events) Store(blockID flow.Identifier, blockEvents []flow.EventsList) error { - ret := _m.Called(blockID, blockEvents) +// Store provides a mock function with given fields: lctx, blockID, blockEvents +func (_m *Events) Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error { + ret := _m.Called(lctx, blockID, blockEvents) if len(ret) == 0 { panic("no return value specified for Store") } var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.EventsList) error); ok { - r0 = rf(blockID, blockEvents) + if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, []flow.EventsList) error); ok { + r0 = rf(lctx, blockID, blockEvents) } else { r0 = ret.Error(0) } diff --git a/storage/mock/execution_results.go b/storage/mock/execution_results.go index 07702c1db3c..88f3ef8a9ff 100644 --- a/storage/mock/execution_results.go +++ b/storage/mock/execution_results.go @@ -3,7 +3,9 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -14,17 +16,17 @@ type ExecutionResults 
struct { mock.Mock } -// BatchIndex provides a mock function with given fields: blockID, resultID, batch -func (_m *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { - ret := _m.Called(blockID, resultID, batch) +// BatchIndex provides a mock function with given fields: lctx, blockID, resultID, batch +func (_m *ExecutionResults) BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { + ret := _m.Called(lctx, blockID, resultID, batch) if len(ret) == 0 { panic("no return value specified for BatchIndex") } var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier, storage.ReaderBatchWriter) error); ok { - r0 = rf(blockID, resultID, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, flow.Identifier, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, blockID, resultID, batch) } else { r0 = ret.Error(0) } diff --git a/storage/mock/service_events.go b/storage/mock/service_events.go index fc2fc46db5c..a7c47994f77 100644 --- a/storage/mock/service_events.go +++ b/storage/mock/service_events.go @@ -3,7 +3,9 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -32,17 +34,17 @@ func (_m *ServiceEvents) BatchRemoveByBlockID(blockID flow.Identifier, batch sto return r0 } -// BatchStore provides a mock function with given fields: blockID, events, batch -func (_m *ServiceEvents) BatchStore(blockID flow.Identifier, events []flow.Event, batch storage.ReaderBatchWriter) error { - ret := _m.Called(blockID, events, batch) +// BatchStore provides a mock function with given fields: lctx, blockID, events, batch +func (_m *ServiceEvents) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.Event, batch storage.ReaderBatchWriter) error { + ret := _m.Called(lctx, blockID, events, batch) if len(ret) == 0 { panic("no return value specified for BatchStore") } var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.Event, storage.ReaderBatchWriter) error); ok { - r0 = rf(blockID, events, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, []flow.Event, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, blockID, events, batch) } else { r0 = ret.Error(0) } diff --git a/storage/mock/transaction_results.go b/storage/mock/transaction_results.go index d94b4881ce7..5ce338423fd 100644 --- a/storage/mock/transaction_results.go +++ b/storage/mock/transaction_results.go @@ -3,7 +3,9 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -32,17 +34,17 @@ func (_m *TransactionResults) BatchRemoveByBlockID(id flow.Identifier, batch sto return r0 } -// BatchStore provides a mock function with given fields: blockID, transactionResults, batch -func (_m *TransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.ReaderBatchWriter) error { - ret := _m.Called(blockID, transactionResults, batch) +// BatchStore provides a mock function with given fields: lctx, blockID, transactionResults, batch +func (_m *TransactionResults) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.ReaderBatchWriter) error { 
+ ret := _m.Called(lctx, blockID, transactionResults, batch) if len(ret) == 0 { panic("no return value specified for BatchStore") } var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.TransactionResult, storage.ReaderBatchWriter) error); ok { - r0 = rf(blockID, transactionResults, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, []flow.TransactionResult, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, blockID, transactionResults, batch) } else { r0 = ret.Error(0) } From cf8fbc8479fcdae7b28bfde3d9667d49d67bb6e0 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 24 Sep 2025 16:08:03 -0700 Subject: [PATCH 05/87] refactor indexing results --- cmd/util/cmd/reindex/cmd/results.go | 56 ---------------- cmd/util/cmd/reindex/cmd/root.go | 40 ----------- cmd/util/cmd/reindex/main.go | 7 -- cmd/util/cmd/root.go | 2 - engine/execution/state/state.go | 2 +- .../indexer/indexer_core.go | 2 +- storage/locks.go | 6 +- storage/operation/events.go | 4 +- storage/operation/events_test.go | 13 ++-- storage/operation/stats_test.go | 67 +++++++++---------- storage/results.go | 6 -- storage/store/events_test.go | 18 +++-- storage/store/results.go | 30 +-------- storage/store/results_test.go | 62 ++++++----------- storage/store/transaction_results_test.go | 15 ++--- 15 files changed, 89 insertions(+), 241 deletions(-) delete mode 100644 cmd/util/cmd/reindex/cmd/results.go delete mode 100644 cmd/util/cmd/reindex/cmd/root.go delete mode 100644 cmd/util/cmd/reindex/main.go diff --git a/cmd/util/cmd/reindex/cmd/results.go b/cmd/util/cmd/reindex/cmd/results.go deleted file mode 100644 index 33fcd92b87c..00000000000 --- a/cmd/util/cmd/reindex/cmd/results.go +++ /dev/null @@ -1,56 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/rs/zerolog/log" - "github.com/spf13/cobra" - - "github.com/onflow/flow-go/cmd/util/cmd/common" - "github.com/onflow/flow-go/storage" -) - -func init() { - rootCmd.AddCommand(resultsCmd) -} - -var resultsCmd = &cobra.Command{ - Use: "results", - Short: "reindex sealed result IDs by block ID", - RunE: func(cmd *cobra.Command, args []string) error { - lockManager := storage.MakeSingletonLockManager() - return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) - state, err := common.OpenProtocolState(lockManager, db, storages) - if err != nil { - return fmt.Errorf("could not open protocol state: %w", err) - } - - results := storages.Results - blocks := storages.Blocks - - root := state.Params().FinalizedRoot() - final, err := state.Final().Head() - if err != nil { - return fmt.Errorf("could not get final header from protocol state: %w", err) - } - - for h := root.Height + 1; h <= final.Height; h++ { - block, err := blocks.ByHeight(h) - if err != nil { - return fmt.Errorf("could not get block at height %d: %w", h, err) - } - - for _, seal := range block.Payload.Seals { - err := results.Index(seal.BlockID, seal.ResultID) - if err != nil { - return fmt.Errorf("could not index result ID at height %d: %w", h, err) - } - } - } - - log.Info().Uint64("start_height", root.Height).Uint64("end_height", final.Height).Msg("indexed execution results") - return nil - }) - }, -} diff --git a/cmd/util/cmd/reindex/cmd/root.go b/cmd/util/cmd/reindex/cmd/root.go deleted file mode 100644 index 7abc84f81f4..00000000000 --- a/cmd/util/cmd/reindex/cmd/root.go +++ /dev/null @@ -1,40 +0,0 @@ -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - - 
"github.com/onflow/flow-go/cmd/util/cmd/common" -) - -var ( - flagDatadir string -) - -var rootCmd = &cobra.Command{ - Use: "reindex", - Short: "reindex data", -} - -var RootCmd = rootCmd - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -func init() { - common.InitDataDirFlag(rootCmd, &flagDatadir) - _ = rootCmd.MarkPersistentFlagRequired("data-dir") - - cobra.OnInitialize(initConfig) -} - -func initConfig() { - viper.AutomaticEnv() -} diff --git a/cmd/util/cmd/reindex/main.go b/cmd/util/cmd/reindex/main.go deleted file mode 100644 index 5d082d3d072..00000000000 --- a/cmd/util/cmd/reindex/main.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "github.com/onflow/flow-go/cmd/util/cmd/reindex/cmd" - -func main() { - cmd.Execute() -} diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go index 2fc197470c8..959985d1a56 100644 --- a/cmd/util/cmd/root.go +++ b/cmd/util/cmd/root.go @@ -39,7 +39,6 @@ import ( read_execution_state "github.com/onflow/flow-go/cmd/util/cmd/read-execution-state" read_hotstuff "github.com/onflow/flow-go/cmd/util/cmd/read-hotstuff/cmd" read_protocol_state "github.com/onflow/flow-go/cmd/util/cmd/read-protocol-state/cmd" - index_er "github.com/onflow/flow-go/cmd/util/cmd/reindex/cmd" rollback_executed_height "github.com/onflow/flow-go/cmd/util/cmd/rollback-executed-height/cmd" run_script "github.com/onflow/flow-go/cmd/util/cmd/run-script" "github.com/onflow/flow-go/cmd/util/cmd/snapshot" @@ -111,7 +110,6 @@ func addCommands() { rootCmd.AddCommand(leaders.Cmd) rootCmd.AddCommand(epochs.RootCmd) rootCmd.AddCommand(edbs.RootCmd) - rootCmd.AddCommand(index_er.RootCmd) rootCmd.AddCommand(rollback_executed_height.Cmd) rootCmd.AddCommand(read_execution_state.Cmd) rootCmd.AddCommand(snapshot.Cmd) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 5aa56499c12..aed836430d4 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -411,7 +411,7 @@ func (s *state) saveExecutionResults( } // Acquire both locks to ensure it's concurrent safe when inserting the execution results and chunk data packs. 
- return storage.WithLocks(s.lockManager, []string{storage.LockInsertOwnReceipt, storage.LockInsertChunkDataPack}, func(lctx lockctx.Context) error { + return storage.WithLocks(s.lockManager, []string{storage.LockInsertOwnReceipt, storage.LockInsertEvent}, func(lctx lockctx.Context) error { err := s.chunkDataPacks.StoreByChunkID(lctx, chunks) if err != nil { return fmt.Errorf("can not store multiple chunk data pack: %w", err) diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index ef768840a7a..1246982b8c9 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -149,7 +149,7 @@ func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEnti } err := c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - err := c.events.BatchStore(data.BlockID, []flow.EventsList{events}, rw) + err := c.events.BatchStore(lctx, data.BlockID, []flow.EventsList{events}, rw) if err != nil { return fmt.Errorf("could not index events at height %d: %w", header.Height, err) } diff --git a/storage/locks.go b/storage/locks.go index 5f7f968acaf..d9f68fcc675 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -20,6 +20,9 @@ const ( // The reason they are combined is because insertion process reads some data updated by finalization process, // in order to prevent dirty reads, we need to acquire the lock for both operations. LockInsertOrFinalizeClusterBlock = "lock_insert_or_finalize_cluster_block" + // LockInsertEvent protects the insertion of events. + // This lock is reused by both EN storing its own receipt and AN indexing execution data + LockInsertEvent = "lock_insert_event" // LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block. // Specifically, with this lock we prevent accidental overwrites of the index `executed block ID` ➜ `Receipt ID`. LockInsertOwnReceipt = "lock_insert_own_receipt" @@ -38,6 +41,7 @@ func Locks() []string { LockFinalizeBlock, LockIndexResultApproval, LockInsertOrFinalizeClusterBlock, + LockInsertEvent, LockInsertOwnReceipt, LockInsertCollection, LockBootstrapping, @@ -65,7 +69,7 @@ func makeLockPolicy() lockctx.Policy { return lockctx.NewDAGPolicyBuilder(). Add(LockInsertBlock, LockFinalizeBlock). Add(LockFinalizeBlock, LockBootstrapping). - Add(LockInsertOwnReceipt, LockInsertChunkDataPack). + Add(LockInsertOwnReceipt, LockInsertEvent). 
Build() } diff --git a/storage/operation/events.go b/storage/operation/events.go index 2d022be699a..1a4007827b7 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -14,8 +14,8 @@ func eventPrefix(prefix byte, blockID flow.Identifier, event flow.Event) []byte } func InsertEvent(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, event flow.Event) error { - if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("InsertEvent requires LockInsertOwnReceipt to be held") + if !lctx.HoldsLock(storage.LockInsertEvent) { + return fmt.Errorf("InsertEvent requires LockInsertEvent to be held") } return UpsertByKey(w, eventPrefix(codeEvent, blockID, event), event) } diff --git a/storage/operation/events_test.go b/storage/operation/events_test.go index 04be3f8d855..ecdc49dd914 100644 --- a/storage/operation/events_test.go +++ b/storage/operation/events_test.go @@ -6,6 +6,7 @@ import ( "golang.org/x/exp/slices" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -18,8 +19,8 @@ import ( // TestRetrieveEventByBlockIDTxID tests event insertion, event retrieval by block id, block id and transaction id, // and block id and event type func TestRetrieveEventByBlockIDTxID(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - lockManager := storage.NewTestingLockManager() // create block ids, transaction ids and event types slices blockIDs := []flow.Identifier{flow.HashToID([]byte{0x01}), flow.HashToID([]byte{0x02})} @@ -54,13 +55,11 @@ func TestRetrieveEventByBlockIDTxID(t *testing.T) { ) // insert event into the db - lctx := lockManager.NewContext() - require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) - err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return operation.InsertEvent(lctx, rw.Writer(), b, event) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertEvent(lctx, rw.Writer(), b, event) + }) }) - lctx.Release() - require.Nil(t, err) // update event arrays in the maps bEvents = append(bEvents, event) diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go index 7c8b9c51725..0ee19a7c829 100644 --- a/storage/operation/stats_test.go +++ b/storage/operation/stats_test.go @@ -13,49 +13,48 @@ import ( ) func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - lockManager := storage.NewTestingLockManager() - err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - // insert random events - b := unittest.IdentifierFixture() - events := unittest.EventsFixture(30) - lctx := lockManager.NewContext() - require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) - defer lctx.Release() - for _, evt := range events { - err := operation.InsertEvent(lctx, rw.Writer(), b, evt) - if err != nil { - return err + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // insert random events + b := unittest.IdentifierFixture() + events := unittest.EventsFixture(30) + for _, evt := range events { + err := operation.InsertEvent(lctx, rw.Writer(), b, evt) + if err != nil { + 
return err + } } - } - // insert 100 chunk data packs - for i := 0; i < 100; i++ { - collectionID := unittest.IdentifierFixture() - cdp := &storage.StoredChunkDataPack{ - ChunkID: unittest.IdentifierFixture(), - StartState: unittest.StateCommitmentFixture(), - Proof: []byte{'p'}, - CollectionID: collectionID, + // insert 100 chunk data packs + for i := 0; i < 100; i++ { + collectionID := unittest.IdentifierFixture() + cdp := &storage.StoredChunkDataPack{ + ChunkID: unittest.IdentifierFixture(), + StartState: unittest.StateCommitmentFixture(), + Proof: []byte{'p'}, + CollectionID: collectionID, + } + err := operation.InsertChunkDataPack(lctx, rw, cdp) + if err != nil { + return err + } } - require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { - return operation.InsertChunkDataPack(lctx, rw, cdp) - })) - } - // insert 20 results - for i := 0; i < 20; i++ { - result := unittest.ExecutionResultFixture() - err := operation.InsertExecutionResult(rw.Writer(), result) - if err != nil { - return err + // insert 20 results + for i := 0; i < 20; i++ { + result := unittest.ExecutionResultFixture() + err := operation.InsertExecutionResult(rw.Writer(), result) + if err != nil { + return err + } } - } - return nil + return nil + }) }) - require.NoError(t, err) // summarize keys by first byte stats, err := operation.SummarizeKeysByFirstByteConcurrent(unittest.Logger(), db.Reader(), 10) diff --git a/storage/results.go b/storage/results.go index 908b9211eed..90b09864b64 100644 --- a/storage/results.go +++ b/storage/results.go @@ -22,12 +22,6 @@ type ExecutionResults interface { // BatchStore stores an execution result in a given batch BatchStore(result *flow.ExecutionResult, batch ReaderBatchWriter) error - // Index indexes an execution result by block ID. 
- Index(blockID flow.Identifier, resultID flow.Identifier) error - - // ForceIndex indexes an execution result by block ID overwriting existing database entry - ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error - // BatchIndex indexes an execution result by block ID in a given batch BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, resultID flow.Identifier, batch ReaderBatchWriter) error diff --git a/storage/store/events_test.go b/storage/store/events_test.go index 3db86249e8d..5803d04e3af 100644 --- a/storage/store/events_test.go +++ b/storage/store/events_test.go @@ -4,6 +4,7 @@ import ( "math/rand" "testing" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/systemcontracts" @@ -16,6 +17,7 @@ import ( ) func TestEventStoreRetrieve(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() events := store.NewEvents(metrics, db) @@ -48,10 +50,12 @@ func TestEventStoreRetrieve(t *testing.T) { {evt2_1}, } - require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - // store event - return events.BatchStore(blockID, expected, rw) - })) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // store event + return events.BatchStore(lctx, blockID, expected, rw) + }) + }) // retrieve by blockID actual, err := events.ByBlockID(blockID) @@ -136,6 +140,7 @@ func TestEventRetrieveWithoutStore(t *testing.T) { } func TestEventStoreAndRemove(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() store := store.NewEvents(metrics, db) @@ -169,8 +174,9 @@ func TestEventStoreAndRemove(t *testing.T) { {evt2_1}, } - err := store.Store(blockID, expected) - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return store.Store(lctx, blockID, expected) + }) // Ensure it exists event, err := store.ByBlockID(blockID) diff --git a/storage/store/results.go b/storage/store/results.go index 2e2c008f938..70bad1361df 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -63,7 +63,7 @@ func (r *ExecutionResults) byBlockID(blockID flow.Identifier) (*flow.ExecutionRe return r.byID(resultID) } -func (r *ExecutionResults) index(w storage.Writer, blockID, resultID flow.Identifier, force bool) error { +func (r *ExecutionResults) index(lctx lockctx.Proof, w storage.Writer, blockID, resultID flow.Identifier, force bool) error { if !force { // when not forcing the index, check if the result is already indexed exist, err := operation.ExistExecutionResult(r.db.Reader(), blockID) @@ -91,7 +91,7 @@ func (r *ExecutionResults) index(w storage.Writer, blockID, resultID flow.Identi // if the result is not indexed, we can index it } - err := operation.IndexExecutionResult(w, blockID, resultID) + err := operation.IndexExecutionResult(lctx, w, blockID, resultID) if err == nil { return nil } @@ -117,32 +117,6 @@ func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult return r.byID(resultID) } -// Index indexes an execution result by block ID. 
-// Note: this method call is not concurrent safe, because it checks if the different result is already indexed -// by the same blockID, and if it is, it returns an error. -// The caller needs to ensure that there is no concurrent call to this method with the same blockID. -func (r *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error { - err := r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return r.index(rw.Writer(), blockID, resultID, false) - }) - - if err != nil { - return fmt.Errorf("could not index execution result: %w", err) - } - return nil -} - -func (r *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error { - err := r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return r.index(rw.Writer(), blockID, resultID, true) - }) - - if err != nil { - return fmt.Errorf("could not index execution result: %w", err) - } - return nil -} - func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { return r.byBlockID(blockID) } diff --git a/storage/store/results_test.go b/storage/store/results_test.go index 34f63e1f885..e41ad5a6683 100644 --- a/storage/store/results_test.go +++ b/storage/store/results_test.go @@ -4,6 +4,7 @@ import ( "errors" "testing" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module/metrics" @@ -55,6 +56,7 @@ func TestResultStoreTwice(t *testing.T) { } func TestResultBatchStoreTwice(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() store1 := store.NewExecutionResults(metrics, db) @@ -62,24 +64,28 @@ func TestResultBatchStoreTwice(t *testing.T) { result := unittest.ExecutionResultFixture() blockID := unittest.IdentifierFixture() - require.NoError(t, db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { - err := store1.BatchStore(result, batch) - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { + err := store1.BatchStore(result, batch) + require.NoError(t, err) - err = store1.BatchIndex(blockID, result.ID(), batch) - require.NoError(t, err) - return nil - })) + err = store1.BatchIndex(lctx, blockID, result.ID(), batch) + require.NoError(t, err) + return nil + }) + }) - require.NoError(t, db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { - err := store1.BatchStore(result, batch) - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { + err := store1.BatchStore(result, batch) + require.NoError(t, err) - err = store1.BatchIndex(blockID, result.ID(), batch) - require.NoError(t, err) + err = store1.BatchIndex(lctx, blockID, result.ID(), batch) + require.NoError(t, err) - return nil - })) + return nil + }) + }) }) } @@ -108,31 +114,3 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { require.True(t, errors.Is(err, storage.ErrDataMismatch)) }) } - -func TestResultStoreForceIndexOverridesMapping(t *testing.T) { - dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - metrics := metrics.NewNoopCollector() - store1 := store.NewExecutionResults(metrics, db) - - result1 := unittest.ExecutionResultFixture() - result2 := 
unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store1.Store(result1) - require.NoError(t, err) - err = store1.Index(blockID, result1.ID()) - require.NoError(t, err) - - err = store1.Store(result2) - require.NoError(t, err) - - // force index - err = store1.ForceIndex(blockID, result2.ID()) - require.NoError(t, err) - - // retrieve index to make sure it points to second ER now - byBlockID, err := store1.ByBlockID(blockID) - - require.Equal(t, result2, byBlockID) - require.NoError(t, err) - }) -} diff --git a/storage/store/transaction_results_test.go b/storage/store/transaction_results_test.go index aa082c7b2b7..28860d71869 100644 --- a/storage/store/transaction_results_test.go +++ b/storage/store/transaction_results_test.go @@ -7,6 +7,7 @@ import ( "slices" "testing" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/rand" @@ -21,6 +22,7 @@ import ( ) func TestBatchStoringTransactionResults(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() st, err := store.NewTransactionResults(metrics, db, 1000) @@ -36,14 +38,11 @@ func TestBatchStoringTransactionResults(t *testing.T) { } txResults = append(txResults, expected) } - writeBatch := db.NewBatch() - defer writeBatch.Close() - - err = st.BatchStore(blockID, txResults, writeBatch) - require.NoError(t, err) - - err = writeBatch.Commit() - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return st.BatchStore(lctx, blockID, txResults, rw) + }) + }) for _, txResult := range txResults { actual, err := st.ByBlockIDTransactionID(blockID, txResult.TransactionID) From e069fd99fce34193d898c2a23dac11fedbe49daf Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 24 Sep 2025 16:16:15 -0700 Subject: [PATCH 06/87] adding lock to index events and light transaction results --- .../indexer/indexer_core.go | 16 ++++++++++++++-- storage/events.go | 1 + storage/light_transaction_results.go | 10 +++++----- storage/locks.go | 2 ++ storage/mock/light_transaction_results.go | 18 ------------------ storage/operation/events_test.go | 2 +- storage/operation/stats_test.go | 2 +- .../light_transaction_results.go | 7 ------- storage/store/light_transaction_results.go | 10 +++++----- 9 files changed, 29 insertions(+), 39 deletions(-) diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index 1246982b8c9..0e254e5753c 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -148,13 +148,25 @@ func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEnti results = append(results, chunk.TransactionResults...) 
} - err := c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + lctx := c.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertEvent) + if err != nil { + return fmt.Errorf("could not acquire LockInsertEvent for indexing block data: %w", err) + } + + err = lctx.AcquireLock(storage.LockInsertLightTransactionResult) + if err != nil { + return fmt.Errorf("could not acquire LockInsertLightTransactionResult for indexing block data: %w", err) + } + + err = c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := c.events.BatchStore(lctx, data.BlockID, []flow.EventsList{events}, rw) if err != nil { return fmt.Errorf("could not index events at height %d: %w", header.Height, err) } - err = c.results.BatchStore(data.BlockID, results, rw) + err = c.results.BatchStore(lctx, data.BlockID, results, rw) if err != nil { return fmt.Errorf("could not index transaction results at height %d: %w", header.Height, err) } diff --git a/storage/events.go b/storage/events.go index e8c73b28c34..0a794e63c1c 100644 --- a/storage/events.go +++ b/storage/events.go @@ -28,6 +28,7 @@ type Events interface { Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error // BatchStore will store events for the given block ID in a given batch + // it requires the caller to hold [storage.LockInsertEvent] BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.EventsList, batch ReaderBatchWriter) error // BatchRemoveByBlockID removes events keyed by a blockID in provided batch diff --git a/storage/light_transaction_results.go b/storage/light_transaction_results.go index e2109d8e450..f3647154a57 100644 --- a/storage/light_transaction_results.go +++ b/storage/light_transaction_results.go @@ -1,6 +1,9 @@ package storage -import "github.com/onflow/flow-go/model/flow" +import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" +) // LightTransactionResultsReader represents persistent storage read operations for light transaction result type LightTransactionResultsReader interface { @@ -28,8 +31,5 @@ type LightTransactionResults interface { LightTransactionResultsReader // BatchStore inserts a batch of transaction result into a batch - BatchStore(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw ReaderBatchWriter) error - - // Deprecated: deprecated as a part of transition from Badger to Pebble. use BatchStore instead - BatchStoreBadger(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, batch BatchStorage) error + BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw ReaderBatchWriter) error } diff --git a/storage/locks.go b/storage/locks.go index d9f68fcc675..4b44b91a40c 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -23,6 +23,8 @@ const ( // LockInsertEvent protects the insertion of events. // This lock is reused by both EN storing its own receipt and AN indexing execution data LockInsertEvent = "lock_insert_event" + // LockInsertLightTransactionResult protects the insertion of light transaction results. + LockInsertLightTransactionResult = "lock_insert_light_transaction_result" // LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block. // Specifically, with this lock we prevent accidental overwrites of the index `executed block ID` ➜ `Receipt ID`. 
LockInsertOwnReceipt = "lock_insert_own_receipt" diff --git a/storage/mock/light_transaction_results.go b/storage/mock/light_transaction_results.go index 6e6b277acb8..779a3f89cd8 100644 --- a/storage/mock/light_transaction_results.go +++ b/storage/mock/light_transaction_results.go @@ -32,24 +32,6 @@ func (_m *LightTransactionResults) BatchStore(blockID flow.Identifier, transacti return r0 } -// BatchStoreBadger provides a mock function with given fields: blockID, transactionResults, batch -func (_m *LightTransactionResults) BatchStoreBadger(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, batch storage.BatchStorage) error { - ret := _m.Called(blockID, transactionResults, batch) - - if len(ret) == 0 { - panic("no return value specified for BatchStoreBadger") - } - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.LightTransactionResult, storage.BatchStorage) error); ok { - r0 = rf(blockID, transactionResults, batch) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // ByBlockID provides a mock function with given fields: id func (_m *LightTransactionResults) ByBlockID(id flow.Identifier) ([]flow.LightTransactionResult, error) { ret := _m.Called(id) diff --git a/storage/operation/events_test.go b/storage/operation/events_test.go index ecdc49dd914..5348189dfb0 100644 --- a/storage/operation/events_test.go +++ b/storage/operation/events_test.go @@ -55,7 +55,7 @@ func TestRetrieveEventByBlockIDTxID(t *testing.T) { ) // insert event into the db - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.InsertEvent(lctx, rw.Writer(), b, event) }) diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go index 0ee19a7c829..de4824591ad 100644 --- a/storage/operation/stats_test.go +++ b/storage/operation/stats_test.go @@ -16,7 +16,7 @@ func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // insert random events b := unittest.IdentifierFixture() diff --git a/storage/store/inmemory/unsynchronized/light_transaction_results.go b/storage/store/inmemory/unsynchronized/light_transaction_results.go index 56b4901abcb..8ccf536be0f 100644 --- a/storage/store/inmemory/unsynchronized/light_transaction_results.go +++ b/storage/store/inmemory/unsynchronized/light_transaction_results.go @@ -113,10 +113,3 @@ func (l *LightTransactionResults) Data() []flow.LightTransactionResult { func (l *LightTransactionResults) BatchStore(flow.Identifier, []flow.LightTransactionResult, storage.ReaderBatchWriter) error { return fmt.Errorf("not implemented") } - -// BatchStoreBadger inserts a batch of transaction result into a storage. -// Deprecated: deprecated as a part of transition from Badger to Pebble. use BatchStore instead. -// This method is not implemented and will always return an error. 
-func (l *LightTransactionResults) BatchStoreBadger(flow.Identifier, []flow.LightTransactionResult, storage.BatchStorage) error { - return fmt.Errorf("not implemented") -} diff --git a/storage/store/light_transaction_results.go b/storage/store/light_transaction_results.go index dd8cab8e29a..769244735db 100644 --- a/storage/store/light_transaction_results.go +++ b/storage/store/light_transaction_results.go @@ -3,6 +3,7 @@ package store import ( "fmt" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" @@ -71,7 +72,10 @@ func NewLightTransactionResults(collector module.CacheMetrics, db storage.DB, tr } } -func (tr *LightTransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw storage.ReaderBatchWriter) error { +func (tr *LightTransactionResults) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw storage.ReaderBatchWriter) error { + if !lctx.HoldsLock(storage.LockInsertLightTransactionResult) { + return fmt.Errorf("BatchStore LightTransactionResults requires %v", storage.LockInsertLightTransactionResult) + } w := rw.Writer() for i, result := range transactionResults { @@ -103,10 +107,6 @@ func (tr *LightTransactionResults) BatchStore(blockID flow.Identifier, transacti return nil } -func (tr *LightTransactionResults) BatchStoreBadger(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, batch storage.BatchStorage) error { - panic("LightTransactionResults BatchStoreBadger not implemented") -} - // ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID func (tr *LightTransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) (*flow.LightTransactionResult, error) { key := KeyFromBlockIDTransactionID(blockID, txID) From 0c4785d53cf79548dac00c407301f19fab330092 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 24 Sep 2025 16:54:58 -0700 Subject: [PATCH 07/87] refactor tests --- storage/locks.go | 1 + storage/store/events_test.go | 4 +- .../store/light_transaction_results_test.go | 18 +++++--- storage/store/results.go | 2 +- storage/store/results_test.go | 43 ++++++++++++++----- 5 files changed, 48 insertions(+), 20 deletions(-) diff --git a/storage/locks.go b/storage/locks.go index 4b44b91a40c..0b699443296 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -46,6 +46,7 @@ func Locks() []string { LockInsertEvent, LockInsertOwnReceipt, LockInsertCollection, + LockInsertLightTransactionResult, LockBootstrapping, LockInsertChunkDataPack, } diff --git a/storage/store/events_test.go b/storage/store/events_test.go index 5803d04e3af..a6b98609a9e 100644 --- a/storage/store/events_test.go +++ b/storage/store/events_test.go @@ -50,7 +50,7 @@ func TestEventStoreRetrieve(t *testing.T) { {evt2_1}, } - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // store event return events.BatchStore(lctx, blockID, expected, rw) @@ -174,7 +174,7 @@ func TestEventStoreAndRemove(t *testing.T) { {evt2_1}, } - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx 
lockctx.Context) error { return store.Store(lctx, blockID, expected) }) diff --git a/storage/store/light_transaction_results_test.go b/storage/store/light_transaction_results_test.go index c3ea965ab72..d311fe3c4bb 100644 --- a/storage/store/light_transaction_results_test.go +++ b/storage/store/light_transaction_results_test.go @@ -3,6 +3,7 @@ package store_test import ( "testing" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/rand" @@ -16,6 +17,7 @@ import ( ) func TestBatchStoringLightTransactionResults(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() store1 := store.NewLightTransactionResults(metrics, db, 1000) @@ -24,14 +26,18 @@ func TestBatchStoringLightTransactionResults(t *testing.T) { txResults := getLightTransactionResultsFixture(10) t.Run("batch store1 results", func(t *testing.T) { - require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchStore(blockID, txResults, rw) - })) + unittest.WithLock(t, lockManager, storage.LockInsertLightTransactionResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store1.BatchStore(lctx, blockID, txResults, rw) + }) + }) // add a results to a new block to validate they are not included in lookups - require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchStore(unittest.IdentifierFixture(), getLightTransactionResultsFixture(2), rw) - })) + unittest.WithLock(t, lockManager, storage.LockInsertLightTransactionResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store1.BatchStore(lctx, unittest.IdentifierFixture(), getLightTransactionResultsFixture(2), rw) + }) + }) }) diff --git a/storage/store/results.go b/storage/store/results.go index 70bad1361df..b2bf62e4cb1 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -110,7 +110,7 @@ func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storag } func (r *ExecutionResults) BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { - return operation.IndexExecutionResult(lctx, batch.Writer(), blockID, resultID) + return r.index(lctx, batch.Writer(), blockID, resultID, false) } func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { diff --git a/storage/store/results_test.go b/storage/store/results_test.go index e41ad5a6683..76c9b9d0a0b 100644 --- a/storage/store/results_test.go +++ b/storage/store/results_test.go @@ -15,6 +15,7 @@ import ( ) func TestResultStoreAndRetrieve(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() store1 := store.NewExecutionResults(metrics, db) @@ -24,8 +25,11 @@ func TestResultStoreAndRetrieve(t *testing.T) { err := store1.Store(result) require.NoError(t, err) - err = store1.Index(blockID, result.ID()) - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store1.BatchIndex(lctx, blockID, result.ID(), rw) + }) + }) actual, err := 
store1.ByBlockID(blockID) require.NoError(t, err) @@ -35,6 +39,7 @@ func TestResultStoreAndRetrieve(t *testing.T) { } func TestResultStoreTwice(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() store1 := store.NewExecutionResults(metrics, db) @@ -44,14 +49,20 @@ func TestResultStoreTwice(t *testing.T) { err := store1.Store(result) require.NoError(t, err) - err = store1.Index(blockID, result.ID()) - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store1.BatchIndex(lctx, blockID, result.ID(), rw) + }) + }) err = store1.Store(result) require.NoError(t, err) - err = store1.Index(blockID, result.ID()) - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store1.BatchIndex(lctx, blockID, result.ID(), rw) + }) + }) }) } @@ -90,6 +101,7 @@ func TestResultBatchStoreTwice(t *testing.T) { } func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { + lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() store1 := store.NewExecutionResults(metrics, db) @@ -100,8 +112,11 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { err := store1.Store(result1) require.NoError(t, err) - err = store1.Index(blockID, result1.ID()) - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store1.BatchIndex(lctx, blockID, result1.ID(), rw) + }) + }) // we can store1 a different result, but we can't index // a different result for that block, because it will mean @@ -109,8 +124,14 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { err = store1.Store(result2) require.NoError(t, err) - err = store1.Index(blockID, result2.ID()) - require.Error(t, err) - require.True(t, errors.Is(err, storage.ErrDataMismatch)) + var indexErr error + unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + indexErr = store1.BatchIndex(lctx, blockID, result2.ID(), rw) + return nil + }) + }) + require.Error(t, indexErr) + require.True(t, errors.Is(indexErr, storage.ErrDataMismatch)) }) } From 4c352026800fc5d14826ab7229701395865e9d34 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 25 Sep 2025 09:37:19 -0700 Subject: [PATCH 08/87] fix optimistic_sync persisters --- .../optimistic_sync/persisters/stores/results.go | 2 +- module/state_synchronization/indexer/indexer_core.go | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/results.go b/module/executiondatasync/optimistic_sync/persisters/stores/results.go index bcc1116462f..1ccc9ca9ae9 100644 --- a/module/executiondatasync/optimistic_sync/persisters/stores/results.go +++ b/module/executiondatasync/optimistic_sync/persisters/stores/results.go @@ -40,7 +40,7 @@ func (r *ResultsStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWrit } if len(results) > 0 { - if err := 
r.persistedResults.BatchStore(r.blockID, results, batch); err != nil { + if err := r.persistedResults.BatchStore(lctx, r.blockID, results, batch); err != nil { return fmt.Errorf("could not add transaction results to batch: %w", err) } } diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index 0e254e5753c..e642da5fdb9 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -161,11 +161,13 @@ func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEnti } err = c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // Needs storage.LockInsertEvent err := c.events.BatchStore(lctx, data.BlockID, []flow.EventsList{events}, rw) if err != nil { return fmt.Errorf("could not index events at height %d: %w", header.Height, err) } + // Needs storage.LockInsertLightTransactionResult err = c.results.BatchStore(lctx, data.BlockID, results, rw) if err != nil { return fmt.Errorf("could not index transaction results at height %d: %w", header.Height, err) From 1c7271fa1d99c4d022991dbd4b3872e9e9865124 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 25 Sep 2025 16:13:28 -0700 Subject: [PATCH 09/87] refactor IndexOwnExecutionResult --- engine/execution/state/bootstrap/bootstrap.go | 2 +- engine/execution/state/state.go | 6 +- state/protocol/badger/state.go | 6 +- storage/operation/results.go | 46 +++++++----- storage/results.go | 5 +- storage/store/results.go | 73 ++++++++----------- 6 files changed, 71 insertions(+), 67 deletions(-) diff --git a/engine/execution/state/bootstrap/bootstrap.go b/engine/execution/state/bootstrap/bootstrap.go index 19305dc257d..370429ac86a 100644 --- a/engine/execution/state/bootstrap/bootstrap.go +++ b/engine/execution/state/bootstrap/bootstrap.go @@ -113,7 +113,7 @@ func (b *Bootstrapper) BootstrapExecutionDatabase( return fmt.Errorf("could not index initial genesis execution block: %w", err) } - err = operation.IndexExecutionResult(w, rootSeal.BlockID, rootSeal.ResultID) + err = operation.IndexOwnExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID) if err != nil { return fmt.Errorf("could not index result for root result: %w", err) } diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index aed836430d4..b5363fa569a 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -356,11 +356,11 @@ func (s *state) GetExecutionResultID(ctx context.Context, blockID flow.Identifie span, _ := s.tracer.StartSpanFromContext(ctx, trace.EXEGetExecutionResultID) defer span.End() - result, err := s.results.ByBlockID(blockID) + resultID, err := s.results.IDByBlockID(blockID) if err != nil { return flow.ZeroID, err } - return result.ID(), nil + return resultID, nil } func (s *state) SaveExecutionResults( @@ -462,7 +462,7 @@ func (s *state) saveExecutionResults( return fmt.Errorf("could not persist execution result: %w", err) } - err = s.results.BatchIndex(lctx, blockID, executionResult.ID(), batch) + err = s.results.BatchIndex(lctx, batch, blockID, executionResult.ID()) if err != nil { return fmt.Errorf("cannot index execution result: %w", err) } diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index 5f8ac542ee5..55fa4ca81da 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -285,6 +285,8 @@ func bootstrapProtocolState( // segment, as it may or may not be included in 
SealingSegment.Blocks depending on how much // history is covered. The spork root block is persisted as a root proposal without proposer // signature (by convention). +// +// It requires [storage.LockInsertOwnReceipt] lock func bootstrapSealingSegment( lctx lockctx.Proof, db storage.DB, @@ -302,7 +304,7 @@ func bootstrapSealingSegment( if err != nil { return fmt.Errorf("could not insert execution result: %w", err) } - err = operation.IndexExecutionResult(w, result.BlockID, result.ID()) + err = operation.IndexOwnExecutionResult(lctx, rw, result.BlockID, result.ID()) if err != nil { return fmt.Errorf("could not index execution result: %w", err) } @@ -461,7 +463,7 @@ func bootstrapSealingSegment( // If the sealed root block is different from the finalized root block, then it means the node dynamically // bootstrapped. In that case, we index the result of the latest sealed result, so that the EN is able // to confirm that it is loading the correct state to execute the next block. - err = operation.IndexExecutionResult(rw.Writer(), rootSeal.BlockID, rootSeal.ResultID) + err = operation.IndexOwnExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID) if err != nil { return fmt.Errorf("could not index root result: %w", err) } diff --git a/storage/operation/results.go b/storage/operation/results.go index 16943ae94d2..8275c102ca9 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -27,21 +27,37 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result return RetrieveByKey(r, MakePrefix(codeExecutionResult, resultID), result) } -// IndexExecutionResult indexes the Execution Node's OWN Execution Result by the executed block's ID. -// -// CAUTION: -// - OVERWRITES existing data (potential for data corruption): -// This method silently overrides existing data without any sanity checks whether data for the same key already exits. -// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different -// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be -// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. +// IndexOwnExecutionResult indexes the result of the given block. +// It is used by EN to index the result of a block to continue executing subsequent blocks. +// The caller must acquire either [storage.LockInsertOwnReceipt] or [storage.LockBootstrapping] // // No errors are expected during normal operation. 
-func IndexExecutionResult(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, resultID flow.Identifier) error { - if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("IndexExecutionResult requires LockInsertOwnReceipt to be held") +func IndexOwnExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { + held := lctx.HoldsLock(storage.LockInsertOwnReceipt) || + // during bootstrapping, we index the sealed root block or the spork root block, which is not + // produced by the node itself, but we still need to index its execution result to be able to + // execute next block + lctx.HoldsLock(storage.LockBootstrapping) + if !held { + return fmt.Errorf("missing require locks: %s or %s", storage.LockInsertOwnReceipt, storage.LockBootstrapping) + } + + key := MakePrefix(codeIndexExecutionResultByBlock, blockID) + var existing flow.Identifier + err := RetrieveByKey(rw.GlobalReader(), key, &existing) + if err == nil { + if existing != resultID { + return fmt.Errorf("storing result that is different from the already stored one for block: %v, storing result: %v, stored result: %v. %w", + blockID, resultID, existing, storage.ErrDataMismatch) + } + // if the result is the same, we don't need to index it again + return nil + } else if err != storage.ErrNotFound { + return fmt.Errorf("could not check if execution result exists: %w", err) } - return UpsertByKey(w, MakePrefix(codeIndexExecutionResultByBlock, blockID), resultID) + + // if the result is not indexed, we can index it + return UpsertByKey(rw.Writer(), key, resultID) } // LookupExecutionResult retrieves the Execution Node's OWN Execution Result ID for the specified block. @@ -53,12 +69,6 @@ func LookupExecutionResult(r storage.Reader, blockID flow.Identifier, resultID * return RetrieveByKey(r, MakePrefix(codeIndexExecutionResultByBlock, blockID), resultID) } -// ExistExecutionResult checks if the execution node has its OWN Execution Result for the specified block. -// No errors are expected during normal operation. -func ExistExecutionResult(r storage.Reader, blockID flow.Identifier) (bool, error) { - return KeyExists(r, MakePrefix(codeIndexExecutionResultByBlock, blockID)) -} - // RemoveExecutionResultIndex removes Execution Node's OWN Execution Result for the given blockID. // CAUTION: this is for recovery purposes only, and should not be used during normal operations // It returns nil if the collection does not exist. diff --git a/storage/results.go b/storage/results.go index 90b09864b64..b052083c952 100644 --- a/storage/results.go +++ b/storage/results.go @@ -11,6 +11,9 @@ type ExecutionResultsReader interface { // ByBlockID retrieves an execution result by block ID. ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) + + // IDByBlockID retrieves an execution result ID by block ID. + IDByBlockID(blockID flow.Identifier) (flow.Identifier, error) } type ExecutionResults interface { @@ -23,7 +26,7 @@ type ExecutionResults interface { BatchStore(result *flow.ExecutionResult, batch ReaderBatchWriter) error // BatchIndex indexes an execution result by block ID in a given batch - BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, resultID flow.Identifier, batch ReaderBatchWriter) error + BatchIndex(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error // BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. 
// No errors are expected during normal operation, even if no entries are matched. diff --git a/storage/store/results.go b/storage/store/results.go index b2bf62e4cb1..3fe1dadbd57 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -13,8 +13,9 @@ import ( // ExecutionResults implements persistent storage for execution results. type ExecutionResults struct { - db storage.DB - cache *Cache[flow.Identifier, *flow.ExecutionResult] + db storage.DB + cache *Cache[flow.Identifier, *flow.ExecutionResult] + indexCache *Cache[flow.Identifier, flow.Identifier] } var _ storage.ExecutionResults = (*ExecutionResults)(nil) @@ -31,12 +32,30 @@ func NewExecutionResults(collector module.CacheMetrics, db storage.DB) *Executio return &result, err } + indexByBlockID := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { + return operation.IndexOwnExecutionResult(lctx, rw, blockID, resultID) + } + + retrieveByBlockID := func(r storage.Reader, blockID flow.Identifier) (flow.Identifier, error) { + var resultID flow.Identifier + err := operation.LookupExecutionResult(r, blockID, &resultID) + return resultID, err + } + res := &ExecutionResults{ db: db, cache: newCache(collector, metrics.ResourceResult, withLimit[flow.Identifier, *flow.ExecutionResult](flow.DefaultTransactionExpiry+100), withStore(store), withRetrieve(retrieve)), + + indexCache: newCache(collector, metrics.ResourceResult, + // this API is only used to fetch result for last executed block, so in happy case, it only need to be 1, + // we use 100 here to be more resilient to forks + withLimit[flow.Identifier, flow.Identifier](100), + withStoreWithLock(indexByBlockID), + withRetrieve(retrieveByBlockID), + ), } return res @@ -55,48 +74,15 @@ func (r *ExecutionResults) byID(resultID flow.Identifier) (*flow.ExecutionResult } func (r *ExecutionResults) byBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { - var resultID flow.Identifier - err := operation.LookupExecutionResult(r.db.Reader(), blockID, &resultID) + resultID, err := r.IDByBlockID(blockID) if err != nil { return nil, fmt.Errorf("could not lookup execution result ID: %w", err) } return r.byID(resultID) } -func (r *ExecutionResults) index(lctx lockctx.Proof, w storage.Writer, blockID, resultID flow.Identifier, force bool) error { - if !force { - // when not forcing the index, check if the result is already indexed - exist, err := operation.ExistExecutionResult(r.db.Reader(), blockID) - if err != nil { - return fmt.Errorf("could not check if execution result exists: %w", err) - } - - // if the result is already indexed, check if the stored result is the same - if exist { - var storedResultID flow.Identifier - err = operation.LookupExecutionResult(r.db.Reader(), blockID, &storedResultID) - if err != nil { - return fmt.Errorf("could not lookup execution result ID: %w", err) - } - - if storedResultID != resultID { - return fmt.Errorf("storing result that is different from the already stored one for block: %v, storing result: %v, stored result: %v. 
%w", - blockID, resultID, storedResultID, storage.ErrDataMismatch) - } - - // if the result is the same, we don't need to index it again - return nil - } - - // if the result is not indexed, we can index it - } - - err := operation.IndexExecutionResult(lctx, w, blockID, resultID) - if err == nil { - return nil - } - - return nil +func (r *ExecutionResults) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { + return r.indexCache.PutWithLockTx(lctx, rw, blockID, resultID) } func (r *ExecutionResults) Store(result *flow.ExecutionResult) error { @@ -109,10 +95,6 @@ func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storag return r.store(batch, result) } -func (r *ExecutionResults) BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { - return r.index(lctx, batch.Writer(), blockID, resultID, false) -} - func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { return r.byID(resultID) } @@ -121,8 +103,15 @@ func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionRe return r.byBlockID(blockID) } +func (r *ExecutionResults) IDByBlockID(blockID flow.Identifier) (flow.Identifier, error) { + return r.indexCache.Get(r.db.Reader(), blockID) +} + func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error { return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + storage.OnCommitSucceed(rw, func() { + r.indexCache.Remove(blockID) + }) return operation.RemoveExecutionResultIndex(rw.Writer(), blockID) }) } From 89688a214669b9c2239df767261502a01bc9e868 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 25 Sep 2025 16:53:37 -0700 Subject: [PATCH 10/87] update mocks --- storage/mock/execution_results.go | 52 ++++++++++------------- storage/mock/execution_results_reader.go | 30 +++++++++++++ storage/mock/light_transaction_results.go | 12 +++--- storage/store/results_test.go | 14 +++--- 4 files changed, 67 insertions(+), 41 deletions(-) diff --git a/storage/mock/execution_results.go b/storage/mock/execution_results.go index 88f3ef8a9ff..78af20d99f5 100644 --- a/storage/mock/execution_results.go +++ b/storage/mock/execution_results.go @@ -16,17 +16,17 @@ type ExecutionResults struct { mock.Mock } -// BatchIndex provides a mock function with given fields: lctx, blockID, resultID, batch -func (_m *ExecutionResults) BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { - ret := _m.Called(lctx, blockID, resultID, batch) +// BatchIndex provides a mock function with given fields: lctx, rw, blockID, resultID +func (_m *ExecutionResults) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { + ret := _m.Called(lctx, rw, blockID, resultID) if len(ret) == 0 { panic("no return value specified for BatchIndex") } var r0 error - if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, flow.Identifier, storage.ReaderBatchWriter) error); ok { - r0 = rf(lctx, blockID, resultID, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, flow.Identifier) error); ok { + r0 = rf(lctx, rw, blockID, resultID) } else { r0 = ret.Error(0) } @@ -130,40 +130,34 @@ func (_m *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResul return r0, r1 } -// ForceIndex provides a mock 
function with given fields: blockID, resultID -func (_m *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error { - ret := _m.Called(blockID, resultID) +// IDByBlockID provides a mock function with given fields: blockID +func (_m *ExecutionResults) IDByBlockID(blockID flow.Identifier) (flow.Identifier, error) { + ret := _m.Called(blockID) if len(ret) == 0 { - panic("no return value specified for ForceIndex") + panic("no return value specified for IDByBlockID") } - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) error); ok { - r0 = rf(blockID, resultID) - } else { - r0 = ret.Error(0) + var r0 flow.Identifier + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.Identifier, error)); ok { + return rf(blockID) } - - return r0 -} - -// Index provides a mock function with given fields: blockID, resultID -func (_m *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error { - ret := _m.Called(blockID, resultID) - - if len(ret) == 0 { - panic("no return value specified for Index") + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.Identifier); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } } - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) error); ok { - r0 = rf(blockID, resultID) + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) } else { - r0 = ret.Error(0) + r1 = ret.Error(1) } - return r0 + return r0, r1 } // Store provides a mock function with given fields: result diff --git a/storage/mock/execution_results_reader.go b/storage/mock/execution_results_reader.go index b812a794d1d..244624758e9 100644 --- a/storage/mock/execution_results_reader.go +++ b/storage/mock/execution_results_reader.go @@ -72,6 +72,36 @@ func (_m *ExecutionResultsReader) ByID(resultID flow.Identifier) (*flow.Executio return r0, r1 } +// IDByBlockID provides a mock function with given fields: blockID +func (_m *ExecutionResultsReader) IDByBlockID(blockID flow.Identifier) (flow.Identifier, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for IDByBlockID") + } + + var r0 flow.Identifier + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.Identifier, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.Identifier); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // NewExecutionResultsReader creates a new instance of ExecutionResultsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewExecutionResultsReader(t interface { diff --git a/storage/mock/light_transaction_results.go b/storage/mock/light_transaction_results.go index 779a3f89cd8..6f82df4ffe2 100644 --- a/storage/mock/light_transaction_results.go +++ b/storage/mock/light_transaction_results.go @@ -3,7 +3,9 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -14,17 +16,17 @@ type LightTransactionResults struct { mock.Mock } -// BatchStore provides a mock function with given fields: blockID, transactionResults, rw -func (_m *LightTransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw storage.ReaderBatchWriter) error { - ret := _m.Called(blockID, transactionResults, rw) +// BatchStore provides a mock function with given fields: lctx, blockID, transactionResults, rw +func (_m *LightTransactionResults) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw storage.ReaderBatchWriter) error { + ret := _m.Called(lctx, blockID, transactionResults, rw) if len(ret) == 0 { panic("no return value specified for BatchStore") } var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.LightTransactionResult, storage.ReaderBatchWriter) error); ok { - r0 = rf(blockID, transactionResults, rw) + if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, []flow.LightTransactionResult, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, blockID, transactionResults, rw) } else { r0 = ret.Error(0) } diff --git a/storage/store/results_test.go b/storage/store/results_test.go index 76c9b9d0a0b..e1fe0375b3a 100644 --- a/storage/store/results_test.go +++ b/storage/store/results_test.go @@ -27,7 +27,7 @@ func TestResultStoreAndRetrieve(t *testing.T) { unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchIndex(lctx, blockID, result.ID(), rw) + return store1.BatchIndex(lctx, rw, blockID, result.ID()) }) }) @@ -51,7 +51,7 @@ func TestResultStoreTwice(t *testing.T) { unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchIndex(lctx, blockID, result.ID(), rw) + return store1.BatchIndex(lctx, rw, blockID, result.ID()) }) }) @@ -60,7 +60,7 @@ func TestResultStoreTwice(t *testing.T) { unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchIndex(lctx, blockID, result.ID(), rw) + return store1.BatchIndex(lctx, rw, blockID, result.ID()) }) }) }) @@ -80,7 +80,7 @@ func TestResultBatchStoreTwice(t *testing.T) { err := store1.BatchStore(result, batch) require.NoError(t, err) - err = store1.BatchIndex(lctx, blockID, result.ID(), batch) + err = store1.BatchIndex(lctx, batch, blockID, result.ID()) require.NoError(t, err) return nil }) @@ -91,7 +91,7 @@ func TestResultBatchStoreTwice(t *testing.T) { err := store1.BatchStore(result, batch) require.NoError(t, err) - err = store1.BatchIndex(lctx, blockID, result.ID(), batch) + err = store1.BatchIndex(lctx, batch, blockID, result.ID()) require.NoError(t, err) return nil @@ -114,7 +114,7 @@ func 
TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchIndex(lctx, blockID, result1.ID(), rw) + return store1.BatchIndex(lctx, rw, blockID, result1.ID()) }) }) @@ -127,7 +127,7 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { var indexErr error unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - indexErr = store1.BatchIndex(lctx, blockID, result2.ID(), rw) + indexErr = store1.BatchIndex(lctx, rw, blockID, result2.ID()) return nil }) }) From 3b874003eb22695d4c19a2f59dc521a8ed588598 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 08:53:34 -0700 Subject: [PATCH 11/87] fix inmemory stores --- storage/store/inmemory/unsynchronized/events.go | 5 +++-- .../inmemory/unsynchronized/light_transaction_results.go | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/storage/store/inmemory/unsynchronized/events.go b/storage/store/inmemory/unsynchronized/events.go index 9bc54776625..c851b595175 100644 --- a/storage/store/inmemory/unsynchronized/events.go +++ b/storage/store/inmemory/unsynchronized/events.go @@ -4,6 +4,7 @@ import ( "fmt" "sync" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) @@ -103,7 +104,7 @@ func (e *Events) ByBlockIDEventType(blockID flow.Identifier, eventType flow.Even // Store will store events for the given block ID. // No errors are expected during normal operation. -func (e *Events) Store(blockID flow.Identifier, blockEvents []flow.EventsList) error { +func (e *Events) Store(_ lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error { var events []flow.Event for _, eventList := range blockEvents { events = append(events, eventList...) @@ -131,7 +132,7 @@ func (e *Events) Data() flow.EventsList { // BatchStore will store events for the given block ID in a given batch. // // This method is NOT implemented and will always return an error. -func (e *Events) BatchStore(flow.Identifier, []flow.EventsList, storage.ReaderBatchWriter) error { +func (e *Events) BatchStore(lockctx.Proof, flow.Identifier, []flow.EventsList, storage.ReaderBatchWriter) error { return fmt.Errorf("not implemented") } diff --git a/storage/store/inmemory/unsynchronized/light_transaction_results.go b/storage/store/inmemory/unsynchronized/light_transaction_results.go index 8ccf536be0f..9072c7f10f9 100644 --- a/storage/store/inmemory/unsynchronized/light_transaction_results.go +++ b/storage/store/inmemory/unsynchronized/light_transaction_results.go @@ -4,6 +4,7 @@ import ( "fmt" "sync" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" @@ -110,6 +111,6 @@ func (l *LightTransactionResults) Data() []flow.LightTransactionResult { // BatchStore inserts a batch of transaction result into a batch. // This method is not implemented and will always return an error. 
-func (l *LightTransactionResults) BatchStore(flow.Identifier, []flow.LightTransactionResult, storage.ReaderBatchWriter) error { +func (l *LightTransactionResults) BatchStore(lockctx.Proof, flow.Identifier, []flow.LightTransactionResult, storage.ReaderBatchWriter) error { return fmt.Errorf("not implemented") } From 87f056a0dbb00710490c7532e3417b8600651860 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 09:00:27 -0700 Subject: [PATCH 12/87] fix in memory indexer --- module/state_synchronization/indexer/in_memory_indexer.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/module/state_synchronization/indexer/in_memory_indexer.go b/module/state_synchronization/indexer/in_memory_indexer.go index 7af0586cfdc..6bfcb5b8631 100644 --- a/module/state_synchronization/indexer/in_memory_indexer.go +++ b/module/state_synchronization/indexer/in_memory_indexer.go @@ -98,6 +98,12 @@ func (i *InMemoryIndexer) IndexBlockData(data *execution_data.BlockExecutionData } defer lctx.Release() + err = lctx.AcquireLock(storage.LockInsertEvent) + if err != nil { + return fmt.Errorf("could not acquire lock for event insert: %w", err) + } + defer lctx.Release() + // Process all chunk data in a single pass for idx, chunk := range data.ChunkExecutionDatas { // Collect events @@ -129,7 +135,7 @@ func (i *InMemoryIndexer) IndexBlockData(data *execution_data.BlockExecutionData } } - if err := i.events.Store(data.BlockID, []flow.EventsList{events}); err != nil { + if err := i.events.Store(lctx, data.BlockID, []flow.EventsList{events}); err != nil { return fmt.Errorf("could not index events: %w", err) } From 81ec9d382e4b1e02f9b1dfc2de97d83bfa3832ca Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 09:54:32 -0700 Subject: [PATCH 13/87] add lock manager to an ingestion engine --- .../node_builder/access_node_builder.go | 2 ++ engine/access/ingestion/engine.go | 28 ++++++++++++++----- .../persisters/stores/events.go | 2 +- storage/store/events.go | 1 + storage/store/results.go | 6 +--- 5 files changed, 26 insertions(+), 13 deletions(-) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 3a2eeea0678..f44bfb05def 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -2238,6 +2238,8 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { node.EngineRegistry, node.State, node.Me, + node.StorageLockMgr, + node.ProtocolDB, node.Storage.Blocks, node.Storage.Results, node.Storage.Receipts, diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index d544b3effd8..79f2dfa6585 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -78,6 +79,8 @@ type Engine struct { // storage // FIX: remove direct DB access by substituting indexer module + db storage.DB + lockManager storage.LockManager blocks storage.Blocks executionReceipts storage.ExecutionReceipts maxReceiptHeight uint64 @@ -101,6 +104,8 @@ func New( net network.EngineRegistry, state protocol.State, me module.Local, + lockManager storage.LockManager, + db storage.DB, blocks storage.Blocks, executionResults storage.ExecutionResults, executionReceipts storage.ExecutionReceipts, @@ -133,6 +138,8 @@ func New( log: log.With().Str("engine", 
"ingestion").Logger(), state: state, me: me, + lockManager: lockManager, + db: db, blocks: blocks, executionResults: executionResults, executionReceipts: executionReceipts, @@ -371,19 +378,26 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { // TODO: substitute an indexer module as layer between engine and storage + // TODO (leothis): to include in the batch update // index the block storage with each of the collection guarantee err := e.blocks.IndexBlockContainingCollectionGuarantees(block.ID(), flow.GetIDs(block.Payload.Guarantees)) if err != nil { return fmt.Errorf("could not index block for collections: %w", err) } - // loop through seals and index ID -> result ID - for _, seal := range block.Payload.Seals { - err := e.executionResults.Index(seal.BlockID, seal.ResultID) - if err != nil { - return fmt.Errorf("could not index block for execution result: %w", err) - } - } + // TODO (leothis): to use a different lock ID + storage.WithLock(e.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // loop through seals and index ID -> result ID + for _, seal := range block.Payload.Seals { + err := e.executionResults.BatchIndex(lctx, rw, seal.BlockID, seal.ResultID) + if err != nil { + return fmt.Errorf("could not index block for execution result: %w", err) + } + } + return nil + }) + }) e.collectionSyncer.RequestCollectionsForBlock(block.Height, block.Payload.Guarantees) e.collectionExecutedMetric.BlockFinalized(block) diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/events.go b/module/executiondatasync/optimistic_sync/persisters/stores/events.go index 629adcc2a6f..9b3db41e90d 100644 --- a/module/executiondatasync/optimistic_sync/persisters/stores/events.go +++ b/module/executiondatasync/optimistic_sync/persisters/stores/events.go @@ -40,7 +40,7 @@ func (e *EventsStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWrite } if len(eventsList) > 0 { - if err := e.persistedEvents.BatchStore(e.blockID, []flow.EventsList{eventsList}, batch); err != nil { + if err := e.persistedEvents.BatchStore(lctx, e.blockID, []flow.EventsList{eventsList}, batch); err != nil { return fmt.Errorf("could not add events to batch: %w", err) } } diff --git a/storage/store/events.go b/storage/store/events.go index 2ee8cfd0f9f..ccc6a4a020f 100644 --- a/storage/store/events.go +++ b/storage/store/events.go @@ -73,6 +73,7 @@ func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEv } // Store will store events for the given block ID +// TODO (leo) deprecate, only used by AN, AN should use BatchStore instead func (e *Events) Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error { return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return e.BatchStore(lctx, blockID, blockEvents, rw) diff --git a/storage/store/results.go b/storage/store/results.go index 3fe1dadbd57..e82a2b1a47f 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -32,10 +32,6 @@ func NewExecutionResults(collector module.CacheMetrics, db storage.DB) *Executio return &result, err } - indexByBlockID := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { - return operation.IndexOwnExecutionResult(lctx, rw, blockID, resultID) - } - retrieveByBlockID := func(r storage.Reader, blockID flow.Identifier) (flow.Identifier, error) { var resultID 
flow.Identifier err := operation.LookupExecutionResult(r, blockID, &resultID) @@ -53,7 +49,7 @@ func NewExecutionResults(collector module.CacheMetrics, db storage.DB) *Executio // this API is only used to fetch result for last executed block, so in happy case, it only need to be 1, // we use 100 here to be more resilient to forks withLimit[flow.Identifier, flow.Identifier](100), - withStoreWithLock(indexByBlockID), + withStoreWithLock(operation.IndexOwnExecutionResult), withRetrieve(retrieveByBlockID), ), } From f85f95139bf4a96bd07a1c514d65f4b853a3bd65 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 10:10:57 -0700 Subject: [PATCH 14/87] fix access test --- engine/access/access_test.go | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/engine/access/access_test.go b/engine/access/access_test.go index f5620fafe66..bc945feee72 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -544,6 +544,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { func (suite *Suite) TestGetExecutionResultByBlockID() { suite.RunTest(func(handler *rpc.Handler, db storage.DB, all *store.All) { + lockManager := storage.NewTestingLockManager() // test block1 get by ID nonexistingID := unittest.IdentifierFixture() @@ -553,8 +554,16 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { unittest.WithExecutionResultBlockID(blockID), unittest.WithServiceEvents(3)) - require.NoError(suite.T(), all.Results.Store(er)) - require.NoError(suite.T(), all.Results.Index(blockID, er.ID())) + // TODO (leothis): use a different lock + require.NoError(suite.T(), storage.WithLock(lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := all.Results.BatchStore(er, rw) + if err != nil { + return err + } + return all.Results.BatchIndex(lctx, rw, blockID, er.ID()) + }) + })) assertResp := func( resp *accessproto.ExecutionResultForBlockIDResponse, @@ -626,6 +635,7 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { // is reported as sealed func (suite *Suite) TestGetSealedTransaction() { unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() db := pebbleimpl.ToDB(pdb) all := store.InitAll(metrics.NewNoopCollector(), db) enIdentities := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) @@ -740,6 +750,8 @@ func (suite *Suite) TestGetSealedTransaction() { suite.net, suite.state, suite.me, + lockManager, + db, all.Blocks, all.Results, all.Receipts, @@ -816,6 +828,7 @@ func (suite *Suite) TestGetSealedTransaction() { // transaction ID, block ID, and collection ID. 
func (suite *Suite) TestGetTransactionResult() { unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() db := pebbleimpl.ToDB(pdb) all := store.InitAll(metrics.NewNoopCollector(), db) originID := unittest.IdentifierFixture() @@ -963,6 +976,8 @@ func (suite *Suite) TestGetTransactionResult() { suite.net, suite.state, suite.me, + lockManager, + db, all.Blocks, all.Results, all.Receipts, @@ -1135,6 +1150,7 @@ func (suite *Suite) TestGetTransactionResult() { // the correct block id func (suite *Suite) TestExecuteScript() { unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() db := pebbleimpl.ToDB(pdb) all := store.InitAll(metrics.NewNoopCollector(), db) identities := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) @@ -1229,6 +1245,8 @@ func (suite *Suite) TestExecuteScript() { suite.net, suite.state, suite.me, + lockManager, + db, all.Blocks, all.Results, all.Receipts, From 060f4386882ec500d2460ce361e0675788a80a40 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 10:14:16 -0700 Subject: [PATCH 15/87] fix tests for BatchIndex --- engine/execution/pruner/core_test.go | 6 +++++- .../optimistic_sync/pipeline_functional_test.go | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/engine/execution/pruner/core_test.go b/engine/execution/pruner/core_test.go index 16d1eaa5540..4ccb2d539d8 100644 --- a/engine/execution/pruner/core_test.go +++ b/engine/execution/pruner/core_test.go @@ -74,7 +74,11 @@ func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { }) require.NoError(t, err) require.NoError(t, results.Store(chunk.Result)) - require.NoError(t, results.Index(chunk.Result.BlockID, chunk.Result.ID())) + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return results.BatchIndex(lctx, rw, chunk.Result.BlockID, chunk.Result.ID()) + }) + }) require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { return chunkDataPacks.StoreByChunkID(lctx, []*flow.ChunkDataPack{chunk.ChunkDataPack}) })) diff --git a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go index da90197c80e..acff634df27 100644 --- a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go +++ b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go @@ -140,7 +140,11 @@ func (p *PipelineFunctionalSuite) SetupTest() { err = p.results.Store(sealedExecutionResult) p.Require().NoError(err) - err = p.results.Index(sealedBlock.ID(), sealedExecutionResult.ID()) + err = unittest.WithLock(t, p.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return p.results.BatchIndex(lctx, rw, sealedBlock.ID(), sealedExecutionResult.ID()) + }) + }) p.Require().NoError(err) p.persistentLatestSealedResult, err = store.NewLatestPersistedSealedResult(p.consumerProgress, p.headers, p.results) From 14dcb42b9b68d8d64d5c9c71f89980761589f8c5 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 11:15:28 -0700 Subject: [PATCH 16/87] fix tests --- .../inmemory/unsynchronized/events_test.go | 2 +- storage/store/transaction_results_test.go | 17 
++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/storage/store/inmemory/unsynchronized/events_test.go b/storage/store/inmemory/unsynchronized/events_test.go index 3271bc9e0d7..87068e9f8ef 100644 --- a/storage/store/inmemory/unsynchronized/events_test.go +++ b/storage/store/inmemory/unsynchronized/events_test.go @@ -40,7 +40,7 @@ func TestEvents_HappyPath(t *testing.T) { // Store events expectedStoredEvents := flow.EventsList{event1, event2, event3} - err := eventsStore.Store(block.ID(), []flow.EventsList{expectedStoredEvents}) + err := eventsStore.Store(nil, block.ID(), []flow.EventsList{expectedStoredEvents}) require.NoError(t, err) // Retrieve events by block ID diff --git a/storage/store/transaction_results_test.go b/storage/store/transaction_results_test.go index 28860d71869..693ec7cb749 100644 --- a/storage/store/transaction_results_test.go +++ b/storage/store/transaction_results_test.go @@ -74,6 +74,7 @@ func TestBatchStoringTransactionResults(t *testing.T) { func TestBatchStoreAndBatchRemoveTransactionResults(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() const blockCount = 10 const txCountPerBlock = 10 @@ -98,14 +99,16 @@ func TestBatchStoreAndBatchRemoveTransactionResults(t *testing.T) { } // Store transaction results of multiple blocks - err = db.WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { - for _, blockID := range blockIDs { - err := st.BatchStore(blockID, txResults[blockID], rbw) - if err != nil { - return err + err = storage.WithLock(lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + for _, blockID := range blockIDs { + err := st.BatchStore(lctx, blockID, txResults[blockID], rbw) + if err != nil { + return err + } } - } - return nil + return nil + }) }) require.NoError(t, err) From a731f6982a7c72844b9ea254804202267de1f2f4 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 11:20:49 -0700 Subject: [PATCH 17/87] fix linter --- engine/access/ingestion/engine_test.go | 2 ++ engine/access/ingestion2/engine_test.go | 2 ++ .../ingestion2/finalized_block_processor.go | 29 ++++++++++++++----- .../optimistic_sync/persisters/block_test.go | 4 +-- 4 files changed, 27 insertions(+), 10 deletions(-) diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index ece7712446a..c9679e33b7f 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -213,6 +213,8 @@ func (s *Suite) initEngineAndSyncer(ctx irrecoverable.SignalerContext) (*Engine, s.net, s.proto.state, s.me, + s.lockManager, + s.db, s.blocks, s.results, s.receipts, diff --git a/engine/access/ingestion2/engine_test.go b/engine/access/ingestion2/engine_test.go index c7813d7b028..fcfb086569d 100644 --- a/engine/access/ingestion2/engine_test.go +++ b/engine/access/ingestion2/engine_test.go @@ -225,6 +225,8 @@ func (s *Suite) initEngineAndSyncer(ctx irrecoverable.SignalerContext) (*Engine, blockProcessor, err := NewFinalizedBlockProcessor( s.log, s.proto.state, + s.lockManager, + s.db, s.blocks, s.results, processedHeightInitializer, diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go index 78511dd9803..f5f9a62b535 100644 --- a/engine/access/ingestion2/finalized_block_processor.go +++ 
b/engine/access/ingestion2/finalized_block_processor.go @@ -3,6 +3,7 @@ package ingestion2 import ( "fmt" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -45,7 +46,10 @@ type FinalizedBlockProcessor struct { consumer *jobqueue.ComponentConsumer consumerNotifier engine.Notifier - blocks storage.Blocks + lockManager storage.LockManager + db storage.DB + + blocks storage.Blocks executionResults storage.ExecutionResults @@ -60,6 +64,8 @@ type FinalizedBlockProcessor struct { func NewFinalizedBlockProcessor( log zerolog.Logger, state protocol.State, + lockManager storage.LockManager, + db storage.DB, blocks storage.Blocks, executionResults storage.ExecutionResults, finalizedProcessedHeight storage.ConsumerProgressInitializer, @@ -75,6 +81,8 @@ func NewFinalizedBlockProcessor( consumerNotifier := engine.NewNotifier() processor := &FinalizedBlockProcessor{ log: log, + db: db, + lockManager: lockManager, blocks: blocks, executionResults: executionResults, consumerNotifier: consumerNotifier, @@ -150,13 +158,18 @@ func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { return fmt.Errorf("could not index block for collections: %w", err) } - // loop through seals and index ID -> result ID - for _, seal := range block.Payload.Seals { - err := p.executionResults.Index(seal.BlockID, seal.ResultID) - if err != nil { - return fmt.Errorf("could not index block for execution result: %w", err) - } - } + err = storage.WithLock(p.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // loop through seals and index ID -> result ID + for _, seal := range block.Payload.Seals { + err := p.executionResults.BatchIndex(lctx, rw, seal.BlockID, seal.ResultID) + if err != nil { + return fmt.Errorf("could not index block for execution result: %w", err) + } + } + return nil + }) + }) p.collectionSyncer.RequestCollectionsForBlock(block.Height, block.Payload.Guarantees) p.collectionExecutedMetric.BlockFinalized(block) diff --git a/module/executiondatasync/optimistic_sync/persisters/block_test.go b/module/executiondatasync/optimistic_sync/persisters/block_test.go index 41ec8762fa3..b4fc80cd1dc 100644 --- a/module/executiondatasync/optimistic_sync/persisters/block_test.go +++ b/module/executiondatasync/optimistic_sync/persisters/block_test.go @@ -98,7 +98,7 @@ func (p *PersisterSuite) populateInMemoryStorages() { p.Require().NoError(err) eventsList := unittest.EventsFixture(5) - err = p.inMemoryEvents.Store(p.executionResult.BlockID, []flow.EventsList{eventsList}) + err = p.inMemoryEvents.Store(nil, p.executionResult.BlockID, []flow.EventsList{eventsList}) p.Require().NoError(err) for i := 0; i < 2; i++ { @@ -133,7 +133,7 @@ func (p *PersisterSuite) populateInMemoryStorages() { func (p *PersisterSuite) TestPersister_PersistWithEmptyData() { t := p.T() - err := p.inMemoryEvents.Store(p.executionResult.BlockID, []flow.EventsList{}) + err := p.inMemoryEvents.Store(nil, p.executionResult.BlockID, []flow.EventsList{}) p.Require().NoError(err) err = p.inMemoryResults.Store(p.executionResult.BlockID, []flow.LightTransactionResult{}) From 726eaec402630df637035083f6bf9a6ec0ef2284 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 11:34:55 -0700 Subject: [PATCH 18/87] fix tests --- engine/access/ingestion/engine.go | 5 ++++- .../ingestion2/finalized_block_processor.go | 3 +++ engine/execution/pruner/core_test.go | 1 + 
storage/light_transaction_results.go | 1 + storage/operation/events_test.go | 3 ++- storage/operation/stats_test.go | 3 ++- storage/results.go | 1 + storage/store/events.go | 1 + storage/store/events_test.go | 6 ++++-- .../store/inmemory/unsynchronized/events.go | 1 + .../light_transaction_results.go | 1 + storage/store/light_transaction_results.go | 1 + .../store/light_transaction_results_test.go | 6 ++++-- storage/store/results.go | 1 + storage/store/results_test.go | 21 ++++++++++++------- storage/store/transaction_results.go | 1 + storage/store/transaction_results_test.go | 3 ++- storage/transaction_results.go | 1 + 18 files changed, 45 insertions(+), 15 deletions(-) diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 79f2dfa6585..05fb257c066 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -386,7 +386,7 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { } // TODO (leothis): to use a different lock ID - storage.WithLock(e.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = storage.WithLock(e.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // loop through seals and index ID -> result ID for _, seal := range block.Payload.Seals { @@ -398,6 +398,9 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { return nil }) }) + if err != nil { + return fmt.Errorf("could not index execution results: %w", err) + } e.collectionSyncer.RequestCollectionsForBlock(block.Height, block.Payload.Guarantees) e.collectionExecutedMetric.BlockFinalized(block) diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go index f5f9a62b535..bd468def6be 100644 --- a/engine/access/ingestion2/finalized_block_processor.go +++ b/engine/access/ingestion2/finalized_block_processor.go @@ -170,6 +170,9 @@ func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { return nil }) }) + if err != nil { + return fmt.Errorf("could not index execution results: %w", err) + } p.collectionSyncer.RequestCollectionsForBlock(block.Height, block.Payload.Guarantees) p.collectionExecutedMetric.BlockFinalized(block) diff --git a/engine/execution/pruner/core_test.go b/engine/execution/pruner/core_test.go index 4ccb2d539d8..1fd1416e6e7 100644 --- a/engine/execution/pruner/core_test.go +++ b/engine/execution/pruner/core_test.go @@ -79,6 +79,7 @@ func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { return results.BatchIndex(lctx, rw, chunk.Result.BlockID, chunk.Result.ID()) }) }) + require.NoError(t, err) require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { return chunkDataPacks.StoreByChunkID(lctx, []*flow.ChunkDataPack{chunk.ChunkDataPack}) })) diff --git a/storage/light_transaction_results.go b/storage/light_transaction_results.go index f3647154a57..ba9e7b473d2 100644 --- a/storage/light_transaction_results.go +++ b/storage/light_transaction_results.go @@ -2,6 +2,7 @@ package storage import ( "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) diff --git a/storage/operation/events_test.go b/storage/operation/events_test.go index 5348189dfb0..259cbc307c3 100644 --- a/storage/operation/events_test.go +++ b/storage/operation/events_test.go @@ -55,11 +55,12 @@ func TestRetrieveEventByBlockIDTxID(t *testing.T) 
{ ) // insert event into the db - unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.InsertEvent(lctx, rw.Writer(), b, event) }) }) + require.NoError(t, err) // update event arrays in the maps bEvents = append(bEvents, event) diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go index de4824591ad..563d118f200 100644 --- a/storage/operation/stats_test.go +++ b/storage/operation/stats_test.go @@ -16,7 +16,7 @@ func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // insert random events b := unittest.IdentifierFixture() @@ -55,6 +55,7 @@ func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { return nil }) }) + require.NoError(t, err) // summarize keys by first byte stats, err := operation.SummarizeKeysByFirstByteConcurrent(unittest.Logger(), db.Reader(), 10) diff --git a/storage/results.go b/storage/results.go index b052083c952..08ee658406c 100644 --- a/storage/results.go +++ b/storage/results.go @@ -2,6 +2,7 @@ package storage import ( "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) diff --git a/storage/store/events.go b/storage/store/events.go index ccc6a4a020f..08024acc164 100644 --- a/storage/store/events.go +++ b/storage/store/events.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" diff --git a/storage/store/events_test.go b/storage/store/events_test.go index a6b98609a9e..6c312d53590 100644 --- a/storage/store/events_test.go +++ b/storage/store/events_test.go @@ -50,12 +50,13 @@ func TestEventStoreRetrieve(t *testing.T) { {evt2_1}, } - unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // store event return events.BatchStore(lctx, blockID, expected, rw) }) }) + require.NoError(t, err) // retrieve by blockID actual, err := events.ByBlockID(blockID) @@ -174,9 +175,10 @@ func TestEventStoreAndRemove(t *testing.T) { {evt2_1}, } - unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { return store.Store(lctx, blockID, expected) }) + require.NoError(t, err) // Ensure it exists event, err := store.ByBlockID(blockID) diff --git a/storage/store/inmemory/unsynchronized/events.go b/storage/store/inmemory/unsynchronized/events.go index c851b595175..36188112d75 100644 --- a/storage/store/inmemory/unsynchronized/events.go +++ b/storage/store/inmemory/unsynchronized/events.go @@ -5,6 +5,7 @@ import ( "sync" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) diff --git 
a/storage/store/inmemory/unsynchronized/light_transaction_results.go b/storage/store/inmemory/unsynchronized/light_transaction_results.go index 9072c7f10f9..6014698471b 100644 --- a/storage/store/inmemory/unsynchronized/light_transaction_results.go +++ b/storage/store/inmemory/unsynchronized/light_transaction_results.go @@ -5,6 +5,7 @@ import ( "sync" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" diff --git a/storage/store/light_transaction_results.go b/storage/store/light_transaction_results.go index 769244735db..7cb85644064 100644 --- a/storage/store/light_transaction_results.go +++ b/storage/store/light_transaction_results.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" diff --git a/storage/store/light_transaction_results_test.go b/storage/store/light_transaction_results_test.go index d311fe3c4bb..76431d82746 100644 --- a/storage/store/light_transaction_results_test.go +++ b/storage/store/light_transaction_results_test.go @@ -26,18 +26,20 @@ func TestBatchStoringLightTransactionResults(t *testing.T) { txResults := getLightTransactionResultsFixture(10) t.Run("batch store1 results", func(t *testing.T) { - unittest.WithLock(t, lockManager, storage.LockInsertLightTransactionResult, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertLightTransactionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store1.BatchStore(lctx, blockID, txResults, rw) }) }) + require.NoError(t, err) // add a results to a new block to validate they are not included in lookups - unittest.WithLock(t, lockManager, storage.LockInsertLightTransactionResult, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertLightTransactionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store1.BatchStore(lctx, unittest.IdentifierFixture(), getLightTransactionResultsFixture(2), rw) }) }) + require.NoError(t, err) }) diff --git a/storage/store/results.go b/storage/store/results.go index e82a2b1a47f..aef8a398836 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" diff --git a/storage/store/results_test.go b/storage/store/results_test.go index e1fe0375b3a..8b268b35412 100644 --- a/storage/store/results_test.go +++ b/storage/store/results_test.go @@ -25,11 +25,12 @@ func TestResultStoreAndRetrieve(t *testing.T) { err := store1.Store(result) require.NoError(t, err) - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store1.BatchIndex(lctx, rw, blockID, result.ID()) }) }) + require.NoError(t, err) actual, err := store1.ByBlockID(blockID) require.NoError(t, err) @@ -49,20 +50,22 @@ func TestResultStoreTwice(t *testing.T) { err := store1.Store(result) require.NoError(t, err) - unittest.WithLock(t, lockManager, 
storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store1.BatchIndex(lctx, rw, blockID, result.ID()) }) }) + require.NoError(t, err) err = store1.Store(result) require.NoError(t, err) - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store1.BatchIndex(lctx, rw, blockID, result.ID()) }) }) + require.NoError(t, err) }) } @@ -75,7 +78,7 @@ func TestResultBatchStoreTwice(t *testing.T) { result := unittest.ExecutionResultFixture() blockID := unittest.IdentifierFixture() - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { err := store1.BatchStore(result, batch) require.NoError(t, err) @@ -85,8 +88,9 @@ func TestResultBatchStoreTwice(t *testing.T) { return nil }) }) + require.NoError(t, err) - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { err := store1.BatchStore(result, batch) require.NoError(t, err) @@ -97,6 +101,7 @@ func TestResultBatchStoreTwice(t *testing.T) { return nil }) }) + require.NoError(t, err) }) } @@ -112,11 +117,12 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { err := store1.Store(result1) require.NoError(t, err) - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store1.BatchIndex(lctx, rw, blockID, result1.ID()) }) }) + require.NoError(t, err) // we can store1 a different result, but we can't index // a different result for that block, because it will mean @@ -125,12 +131,13 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { require.NoError(t, err) var indexErr error - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { indexErr = store1.BatchIndex(lctx, rw, blockID, result2.ID()) return nil }) }) + require.NoError(t, err) require.Error(t, indexErr) require.True(t, errors.Is(indexErr, storage.ErrDataMismatch)) }) diff --git a/storage/store/transaction_results.go b/storage/store/transaction_results.go index e552b9d046f..5df96556473 100644 --- a/storage/store/transaction_results.go +++ b/storage/store/transaction_results.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" diff --git a/storage/store/transaction_results_test.go 
b/storage/store/transaction_results_test.go index 693ec7cb749..69664ceb46b 100644 --- a/storage/store/transaction_results_test.go +++ b/storage/store/transaction_results_test.go @@ -38,11 +38,12 @@ func TestBatchStoringTransactionResults(t *testing.T) { } txResults = append(txResults, expected) } - unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return st.BatchStore(lctx, blockID, txResults, rw) }) }) + require.NoError(t, err) for _, txResult := range txResults { actual, err := st.ByBlockIDTransactionID(blockID, txResult.TransactionID) diff --git a/storage/transaction_results.go b/storage/transaction_results.go index 7bdb7ca4aec..4b2631b717e 100644 --- a/storage/transaction_results.go +++ b/storage/transaction_results.go @@ -2,6 +2,7 @@ package storage import ( "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) From 0820d94b5e0ca0c21731bb020777c83d6ae83479 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 11:46:42 -0700 Subject: [PATCH 19/87] remove results.Store replace with BatchStore --- engine/execution/pruner/core_test.go | 8 ++- .../pipeline_functional_test.go | 10 ++-- module/pruner/pruners/chunk_data_pack_test.go | 8 ++- storage/results.go | 3 -- storage/store/results.go | 6 --- storage/store/results_test.go | 53 ++++++++++++------- 6 files changed, 54 insertions(+), 34 deletions(-) diff --git a/engine/execution/pruner/core_test.go b/engine/execution/pruner/core_test.go index 1fd1416e6e7..827006f9fca 100644 --- a/engine/execution/pruner/core_test.go +++ b/engine/execution/pruner/core_test.go @@ -73,10 +73,14 @@ func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { }) }) require.NoError(t, err) - require.NoError(t, results.Store(chunk.Result)) err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return results.BatchIndex(lctx, rw, chunk.Result.BlockID, chunk.Result.ID()) + err := results.BatchStore(chunk.Result, rw) + require.NoError(t, err) + + err = results.BatchIndex(lctx, rw, chunk.Result.BlockID, chunk.Result.ID()) + require.NoError(t, err) + return nil }) }) require.NoError(t, err) diff --git a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go index acff634df27..cdc2d326f0e 100644 --- a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go +++ b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go @@ -137,12 +137,14 @@ func (p *PipelineFunctionalSuite) SetupTest() { require.NoError(t, err) // Store and index sealed block execution result - err = p.results.Store(sealedExecutionResult) - p.Require().NoError(err) - err = unittest.WithLock(t, p.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return p.results.BatchIndex(lctx, rw, sealedBlock.ID(), sealedExecutionResult.ID()) + err := p.results.BatchStore(sealedExecutionResult, rw) + p.Require().NoError(err) + + err = p.results.BatchIndex(lctx, rw, sealedBlock.ID(), sealedExecutionResult.ID()) + p.Require().NoError(err) + return nil }) }) p.Require().NoError(err) diff --git 
a/module/pruner/pruners/chunk_data_pack_test.go b/module/pruner/pruners/chunk_data_pack_test.go index f595ed30bcd..9dc4d63e1ee 100644 --- a/module/pruner/pruners/chunk_data_pack_test.go +++ b/module/pruner/pruners/chunk_data_pack_test.go @@ -30,7 +30,13 @@ func TestChunkDataPackPruner(t *testing.T) { // store the chunks cdp1, result1 := unittest.ChunkDataPacksFixtureAndResult() - require.NoError(t, results.Store(result1)) + require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := results.BatchStore(result1, rw) + require.NoError(t, err) + return nil + }) + })) require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { return chunks.StoreByChunkID(lctx, cdp1) })) diff --git a/storage/results.go b/storage/results.go index 08ee658406c..2051232604e 100644 --- a/storage/results.go +++ b/storage/results.go @@ -20,9 +20,6 @@ type ExecutionResultsReader interface { type ExecutionResults interface { ExecutionResultsReader - // Store stores an execution result. - Store(result *flow.ExecutionResult) error - // BatchStore stores an execution result in a given batch BatchStore(result *flow.ExecutionResult, batch ReaderBatchWriter) error diff --git a/storage/store/results.go b/storage/store/results.go index aef8a398836..e44f89121f3 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -82,12 +82,6 @@ func (r *ExecutionResults) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatch return r.indexCache.PutWithLockTx(lctx, rw, blockID, resultID) } -func (r *ExecutionResults) Store(result *flow.ExecutionResult) error { - return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return r.store(rw, result) - }) -} - func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.ReaderBatchWriter) error { return r.store(batch, result) } diff --git a/storage/store/results_test.go b/storage/store/results_test.go index 8b268b35412..d84ebcba68c 100644 --- a/storage/store/results_test.go +++ b/storage/store/results_test.go @@ -22,12 +22,15 @@ func TestResultStoreAndRetrieve(t *testing.T) { result := unittest.ExecutionResultFixture() blockID := unittest.IdentifierFixture() - err := store1.Store(result) - require.NoError(t, err) - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchIndex(lctx, rw, blockID, result.ID()) + err := store1.BatchStore(result, rw) + require.NoError(t, err) + + err = store1.BatchIndex(lctx, rw, blockID, result.ID()) + require.NoError(t, err) + return nil }) }) require.NoError(t, err) @@ -47,22 +50,27 @@ func TestResultStoreTwice(t *testing.T) { result := unittest.ExecutionResultFixture() blockID := unittest.IdentifierFixture() - err := store1.Store(result) - require.NoError(t, err) - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchIndex(lctx, rw, blockID, result.ID()) + err := store1.BatchStore(result, rw) + 
require.NoError(t, err) + + err = store1.BatchIndex(lctx, rw, blockID, result.ID()) + require.NoError(t, err) + return nil }) }) require.NoError(t, err) - err = store1.Store(result) - require.NoError(t, err) - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchIndex(lctx, rw, blockID, result.ID()) + err := store1.BatchStore(result, rw) + require.NoError(t, err) + + err = store1.BatchIndex(lctx, rw, blockID, result.ID()) + require.NoError(t, err) + return nil }) }) require.NoError(t, err) @@ -114,20 +122,29 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { result1 := unittest.ExecutionResultFixture() result2 := unittest.ExecutionResultFixture() blockID := unittest.IdentifierFixture() - err := store1.Store(result1) - require.NoError(t, err) - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return store1.BatchIndex(lctx, rw, blockID, result1.ID()) + err := store1.BatchStore(result1, rw) + require.NoError(t, err) + + err = store1.BatchIndex(lctx, rw, blockID, result1.ID()) + require.NoError(t, err) + return nil }) }) require.NoError(t, err) - // we can store1 a different result, but we can't index + // we can store a different result, but we can't index // a different result for that block, because it will mean // one block has two different results. - err = store1.Store(result2) + err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := store1.BatchStore(result2, rw) + require.NoError(t, err) + return nil + }) + }) require.NoError(t, err) var indexErr error From e1c6cc7b6a2232c0aa5333e3098eac22de208ac9 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 12:09:17 -0700 Subject: [PATCH 20/87] refactor InsertResult --- storage/operation/results.go | 4 ++-- storage/operation/results_test.go | 2 +- storage/store/results.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/storage/operation/results.go b/storage/operation/results.go index 8275c102ca9..b9133aca938 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -16,8 +16,8 @@ import ( // of data corruption, because for the same key, we expect the same value. // // No errors are expected during normal operation. -func InsertExecutionResult(w storage.Writer, result *flow.ExecutionResult) error { - return UpsertByKey(w, MakePrefix(codeExecutionResult, result.ID()), result) +func InsertExecutionResult(w storage.Writer, resultID flow.Identifier, result *flow.ExecutionResult) error { + return UpsertByKey(w, MakePrefix(codeExecutionResult, resultID), result) } // RetrieveExecutionResult retrieves an Execution Result by its ID. 
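A minimal usage sketch of the refactored helper, with the result ID computed once by the caller and reused for both the write key and the later lookup. It assumes the same identifiers the surrounding tests already use (a storage.DB named db, plus operation, unittest, require, and flow):

    expected := unittest.ExecutionResultFixture()
    resultID := expected.ID() // derived once; InsertExecutionResult no longer recomputes it

    err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
        return operation.InsertExecutionResult(rw.Writer(), resultID, expected)
    })
    require.NoError(t, err)

    var actual flow.ExecutionResult
    require.NoError(t, operation.RetrieveExecutionResult(db.Reader(), resultID, &actual))
    require.Equal(t, *expected, actual)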
diff --git a/storage/operation/results_test.go b/storage/operation/results_test.go index a2f59425b1c..9b5dd6f7695 100644 --- a/storage/operation/results_test.go +++ b/storage/operation/results_test.go @@ -18,7 +18,7 @@ func TestResults_InsertRetrieve(t *testing.T) { expected := unittest.ExecutionResultFixture() err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return operation.InsertExecutionResult(rw.Writer(), expected) + return operation.InsertExecutionResult(rw.Writer(), expected.ID(), expected) }) require.Nil(t, err) diff --git a/storage/store/results.go b/storage/store/results.go index e44f89121f3..d5d0c5eda8c 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -23,8 +23,8 @@ var _ storage.ExecutionResults = (*ExecutionResults)(nil) func NewExecutionResults(collector module.CacheMetrics, db storage.DB) *ExecutionResults { - store := func(rw storage.ReaderBatchWriter, _ flow.Identifier, result *flow.ExecutionResult) error { - return operation.InsertExecutionResult(rw.Writer(), result) + store := func(rw storage.ReaderBatchWriter, resultID flow.Identifier, result *flow.ExecutionResult) error { + return operation.InsertExecutionResult(rw.Writer(), resultID, result) } retrieve := func(r storage.Reader, resultID flow.Identifier) (*flow.ExecutionResult, error) { From 9855b1ec1c08eab016634f07907600c5647d9f47 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 13:53:46 -0700 Subject: [PATCH 21/87] fix mocks --- storage/mock/execution_results.go | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/storage/mock/execution_results.go b/storage/mock/execution_results.go index 78af20d99f5..868351f817f 100644 --- a/storage/mock/execution_results.go +++ b/storage/mock/execution_results.go @@ -160,24 +160,6 @@ func (_m *ExecutionResults) IDByBlockID(blockID flow.Identifier) (flow.Identifie return r0, r1 } -// Store provides a mock function with given fields: result -func (_m *ExecutionResults) Store(result *flow.ExecutionResult) error { - ret := _m.Called(result) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.ExecutionResult) error); ok { - r0 = rf(result) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewExecutionResults creates a new instance of ExecutionResults. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewExecutionResults(t interface { From 45dc92ad441fef3de53359c63f13e7cd8fb95ef8 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 14:26:34 -0700 Subject: [PATCH 22/87] fix badger state --- state/protocol/badger/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index 55fa4ca81da..ffd310dbc67 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -300,7 +300,7 @@ func bootstrapSealingSegment( err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { w := rw.Writer() for _, result := range segment.ExecutionResults { - err := operation.InsertExecutionResult(w, result) + err := operation.InsertExecutionResult(w, result.ID(), result) if err != nil { return fmt.Errorf("could not insert execution result: %w", err) } From ce2156e5e3be83f7a16d31f080995f14548dacc0 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 14:27:57 -0700 Subject: [PATCH 23/87] fix tests --- storage/operation/stats_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go index 563d118f200..ded3cf8eb55 100644 --- a/storage/operation/stats_test.go +++ b/storage/operation/stats_test.go @@ -46,7 +46,7 @@ func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { // insert 20 results for i := 0; i < 20; i++ { result := unittest.ExecutionResultFixture() - err := operation.InsertExecutionResult(rw.Writer(), result) + err := operation.InsertExecutionResult(rw.Writer(), result.ID(), result) if err != nil { return err } From 32f653d455c2fcbd64e586284b0bc2ab98884495 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 18:55:11 -0700 Subject: [PATCH 24/87] refactor BatchIndexBlockContainingCollectionGuarantees --- engine/access/ingestion/engine.go | 7 ++- engine/access/ingestion/engine_test.go | 6 +-- engine/access/ingestion2/engine_test.go | 6 +-- .../ingestion2/finalized_block_processor.go | 6 ++- storage/blocks.go | 8 +-- storage/locks.go | 3 ++ storage/mock/blocks.go | 18 +++++++ storage/operation/headers.go | 40 +++++++++++++++ storage/operation/results.go | 3 +- storage/operation/stats_test.go | 49 ++++++++++--------- storage/store/blocks.go | 24 ++------- 11 files changed, 116 insertions(+), 54 deletions(-) diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 05fb257c066..c165af61369 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -378,9 +378,12 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { // TODO: substitute an indexer module as layer between engine and storage - // TODO (leothis): to include in the batch update // index the block storage with each of the collection guarantee - err := e.blocks.IndexBlockContainingCollectionGuarantees(block.ID(), flow.GetIDs(block.Payload.Guarantees)) + err := storage.WithLock(e.lockManager, storage.LockIndexFinalizedBlock, func(lctx lockctx.Context) error { + return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return e.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) + }) + }) if err != nil { return fmt.Errorf("could not index block for collections: %w", err) } diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index c9679e33b7f..730859fcde8 100644 --- 
a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -296,7 +296,7 @@ func (s *Suite) TestOnFinalizedBlockSingle() { } // expect that the block storage is indexed with each of the collection guarantee - s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() + s.blocks.On("BatchIndexBlockContainingCollectionGuarantees", mock.Anything, mock.Anything, block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() for _, seal := range block.Payload.Seals { s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once() } @@ -372,7 +372,7 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { // expected all new blocks after last block processed for _, block := range blocks { - s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() + s.blocks.On("BatchIndexBlockContainingCollectionGuarantees", mock.Anything, mock.Anything, block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() for _, cg := range block.Payload.Guarantees { s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) { @@ -400,7 +400,7 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { } s.headers.AssertExpectations(s.T()) - s.blocks.AssertNumberOfCalls(s.T(), "IndexBlockContainingCollectionGuarantees", newBlocksCount) + s.blocks.AssertNumberOfCalls(s.T(), "BatchIndexBlockContainingCollectionGuarantees", newBlocksCount) s.request.AssertNumberOfCalls(s.T(), "EntityByID", expectedEntityByIDCalls) s.results.AssertNumberOfCalls(s.T(), "Index", expectedIndexCalls) } diff --git a/engine/access/ingestion2/engine_test.go b/engine/access/ingestion2/engine_test.go index fcfb086569d..a56c07a0259 100644 --- a/engine/access/ingestion2/engine_test.go +++ b/engine/access/ingestion2/engine_test.go @@ -316,7 +316,7 @@ func (s *Suite) TestOnFinalizedBlockSingle() { } // expect that the block storage is indexed with each of the collection guarantee - s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() + s.blocks.On("BatchIndexBlockContainingCollectionGuarantees", mock.Anything, mock.Anything, block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() for _, seal := range block.Payload.Seals { s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once() } @@ -389,7 +389,7 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { // expected all new blocks after last block processed for _, block := range blocks { - s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() + s.blocks.On("BatchIndexBlockContainingCollectionGuarantees", mock.Anything, mock.Anything, block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() for _, cg := range block.Payload.Guarantees { s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) { @@ -414,7 +414,7 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { } s.headers.AssertExpectations(s.T()) - s.blocks.AssertNumberOfCalls(s.T(), "IndexBlockContainingCollectionGuarantees", newBlocksCount) + s.blocks.AssertNumberOfCalls(s.T(), "BatchIndexBlockContainingCollectionGuarantees", 
newBlocksCount) s.request.AssertNumberOfCalls(s.T(), "EntityByID", expectedEntityByIDCalls) s.results.AssertNumberOfCalls(s.T(), "Index", expectedIndexCalls) } diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go index bd468def6be..d0e88089b7c 100644 --- a/engine/access/ingestion2/finalized_block_processor.go +++ b/engine/access/ingestion2/finalized_block_processor.go @@ -153,7 +153,11 @@ func (p *FinalizedBlockProcessor) processFinalizedBlockJobCallback( // // No errors are expected during normal operations. func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { - err := p.blocks.IndexBlockContainingCollectionGuarantees(block.ID(), flow.GetIDs(block.Payload.Guarantees)) + err := storage.WithLock(p.lockManager, storage.LockIndexFinalizedBlock, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return p.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) + }) + }) if err != nil { return fmt.Errorf("could not index block for collections: %w", err) } diff --git a/storage/blocks.go b/storage/blocks.go index ad66f7f8b3b..bebf1c8aecb 100644 --- a/storage/blocks.go +++ b/storage/blocks.go @@ -87,8 +87,9 @@ type Blocks interface { // to decode an existing database value ByCollectionID(collID flow.Identifier) (*flow.Block, error) - // IndexBlockContainingCollectionGuarantees populates an index `guaranteeID->blockID` for each guarantee - // which appears in the block. + // BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees. + // The caller must acquire a storage.LockIndexFinalizedBlock lock. + // // CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation // assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY // *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate). @@ -96,6 +97,7 @@ type Blocks interface { // (one-to-many mapping) for soft finality and the mature protocol. // // Error returns: + // - storage.ErrAlreadyExists if any collection guarantee is already indexed // - generic error in case of unexpected failure from the database layer or encoding failure. - IndexBlockContainingCollectionGuarantees(blockID flow.Identifier, collIDs []flow.Identifier) error + BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error } diff --git a/storage/locks.go b/storage/locks.go index 0b699443296..02705061208 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -34,6 +34,8 @@ const ( LockBootstrapping = "lock_bootstrapping" // LockInsertChunkDataPack protects the insertion of chunk data packs (not yet used anywhere LockInsertChunkDataPack = "lock_insert_chunk_data_pack" + // LockIndexFinalizedBlock protects AN indexing finalized blocks by block ID. + LockIndexFinalizedBlock = "lock_index_finalized_block" ) // Locks returns a list of all named locks used by the storage layer. 
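Since BatchIndexBlockContainingCollectionGuarantees now documents a storage.ErrAlreadyExists return, a caller that re-processes an already indexed finalized block could choose to treat that error as benign instead of wrapping it the way the processor above does. A sketch of that option only, assuming the lockManager, db, blocks, and block variables from the processor and that the sentinel error is propagated (possibly %w-wrapped) out of storage.WithLock:

    err := storage.WithLock(lockManager, storage.LockIndexFinalizedBlock, func(lctx lockctx.Context) error {
        return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
            return blocks.BatchIndexBlockContainingCollectionGuarantees(
                lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees))
        })
    })
    if errors.Is(err, storage.ErrAlreadyExists) {
        return nil // guarantees for this finalized block were indexed by an earlier attempt
    }
    if err != nil {
        return fmt.Errorf("could not index block for collections: %w", err)
    }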
@@ -49,6 +51,7 @@ func Locks() []string { LockInsertLightTransactionResult, LockBootstrapping, LockInsertChunkDataPack, + LockIndexFinalizedBlock, } } diff --git a/storage/mock/blocks.go b/storage/mock/blocks.go index 15e799ccff2..5c44ab68739 100644 --- a/storage/mock/blocks.go +++ b/storage/mock/blocks.go @@ -154,6 +154,24 @@ func (_m *Blocks) ByView(view uint64) (*flow.Block, error) { return r0, r1 } +// BatchIndexBlockContainingCollectionGuarantees provides a mock function with given fields: lctx, rw, blockID, collIDs +func (_m *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error { + ret := _m.Called(lctx, rw, blockID, collIDs) + + if len(ret) == 0 { + panic("no return value specified for BatchIndexBlockContainingCollectionGuarantees") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, []flow.Identifier) error); ok { + r0 = rf(lctx, rw, blockID, collIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // IndexBlockContainingCollectionGuarantees provides a mock function with given fields: blockID, collIDs func (_m *Blocks) IndexBlockContainingCollectionGuarantees(blockID flow.Identifier, collIDs []flow.Identifier) error { ret := _m.Called(blockID, collIDs) diff --git a/storage/operation/headers.go b/storage/operation/headers.go index 5036c08aa8f..1335ba9a203 100644 --- a/storage/operation/headers.go +++ b/storage/operation/headers.go @@ -141,6 +141,46 @@ func IndexBlockContainingCollectionGuarantee(w storage.Writer, collID flow.Ident return UpsertByKey(w, MakePrefix(codeCollectionBlock, collID), blockID) } +// BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees. +// The caller must acquire a storage.LockIndexFinalizedBlock lock. +// +// CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation +// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY +// *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate). +// Hence, this function should be treated as a temporary solution, which requires generalization +// (one-to-many mapping) for soft finality and the mature protocol. 
+// +// Expected errors during normal operations: +// - [storage.ErrAlreadyExists] if any collection guarantee is already indexed +func BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockIndexFinalizedBlock) { + return fmt.Errorf("BatchIndexBlockContainingCollectionGuarantees requires %v", storage.LockIndexFinalizedBlock) + } + + // Check if any keys already exist + for _, collID := range collIDs { + key := MakePrefix(codeCollectionBlock, collID) + exists, err := KeyExists(rw.GlobalReader(), key) + if err != nil { + return fmt.Errorf("could not check if collection guarantee is already indexed: %w", err) + } + if exists { + return fmt.Errorf("collection guarantee (%x) is already indexed: %w", collID, storage.ErrAlreadyExists) + } + } + + // Index all collection guarantees + for _, collID := range collIDs { + key := MakePrefix(codeCollectionBlock, collID) + err := UpsertByKey(rw.Writer(), key, blockID) + if err != nil { + return fmt.Errorf("could not index collection guarantee (%x): %w", collID, err) + } + } + + return nil +} + // LookupBlockContainingCollectionGuarantee retrieves the block containing the [flow.CollectionGuarantee] with the given ID. // // CAUTION: A collection can be included in multiple *unfinalized* blocks. However, the implementation diff --git a/storage/operation/results.go b/storage/operation/results.go index b9133aca938..785c68d0bd0 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -37,7 +37,8 @@ func IndexOwnExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, b // during bootstrapping, we index the sealed root block or the spork root block, which is not // produced by the node itself, but we still need to index its execution result to be able to // execute next block - lctx.HoldsLock(storage.LockBootstrapping) + lctx.HoldsLock(storage.LockBootstrapping) || + lctx.HoldsLock(storage.LockIndexFinalizedBlock) if !held { return fmt.Errorf("missing require locks: %s or %s", storage.LockInsertOwnReceipt, storage.LockBootstrapping) } diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go index ded3cf8eb55..c5bdee6472f 100644 --- a/storage/operation/stats_test.go +++ b/storage/operation/stats_test.go @@ -29,31 +29,36 @@ func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { } // insert 100 chunk data packs - for i := 0; i < 100; i++ { - collectionID := unittest.IdentifierFixture() - cdp := &storage.StoredChunkDataPack{ - ChunkID: unittest.IdentifierFixture(), - StartState: unittest.StateCommitmentFixture(), - Proof: []byte{'p'}, - CollectionID: collectionID, + return unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx2 lockctx.Context) error { + for i := 0; i < 100; i++ { + collectionID := unittest.IdentifierFixture() + cdp := &storage.StoredChunkDataPack{ + ChunkID: unittest.IdentifierFixture(), + StartState: unittest.StateCommitmentFixture(), + Proof: []byte{'p'}, + CollectionID: collectionID, + } + err := operation.InsertChunkDataPack(lctx2, rw, cdp) + if err != nil { + return err + } } - err := operation.InsertChunkDataPack(lctx, rw, cdp) - if err != nil { - return err - } - } + return nil + }) + }) + }) + require.NoError(t, err) - // insert 20 results - for i := 0; i < 20; i++ { - result := unittest.ExecutionResultFixture() - err := operation.InsertExecutionResult(rw.Writer(), result.ID(), result) - if err != nil { - return err - } + // 
insert 20 results + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for i := 0; i < 20; i++ { + result := unittest.ExecutionResultFixture() + err := operation.InsertExecutionResult(rw.Writer(), result.ID(), result) + if err != nil { + return err } - - return nil - }) + } + return nil }) require.NoError(t, err) diff --git a/storage/store/blocks.go b/storage/store/blocks.go index c34a0cc53ea..8098ac62a5a 100644 --- a/storage/store/blocks.go +++ b/storage/store/blocks.go @@ -215,24 +215,10 @@ func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { return b.ByID(blockID) } -// IndexBlockContainingCollectionGuarantees populates an index `guaranteeID->blockID` for each guarantee -// which appears in the block. -// CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation -// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY -// *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate). -// Hence, this function should be treated as a temporary solution, which requires generalization -// (one-to-many mapping) for soft finality and the mature protocol. -// +// BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees. +// The caller must acquire a storage.LockIndexFinalizedBlock lock. // Error returns: -// - generic error in case of unexpected failure from the database layer or encoding failure. -func (b *Blocks) IndexBlockContainingCollectionGuarantees(blockID flow.Identifier, guaranteeIDs []flow.Identifier) error { - return b.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - for _, guaranteeID := range guaranteeIDs { - err := operation.IndexBlockContainingCollectionGuarantee(rw.Writer(), guaranteeID, blockID) - if err != nil { - return fmt.Errorf("could not index collection block (%x): %w", guaranteeID, err) - } - } - return nil - }) +// - storage.ErrAlreadyExists if any collection ID has already been indexed +func (b *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error { + return operation.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, blockID, collIDs) } From f47041df77f6e0b86aa8b164065bf2da4f313a29 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 19:00:55 -0700 Subject: [PATCH 25/87] use different lock id --- engine/access/access_test.go | 3 +-- engine/access/ingestion/engine.go | 15 +++++---------- storage/operation/results.go | 2 +- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/engine/access/access_test.go b/engine/access/access_test.go index bc945feee72..ceaa5c976af 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -554,8 +554,7 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { unittest.WithExecutionResultBlockID(blockID), unittest.WithServiceEvents(3)) - // TODO (leothis): use a different lock - require.NoError(suite.T(), storage.WithLock(lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + require.NoError(suite.T(), storage.WithLock(lockManager, storage.LockIndexFinalizedBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := all.Results.BatchStore(er, rw) if err != nil { diff --git 
a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index c165af61369..50a46f0ddfc 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -381,16 +381,11 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { // index the block storage with each of the collection guarantee err := storage.WithLock(e.lockManager, storage.LockIndexFinalizedBlock, func(lctx lockctx.Context) error { return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return e.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) - }) - }) - if err != nil { - return fmt.Errorf("could not index block for collections: %w", err) - } + err := e.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) + if err != nil { + return fmt.Errorf("could not index block for collections: %w", err) + } - // TODO (leothis): to use a different lock ID - err = storage.WithLock(e.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { - return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // loop through seals and index ID -> result ID for _, seal := range block.Payload.Seals { err := e.executionResults.BatchIndex(lctx, rw, seal.BlockID, seal.ResultID) @@ -402,7 +397,7 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { }) }) if err != nil { - return fmt.Errorf("could not index execution results: %w", err) + return fmt.Errorf("could not index block for collections: %w", err) } e.collectionSyncer.RequestCollectionsForBlock(block.Height, block.Payload.Guarantees) diff --git a/storage/operation/results.go b/storage/operation/results.go index 785c68d0bd0..a57c69f699a 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -29,7 +29,7 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result // IndexOwnExecutionResult indexes the result of the given block. // It is used by EN to index the result of a block to continue executing subsequent blocks. -// The caller must acquire either [storage.LockInsertOwnReceipt] or [storage.LockBootstrapping] +// The caller must acquire either [storage.LockInsertOwnReceipt] or [storage.LockBootstrapping] or [storage.LockIndexFinalizedBlock] // // No errors are expected during normal operation. 
func IndexOwnExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { From bb10b84b12f99be38fe668169cc08964eefd3fe9 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 3 Oct 2025 19:57:17 -0700 Subject: [PATCH 26/87] update mocks --- storage/mock/blocks.go | 54 ++++++++++++++---------------------------- 1 file changed, 18 insertions(+), 36 deletions(-) diff --git a/storage/mock/blocks.go b/storage/mock/blocks.go index 5c44ab68739..b6807aaa983 100644 --- a/storage/mock/blocks.go +++ b/storage/mock/blocks.go @@ -16,6 +16,24 @@ type Blocks struct { mock.Mock } +// BatchIndexBlockContainingCollectionGuarantees provides a mock function with given fields: lctx, rw, blockID, collIDs +func (_m *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error { + ret := _m.Called(lctx, rw, blockID, collIDs) + + if len(ret) == 0 { + panic("no return value specified for BatchIndexBlockContainingCollectionGuarantees") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, []flow.Identifier) error); ok { + r0 = rf(lctx, rw, blockID, collIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // BatchStore provides a mock function with given fields: lctx, rw, proposal func (_m *Blocks) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, proposal *flow.Proposal) error { ret := _m.Called(lctx, rw, proposal) @@ -154,42 +172,6 @@ func (_m *Blocks) ByView(view uint64) (*flow.Block, error) { return r0, r1 } -// BatchIndexBlockContainingCollectionGuarantees provides a mock function with given fields: lctx, rw, blockID, collIDs -func (_m *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error { - ret := _m.Called(lctx, rw, blockID, collIDs) - - if len(ret) == 0 { - panic("no return value specified for BatchIndexBlockContainingCollectionGuarantees") - } - - var r0 error - if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, []flow.Identifier) error); ok { - r0 = rf(lctx, rw, blockID, collIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// IndexBlockContainingCollectionGuarantees provides a mock function with given fields: blockID, collIDs -func (_m *Blocks) IndexBlockContainingCollectionGuarantees(blockID flow.Identifier, collIDs []flow.Identifier) error { - ret := _m.Called(blockID, collIDs) - - if len(ret) == 0 { - panic("no return value specified for IndexBlockContainingCollectionGuarantees") - } - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.Identifier) error); ok { - r0 = rf(blockID, collIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // ProposalByHeight provides a mock function with given fields: height func (_m *Blocks) ProposalByHeight(height uint64) (*flow.Proposal, error) { ret := _m.Called(height) From 08262708a2c4680487eb41f9f484e42f9da79b41 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 06:56:44 -0700 Subject: [PATCH 27/87] fix execution tests --- engine/execution/state/state.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index b5363fa569a..bdd325894f0 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -410,8 
+410,8 @@ func (s *state) saveExecutionResults( return fmt.Errorf("can not retrieve chunk data packs: %w", err) } - // Acquire both locks to ensure it's concurrent safe when inserting the execution results and chunk data packs. - return storage.WithLocks(s.lockManager, []string{storage.LockInsertOwnReceipt, storage.LockInsertEvent}, func(lctx lockctx.Context) error { + // Acquire locks to ensure it's concurrent safe when inserting the execution results and chunk data packs. + return storage.WithLocks(s.lockManager, []string{storage.LockInsertOwnReceipt, storage.LockInsertEvent, storage.LockInsertChunkDataPack}, func(lctx lockctx.Context) error { err := s.chunkDataPacks.StoreByChunkID(lctx, chunks) if err != nil { return fmt.Errorf("can not store multiple chunk data pack: %w", err) From 42a46205a2a5ad7948c4a6847bf962b756c9b109 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 07:18:18 -0700 Subject: [PATCH 28/87] fix optimisic sync persister tests --- .../optimistic_sync/persisters/block.go | 10 ++++++++++ .../optimistic_sync/pipeline_functional_test.go | 12 +++++++++++- .../indexer/in_memory_indexer.go | 4 ++-- storage/locks.go | 3 +++ 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/module/executiondatasync/optimistic_sync/persisters/block.go b/module/executiondatasync/optimistic_sync/persisters/block.go index cfb617e858b..0d6862d0ef0 100644 --- a/module/executiondatasync/optimistic_sync/persisters/block.go +++ b/module/executiondatasync/optimistic_sync/persisters/block.go @@ -67,6 +67,16 @@ func (p *BlockPersister) Persist() error { if err != nil { return fmt.Errorf("could not acquire lock for inserting light collections: %w", err) } + + err = lctx.AcquireLock(storage.LockInsertEvent) + if err != nil { + return fmt.Errorf("could not acquire lock for inserting events: %w", err) + } + + err = lctx.AcquireLock(storage.LockInsertLightTransactionResult) + if err != nil { + return fmt.Errorf("could not acquire lock for inserting light transaction results: %w", err) + } defer lctx.Release() err = p.protocolDB.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { diff --git a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go index cdc2d326f0e..3b9777ca9b5 100644 --- a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go +++ b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go @@ -185,6 +185,16 @@ func (p *PipelineFunctionalSuite) TestPipelineCompletesSuccessfully() { p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once() p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + // Check for errors in a separate goroutine + go func() { + select { + case err := <-errChan: + if err != nil { + p.T().Errorf("Pipeline error: %v", err) + } + } + }() + pipeline.OnParentStateUpdated(StateComplete) waitForStateUpdates(p.T(), updateChan, StateProcessing, StateWaitingPersist) @@ -277,7 +287,7 @@ func (p *PipelineFunctionalSuite) TestPipelinePersistingError() { // Mock events storage to simulate an error on a persisting step. In normal flow and with real storages, // it is hard to make a meaningful error explicitly. 
mockEvents := storagemock.NewEvents(p.T()) - mockEvents.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(expectedError).Once() + mockEvents.On("BatchStore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(expectedError).Once() p.persistentEvents = mockEvents p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once() diff --git a/module/state_synchronization/indexer/in_memory_indexer.go b/module/state_synchronization/indexer/in_memory_indexer.go index 6bfcb5b8631..dfecdfd4e16 100644 --- a/module/state_synchronization/indexer/in_memory_indexer.go +++ b/module/state_synchronization/indexer/in_memory_indexer.go @@ -92,17 +92,17 @@ func (i *InMemoryIndexer) IndexBlockData(data *execution_data.BlockExecutionData indexedCollections := 0 lctx := i.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertCollection) if err != nil { return fmt.Errorf("could not acquire lock for collection insert: %w", err) } - defer lctx.Release() err = lctx.AcquireLock(storage.LockInsertEvent) if err != nil { return fmt.Errorf("could not acquire lock for event insert: %w", err) } - defer lctx.Release() // Process all chunk data in a single pass for idx, chunk := range data.ChunkExecutionDatas { diff --git a/storage/locks.go b/storage/locks.go index 02705061208..4d6983283b2 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -76,6 +76,9 @@ func makeLockPolicy() lockctx.Policy { Add(LockInsertBlock, LockFinalizeBlock). Add(LockFinalizeBlock, LockBootstrapping). Add(LockInsertOwnReceipt, LockInsertEvent). + Add(LockInsertCollection, LockInsertEvent). + Add(LockInsertCollection, LockInsertLightTransactionResult). + Add(LockInsertEvent, LockInsertLightTransactionResult). Build() } From 25c93b82afbc0872829a5c4e97056c4106e837db Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 07:22:08 -0700 Subject: [PATCH 29/87] fix execution engine tests --- storage/locks.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/locks.go b/storage/locks.go index 4d6983283b2..88ba0928e42 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -76,6 +76,8 @@ func makeLockPolicy() lockctx.Policy { Add(LockInsertBlock, LockFinalizeBlock). Add(LockFinalizeBlock, LockBootstrapping). Add(LockInsertOwnReceipt, LockInsertEvent). + Add(LockInsertOwnReceipt, LockInsertChunkDataPack). + Add(LockInsertEvent, LockInsertChunkDataPack). Add(LockInsertCollection, LockInsertEvent). Add(LockInsertCollection, LockInsertLightTransactionResult). Add(LockInsertEvent, LockInsertLightTransactionResult). 
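These policy edges are what allow a single lockctx context to hold the collection, event, and light-transaction-result locks together, matching the acquisition order used by the block persister and the in-memory indexer above (and, analogously, the receipt/event/chunk-data-pack edges match the execution state's WithLocks call). A minimal sketch, assuming a lockctx.Manager named lockManager and that the policy only permits acquisitions along declared edges:

    lctx := lockManager.NewContext()
    defer lctx.Release()

    // acquire in the declared order: collection -> event -> light transaction result
    for _, lockID := range []string{
        storage.LockInsertCollection,
        storage.LockInsertEvent,
        storage.LockInsertLightTransactionResult,
    } {
        if err := lctx.AcquireLock(lockID); err != nil {
            return fmt.Errorf("could not acquire lock %s: %w", lockID, err)
        }
    }
    // Reversing the order, e.g. acquiring LockInsertCollection while already holding
    // LockInsertEvent, would presumably be rejected, since no such edge is declared.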
From 7ceae7dd32a6bb90925d4539c3d919ce4ba37451 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 07:25:45 -0700 Subject: [PATCH 30/87] fix access ingestion engine tests --- engine/access/ingestion/engine_test.go | 5 +++-- engine/access/ingestion2/engine_test.go | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index 730859fcde8..e3f6e18f944 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -131,6 +131,7 @@ func (s *Suite) SetupTest() { s.receipts = new(storagemock.ExecutionReceipts) s.transactions = new(storagemock.Transactions) s.results = new(storagemock.ExecutionResults) + s.results.On("BatchIndex", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() collectionsToMarkFinalized := stdmap.NewTimes(100) collectionsToMarkExecuted := stdmap.NewTimes(100) blocksToMarkExecuted := stdmap.NewTimes(100) @@ -323,7 +324,7 @@ func (s *Suite) TestOnFinalizedBlockSingle() { // assert that the block was retrieved and all collections were requested s.headers.AssertExpectations(s.T()) s.request.AssertNumberOfCalls(s.T(), "EntityByID", len(block.Payload.Guarantees)) - s.results.AssertNumberOfCalls(s.T(), "Index", len(block.Payload.Seals)) + s.results.AssertNumberOfCalls(s.T(), "BatchIndex", len(block.Payload.Seals)) } // TestOnFinalizedBlockSeveralBlocksAhead checks OnFinalizedBlock with a block several blocks newer than the last block processed @@ -402,7 +403,7 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { s.headers.AssertExpectations(s.T()) s.blocks.AssertNumberOfCalls(s.T(), "BatchIndexBlockContainingCollectionGuarantees", newBlocksCount) s.request.AssertNumberOfCalls(s.T(), "EntityByID", expectedEntityByIDCalls) - s.results.AssertNumberOfCalls(s.T(), "Index", expectedIndexCalls) + s.results.AssertNumberOfCalls(s.T(), "BatchIndex", expectedIndexCalls) } // TestOnCollection checks that when a Collection is received, it is persisted diff --git a/engine/access/ingestion2/engine_test.go b/engine/access/ingestion2/engine_test.go index a56c07a0259..20ac8196343 100644 --- a/engine/access/ingestion2/engine_test.go +++ b/engine/access/ingestion2/engine_test.go @@ -131,6 +131,7 @@ func (s *Suite) SetupTest() { s.receipts = new(storagemock.ExecutionReceipts) s.transactions = new(storagemock.Transactions) s.results = new(storagemock.ExecutionResults) + s.results.On("BatchIndex", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() collectionsToMarkFinalized := stdmap.NewTimes(100) collectionsToMarkExecuted := stdmap.NewTimes(100) blocksToMarkExecuted := stdmap.NewTimes(100) @@ -340,7 +341,7 @@ func (s *Suite) TestOnFinalizedBlockSingle() { // assert that the block was retrieved and all collections were requested s.headers.AssertExpectations(s.T()) s.request.AssertNumberOfCalls(s.T(), "EntityByID", len(block.Payload.Guarantees)) - s.results.AssertNumberOfCalls(s.T(), "Index", len(block.Payload.Seals)) + s.results.AssertNumberOfCalls(s.T(), "BatchIndex", len(block.Payload.Seals)) } // TestOnFinalizedBlockSeveralBlocksAhead checks OnFinalizedBlock with a block several blocks newer than the last block processed @@ -416,7 +417,7 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { s.headers.AssertExpectations(s.T()) s.blocks.AssertNumberOfCalls(s.T(), "BatchIndexBlockContainingCollectionGuarantees", newBlocksCount) s.request.AssertNumberOfCalls(s.T(), 
"EntityByID", expectedEntityByIDCalls) - s.results.AssertNumberOfCalls(s.T(), "Index", expectedIndexCalls) + s.results.AssertNumberOfCalls(s.T(), "BatchIndex", expectedIndexCalls) } // TestOnCollection checks that when a Collection is received, it is persisted From 467cc95aea27bd806295c71879e6296bca6edb09 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 08:03:07 -0700 Subject: [PATCH 31/87] fix lint and test cases --- engine/execution/state/state_storehouse_test.go | 10 +++++----- .../optimistic_sync/pipeline_functional_test.go | 8 +++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/engine/execution/state/state_storehouse_test.go b/engine/execution/state/state_storehouse_test.go index 59b618e4031..3078cd3a329 100644 --- a/engine/execution/state/state_storehouse_test.go +++ b/engine/execution/state/state_storehouse_test.go @@ -55,17 +55,17 @@ func prepareStorehouseTest(f func(t *testing.T, es state.ExecutionState, l *ledg headers := storagemock.NewHeaders(t) blocks := storagemock.NewBlocks(t) events := storagemock.NewEvents(t) - events.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) + events.On("BatchStore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) serviceEvents := storagemock.NewServiceEvents(t) - serviceEvents.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) + serviceEvents.On("BatchStore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) txResults := storagemock.NewTransactionResults(t) - txResults.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) + txResults.On("BatchStore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) chunkDataPacks := storagemock.NewChunkDataPacks(t) chunkDataPacks.On("StoreByChunkID", mock.Anything, mock.Anything).Return(nil) results := storagemock.NewExecutionResults(t) - results.On("BatchIndex", mock.Anything, mock.Anything, mock.Anything).Return(nil) + results.On("BatchIndex", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) myReceipts := storagemock.NewMyExecutionReceipts(t) - myReceipts.On("BatchStoreMyReceipt", mock.Anything, mock.Anything, mock.Anything).Return(nil) + myReceipts.On("BatchStoreMyReceipt", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) withRegisterStore(t, func(t *testing.T, rs *storehouse.RegisterStore, diff --git a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go index 3b9777ca9b5..c20e99406af 100644 --- a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go +++ b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go @@ -187,11 +187,9 @@ func (p *PipelineFunctionalSuite) TestPipelineCompletesSuccessfully() { p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { // Check for errors in a separate goroutine go func() { - select { - case err := <-errChan: - if err != nil { - p.T().Errorf("Pipeline error: %v", err) - } + err := <-errChan + if err != nil { + p.T().Errorf("Pipeline error: %v", err) } }() From 1b1d0d99314894306e68429daa7653c3eeb7bf65 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 08:03:54 -0700 Subject: [PATCH 32/87] fix mocks --- .../optimistic_sync/persisters/block_test.go | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git 
a/module/executiondatasync/optimistic_sync/persisters/block_test.go b/module/executiondatasync/optimistic_sync/persisters/block_test.go index b4fc80cd1dc..d01bb04a0ce 100644 --- a/module/executiondatasync/optimistic_sync/persisters/block_test.go +++ b/module/executiondatasync/optimistic_sync/persisters/block_test.go @@ -164,14 +164,14 @@ func (p *PersisterSuite) TestPersister_PersistWithData() { storedResults := make([]flow.LightTransactionResult, 0) storedTxResultErrMsgs := make([]flow.TransactionResultErrorMessage, 0) - p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - se, ok := args.Get(1).([]flow.EventsList) + p.events.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + se, ok := args.Get(2).([]flow.EventsList) p.Require().True(ok) storedEvents = se }).Return(nil) - p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - sr, ok := args.Get(1).([]flow.LightTransactionResult) + p.results.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + sr, ok := args.Get(2).([]flow.LightTransactionResult) p.Require().True(ok) storedResults = sr }).Return(nil) @@ -223,23 +223,23 @@ func (p *PersisterSuite) TestPersister_PersistErrorHandling() { { name: "EventsBatchStoreError", setupMocks: func() { - p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(assert.AnError).Once() + p.events.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(assert.AnError).Once() }, expectedError: "could not add events to batch", }, { name: "ResultsBatchStoreError", setupMocks: func() { - p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() - p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(assert.AnError).Once() + p.events.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.results.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(assert.AnError).Once() }, expectedError: "could not add transaction results to batch", }, { name: "CollectionsStoreError", setupMocks: func() { - p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() - p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.events.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.results.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(&flow.LightCollection{}, assert.AnError).Once() }, expectedError: "could not add light collections to batch", @@ -247,8 +247,8 @@ func (p *PersisterSuite) TestPersister_PersistErrorHandling() { { name: "TransactionsStoreError", setupMocks: func() { - p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() - p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.events.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + 
p.results.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() numberOfCollections := len(p.inMemoryCollections.Data()) p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(&flow.LightCollection{}, nil).Times(numberOfCollections) p.transactions.On("BatchStore", mock.Anything, mock.Anything).Return(assert.AnError).Once() @@ -258,8 +258,8 @@ func (p *PersisterSuite) TestPersister_PersistErrorHandling() { { name: "TxResultErrMsgStoreError", setupMocks: func() { - p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() - p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.events.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.results.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() numberOfCollections := len(p.inMemoryCollections.Data()) p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(&flow.LightCollection{}, nil).Times(numberOfCollections) numberOfTransactions := len(p.inMemoryTransactions.Data()) @@ -271,8 +271,8 @@ func (p *PersisterSuite) TestPersister_PersistErrorHandling() { { name: "LatestPersistedSealedResultStoreError", setupMocks: func() { - p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() - p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.events.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.results.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() numberOfCollections := len(p.inMemoryCollections.Data()) p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(&flow.LightCollection{}, nil).Times(numberOfCollections) numberOfTransactions := len(p.inMemoryTransactions.Data()) From 0cb7f08fbaac42194af3db3176669fc59a6ca2cf Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 08:45:45 -0700 Subject: [PATCH 33/87] fix indexer tests --- .../indexer/indexer_core_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/module/state_synchronization/indexer/indexer_core_test.go b/module/state_synchronization/indexer/indexer_core_test.go index a1ccd0c3639..6b9685024c1 100644 --- a/module/state_synchronization/indexer/indexer_core_test.go +++ b/module/state_synchronization/indexer/indexer_core_test.go @@ -6,6 +6,7 @@ import ( "os" "testing" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -131,8 +132,8 @@ func (i *indexCoreTest) setStoreRegisters(f func(t *testing.T, entries flow.Regi func (i *indexCoreTest) setStoreEvents(f func(*testing.T, flow.Identifier, []flow.EventsList) error) *indexCoreTest { i.events. - On("BatchStore", mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.EventsList"), mock.Anything). - Return(func(blockID flow.Identifier, events []flow.EventsList, batch storage.ReaderBatchWriter) error { + On("BatchStore", mock.AnythingOfType("*lockctx.context"), mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.EventsList"), mock.Anything). 
+ Return(func(lctx lockctx.Proof, blockID flow.Identifier, events []flow.EventsList, batch storage.ReaderBatchWriter) error { require.NotNil(i.t, batch) return f(i.t, blockID, events) }) @@ -141,8 +142,8 @@ func (i *indexCoreTest) setStoreEvents(f func(*testing.T, flow.Identifier, []flo func (i *indexCoreTest) setStoreTransactionResults(f func(*testing.T, flow.Identifier, []flow.LightTransactionResult) error) *indexCoreTest { i.results. - On("BatchStore", mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.LightTransactionResult"), mock.Anything). - Return(func(blockID flow.Identifier, results []flow.LightTransactionResult, batch storage.ReaderBatchWriter) error { + On("BatchStore", mock.AnythingOfType("*lockctx.context"), mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.LightTransactionResult"), mock.Anything). + Return(func(lctx lockctx.Proof, blockID flow.Identifier, results []flow.LightTransactionResult, batch storage.ReaderBatchWriter) error { require.NotNil(i.t, batch) return f(i.t, blockID, results) }) @@ -168,14 +169,14 @@ func (i *indexCoreTest) useDefaultStorageMocks() *indexCoreTest { func (i *indexCoreTest) useDefaultEvents() *indexCoreTest { i.events. - On("BatchStore", mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.EventsList"), mock.Anything). + On("BatchStore", mock.AnythingOfType("*lockctx.context"), mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.EventsList"), mock.Anything). Return(nil) return i } func (i *indexCoreTest) useDefaultTransactionResults() *indexCoreTest { i.results. - On("BatchStore", mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.LightTransactionResult"), mock.Anything). + On("BatchStore", mock.AnythingOfType("*lockctx.context"), mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.LightTransactionResult"), mock.Anything). 
Return(nil) return i } From 2b061bfa2f9f330e2c68cf65c9dd7434afd9443f Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 14:06:59 -0700 Subject: [PATCH 34/87] move context.Release --- engine/access/ingestion2/collection_syncer.go | 2 +- module/executiondatasync/optimistic_sync/persisters/block.go | 2 +- state/cluster/badger/mutator.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/engine/access/ingestion2/collection_syncer.go b/engine/access/ingestion2/collection_syncer.go index 397c135b56a..ad055b60696 100644 --- a/engine/access/ingestion2/collection_syncer.go +++ b/engine/access/ingestion2/collection_syncer.go @@ -189,12 +189,12 @@ func (s *CollectionSyncer) StartWorkerLoop(ctx irrecoverable.SignalerContext, re // Create a lock context for indexing lctx := s.lockManager.NewContext() + defer lctx.Release() err := lctx.AcquireLock(storage.LockInsertCollection) if err != nil { ctx.Throw(fmt.Errorf("could not acquire lock for collection indexing: %w", err)) return } - defer lctx.Release() err = indexer.IndexCollection(lctx, collection, s.collections, s.logger, s.collectionExecutedMetric) if err != nil { diff --git a/module/executiondatasync/optimistic_sync/persisters/block.go b/module/executiondatasync/optimistic_sync/persisters/block.go index 0d6862d0ef0..653670f37c1 100644 --- a/module/executiondatasync/optimistic_sync/persisters/block.go +++ b/module/executiondatasync/optimistic_sync/persisters/block.go @@ -63,6 +63,7 @@ func (p *BlockPersister) Persist() error { start := time.Now() lctx := p.lockManager.NewContext() + defer lctx.Release() err := lctx.AcquireLock(storage.LockInsertCollection) if err != nil { return fmt.Errorf("could not acquire lock for inserting light collections: %w", err) @@ -77,7 +78,6 @@ func (p *BlockPersister) Persist() error { if err != nil { return fmt.Errorf("could not acquire lock for inserting light transaction results: %w", err) } - defer lctx.Release() err = p.protocolDB.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { for _, persister := range p.persisterStores { diff --git a/state/cluster/badger/mutator.go b/state/cluster/badger/mutator.go index 2402cca8976..2580c15f820 100644 --- a/state/cluster/badger/mutator.go +++ b/state/cluster/badger/mutator.go @@ -130,11 +130,11 @@ func (m *MutableState) Extend(proposal *cluster.Proposal) error { } lctx := m.lockManager.NewContext() + defer lctx.Release() err = lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) if err != nil { return fmt.Errorf("could not acquire lock for inserting cluster block: %w", err) } - defer lctx.Release() span, _ = m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendCheckTransactionsValid) err = m.checkPayloadTransactions(lctx, extendCtx) From 3120db313e9bf82f8b1db24aea30a18c3fcaa7b5 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 14:18:31 -0700 Subject: [PATCH 35/87] rename IndexOwnExecutionResult to IndexOwnOrSealedExecutionResult --- engine/execution/state/bootstrap/bootstrap.go | 2 +- state/protocol/badger/state.go | 4 ++-- storage/operation/results.go | 4 ++-- storage/store/results.go | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/engine/execution/state/bootstrap/bootstrap.go b/engine/execution/state/bootstrap/bootstrap.go index 370429ac86a..5e3dae78bbf 100644 --- a/engine/execution/state/bootstrap/bootstrap.go +++ b/engine/execution/state/bootstrap/bootstrap.go @@ -113,7 +113,7 @@ func (b *Bootstrapper) BootstrapExecutionDatabase( return 
fmt.Errorf("could not index initial genesis execution block: %w", err) } - err = operation.IndexOwnExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID) + err = operation.IndexOwnOrSealedExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID) if err != nil { return fmt.Errorf("could not index result for root result: %w", err) } diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index 0b72dca1329..33503e9f20f 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -304,7 +304,7 @@ func bootstrapSealingSegment( if err != nil { return fmt.Errorf("could not insert execution result: %w", err) } - err = operation.IndexOwnExecutionResult(lctx, rw, result.BlockID, result.ID()) + err = operation.IndexOwnOrSealedExecutionResult(lctx, rw, result.BlockID, result.ID()) if err != nil { return fmt.Errorf("could not index execution result: %w", err) } @@ -463,7 +463,7 @@ func bootstrapSealingSegment( // If the sealed root block is different from the finalized root block, then it means the node dynamically // bootstrapped. In that case, we index the result of the latest sealed result, so that the EN is able // to confirm that it is loading the correct state to execute the next block. - err = operation.IndexOwnExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID) + err = operation.IndexOwnOrSealedExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID) if err != nil { return fmt.Errorf("could not index root result: %w", err) } diff --git a/storage/operation/results.go b/storage/operation/results.go index a57c69f699a..7e5d2b4d3da 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -27,12 +27,12 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result return RetrieveByKey(r, MakePrefix(codeExecutionResult, resultID), result) } -// IndexOwnExecutionResult indexes the result of the given block. +// IndexOwnOrSealedExecutionResult indexes the result of the given block. // It is used by EN to index the result of a block to continue executing subsequent blocks. // The caller must acquire either [storage.LockInsertOwnReceipt] or [storage.LockBootstrapping] or [storage.LockIndexFinalizedBlock] // // No errors are expected during normal operation. 
-func IndexOwnExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { +func IndexOwnOrSealedExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { held := lctx.HoldsLock(storage.LockInsertOwnReceipt) || // during bootstrapping, we index the sealed root block or the spork root block, which is not // produced by the node itself, but we still need to index its execution result to be able to diff --git a/storage/store/results.go b/storage/store/results.go index d5d0c5eda8c..a72f3ad91ee 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -50,7 +50,7 @@ func NewExecutionResults(collector module.CacheMetrics, db storage.DB) *Executio // this API is only used to fetch result for last executed block, so in happy case, it only need to be 1, // we use 100 here to be more resilient to forks withLimit[flow.Identifier, flow.Identifier](100), - withStoreWithLock(operation.IndexOwnExecutionResult), + withStoreWithLock(operation.IndexOwnOrSealedExecutionResult), withRetrieve(retrieveByBlockID), ), } From b620feca6d6b6a258f83156fa49edec62ca4ae12 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 14:44:44 -0700 Subject: [PATCH 36/87] add comments --- state/protocol/badger/state.go | 7 +++---- storage/locks.go | 9 +++++++-- storage/operation/results.go | 5 ++++- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index 33503e9f20f..824761588bb 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -112,19 +112,18 @@ func Bootstrap( // trusted root snapshot are presumed to be finalized) lctx := lockManager.NewContext() defer lctx.Release() - err := lctx.AcquireLock(storage.LockInsertBlock) + err := lctx.AcquireLock(storage.LockBootstrapping) if err != nil { return nil, err } - err = lctx.AcquireLock(storage.LockFinalizeBlock) + err = lctx.AcquireLock(storage.LockInsertBlock) if err != nil { return nil, err } - err = lctx.AcquireLock(storage.LockBootstrapping) + err = lctx.AcquireLock(storage.LockFinalizeBlock) if err != nil { return nil, err } - config := defaultBootstrapConfig() for _, opt := range options { opt(config) diff --git a/storage/locks.go b/storage/locks.go index 88ba0928e42..b2b93dc8b7c 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -73,11 +73,16 @@ type LockManager = lockctx.Manager // This function will panic if a policy is created which does not prevent deadlocks. func makeLockPolicy() lockctx.Policy { return lockctx.NewDAGPolicyBuilder(). + // for protocol to Bootstrap, during bootstrapping, + // we need to insert and finalize + Add(LockBootstrapping, LockFinalizeBlock). Add(LockInsertBlock, LockFinalizeBlock). - Add(LockFinalizeBlock, LockBootstrapping). + + // EN to save execution result Add(LockInsertOwnReceipt, LockInsertEvent). Add(LockInsertOwnReceipt, LockInsertChunkDataPack). - Add(LockInsertEvent, LockInsertChunkDataPack). + + // AN state sync to IndexBlockData Add(LockInsertCollection, LockInsertEvent). Add(LockInsertCollection, LockInsertLightTransactionResult). Add(LockInsertEvent, LockInsertLightTransactionResult). 
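Every storage operation touched by this series applies the same guard: it takes a lockctx.Proof and refuses to write unless the caller holds the lock protecting that index. A minimal sketch of the pattern follows; the operation name, the codeExample prefix, and the indexed value are hypothetical, while UpsertByKey, MakePrefix, and the lock constant are the ones used throughout the operation package.

package operation

import (
	"fmt"

	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// codeExample is a hypothetical key prefix standing in for a real code* constant.
const codeExample = byte(0xEE)

// IndexExampleValue writes a blockID -> valueID mapping, but only if the caller
// proves it holds the lock that protects this index from accidental overwrites.
func IndexExampleValue(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, valueID flow.Identifier) error {
	if !lctx.HoldsLock(storage.LockInsertOwnReceipt) {
		return fmt.Errorf("IndexExampleValue requires %s to be held", storage.LockInsertOwnReceipt)
	}
	return UpsertByKey(w, MakePrefix(codeExample, blockID), valueID)
}

Holding the lock across both the existence check and the deferred batch write is what lets these operations reject conflicting values instead of silently overwriting them.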
diff --git a/storage/operation/results.go b/storage/operation/results.go index 7e5d2b4d3da..9145d2a85cf 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -28,7 +28,10 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result } // IndexOwnOrSealedExecutionResult indexes the result of the given block. -// It is used by EN to index the result of a block to continue executing subsequent blocks. +// It is used by the following scenarios: +// 1. Execution Node indexes its own executed block's result when finish executing a block +// 2. Execution Node indexes the sealed root block's result during bootstrapping +// 3. Access Node indexes the sealed result during syncing from EN. // The caller must acquire either [storage.LockInsertOwnReceipt] or [storage.LockBootstrapping] or [storage.LockIndexFinalizedBlock] // // No errors are expected during normal operation. From a83ff30fefe91631f5d409192ffc8a6016f27b78 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 18:23:33 -0700 Subject: [PATCH 37/87] fix lock policy --- storage/locks.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/locks.go b/storage/locks.go index b2b93dc8b7c..2e31761180e 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -75,7 +75,7 @@ func makeLockPolicy() lockctx.Policy { return lockctx.NewDAGPolicyBuilder(). // for protocol to Bootstrap, during bootstrapping, // we need to insert and finalize - Add(LockBootstrapping, LockFinalizeBlock). + Add(LockBootstrapping, LockInsertBlock). Add(LockInsertBlock, LockFinalizeBlock). // EN to save execution result From 2cc0f55a56b087bd15cae1daf713ae3af1437576 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 19:49:23 -0700 Subject: [PATCH 38/87] change lock order --- module/builder/consensus/builder_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index 71671b4222b..ef299341706 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -256,7 +256,7 @@ func (bs *BuilderSuite) SetupTest() { // insert finalized height and root height db := bs.db - err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockFinalizeBlock, storage.LockBootstrapping}, func(lctx lockctx.Context) error { + err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockBootstrapping, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { enc, err := datastore.NewVersionedInstanceParams( datastore.DefaultInstanceParamsVersion, From c24a29c5fe3c4d16d61a18120e1dde65b3884146 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 6 Oct 2025 19:51:40 -0700 Subject: [PATCH 39/87] fix builder test --- module/builder/consensus/builder_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index ef299341706..8ce5546f857 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -256,7 +256,7 @@ func (bs *BuilderSuite) SetupTest() { // insert finalized height and root height db := bs.db - err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockBootstrapping, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error { + err := unittest.WithLocks(bs.T(), lockManager, 
[]string{storage.LockBootstrapping, storage.LockInsertBlock, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { enc, err := datastore.NewVersionedInstanceParams( datastore.DefaultInstanceParamsVersion, From 27bdbdc775e64990a7f03650b71ebd26bc19cdc0 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 11:59:52 -0700 Subject: [PATCH 40/87] refactor locks --- engine/execution/state/state.go | 16 +++++++++- storage/locks.go | 13 ++++++-- storage/operation/commits.go | 6 ++-- storage/operation/commits_test.go | 2 +- storage/operation/events.go | 4 +-- storage/operation/receipts.go | 38 +++++++++++++++++------- storage/operation/results.go | 2 ++ storage/operation/transaction_results.go | 8 ++--- storage/store/my_receipts.go | 22 ++------------ 9 files changed, 67 insertions(+), 44 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index bdd325894f0..ba30a5a7b85 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -410,8 +410,16 @@ func (s *state) saveExecutionResults( return fmt.Errorf("can not retrieve chunk data packs: %w", err) } + locks := []string{ + storage.LockInsertChunkDataPack, + storage.LockInsertEvent, + storage.LockInsertAndIndexTxResult, + storage.LockInsertOwnReceipt, + storage.LockInsertAndIndexTxResult, + storage.LockIndexStateCommitment, + } // Acquire locks to ensure it's concurrent safe when inserting the execution results and chunk data packs. - return storage.WithLocks(s.lockManager, []string{storage.LockInsertOwnReceipt, storage.LockInsertEvent, storage.LockInsertChunkDataPack}, func(lctx lockctx.Context) error { + return storage.WithLocks(s.lockManager, locks, func(lctx lockctx.Context) error { err := s.chunkDataPacks.StoreByChunkID(lctx, chunks) if err != nil { return fmt.Errorf("can not store multiple chunk data pack: %w", err) @@ -436,16 +444,19 @@ func (s *state) saveExecutionResults( } }) + // require LockInsertEvent err = s.events.BatchStore(lctx, blockID, []flow.EventsList{result.AllEvents()}, batch) if err != nil { return fmt.Errorf("cannot store events: %w", err) } + // require LockInsertEvent err = s.serviceEvents.BatchStore(lctx, blockID, result.AllServiceEvents(), batch) if err != nil { return fmt.Errorf("cannot store service events: %w", err) } + // require LockInsertAndIndexTxResult err = s.transactionResults.BatchStore( lctx, blockID, @@ -456,12 +467,14 @@ func (s *state) saveExecutionResults( } executionResult := &result.ExecutionReceipt.ExecutionResult + // require [storage.LockInsertOwnReceipt] lock // saving my receipts will also save the execution result err = s.myReceipts.BatchStoreMyReceipt(lctx, result.ExecutionReceipt, batch) if err != nil { return fmt.Errorf("could not persist execution result: %w", err) } + // require [storage.LockInsertOwnReceipt] lock err = s.results.BatchIndex(lctx, batch, blockID, executionResult.ID()) if err != nil { return fmt.Errorf("cannot index execution result: %w", err) @@ -470,6 +483,7 @@ func (s *state) saveExecutionResults( // the state commitment is the last data item to be stored, so that // IsBlockExecuted can be implemented by checking whether state commitment exists // in the database + // require [storage.LockIndexStateCommitment] lock err = s.commits.BatchStore(lctx, blockID, result.CurrentEndState(), batch) if err != nil { return fmt.Errorf("cannot store state commitment: %w", err) diff --git a/storage/locks.go 
b/storage/locks.go index 2e31761180e..304d786bf6e 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -27,7 +27,9 @@ const ( LockInsertLightTransactionResult = "lock_insert_light_transaction_result" // LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block. // Specifically, with this lock we prevent accidental overwrites of the index `executed block ID` ➜ `Receipt ID`. - LockInsertOwnReceipt = "lock_insert_own_receipt" + LockInsertOwnReceipt = "lock_insert_own_receipt" + LockIndexStateCommitment = "lock_index_state_commitment" + LockInsertAndIndexTxResult = "lock_insert_and_index_tx_result" // LockInsertCollection protects the insertion of collections. LockInsertCollection = "lock_insert_collection" // LockBootstrapping protects data that is *exclusively* written during bootstrapping. @@ -47,6 +49,8 @@ func Locks() []string { LockInsertOrFinalizeClusterBlock, LockInsertEvent, LockInsertOwnReceipt, + LockIndexStateCommitment, + LockInsertAndIndexTxResult, LockInsertCollection, LockInsertLightTransactionResult, LockBootstrapping, @@ -79,8 +83,11 @@ func makeLockPolicy() lockctx.Policy { Add(LockInsertBlock, LockFinalizeBlock). // EN to save execution result - Add(LockInsertOwnReceipt, LockInsertEvent). - Add(LockInsertOwnReceipt, LockInsertChunkDataPack). + Add(LockInsertChunkDataPack, LockInsertEvent). + Add(LockInsertEvent, LockInsertAndIndexTxResult). + Add(LockInsertAndIndexTxResult, LockInsertOwnReceipt). + Add(LockInsertOwnReceipt, LockInsertAndIndexTxResult). + Add(LockInsertAndIndexTxResult, LockIndexStateCommitment). // AN state sync to IndexBlockData Add(LockInsertCollection, LockInsertEvent). diff --git a/storage/operation/commits.go b/storage/operation/commits.go index 448a8543b6f..06b8cb92855 100644 --- a/storage/operation/commits.go +++ b/storage/operation/commits.go @@ -17,13 +17,13 @@ import ( // // CAUTION: // - Confirming that no value is already stored and the subsequent write must be atomic to prevent data corruption. -// The caller must acquire the [storage.LockInsertOwnReceipt] and hold it until the database write has been committed. +// The caller must acquire the [storage.LockIndexStateCommitment] and hold it until the database write has been committed. 
// // Expected error returns during normal operations: // - [storage.ErrDataMismatch] if a *different* state commitment is already indexed for the same block ID func IndexStateCommitment(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, commit flow.StateCommitment) error { - if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("cannot index state commitment without holding lock %s", storage.LockInsertOwnReceipt) + if !lctx.HoldsLock(storage.LockIndexStateCommitment) { + return fmt.Errorf("cannot index state commitment without holding lock %s", storage.LockIndexStateCommitment) } var existingCommit flow.StateCommitment diff --git a/storage/operation/commits_test.go b/storage/operation/commits_test.go index 90545dde3ca..bb13a110813 100644 --- a/storage/operation/commits_test.go +++ b/storage/operation/commits_test.go @@ -19,7 +19,7 @@ func TestStateCommitments(t *testing.T) { expected := unittest.StateCommitmentFixture() id := unittest.IdentifierFixture() - err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockIndexStateCommitment, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.IndexStateCommitment(lctx, rw, id, expected) }) diff --git a/storage/operation/events.go b/storage/operation/events.go index 1a4007827b7..2a1e0ed325f 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -21,8 +21,8 @@ func InsertEvent(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, } func InsertServiceEvent(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, event flow.Event) error { - if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("InsertServiceEvent requires LockInsertOwnReceipt to be held") + if !lctx.HoldsLock(storage.LockInsertEvent) { + return fmt.Errorf("InsertServiceEvent requires LockInsertEvent to be held") } return UpsertByKey(w, eventPrefix(codeServiceEvent, blockID, event), event) } diff --git a/storage/operation/receipts.go b/storage/operation/receipts.go index 832a86544b4..cdd3b0fdefa 100644 --- a/storage/operation/receipts.go +++ b/storage/operation/receipts.go @@ -1,6 +1,10 @@ package operation import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) @@ -24,17 +28,29 @@ func RetrieveExecutionReceiptStub(r storage.Reader, receiptID flow.Identifier, m // IndexOwnExecutionReceipt indexes the Execution Node's OWN execution receipt by the executed block ID. // -// CAUTION: -// - OVERWRITES existing data (potential for data corruption): -// This method silently overrides existing data without any sanity checks whether data for the same key already exits. -// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different -// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be -// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The caller -// is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere ATOMICALLY with this write operation. -// -// No errors are expected during normal operation. 
-func IndexOwnExecutionReceipt(w storage.Writer, blockID flow.Identifier, receiptID flow.Identifier) error { - return UpsertByKey(w, MakePrefix(codeOwnBlockReceipt, blockID), receiptID) +// Error returns: +// - [storage.ErrDataMismatch] if a *different* receipt has already been indexed for the same block +func IndexOwnExecutionReceipt(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, receiptID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("cannot index own execution receipt without holding lock %s", storage.LockInsertOwnReceipt) + } + + key := MakePrefix(codeOwnBlockReceipt, blockID) + + var existing flow.Identifier + err := RetrieveByKey(rw.GlobalReader(), key, &existing) + if err == nil { + if existing != receiptID { + return fmt.Errorf("own execution receipt for block %v already exists with different value, (existing: %v, new: %v), %w", blockID, existing, receiptID, storage.ErrDataMismatch) + } + return nil // The receipt already exists, no need to index again + } + + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("could not check existing own execution receipt: %w", err) + } + + return UpsertByKey(rw.Writer(), key, receiptID) } // LookupOwnExecutionReceipt retrieves the Execution Node's OWN execution receipt ID for the specified block. diff --git a/storage/operation/results.go index 9145d2a85cf..e42e2318f77 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -36,6 +36,8 @@ func IndexOwnOrSealedExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { + // TODO (leo): I think we should just check if holding + // LockIndexExecutionResult instead of these 3 locks held := lctx.HoldsLock(storage.LockInsertOwnReceipt) || // during bootstrapping, we index the sealed root block or the spork root block, which is not // produced by the node itself, but we still need to index its execution result to be able to diff --git a/storage/operation/transaction_results.go index 8a4a077def1..66dc2b0c00f 100644 --- a/storage/operation/transaction_results.go +++ b/storage/operation/transaction_results.go @@ -10,15 +10,15 @@ import ( ) func InsertTransactionResult(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, transactionResult *flow.TransactionResult) error { - if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("InsertTransactionResult requires LockInsertOwnReceipt to be held") + if !lctx.HoldsLock(storage.LockInsertAndIndexTxResult) { + return fmt.Errorf("InsertTransactionResult requires LockInsertAndIndexTxResult to be held") } return UpsertByKey(w, MakePrefix(codeTransactionResult, blockID, transactionResult.TransactionID), transactionResult) } func IndexTransactionResult(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) error { - if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("IndexTransactionResult requires LockInsertOwnReceipt to be held") + if !lctx.HoldsLock(storage.LockInsertAndIndexTxResult) { + return fmt.Errorf("IndexTransactionResult requires LockInsertAndIndexTxResult to be held") } return UpsertByKey(w, MakePrefix(codeTransactionResultIndex, blockID, txIndex),
transactionResult) } diff --git a/storage/store/my_receipts.go b/storage/store/my_receipts.go index 02c79cbbb56..f7a5ce31828 100644 --- a/storage/store/my_receipts.go +++ b/storage/store/my_receipts.go @@ -1,14 +1,12 @@ package store import ( - "errors" "fmt" "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" @@ -64,6 +62,7 @@ func (m *MyExecutionReceipts) myReceipt(blockID flow.Identifier) (*flow.Executio // // If entity fails marshalling, the error is wrapped in a generic error and returned. // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. +// It requires [storage.LockInsertOwnReceipt] to be held. // // Expected error returns during *normal* operations: // - `storage.ErrDataMismatch` if a *different* receipt has already been indexed for the same block @@ -71,29 +70,14 @@ func (m *MyExecutionReceipts) BatchStoreMyReceipt(lctx lockctx.Proof, receipt *f receiptID := receipt.ID() blockID := receipt.ExecutionResult.BlockID - if lctx == nil || !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("cannot store my receipt, missing lock %v", storage.LockInsertOwnReceipt) - } - // add DB operation to batch for storing receipt (execution deferred until batch is committed) err := m.genericReceipts.BatchStore(receipt, rw) if err != nil { return err } - // dd DB operation to batch for indexing receipt as one of my own (execution deferred until batch is committed) - var savedReceiptID flow.Identifier - err = operation.LookupOwnExecutionReceipt(rw.GlobalReader(), blockID, &savedReceiptID) - if err == nil { - if savedReceiptID == receiptID { - return nil // no-op we are storing *same* receipt - } - return fmt.Errorf("indexing my receipt %v failed: different receipt %v for the same block %v is already indexed: %w", receiptID, savedReceiptID, blockID, storage.ErrDataMismatch) - } - if !errors.Is(err, storage.ErrNotFound) { // `storage.ErrNotFound` is expected, as this indicates that no receipt is indexed yet; anything else is an exception - return irrecoverable.NewException(err) - } - err = operation.IndexOwnExecutionReceipt(rw.Writer(), blockID, receiptID) + // require [storage.LockInsertOwnReceipt] to be held + err = operation.IndexOwnExecutionReceipt(lctx, rw, blockID, receiptID) if err != nil { return err } From 6fa63e7f272b90e18a25305dd88f29e6b4ffd851 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 12:36:58 -0700 Subject: [PATCH 41/87] fix tests --- engine/execution/state/state.go | 1 - storage/locks.go | 2 +- storage/operation/receipts_test.go | 8 ++++++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index ba30a5a7b85..c2a1d7c1eec 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -415,7 +415,6 @@ func (s *state) saveExecutionResults( storage.LockInsertEvent, storage.LockInsertAndIndexTxResult, storage.LockInsertOwnReceipt, - storage.LockInsertAndIndexTxResult, storage.LockIndexStateCommitment, } // Acquire locks to ensure it's concurrent safe when inserting the execution results and chunk data packs. 
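The shape of the call that consumes this lock list is sketched below: storage.WithLocks acquires every named lock in policy order and hands the resulting proof to the guarded Batch* methods inside a single batch. This is a trimmed illustration, not the full saveExecutionResults body; only the event stores are shown, and the testing lock manager stands in for the node's shared one.

package example

import (
	"fmt"

	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// saveEventsUnderLocks acquires the same lock set as saveExecutionResults and
// performs two of its guarded writes inside one batch.
func saveEventsUnderLocks(
	db storage.DB,
	events storage.Events,
	serviceEvents storage.ServiceEvents,
	blockID flow.Identifier,
	allEvents flow.EventsList,
	svcEvents []flow.Event,
) error {
	lockManager := storage.NewTestingLockManager() // stand-in for the node's shared lock manager
	locks := []string{
		storage.LockInsertChunkDataPack,
		storage.LockInsertEvent,
		storage.LockInsertAndIndexTxResult,
		storage.LockInsertOwnReceipt,
		storage.LockIndexStateCommitment,
	}
	return storage.WithLocks(lockManager, locks, func(lctx lockctx.Context) error {
		return db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error {
			// each BatchStore checks the lock it needs against the lctx proof
			if err := events.BatchStore(lctx, blockID, []flow.EventsList{allEvents}, batch); err != nil {
				return fmt.Errorf("cannot store events: %w", err)
			}
			if err := serviceEvents.BatchStore(lctx, blockID, svcEvents, batch); err != nil {
				return fmt.Errorf("cannot store service events: %w", err)
			}
			return nil
		})
	})
}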
diff --git a/storage/locks.go b/storage/locks.go index 304d786bf6e..4b174cadbb5 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -86,8 +86,8 @@ func makeLockPolicy() lockctx.Policy { Add(LockInsertChunkDataPack, LockInsertEvent). Add(LockInsertEvent, LockInsertAndIndexTxResult). Add(LockInsertAndIndexTxResult, LockInsertOwnReceipt). - Add(LockInsertOwnReceipt, LockInsertAndIndexTxResult). Add(LockInsertAndIndexTxResult, LockIndexStateCommitment). + Add(LockInsertOwnReceipt, LockIndexStateCommitment). // AN state sync to IndexBlockData Add(LockInsertCollection, LockInsertEvent). diff --git a/storage/operation/receipts_test.go b/storage/operation/receipts_test.go index 0bdf92d663f..6b094961c4f 100644 --- a/storage/operation/receipts_test.go +++ b/storage/operation/receipts_test.go @@ -3,6 +3,7 @@ package operation_test import ( "testing" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -33,12 +34,15 @@ func TestReceipts_InsertRetrieve(t *testing.T) { func TestReceipts_Index(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() receipt := unittest.ExecutionReceiptFixture() expected := receipt.ID() blockID := receipt.ExecutionResult.BlockID - err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return operation.IndexOwnExecutionReceipt(rw.Writer(), blockID, expected) + err := storage.WithLock(lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexOwnExecutionReceipt(lctx, rw, blockID, expected) + }) }) require.Nil(t, err) From 08d0749fc17ebefbc370eb2a04eb01da26688fb0 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 13:13:38 -0700 Subject: [PATCH 42/87] fix lint --- storage/operation/receipts.go | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/operation/receipts.go b/storage/operation/receipts.go index cdd3b0fdefa..a5b31a4f8a0 100644 --- a/storage/operation/receipts.go +++ b/storage/operation/receipts.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) From 096e441381a12ec6e8244c38798af505a5946c9d Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 13:14:55 -0700 Subject: [PATCH 43/87] update lock --- storage/locks.go | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/locks.go b/storage/locks.go index 4b174cadbb5..e6146566028 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -86,7 +86,6 @@ func makeLockPolicy() lockctx.Policy { Add(LockInsertChunkDataPack, LockInsertEvent). Add(LockInsertEvent, LockInsertAndIndexTxResult). Add(LockInsertAndIndexTxResult, LockInsertOwnReceipt). - Add(LockInsertAndIndexTxResult, LockIndexStateCommitment). Add(LockInsertOwnReceipt, LockIndexStateCommitment). 
// AN state sync to IndexBlockData From 4595beadced258aa7dbdfc4c9e6368fa3e002f48 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 13:31:22 -0700 Subject: [PATCH 44/87] update tests --- engine/execution/state/bootstrap/bootstrap.go | 4 ++++ storage/store/commits_test.go | 6 +++--- storage/store/my_receipts_test.go | 2 +- storage/store/transaction_results_test.go | 4 ++-- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/engine/execution/state/bootstrap/bootstrap.go b/engine/execution/state/bootstrap/bootstrap.go index 5e3dae78bbf..f757440a521 100644 --- a/engine/execution/state/bootstrap/bootstrap.go +++ b/engine/execution/state/bootstrap/bootstrap.go @@ -104,6 +104,10 @@ func (b *Bootstrapper) BootstrapExecutionDatabase( if err != nil { return err } + err = lctx.AcquireLock(storage.LockIndexStateCommitment) + if err != nil { + return err + } commit := rootSeal.FinalState return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { diff --git a/storage/store/commits_test.go b/storage/store/commits_test.go index 4fcc00f37e5..259824582b2 100644 --- a/storage/store/commits_test.go +++ b/storage/store/commits_test.go @@ -30,7 +30,7 @@ func TestCommitsStoreAndRetrieve(t *testing.T) { // store a commit in db blockID := unittest.IdentifierFixture() expected := unittest.StateCommitmentFixture() - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockIndexStateCommitment, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store1.BatchStore(lctx, blockID, expected, rw) }) @@ -43,7 +43,7 @@ func TestCommitsStoreAndRetrieve(t *testing.T) { assert.Equal(t, expected, actual) // re-insert the commit - should be idempotent - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockIndexStateCommitment, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store1.BatchStore(lctx, blockID, expected, rw) }) @@ -61,7 +61,7 @@ func TestCommitStoreAndRemove(t *testing.T) { // Create and store a commit blockID := unittest.IdentifierFixture() expected := unittest.StateCommitmentFixture() - err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockIndexStateCommitment, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return store.BatchStore(lctx, blockID, expected, rw) }) diff --git a/storage/store/my_receipts_test.go b/storage/store/my_receipts_test.go index 62d1e6e0286..9760440b6c6 100644 --- a/storage/store/my_receipts_test.go +++ b/storage/store/my_receipts_test.go @@ -154,7 +154,7 @@ func TestMyExecutionReceiptsStorage(t *testing.T) { for err := range errChan { if err != nil { errCount++ - require.Contains(t, err.Error(), "different receipt") + require.Contains(t, err.Error(), "data for key is different") } } require.Equal(t, 1, errCount, "Exactly one of the operations should fail") diff --git a/storage/store/transaction_results_test.go b/storage/store/transaction_results_test.go index 69664ceb46b..b6f621332d4 100644 --- a/storage/store/transaction_results_test.go +++ b/storage/store/transaction_results_test.go @@ -38,7 +38,7 @@ func 
TestBatchStoringTransactionResults(t *testing.T) { } txResults = append(txResults, expected) } - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return st.BatchStore(lctx, blockID, txResults, rw) }) @@ -100,7 +100,7 @@ func TestBatchStoreAndBatchRemoveTransactionResults(t *testing.T) { } // Store transaction results of multiple blocks - err = storage.WithLock(lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = storage.WithLock(lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { for _, blockID := range blockIDs { err := st.BatchStore(lctx, blockID, txResults[blockID], rbw) From 0d166a90d55c5a764d42b6392615b0a16c565f60 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 13:50:22 -0700 Subject: [PATCH 45/87] fix execution state extraction test --- .../execution-state-extract/execution_state_extract_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go index 0131a250bc3..eeb7412ee19 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go @@ -67,7 +67,7 @@ func TestExtractExecutionState(t *testing.T) { blockID := unittest.IdentifierFixture() stateCommitment := unittest.StateCommitmentFixture() - err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockIndexStateCommitment, func(lctx lockctx.Context) error { return storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // Store the state commitment for the block ID return operation.IndexStateCommitment(lctx, rw, blockID, stateCommitment) @@ -139,7 +139,7 @@ func TestExtractExecutionState(t *testing.T) { // generate random block and map it to state commitment blockID := unittest.IdentifierFixture() - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockIndexStateCommitment, func(lctx lockctx.Context) error { return storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.IndexStateCommitment(lctx, rw, blockID, flow.StateCommitment(stateCommitment)) }) From 84384b6ea6e1e346160965fe25a4cc0f767b5993 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 14:31:53 -0700 Subject: [PATCH 46/87] remove LockIndexFinalizedBlock --- engine/access/access_test.go | 3 +- engine/access/ingestion/engine.go | 8 ++++- .../ingestion2/finalized_block_processor.go | 35 +++++++++---------- storage/blocks.go | 2 +- storage/locks.go | 7 ++-- storage/operation/headers.go | 23 ++---------- storage/operation/headers_test.go | 8 +++-- storage/operation/results.go | 7 ++-- storage/store/blocks.go | 2 +- 9 files changed, 43 insertions(+), 52 deletions(-) diff --git a/engine/access/access_test.go b/engine/access/access_test.go index ceaa5c976af..629b6d421ee 100644 --- a/engine/access/access_test.go +++ 
b/engine/access/access_test.go @@ -554,12 +554,13 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { unittest.WithExecutionResultBlockID(blockID), unittest.WithServiceEvents(3)) - require.NoError(suite.T(), storage.WithLock(lockManager, storage.LockIndexFinalizedBlock, func(lctx lockctx.Context) error { + require.NoError(suite.T(), storage.WithLock(lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := all.Results.BatchStore(er, rw) if err != nil { return err } + // requires LockInsertOwnReceipt return all.Results.BatchIndex(lctx, rw, blockID, er.ID()) }) })) diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 50a46f0ddfc..8b23f1e4d43 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -379,8 +379,13 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { // TODO: substitute an indexer module as layer between engine and storage // index the block storage with each of the collection guarantee - err := storage.WithLock(e.lockManager, storage.LockIndexFinalizedBlock, func(lctx lockctx.Context) error { + err := storage.WithLocks(e.lockManager, []string{ + storage.LockIndexCollectionsByBlock, + // TODO (leo): consider change to LockIndexResultByBlock lock + storage.LockInsertOwnReceipt, + }, func(lctx lockctx.Context) error { return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // requires [storage.LockIndexCollectionsByBlock] lock err := e.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) if err != nil { return fmt.Errorf("could not index block for collections: %w", err) @@ -388,6 +393,7 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { // loop through seals and index ID -> result ID for _, seal := range block.Payload.Seals { + // requires [storage.LockInsertOwnReceipt] lock err := e.executionResults.BatchIndex(lctx, rw, seal.BlockID, seal.ResultID) if err != nil { return fmt.Errorf("could not index block for execution result: %w", err) diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go index d0e88089b7c..eb5bc2eaa9f 100644 --- a/engine/access/ingestion2/finalized_block_processor.go +++ b/engine/access/ingestion2/finalized_block_processor.go @@ -153,27 +153,26 @@ func (p *FinalizedBlockProcessor) processFinalizedBlockJobCallback( // // No errors are expected during normal operations. 
func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { - err := storage.WithLock(p.lockManager, storage.LockIndexFinalizedBlock, func(lctx lockctx.Context) error { - return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return p.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) - }) - }) - if err != nil { - return fmt.Errorf("could not index block for collections: %w", err) - } - - err = storage.WithLock(p.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { - return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - // loop through seals and index ID -> result ID - for _, seal := range block.Payload.Seals { - err := p.executionResults.BatchIndex(lctx, rw, seal.BlockID, seal.ResultID) + err := storage.WithLocks(p.lockManager, + []string{storage.LockIndexCollectionsByBlock, storage.LockInsertOwnReceipt}, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // require storage.LockIndexCollectionsByBlock + err := p.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) if err != nil { - return fmt.Errorf("could not index block for execution result: %w", err) + return fmt.Errorf("could not index block for collections: %w", err) + } + + // loop through seals and index ID -> result ID + for _, seal := range block.Payload.Seals { + // require storage.LockInsertOwnReceipt + err := p.executionResults.BatchIndex(lctx, rw, seal.BlockID, seal.ResultID) + if err != nil { + return fmt.Errorf("could not index block for execution result: %w", err) + } } - } - return nil + return nil + }) }) - }) if err != nil { return fmt.Errorf("could not index execution results: %w", err) } diff --git a/storage/blocks.go b/storage/blocks.go index bebf1c8aecb..714a481ddc3 100644 --- a/storage/blocks.go +++ b/storage/blocks.go @@ -88,7 +88,7 @@ type Blocks interface { ByCollectionID(collID flow.Identifier) (*flow.Block, error) // BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees. - // The caller must acquire a storage.LockIndexFinalizedBlock lock. + // The caller must acquire a storage.LockIndexCollectionsByBlock lock. // // CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation // assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY diff --git a/storage/locks.go b/storage/locks.go index e6146566028..704909a298e 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -35,9 +35,8 @@ const ( // LockBootstrapping protects data that is *exclusively* written during bootstrapping. LockBootstrapping = "lock_bootstrapping" // LockInsertChunkDataPack protects the insertion of chunk data packs (not yet used anywhere - LockInsertChunkDataPack = "lock_insert_chunk_data_pack" - // LockIndexFinalizedBlock protects AN indexing finalized blocks by block ID. - LockIndexFinalizedBlock = "lock_index_finalized_block" + LockInsertChunkDataPack = "lock_insert_chunk_data_pack" + LockIndexCollectionsByBlock = "lock_index_collections_by_block" ) // Locks returns a list of all named locks used by the storage layer. 
@@ -55,7 +54,7 @@ func Locks() []string { LockInsertLightTransactionResult, LockBootstrapping, LockInsertChunkDataPack, - LockIndexFinalizedBlock, + LockIndexCollectionsByBlock, } } diff --git a/storage/operation/headers.go b/storage/operation/headers.go index 1335ba9a203..d2b8fffe7f0 100644 --- a/storage/operation/headers.go +++ b/storage/operation/headers.go @@ -124,25 +124,8 @@ func BlockExists(r storage.Reader, blockID flow.Identifier) (bool, error) { return KeyExists(r, MakePrefix(codeHeader, blockID)) } -// IndexBlockContainingCollectionGuarantee produces a mapping from the ID of a [flow.CollectionGuarantee] to the block ID containing this guarantee. -// -// CAUTION: -// - The caller must acquire the lock ??? and hold it until the database write has been committed. -// TODO: USE LOCK, we want to protect this mapping from accidental overwrites (because the key is not derived from the value via a collision-resistant hash) -// - A collection can be included in multiple *unfinalized* blocks. However, the implementation -// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY -// *and* only in the ABSENCE of BYZANTINE collector CLUSTERS (which the mature protocol must tolerate). -// Hence, this function should be treated as a temporary solution, which requires generalization -// (one-to-many mapping) for soft finality and the mature protocol. -// -// Expected errors during normal operations: -// TODO: return [storage.ErrAlreadyExists] or [storage.ErrDataMismatch] -func IndexBlockContainingCollectionGuarantee(w storage.Writer, collID flow.Identifier, blockID flow.Identifier) error { - return UpsertByKey(w, MakePrefix(codeCollectionBlock, collID), blockID) -} - // BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees. -// The caller must acquire a storage.LockIndexFinalizedBlock lock. +// The caller must acquire a storage.LockIndexCollectionsByBlock lock. // // CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation // assumes a one-to-one map from collection ID to a *single* block ID. 
This holds for FINALIZED BLOCKS ONLY @@ -153,8 +136,8 @@ func IndexBlockContainingCollectionGuarantee(w storage.Writer, collID flow.Ident // Expected errors during normal operations: // - [storage.ErrAlreadyExists] if any collection guarantee is already indexed func BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error { - if !lctx.HoldsLock(storage.LockIndexFinalizedBlock) { - return fmt.Errorf("BatchIndexBlockContainingCollectionGuarantees requires %v", storage.LockIndexFinalizedBlock) + if !lctx.HoldsLock(storage.LockIndexCollectionsByBlock) { + return fmt.Errorf("BatchIndexBlockContainingCollectionGuarantees requires %v", storage.LockIndexCollectionsByBlock) } // Check if any keys already exist diff --git a/storage/operation/headers_test.go b/storage/operation/headers_test.go index 9ec7b8b8644..e09a0d28014 100644 --- a/storage/operation/headers_test.go +++ b/storage/operation/headers_test.go @@ -53,8 +53,12 @@ func TestHeaderIDIndexByCollectionID(t *testing.T) { headerID := unittest.IdentifierFixture() collectionGuaranteeID := unittest.IdentifierFixture() - err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return operation.IndexBlockContainingCollectionGuarantee(rw.Writer(), collectionGuaranteeID, headerID) + lockManager := storage.NewTestingLockManager() + + err := unittest.WithLock(t, lockManager, storage.LockIndexCollectionByBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, headerID, []flow.Identifier{collectionGuaranteeID}) + }) }) require.NoError(t, err) diff --git a/storage/operation/results.go b/storage/operation/results.go index e42e2318f77..8e74f0cfb3f 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -32,18 +32,17 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result // 1. Execution Node indexes its own executed block's result when finish executing a block // 2. Execution Node indexes the sealed root block's result during bootstrapping // 3. Access Node indexes the sealed result during syncing from EN. -// The caller must acquire either [storage.LockInsertOwnReceipt] or [storage.LockBootstrapping] or [storage.LockIndexFinalizedBlock] +// The caller must acquire either storage.LockInsertOwnReceipt] or [storage.LockBootstrapping] // // No errors are expected during normal operation. 
func IndexOwnOrSealedExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { // TOOD (leo): I think we should just check if holding - // LockIndexExecutionResult instead of these 3 locks + // LockIndexExecutionResult instead of these 2 locks held := lctx.HoldsLock(storage.LockInsertOwnReceipt) || // during bootstrapping, we index the sealed root block or the spork root block, which is not // produced by the node itself, but we still need to index its execution result to be able to // execute next block - lctx.HoldsLock(storage.LockBootstrapping) || - lctx.HoldsLock(storage.LockIndexFinalizedBlock) + lctx.HoldsLock(storage.LockBootstrapping) if !held { return fmt.Errorf("missing require locks: %s or %s", storage.LockInsertOwnReceipt, storage.LockBootstrapping) } diff --git a/storage/store/blocks.go b/storage/store/blocks.go index 8098ac62a5a..f501380c199 100644 --- a/storage/store/blocks.go +++ b/storage/store/blocks.go @@ -216,7 +216,7 @@ func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { } // BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees. -// The caller must acquire a storage.LockIndexFinalizedBlock lock. +// The caller must acquire a storage.LockIndexCollectionByBlock lock. // Error returns: // - storage.ErrAlreadyExists if any collection ID has already been indexed func (b *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error { From f802992dd606b9d9d4900a6f4c04108ff4bf4a0d Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 15:15:39 -0700 Subject: [PATCH 47/87] remove deprecated methods --- storage/events.go | 2 ++ storage/light_transaction_results.go | 1 + storage/operation/headers_test.go | 2 +- storage/operation/transaction_results.go | 5 ----- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/storage/events.go b/storage/events.go index 0a794e63c1c..d4718a4e3da 100644 --- a/storage/events.go +++ b/storage/events.go @@ -25,6 +25,7 @@ type Events interface { EventsReader // Store will store events for the given block ID + // it requires the caller to hold [storage.LockInsertEvent] Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error // BatchStore will store events for the given block ID in a given batch @@ -40,6 +41,7 @@ type Events interface { type ServiceEvents interface { // BatchStore stores service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. + // it requires the caller to hold [storage.LockInsertEvent] // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.Event, batch ReaderBatchWriter) error diff --git a/storage/light_transaction_results.go b/storage/light_transaction_results.go index ba9e7b473d2..3ec2be945f5 100644 --- a/storage/light_transaction_results.go +++ b/storage/light_transaction_results.go @@ -32,5 +32,6 @@ type LightTransactionResults interface { LightTransactionResultsReader // BatchStore inserts a batch of transaction result into a batch + // it requires the caller to hold [storage.LockInsertLightTransactionResult] BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw ReaderBatchWriter) error } diff --git a/storage/operation/headers_test.go b/storage/operation/headers_test.go index e09a0d28014..61e9dad90b4 100644 --- a/storage/operation/headers_test.go +++ b/storage/operation/headers_test.go @@ -55,7 +55,7 @@ func TestHeaderIDIndexByCollectionID(t *testing.T) { lockManager := storage.NewTestingLockManager() - err := unittest.WithLock(t, lockManager, storage.LockIndexCollectionByBlock, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockIndexCollectionsByBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, headerID, []flow.Identifier{collectionGuaranteeID}) }) diff --git a/storage/operation/transaction_results.go b/storage/operation/transaction_results.go index 66dc2b0c00f..5f72a3400f8 100644 --- a/storage/operation/transaction_results.go +++ b/storage/operation/transaction_results.go @@ -72,11 +72,6 @@ func BatchRemoveTransactionResultsByBlockID(blockID flow.Identifier, batch stora return nil } -// deprecated -func InsertLightTransactionResult(w storage.Writer, blockID flow.Identifier, transactionResult *flow.LightTransactionResult) error { - return UpsertByKey(w, MakePrefix(codeLightTransactionResult, blockID, transactionResult.TransactionID), transactionResult) -} - func BatchInsertLightTransactionResult(w storage.Writer, blockID flow.Identifier, transactionResult *flow.LightTransactionResult) error { return UpsertByKey(w, MakePrefix(codeLightTransactionResult, blockID, transactionResult.TransactionID), transactionResult) } From 13794877ad97da3617b6aef33035ec089d1c9934 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 16:17:37 -0700 Subject: [PATCH 48/87] add consistency check for InsertAndIndexTransactionResults --- storage/operation/transaction_results.go | 42 ++++++++++++++++++++--- storage/store/transaction_results.go | 21 ++++-------- storage/store/transaction_results_test.go | 6 ++-- storage/transaction_results.go | 4 ++- 4 files changed, 50 insertions(+), 23 deletions(-) diff --git a/storage/operation/transaction_results.go b/storage/operation/transaction_results.go index 5f72a3400f8..debdd432ae2 100644 --- a/storage/operation/transaction_results.go +++ b/storage/operation/transaction_results.go @@ -9,17 +9,49 @@ import ( "github.com/onflow/flow-go/storage" ) -func InsertTransactionResult(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, transactionResult *flow.TransactionResult) error { +// InsertAndIndexTransactionResults inserts and indexes multiple transaction results in a single batch write. +// The caller must hold the [storage.LockInsertAndIndexTxResult] lock. +// It returns [storage.ErrAlreadyExists] if transaction results for the block already exist. 
+func InsertAndIndexTransactionResults(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, transactionResults []flow.TransactionResult) error {
 	if !lctx.HoldsLock(storage.LockInsertAndIndexTxResult) {
 		return fmt.Errorf("InsertTransactionResult requires LockInsertAndIndexTxResult to be held")
 	}
+
+	// Check if transaction results for the block already exist.
+	// We can exit early as soon as we find one existing result, because regardless of
+	// whether transactionResults is empty, has one result, or has many,
+	// we must not overwrite existing results.
+	prefix := MakePrefix(codeTransactionResult, blockID)
+	checkExists := func(key []byte) error {
+		return fmt.Errorf("transaction results for block %v already exist: %w", blockID, storage.ErrAlreadyExists)
+	}
+	err := IterateKeysByPrefixRange(rw.GlobalReader(), prefix, prefix, checkExists)
+	if err != nil {
+		return err
+	}
+
+	// there are no existing transaction results for the block, so we can proceed with the insert
+	w := rw.Writer()
+	for i, result := range transactionResults {
+		err := insertTransactionResult(w, blockID, &result)
+		if err != nil {
+			return fmt.Errorf("cannot batch insert tx result: %w", err)
+		}
+
+		err = indexTransactionResult(w, blockID, uint32(i), &result)
+		if err != nil {
+			return fmt.Errorf("cannot batch index tx result: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func insertTransactionResult(w storage.Writer, blockID flow.Identifier, transactionResult *flow.TransactionResult) error {
 	return UpsertByKey(w, MakePrefix(codeTransactionResult, blockID, transactionResult.TransactionID), transactionResult)
 }
 
-func IndexTransactionResult(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) error {
-	if !lctx.HoldsLock(storage.LockInsertAndIndexTxResult) {
-		return fmt.Errorf("IndexTransactionResult requires LockInsertAndIndexTxResult to be held")
-	}
+func indexTransactionResult(w storage.Writer, blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) error {
 	return UpsertByKey(w, MakePrefix(codeTransactionResultIndex, blockID, txIndex), transactionResult)
 }
 
diff --git a/storage/store/transaction_results.go b/storage/store/transaction_results.go
index 5df96556473..78754793503 100644
--- a/storage/store/transaction_results.go
+++ b/storage/store/transaction_results.go
@@ -130,22 +130,15 @@ func NewTransactionResults(collector module.CacheMetrics, db storage.DB, transac
 }
 
 // BatchStore will store the transaction results for the given block ID in a batch
-func (tr *TransactionResults) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.ReaderBatchWriter) error {
-	w := batch.Writer()
-
-	for i, result := range transactionResults {
-		err := operation.InsertTransactionResult(lctx, w, blockID, &result)
-		if err != nil {
-			return fmt.Errorf("cannot batch insert tx result: %w", err)
-		}
-
-		err = operation.IndexTransactionResult(lctx, w, blockID, uint32(i), &result)
-		if err != nil {
-			return fmt.Errorf("cannot batch index tx result: %w", err)
-		}
+// It returns [ErrAlreadyExists] if transaction results for the block already exist.
+// It requires the caller to hold [storage.LockInsertAndIndexTxResult] +func (tr *TransactionResults) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, transactionResults []flow.TransactionResult) error { + err := operation.InsertAndIndexTransactionResults(lctx, rw, blockID, transactionResults) + if err != nil { + return fmt.Errorf("cannot batch insert and index tx results: %w", err) } - storage.OnCommitSucceed(batch, func() { + storage.OnCommitSucceed(rw, func() { for i, result := range transactionResults { key := KeyFromBlockIDTransactionID(blockID, result.TransactionID) // cache for each transaction, so that it's faster to retrieve diff --git a/storage/store/transaction_results_test.go b/storage/store/transaction_results_test.go index b6f621332d4..1ebce6cb3a7 100644 --- a/storage/store/transaction_results_test.go +++ b/storage/store/transaction_results_test.go @@ -40,7 +40,7 @@ func TestBatchStoringTransactionResults(t *testing.T) { } err = unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return st.BatchStore(lctx, blockID, txResults, rw) + return st.BatchStore(lctx, rw, blockID, txResults) }) }) require.NoError(t, err) @@ -101,9 +101,9 @@ func TestBatchStoreAndBatchRemoveTransactionResults(t *testing.T) { // Store transaction results of multiple blocks err = storage.WithLock(lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { - return db.WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { for _, blockID := range blockIDs { - err := st.BatchStore(lctx, blockID, txResults[blockID], rbw) + err := st.BatchStore(lctx, rw, blockID, txResults[blockID]) if err != nil { return err } diff --git a/storage/transaction_results.go b/storage/transaction_results.go index 4b2631b717e..b3814a0aac5 100644 --- a/storage/transaction_results.go +++ b/storage/transaction_results.go @@ -22,7 +22,9 @@ type TransactionResults interface { TransactionResultsReader // BatchStore inserts a batch of transaction result into a batch - BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.TransactionResult, batch ReaderBatchWriter) error + // It returns [ErrAlreadyExists] if transaction results for the block already exist. 
+ // It requires the caller to hold [storage.LockInsertAndIndexTxResult] + BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, transactionResults []flow.TransactionResult) error // RemoveByBlockID removes all transaction results for a block BatchRemoveByBlockID(id flow.Identifier, batch ReaderBatchWriter) error From 5213db52da9541fc2e22bb2e46e3bb604b4f7ed4 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 16:23:20 -0700 Subject: [PATCH 49/87] handle already exists error --- engine/execution/state/state.go | 6 ++++-- storage/errors.go | 7 +++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index c2a1d7c1eec..42228bce50e 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -456,11 +456,13 @@ func (s *state) saveExecutionResults( } // require LockInsertAndIndexTxResult - err = s.transactionResults.BatchStore( + // skip already exists error to make it idempotent + err = storage.SkipAlreadyExistsError(s.transactionResults.BatchStore( lctx, + batch, blockID, result.AllTransactionResults(), - batch) + )) if err != nil { return fmt.Errorf("cannot store transaction result: %w", err) } diff --git a/storage/errors.go b/storage/errors.go index b3d81d9709c..3399bdf37fc 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -57,3 +57,10 @@ func NewInvalidDKGStateTransitionErrorf(from, to flow.DKGState, msg string, args err: fmt.Errorf(msg, args...), } } + +func SkipAlreadyExistsError(err error) error { + if errors.Is(err, ErrAlreadyExists) { + return nil + } + return err +} From 5fe29ef96207f83ffc3de17675bfa0adbf975046 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 16:44:11 -0700 Subject: [PATCH 50/87] skip already exist error --- engine/execution/state/state.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 42228bce50e..2cde0a71a78 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -426,7 +426,7 @@ func (s *state) saveExecutionResults( // Save entire execution result (including all chunk data packs) within one batch to minimize // the number of database interactions. 
- return s.db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { + return storage.SkipAlreadyExistsError(s.db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { batch.AddCallback(func(err error) { // Rollback if an error occurs during batch operations // Chunk data packs are saved in a separate database, there is a chance @@ -456,13 +456,12 @@ func (s *state) saveExecutionResults( } // require LockInsertAndIndexTxResult - // skip already exists error to make it idempotent - err = storage.SkipAlreadyExistsError(s.transactionResults.BatchStore( + err = s.transactionResults.BatchStore( lctx, batch, blockID, result.AllTransactionResults(), - )) + ) if err != nil { return fmt.Errorf("cannot store transaction result: %w", err) } @@ -491,7 +490,7 @@ func (s *state) saveExecutionResults( } return nil - }) + })) }) } From e0f954ef65187e9d0772027cde9fa11d7b766072 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 16:47:31 -0700 Subject: [PATCH 51/87] update mocks --- storage/mock/transaction_results.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/storage/mock/transaction_results.go b/storage/mock/transaction_results.go index 5ce338423fd..c35cc476460 100644 --- a/storage/mock/transaction_results.go +++ b/storage/mock/transaction_results.go @@ -34,17 +34,17 @@ func (_m *TransactionResults) BatchRemoveByBlockID(id flow.Identifier, batch sto return r0 } -// BatchStore provides a mock function with given fields: lctx, blockID, transactionResults, batch -func (_m *TransactionResults) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.ReaderBatchWriter) error { - ret := _m.Called(lctx, blockID, transactionResults, batch) +// BatchStore provides a mock function with given fields: lctx, rw, blockID, transactionResults +func (_m *TransactionResults) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, transactionResults []flow.TransactionResult) error { + ret := _m.Called(lctx, rw, blockID, transactionResults) if len(ret) == 0 { panic("no return value specified for BatchStore") } var r0 error - if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, []flow.TransactionResult, storage.ReaderBatchWriter) error); ok { - r0 = rf(lctx, blockID, transactionResults, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, []flow.TransactionResult) error); ok { + r0 = rf(lctx, rw, blockID, transactionResults) } else { r0 = ret.Error(0) } From 2359222964a81eb78a07447bd1405cf93d6de7c1 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 16:50:30 -0700 Subject: [PATCH 52/87] add test case --- storage/store/transaction_results_test.go | 61 +++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/storage/store/transaction_results_test.go b/storage/store/transaction_results_test.go index 1ebce6cb3a7..db76302ed27 100644 --- a/storage/store/transaction_results_test.go +++ b/storage/store/transaction_results_test.go @@ -189,6 +189,67 @@ func TestIndexKeyConversion(t *testing.T) { require.Equal(t, txIndex, tID) } +func TestBatchStoreTransactionResultsErrAlreadyExists(t *testing.T) { + lockManager := storage.NewTestingLockManager() + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + st, err := store.NewTransactionResults(metrics, db, 1000) + require.NoError(t, err) + + blockID := unittest.IdentifierFixture() 
+ txResults := make([]flow.TransactionResult, 0) + for i := 0; i < 3; i++ { + txID := unittest.IdentifierFixture() + expected := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: fmt.Sprintf("a runtime error %d", i), + } + txResults = append(txResults, expected) + } + + // First batch store should succeed + err = unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return st.BatchStore(lctx, rw, blockID, txResults) + }) + }) + require.NoError(t, err) + + // Second batch store with the same blockID should fail with ErrAlreadyExists + duplicateTxResults := make([]flow.TransactionResult, 0) + for i := 0; i < 2; i++ { + txID := unittest.IdentifierFixture() + expected := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: fmt.Sprintf("duplicate error %d", i), + } + duplicateTxResults = append(duplicateTxResults, expected) + } + + err = unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return st.BatchStore(lctx, rw, blockID, duplicateTxResults) + }) + }) + require.Error(t, err) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + + // Verify that the original transaction results are still there and unchanged + for _, txResult := range txResults { + actual, err := st.ByBlockIDTransactionID(blockID, txResult.TransactionID) + require.NoError(t, err) + assert.Equal(t, txResult, *actual) + } + + // Verify that the duplicate transaction results were not stored + for _, txResult := range duplicateTxResults { + _, err := st.ByBlockIDTransactionID(blockID, txResult.TransactionID) + require.Error(t, err) + require.ErrorIs(t, err, storage.ErrNotFound) + } + }) +} + func BenchmarkTransactionResultCacheKey(b *testing.B) { b.Run("new: create cache key", func(b *testing.B) { blockID := unittest.IdentifierFixture() From bb43d95d060317bd4357e286c78d6c12f4d42621 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 7 Oct 2025 16:51:04 -0700 Subject: [PATCH 53/87] add test to check lock is hold --- storage/store/transaction_results_test.go | 60 +++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/storage/store/transaction_results_test.go b/storage/store/transaction_results_test.go index db76302ed27..5d707e964d3 100644 --- a/storage/store/transaction_results_test.go +++ b/storage/store/transaction_results_test.go @@ -250,6 +250,66 @@ func TestBatchStoreTransactionResultsErrAlreadyExists(t *testing.T) { }) } +func TestBatchStoreTransactionResultsMissingLock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + st, err := store.NewTransactionResults(metrics, db, 1000) + require.NoError(t, err) + + blockID := unittest.IdentifierFixture() + txResults := make([]flow.TransactionResult, 0) + for i := 0; i < 3; i++ { + txID := unittest.IdentifierFixture() + expected := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: fmt.Sprintf("a runtime error %d", i), + } + txResults = append(txResults, expected) + } + + // Create a context without any locks + lctx := lockManager.NewContext() + defer lctx.Release() + + // Attempt to batch store without holding the required lock + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return st.BatchStore(lctx, rw, blockID, 
txResults) + }) + require.Error(t, err) + require.Contains(t, err.Error(), "LockInsertAndIndexTxResult") + }) +} + +func TestBatchStoreTransactionResultsWrongLock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + st, err := store.NewTransactionResults(metrics, db, 1000) + require.NoError(t, err) + + blockID := unittest.IdentifierFixture() + txResults := make([]flow.TransactionResult, 0) + for i := 0; i < 3; i++ { + txID := unittest.IdentifierFixture() + expected := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: fmt.Sprintf("a runtime error %d", i), + } + txResults = append(txResults, expected) + } + + // Attempt to batch store with wrong lock (LockInsertBlock instead of LockInsertAndIndexTxResult) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return st.BatchStore(lctx, rw, blockID, txResults) + }) + }) + require.Error(t, err) + require.Contains(t, err.Error(), "LockInsertAndIndexTxResult") + }) +} + func BenchmarkTransactionResultCacheKey(b *testing.B) { b.Run("new: create cache key", func(b *testing.B) { blockID := unittest.IdentifierFixture() From 80bd5a6c78b7c427881e2569840d93f0403f1fe3 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 09:20:50 -0700 Subject: [PATCH 54/87] add comments --- storage/errors.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/storage/errors.go b/storage/errors.go index 3399bdf37fc..a31d1b6f74c 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -58,6 +58,9 @@ func NewInvalidDKGStateTransitionErrorf(from, to flow.DKGState, msg string, args } } +// SkipAlreadyExistsError returns nil if the provided error is ErrAlreadyExists, otherwise returns the original error. +// It usually means the storage operation to insert a record was skipped because the key of the record already exists. +// CAUTION : it does NOT check the equality of the value of the record. 
func SkipAlreadyExistsError(err error) error { if errors.Is(err, ErrAlreadyExists) { return nil From e66a0f4fdd2cdbc873fe16887f8c5630cd852d5d Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 11:20:12 -0700 Subject: [PATCH 55/87] refactor InsertEvent with InsertBlockEvents --- storage/events.go | 1 + storage/operation/events.go | 67 ++++++++++++++++++++++++++++++-- storage/operation/events_test.go | 2 +- storage/operation/stats_test.go | 9 ++--- storage/store/events.go | 30 ++++++-------- 5 files changed, 80 insertions(+), 29 deletions(-) diff --git a/storage/events.go b/storage/events.go index d4718a4e3da..1d74a19bd2a 100644 --- a/storage/events.go +++ b/storage/events.go @@ -26,6 +26,7 @@ type Events interface { // Store will store events for the given block ID // it requires the caller to hold [storage.LockInsertEvent] + // deprecated (leo) Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error // BatchStore will store events for the given block ID in a given batch diff --git a/storage/operation/events.go b/storage/operation/events.go index 2a1e0ed325f..469d35e5643 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -13,17 +13,64 @@ func eventPrefix(prefix byte, blockID flow.Identifier, event flow.Event) []byte return MakePrefix(prefix, blockID, event.TransactionID, event.TransactionIndex, event.EventIndex) } -func InsertEvent(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, event flow.Event) error { +// InsertBlockEvents stores all events for a given block in the database. +// Requires LockInsertEvent to be held for thread safety. +// This function iterates through all events in the provided EventsList and stores each event individually. +// +// Error returns: +// - generic error in case of unexpected failure from the database layer or encoding failure +func InsertBlockEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, events []flow.EventsList) error { if !lctx.HoldsLock(storage.LockInsertEvent) { - return fmt.Errorf("InsertEvent requires LockInsertEvent to be held") + return fmt.Errorf("InsertBlockEvents requires LockInsertEvent to be held") } + + writer := rw.Writer() + + for _, eventsList := range events { + for _, event := range eventsList { + err := insertEvent(writer, blockID, event) + if err != nil { + return fmt.Errorf("cannot batch insert event: %w", err) + } + } + } + + return nil +} + +// insertEvent stores a regular event in the database. +// The event is stored with a key that includes the block ID, transaction ID, transaction index, and event index. +func insertEvent(w storage.Writer, blockID flow.Identifier, event flow.Event) error { return UpsertByKey(w, eventPrefix(codeEvent, blockID, event), event) } -func InsertServiceEvent(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, event flow.Event) error { +// InsertBlockServiceEvents stores all service events for a given block in the database. +// Requires LockInsertEvent to be held for thread safety. +// This function iterates through all service events in the provided list and stores each event individually. +// Service events are special events generated by the system (e.g., account creation, contract deployment). 
+// +// Error returns: +// - generic error in case of unexpected failure from the database layer or encoding failure +func InsertBlockServiceEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, events []flow.Event) error { if !lctx.HoldsLock(storage.LockInsertEvent) { - return fmt.Errorf("InsertServiceEvent requires LockInsertEvent to be held") + return fmt.Errorf("InsertBlockServiceEvents requires LockInsertEvent to be held") } + + writer := rw.Writer() + + for _, event := range events { + err := insertServiceEvent(writer, blockID, event) + if err != nil { + return fmt.Errorf("cannot batch insert service event: %w", err) + } + } + + return nil +} + +// insertServiceEvent stores a service event in the database. +// The event is stored with a key that includes the block ID, transaction ID, transaction index, and event index. +func insertServiceEvent(w storage.Writer, blockID flow.Identifier, event flow.Event) error { return UpsertByKey(w, eventPrefix(codeServiceEvent, blockID, event), event) } @@ -47,10 +94,22 @@ func LookupEventsByBlockIDEventType(r storage.Reader, blockID flow.Identifier, e return TraverseByPrefix(r, MakePrefix(codeEvent, blockID), iterationFunc, storage.DefaultIteratorOptions()) } +// RemoveServiceEventsByBlockID removes all service events associated with the given block ID. +// This operation is typically used during block rollback or cleanup scenarios. +// It removes all service events that were generated for the specified block. +// +// Error returns: +// - generic error in case of unexpected database error func RemoveServiceEventsByBlockID(r storage.Reader, w storage.Writer, blockID flow.Identifier) error { return RemoveByKeyPrefix(r, w, MakePrefix(codeServiceEvent, blockID)) } +// RemoveEventsByBlockID removes all regular events associated with the given block ID. +// This operation is typically used during block rollback or cleanup scenarios. +// It removes all regular events that were generated for the specified block. 
+// +// Error returns: +// - generic error in case of unexpected database error func RemoveEventsByBlockID(r storage.Reader, w storage.Writer, blockID flow.Identifier) error { return RemoveByKeyPrefix(r, w, MakePrefix(codeEvent, blockID)) } diff --git a/storage/operation/events_test.go b/storage/operation/events_test.go index 259cbc307c3..f1db5f6f4ac 100644 --- a/storage/operation/events_test.go +++ b/storage/operation/events_test.go @@ -57,7 +57,7 @@ func TestRetrieveEventByBlockIDTxID(t *testing.T) { // insert event into the db err := unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return operation.InsertEvent(lctx, rw.Writer(), b, event) + return operation.InsertBlockEvents(lctx, rw, b, []flow.EventsList{[]flow.Event{event}}) }) }) require.NoError(t, err) diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go index c5bdee6472f..2306813fdad 100644 --- a/storage/operation/stats_test.go +++ b/storage/operation/stats_test.go @@ -6,6 +6,7 @@ import ( "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/storage/operation/dbtest" @@ -21,11 +22,9 @@ func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { // insert random events b := unittest.IdentifierFixture() events := unittest.EventsFixture(30) - for _, evt := range events { - err := operation.InsertEvent(lctx, rw.Writer(), b, evt) - if err != nil { - return err - } + err := operation.InsertBlockEvents(lctx, rw, b, []flow.EventsList{events}) + if err != nil { + return err } // insert 100 chunk data packs diff --git a/storage/store/events.go b/storage/store/events.go index 08024acc164..b83a857b8cf 100644 --- a/storage/store/events.go +++ b/storage/store/events.go @@ -41,9 +41,12 @@ func NewEvents(collector module.CacheMetrics, db storage.DB) *Events { // BatchStore stores events keyed by a blockID in provided batch // No errors are expected during normal operation, but it may return generic error -// if badger fails to process request func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList, batch storage.ReaderBatchWriter) error { - writer := batch.Writer() + // Use the new InsertBlockEvents operation to store all events + err := operation.InsertBlockEvents(lctx, batch, blockID, blockEvents) + if err != nil { + return fmt.Errorf("cannot batch insert events: %w", err) + } // pre-allocating and indexing slice is faster than appending sliceSize := 0 @@ -52,24 +55,18 @@ func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEv } combinedEvents := make([]flow.Event, sliceSize) - eventIndex := 0 for _, events := range blockEvents { for _, event := range events { - err := operation.InsertEvent(lctx, writer, blockID, event) - if err != nil { - return fmt.Errorf("cannot batch insert event: %w", err) - } combinedEvents[eventIndex] = event eventIndex++ } } - callback := func() { + storage.OnCommitSucceed(batch, func() { e.cache.Insert(blockID, combinedEvents) - } - storage.OnCommitSucceed(batch, callback) + }) return nil } @@ -151,7 +148,6 @@ func (e *Events) RemoveByBlockID(blockID flow.Identifier) error { // BatchRemoveByBlockID removes events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. 
-// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. func (e *Events) BatchRemoveByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error { return e.cache.RemoveTx(rw, blockID) } @@ -183,14 +179,11 @@ func NewServiceEvents(collector module.CacheMetrics, db storage.DB) *ServiceEven // BatchStore stores service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. func (e *ServiceEvents) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.Event, rw storage.ReaderBatchWriter) error { - writer := rw.Writer() - for _, event := range events { - err := operation.InsertServiceEvent(lctx, writer, blockID, event) - if err != nil { - return fmt.Errorf("cannot batch insert service event: %w", err) - } + // Use the new InsertBlockServiceEvents operation to store all service events + err := operation.InsertBlockServiceEvents(lctx, rw, blockID, events) + if err != nil { + return fmt.Errorf("cannot batch insert service events: %w", err) } callback := func() { @@ -218,7 +211,6 @@ func (e *ServiceEvents) RemoveByBlockID(blockID flow.Identifier) error { // BatchRemoveByBlockID removes service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. func (e *ServiceEvents) BatchRemoveByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error { return e.cache.RemoveTx(rw, blockID) } From b87016aefb2161de340e89af7f7c1afde19dfc6c Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 13:15:58 -0700 Subject: [PATCH 56/87] fix core impl test --- module/executiondatasync/optimistic_sync/core_impl_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/module/executiondatasync/optimistic_sync/core_impl_test.go b/module/executiondatasync/optimistic_sync/core_impl_test.go index 19b87866603..14576deaffa 100644 --- a/module/executiondatasync/optimistic_sync/core_impl_test.go +++ b/module/executiondatasync/optimistic_sync/core_impl_test.go @@ -445,7 +445,7 @@ func (c *CoreImplSuite) TestCoreImpl_Persist() { indexerData := core.workingData.indexerData c.persistentRegisters.On("Store", flow.RegisterEntries(indexerData.Registers), tf.block.Height).Return(nil) - c.persistentEvents.On("BatchStore", blockID, []flow.EventsList{indexerData.Events}, mock.Anything).Return(nil) + c.persistentEvents.On("BatchStore", mock.Anything, blockID, []flow.EventsList{indexerData.Events}, mock.Anything).Return(nil) c.persistentCollections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) c.persistentResults.On("BatchStore", blockID, indexerData.Results, mock.Anything).Return(nil) c.persistentTxResultErrMsg.On("BatchStore", blockID, core.workingData.txResultErrMsgsData, mock.Anything).Return(nil) @@ -499,7 +499,7 @@ func (c *CoreImplSuite) TestCoreImpl_Persist() { indexerData := core.workingData.indexerData c.persistentRegisters.On("Store", flow.RegisterEntries(indexerData.Registers), tf.block.Height).Return(nil).Once() - c.persistentEvents.On("BatchStore", blockID, []flow.EventsList{indexerData.Events}, mock.Anything).Return(expectedErr).Once() + c.persistentEvents.On("BatchStore", 
mock.Anything, blockID, []flow.EventsList{indexerData.Events}, mock.Anything).Return(expectedErr).Once() err = core.Persist() c.ErrorIs(err, expectedErr) From f4cb249caa13a975cb76294b5286c5249839e6bc Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 13:19:32 -0700 Subject: [PATCH 57/87] refactor to use LockInsertServiceEvent --- engine/execution/state/state.go | 3 ++- storage/locks.go | 10 +++++++--- storage/operation/events.go | 6 +++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 2cde0a71a78..dff2e0544d2 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -413,6 +413,7 @@ func (s *state) saveExecutionResults( locks := []string{ storage.LockInsertChunkDataPack, storage.LockInsertEvent, + storage.LockInsertServiceEvent, storage.LockInsertAndIndexTxResult, storage.LockInsertOwnReceipt, storage.LockIndexStateCommitment, @@ -449,7 +450,7 @@ func (s *state) saveExecutionResults( return fmt.Errorf("cannot store events: %w", err) } - // require LockInsertEvent + // require LockInsertServiceEvent err = s.serviceEvents.BatchStore(lctx, blockID, result.AllServiceEvents(), batch) if err != nil { return fmt.Errorf("cannot store service events: %w", err) diff --git a/storage/locks.go b/storage/locks.go index 704909a298e..6358b9f67b3 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -23,6 +23,8 @@ const ( // LockInsertEvent protects the insertion of events. // This lock is reused by both EN storing its own receipt and AN indexing execution data LockInsertEvent = "lock_insert_event" + // LockInsertServiceEvent protects the insertion of service events. + LockInsertServiceEvent = "lock_insert_service_event" // LockInsertLightTransactionResult protects the insertion of light transaction results. LockInsertLightTransactionResult = "lock_insert_light_transaction_result" // LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block. @@ -47,6 +49,7 @@ func Locks() []string { LockIndexResultApproval, LockInsertOrFinalizeClusterBlock, LockInsertEvent, + LockInsertServiceEvent, LockInsertOwnReceipt, LockIndexStateCommitment, LockInsertAndIndexTxResult, @@ -83,14 +86,15 @@ func makeLockPolicy() lockctx.Policy { // EN to save execution result Add(LockInsertChunkDataPack, LockInsertEvent). - Add(LockInsertEvent, LockInsertAndIndexTxResult). + Add(LockInsertEvent, LockInsertServiceEvent). + Add(LockInsertServiceEvent, LockInsertAndIndexTxResult). Add(LockInsertAndIndexTxResult, LockInsertOwnReceipt). Add(LockInsertOwnReceipt, LockIndexStateCommitment). // AN state sync to IndexBlockData Add(LockInsertCollection, LockInsertEvent). - Add(LockInsertCollection, LockInsertLightTransactionResult). - Add(LockInsertEvent, LockInsertLightTransactionResult). + Add(LockInsertEvent, LockInsertServiceEvent). + Add(LockInsertServiceEvent, LockInsertLightTransactionResult). Build() } diff --git a/storage/operation/events.go b/storage/operation/events.go index 469d35e5643..08ad9e11ea2 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -45,15 +45,15 @@ func insertEvent(w storage.Writer, blockID flow.Identifier, event flow.Event) er } // InsertBlockServiceEvents stores all service events for a given block in the database. -// Requires LockInsertEvent to be held for thread safety. +// Requires LockInsertServiceEvent to be held for thread safety. 
// This function iterates through all service events in the provided list and stores each event individually. // Service events are special events generated by the system (e.g., account creation, contract deployment). // // Error returns: // - generic error in case of unexpected failure from the database layer or encoding failure func InsertBlockServiceEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, events []flow.Event) error { - if !lctx.HoldsLock(storage.LockInsertEvent) { - return fmt.Errorf("InsertBlockServiceEvents requires LockInsertEvent to be held") + if !lctx.HoldsLock(storage.LockInsertServiceEvent) { + return fmt.Errorf("InsertBlockServiceEvents requires LockInsertServiceEvent to be held") } writer := rw.Writer() From a5e1096a01b391bced1bc32badaf94cac9bf6195 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 13:23:03 -0700 Subject: [PATCH 58/87] remove events.Store method --- storage/events.go | 5 ----- storage/mock/events.go | 18 ------------------ storage/store/events.go | 8 -------- storage/store/events_test.go | 4 +++- 4 files changed, 3 insertions(+), 32 deletions(-) diff --git a/storage/events.go b/storage/events.go index 1d74a19bd2a..a4c5c9e1e15 100644 --- a/storage/events.go +++ b/storage/events.go @@ -24,11 +24,6 @@ type EventsReader interface { type Events interface { EventsReader - // Store will store events for the given block ID - // it requires the caller to hold [storage.LockInsertEvent] - // deprecated (leo) - Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error - // BatchStore will store events for the given block ID in a given batch // it requires the caller to hold [storage.LockInsertEvent] BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.EventsList, batch ReaderBatchWriter) error diff --git a/storage/mock/events.go b/storage/mock/events.go index 8df84d3b4e0..ea90103c5d3 100644 --- a/storage/mock/events.go +++ b/storage/mock/events.go @@ -172,24 +172,6 @@ func (_m *Events) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uin return r0, r1 } -// Store provides a mock function with given fields: lctx, blockID, blockEvents -func (_m *Events) Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error { - ret := _m.Called(lctx, blockID, blockEvents) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, []flow.EventsList) error); ok { - r0 = rf(lctx, blockID, blockEvents) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewEvents creates a new instance of Events. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewEvents(t interface { diff --git a/storage/store/events.go b/storage/store/events.go index b83a857b8cf..83aee5ba0d2 100644 --- a/storage/store/events.go +++ b/storage/store/events.go @@ -70,14 +70,6 @@ func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEv return nil } -// Store will store events for the given block ID -// TODO (leo) deprecate, only used by AN, AN should use BatchStore instead -func (e *Events) Store(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList) error { - return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return e.BatchStore(lctx, blockID, blockEvents, rw) - }) -} - // ByBlockID returns the events for the given block ID // Note: This method will return an empty slice and no error if no entries for the blockID are found func (e *Events) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { diff --git a/storage/store/events_test.go b/storage/store/events_test.go index 6c312d53590..4a70a895000 100644 --- a/storage/store/events_test.go +++ b/storage/store/events_test.go @@ -176,7 +176,9 @@ func TestEventStoreAndRemove(t *testing.T) { } err := unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { - return store.Store(lctx, blockID, expected) + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(lctx, blockID, expected, rw) + }) }) require.NoError(t, err) From 9f8690f218ef4fd0774453ef18fd914697b957ed Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 13:42:05 -0700 Subject: [PATCH 59/87] add LockIndexExecutionResult --- engine/execution/state/bootstrap/bootstrap.go | 2 +- module/builder/consensus/builder_test.go | 2 +- state/protocol/badger/state.go | 6 +++++- storage/locks.go | 5 ++++- storage/operation/results.go | 13 ++++++------- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/engine/execution/state/bootstrap/bootstrap.go b/engine/execution/state/bootstrap/bootstrap.go index f757440a521..b129edefaa2 100644 --- a/engine/execution/state/bootstrap/bootstrap.go +++ b/engine/execution/state/bootstrap/bootstrap.go @@ -100,7 +100,7 @@ func (b *Bootstrapper) BootstrapExecutionDatabase( lctx := manager.NewContext() defer lctx.Release() - err := lctx.AcquireLock(storage.LockInsertOwnReceipt) + err := lctx.AcquireLock(storage.LockIndexExecutionResult) if err != nil { return err } diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index 8ce5546f857..c0b1a2ddc9f 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -256,7 +256,7 @@ func (bs *BuilderSuite) SetupTest() { // insert finalized height and root height db := bs.db - err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockBootstrapping, storage.LockInsertBlock, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error { + err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockBootstrapping, storage.LockIndexExecutionResult, storage.LockInsertBlock, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { enc, err := datastore.NewVersionedInstanceParams( datastore.DefaultInstanceParamsVersion, diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index 824761588bb..c6dee6aa097 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -116,6 +116,10 @@ func 
Bootstrap( if err != nil { return nil, err } + err = lctx.AcquireLock(storage.LockIndexExecutionResult) + if err != nil { + return nil, err + } err = lctx.AcquireLock(storage.LockInsertBlock) if err != nil { return nil, err @@ -285,7 +289,7 @@ func bootstrapProtocolState( // history is covered. The spork root block is persisted as a root proposal without proposer // signature (by convention). // -// It requires [storage.LockInsertOwnReceipt] lock +// It requires [storage.LockIndexExecutionResult] lock func bootstrapSealingSegment( lctx lockctx.Proof, db storage.DB, diff --git a/storage/locks.go b/storage/locks.go index 6358b9f67b3..6aba87a071a 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -30,6 +30,7 @@ const ( // LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block. // Specifically, with this lock we prevent accidental overwrites of the index `executed block ID` ➜ `Receipt ID`. LockInsertOwnReceipt = "lock_insert_own_receipt" + LockIndexExecutionResult = "lock_index_execution_result" LockIndexStateCommitment = "lock_index_state_commitment" LockInsertAndIndexTxResult = "lock_insert_and_index_tx_result" // LockInsertCollection protects the insertion of collections. @@ -51,6 +52,7 @@ func Locks() []string { LockInsertEvent, LockInsertServiceEvent, LockInsertOwnReceipt, + LockIndexExecutionResult, LockIndexStateCommitment, LockInsertAndIndexTxResult, LockInsertCollection, @@ -81,7 +83,8 @@ func makeLockPolicy() lockctx.Policy { return lockctx.NewDAGPolicyBuilder(). // for protocol to Bootstrap, during bootstrapping, // we need to insert and finalize - Add(LockBootstrapping, LockInsertBlock). + Add(LockBootstrapping, LockIndexExecutionResult). + Add(LockIndexExecutionResult, LockInsertBlock). Add(LockInsertBlock, LockFinalizeBlock). // EN to save execution result diff --git a/storage/operation/results.go b/storage/operation/results.go index 8e74f0cfb3f..c74f2b0fc27 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -32,19 +32,18 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result // 1. Execution Node indexes its own executed block's result when finish executing a block // 2. Execution Node indexes the sealed root block's result during bootstrapping // 3. Access Node indexes the sealed result during syncing from EN. -// The caller must acquire either storage.LockInsertOwnReceipt] or [storage.LockBootstrapping] +// The caller must acquire either storage.LockIndexExecutionResult] // // No errors are expected during normal operation. 
func IndexOwnOrSealedExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { // TOOD (leo): I think we should just check if holding // LockIndexExecutionResult instead of these 2 locks - held := lctx.HoldsLock(storage.LockInsertOwnReceipt) || - // during bootstrapping, we index the sealed root block or the spork root block, which is not - // produced by the node itself, but we still need to index its execution result to be able to - // execute next block - lctx.HoldsLock(storage.LockBootstrapping) + held := lctx.HoldsLock(storage.LockIndexExecutionResult) + // during bootstrapping, we index the sealed root block or the spork root block, which is not + // produced by the node itself, but we still need to index its execution result to be able to + // execute next block if !held { - return fmt.Errorf("missing require locks: %s or %s", storage.LockInsertOwnReceipt, storage.LockBootstrapping) + return fmt.Errorf("missing require locks: %s", storage.LockIndexExecutionResult) } key := MakePrefix(codeIndexExecutionResultByBlock, blockID) From 951967faf204fa014c11734ae5750583f0fbbdcf Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 13:55:58 -0700 Subject: [PATCH 60/87] fix test --- engine/access/access_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 629b6d421ee..3056d36832d 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -554,13 +554,13 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { unittest.WithExecutionResultBlockID(blockID), unittest.WithServiceEvents(3)) - require.NoError(suite.T(), storage.WithLock(lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + require.NoError(suite.T(), storage.WithLock(lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := all.Results.BatchStore(er, rw) if err != nil { return err } - // requires LockInsertOwnReceipt + // requires LockIndexExecutionResult return all.Results.BatchIndex(lctx, rw, blockID, er.ID()) }) })) From 4821ca313460bb388a28abd97e63599b3da013d2 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 14:01:21 -0700 Subject: [PATCH 61/87] fix execution state tests --- engine/execution/state/state.go | 1 + storage/locks.go | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index dff2e0544d2..21041c04ba1 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -416,6 +416,7 @@ func (s *state) saveExecutionResults( storage.LockInsertServiceEvent, storage.LockInsertAndIndexTxResult, storage.LockInsertOwnReceipt, + storage.LockIndexExecutionResult, storage.LockIndexStateCommitment, } // Acquire locks to ensure it's concurrent safe when inserting the execution results and chunk data packs. diff --git a/storage/locks.go b/storage/locks.go index 6aba87a071a..4d85c97f40a 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -92,7 +92,8 @@ func makeLockPolicy() lockctx.Policy { Add(LockInsertEvent, LockInsertServiceEvent). Add(LockInsertServiceEvent, LockInsertAndIndexTxResult). Add(LockInsertAndIndexTxResult, LockInsertOwnReceipt). - Add(LockInsertOwnReceipt, LockIndexStateCommitment). + Add(LockInsertOwnReceipt, LockIndexExecutionResult). 
+ Add(LockIndexExecutionResult, LockIndexStateCommitment). // AN state sync to IndexBlockData Add(LockInsertCollection, LockInsertEvent). From c458be618adf301ea65625d5c444da3da55f36f6 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 14:12:52 -0700 Subject: [PATCH 62/87] remove outdated comments --- storage/operation/results.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/storage/operation/results.go b/storage/operation/results.go index c74f2b0fc27..f1abeba7933 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -36,13 +36,10 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result // // No errors are expected during normal operation. func IndexOwnOrSealedExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { - // TOOD (leo): I think we should just check if holding - // LockIndexExecutionResult instead of these 2 locks - held := lctx.HoldsLock(storage.LockIndexExecutionResult) // during bootstrapping, we index the sealed root block or the spork root block, which is not // produced by the node itself, but we still need to index its execution result to be able to // execute next block - if !held { + if !lctx.HoldsLock(storage.LockIndexExecutionResult) { return fmt.Errorf("missing require locks: %s", storage.LockIndexExecutionResult) } From 8e2be8a2b7ae2068dd3469b7f500e6e90131a857 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 14:18:51 -0700 Subject: [PATCH 63/87] add test case --- storage/operation/transaction_results_test.go | 198 ++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100644 storage/operation/transaction_results_test.go diff --git a/storage/operation/transaction_results_test.go b/storage/operation/transaction_results_test.go new file mode 100644 index 00000000000..3e292757c7f --- /dev/null +++ b/storage/operation/transaction_results_test.go @@ -0,0 +1,198 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestInsertAndIndexTransactionResults(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID := unittest.IdentifierFixture() + + // Create test transaction results + transactionResults := unittest.TransactionResultsFixture(3) + + // Test successful insertion and indexing + err := unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertAndIndexTransactionResults(lctx, rw, blockID, transactionResults) + }) + }) + require.NoError(t, err) + + // Verify that transaction results can be retrieved by transaction ID + for i, expected := range transactionResults { + var actual flow.TransactionResult + err = operation.RetrieveTransactionResult(db.Reader(), blockID, expected.TransactionID, &actual) + require.NoError(t, err) + assert.Equal(t, expected, actual) + + // Verify that transaction results can be retrieved by index + var actualByIndex flow.TransactionResult + err = 
operation.RetrieveTransactionResultByIndex(db.Reader(), blockID, uint32(i), &actualByIndex) + require.NoError(t, err) + assert.Equal(t, expected, actualByIndex) + } + + // Verify that all transaction results can be retrieved using the index + var retrievedResults []flow.TransactionResult + err = operation.LookupTransactionResultsByBlockIDUsingIndex(db.Reader(), blockID, &retrievedResults) + require.NoError(t, err) + assert.Len(t, retrievedResults, len(transactionResults)) + + // Verify the order matches the original order + for i, expected := range transactionResults { + assert.Equal(t, expected, retrievedResults[i]) + } + }) +} + +func TestInsertAndIndexTransactionResults_AlreadyExists(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID := unittest.IdentifierFixture() + + // Create initial transaction results + initialResults := unittest.TransactionResultsFixture(1) + + // Insert initial results + err := unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertAndIndexTransactionResults(lctx, rw, blockID, initialResults) + }) + }) + require.NoError(t, err) + + // Try to insert results for the same block again - should fail + duplicateResults := unittest.TransactionResultsFixture(1) + + err = unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertAndIndexTransactionResults(lctx, rw, blockID, duplicateResults) + }) + }) + require.Error(t, err) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + }) +} + +func TestInsertAndIndexTransactionResults_NoLock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + blockID := unittest.IdentifierFixture() + transactionResults := unittest.TransactionResultsFixture(1) + + // Try to insert without holding the required lock - should fail + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // Create a context without the required lock + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + defer lctx.Release() + + return operation.InsertAndIndexTransactionResults(lctx, rw, blockID, transactionResults) + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "InsertTransactionResult requires LockInsertAndIndexTxResult to be held") + }) +} + +func TestInsertAndIndexTransactionResults_EmptyResults(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID := unittest.IdentifierFixture() + + // Test with empty transaction results + emptyResults := []flow.TransactionResult{} + + err := unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertAndIndexTransactionResults(lctx, rw, blockID, emptyResults) + }) + }) + require.NoError(t, err) + + // Verify that no results are stored + var retrievedResults []flow.TransactionResult + err = operation.LookupTransactionResultsByBlockIDUsingIndex(db.Reader(), blockID, &retrievedResults) + require.NoError(t, err) + assert.Len(t, retrievedResults, 0) + }) +} + +func TestInsertAndIndexTransactionResults_SingleResult(t *testing.T) { + 
dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID := unittest.IdentifierFixture() + + // Test with single transaction result + singleResult := unittest.TransactionResultsFixture(1) + + err := unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertAndIndexTransactionResults(lctx, rw, blockID, singleResult) + }) + }) + require.NoError(t, err) + + // Verify the single result can be retrieved + var retrievedResults []flow.TransactionResult + err = operation.LookupTransactionResultsByBlockIDUsingIndex(db.Reader(), blockID, &retrievedResults) + require.NoError(t, err) + assert.Len(t, retrievedResults, 1) + assert.Equal(t, singleResult[0], retrievedResults[0]) + }) +} + +func TestInsertAndIndexTransactionResults_MultipleResultsWithSameTransactionID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID := unittest.IdentifierFixture() + + // Create transaction results with the same transaction ID (duplicate transactions in block) + transactionID := unittest.IdentifierFixture() + results := []flow.TransactionResult{ + { + TransactionID: transactionID, + ErrorMessage: "error1", + ComputationUsed: 100, + MemoryUsed: 200, + }, + { + TransactionID: transactionID, + ErrorMessage: "error2", + ComputationUsed: 150, + MemoryUsed: 250, + }, + } + + err := unittest.WithLock(t, lockManager, storage.LockInsertAndIndexTxResult, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertAndIndexTransactionResults(lctx, rw, blockID, results) + }) + }) + require.NoError(t, err) + + // Verify both results are stored and can be retrieved by index + for i, expected := range results { + var actual flow.TransactionResult + err = operation.RetrieveTransactionResultByIndex(db.Reader(), blockID, uint32(i), &actual) + require.NoError(t, err) + assert.Equal(t, expected, actual) + } + + // Verify that lookup by transaction ID returns the last stored result (due to overwrite) + var actual flow.TransactionResult + err = operation.RetrieveTransactionResult(db.Reader(), blockID, transactionID, &actual) + require.NoError(t, err) + assert.Equal(t, results[1], actual) // Should be the last one stored + }) +} From 90ef8d5e1cafd0b7f5980316cbd327d003616045 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 15:46:38 -0700 Subject: [PATCH 64/87] update comments --- engine/access/ingestion/engine.go | 5 ++--- storage/locks.go | 3 +++ storage/operation/results.go | 5 +++-- storage/results.go | 3 +++ 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 8b23f1e4d43..2e2a25b4cfc 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -381,8 +381,7 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { // index the block storage with each of the collection guarantee err := storage.WithLocks(e.lockManager, []string{ storage.LockIndexCollectionsByBlock, - // TODO (leo): consider change to LockIndexResultByBlock lock - storage.LockInsertOwnReceipt, + storage.LockIndexExecutionResult, }, func(lctx lockctx.Context) error { return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // requires 
[storage.LockIndexCollectionsByBlock] lock @@ -393,7 +392,7 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { // loop through seals and index ID -> result ID for _, seal := range block.Payload.Seals { - // requires [storage.LockInsertOwnReceipt] lock + // requires [storage.LockIndexExecutionResult] lock err := e.executionResults.BatchIndex(lctx, rw, seal.BlockID, seal.ResultID) if err != nil { return fmt.Errorf("could not index block for execution result: %w", err) diff --git a/storage/locks.go b/storage/locks.go index 4d85c97f40a..814254cb62c 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -95,6 +95,9 @@ func makeLockPolicy() lockctx.Policy { Add(LockInsertOwnReceipt, LockIndexExecutionResult). Add(LockIndexExecutionResult, LockIndexStateCommitment). + // AN ingestion engine processing finalized block + Add(LockIndexCollectionsByBlock, LockIndexExecutionResult). + // AN state sync to IndexBlockData Add(LockInsertCollection, LockInsertEvent). Add(LockInsertEvent, LockInsertServiceEvent). diff --git a/storage/operation/results.go b/storage/operation/results.go index f1abeba7933..cc3d49af025 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -32,9 +32,10 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result // 1. Execution Node indexes its own executed block's result when finish executing a block // 2. Execution Node indexes the sealed root block's result during bootstrapping // 3. Access Node indexes the sealed result during syncing from EN. -// The caller must acquire either storage.LockIndexExecutionResult] +// The caller must acquire [storage.LockIndexExecutionResult] // -// No errors are expected during normal operation. +// It returns [storage.ErrDataMismatch] if there is already an indexed result for the given blockID, +// but it is different from the given resultID. func IndexOwnOrSealedExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { // during bootstrapping, we index the sealed root block or the spork root block, which is not // produced by the node itself, but we still need to index its execution result to be able to diff --git a/storage/results.go b/storage/results.go index 2051232604e..3650e8b9fe3 100644 --- a/storage/results.go +++ b/storage/results.go @@ -24,6 +24,9 @@ type ExecutionResults interface { BatchStore(result *flow.ExecutionResult, batch ReaderBatchWriter) error // BatchIndex indexes an execution result by block ID in a given batch + // The caller must acquire [storage.LockIndexExecutionResult] + // It returns [storage.ErrDataMismatch] if there is already an indexed result for the given blockID, + // but it is different from the given resultID. BatchIndex(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error // BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. 
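The hunk above documents that BatchIndex is now keyed to [storage.LockIndexExecutionResult] and fails with [storage.ErrDataMismatch] on a conflicting index. As a minimal sketch of the intended call pattern (mirroring the access_test.go change in PATCH 60; `lockManager`, `db`, `results`, `blockID`, and `executionResult` are assumed to be in scope, and the enclosing function is assumed to return an error):

err := storage.WithLock(lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error {
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		// persist the result itself; BatchStore takes no lock proof
		if err := results.BatchStore(executionResult, rw); err != nil {
			return err
		}
		// index it by block ID while holding LockIndexExecutionResult;
		// returns storage.ErrDataMismatch if a different result is already indexed
		return results.BatchIndex(lctx, rw, blockID, executionResult.ID())
	})
})
if err != nil {
	return fmt.Errorf("could not store and index execution result: %w", err)
}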
From fe33e7f763536b411db9ba6603f8f73053bc0e0f Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 15:50:41 -0700 Subject: [PATCH 65/87] update comments --- storage/results.go | 2 +- storage/store/chunk_data_packs.go | 1 - storage/store/my_receipts.go | 1 - storage/store/results.go | 1 - storage/store/seals_test.go | 3 --- 5 files changed, 1 insertion(+), 7 deletions(-) diff --git a/storage/results.go b/storage/results.go index 3650e8b9fe3..fb00c7db3d6 100644 --- a/storage/results.go +++ b/storage/results.go @@ -21,6 +21,7 @@ type ExecutionResults interface { ExecutionResultsReader // BatchStore stores an execution result in a given batch + // No error is expected during normal operation. BatchStore(result *flow.ExecutionResult, batch ReaderBatchWriter) error // BatchIndex indexes an execution result by block ID in a given batch @@ -31,6 +32,5 @@ type ExecutionResults interface { // BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. BatchRemoveIndexByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error } diff --git a/storage/store/chunk_data_packs.go b/storage/store/chunk_data_packs.go index 0568dd8f5c9..81aff93e46f 100644 --- a/storage/store/chunk_data_packs.go +++ b/storage/store/chunk_data_packs.go @@ -79,7 +79,6 @@ func (ch *ChunkDataPacks) StoreByChunkID(lctx lockctx.Proof, cs []*flow.ChunkDat // BatchRemove removes ChunkDataPack c keyed by its ChunkID in provided batch // No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. func (ch *ChunkDataPacks) BatchRemove(chunkID flow.Identifier, rw storage.ReaderBatchWriter) error { storage.OnCommitSucceed(rw, func() { ch.byChunkIDCache.Remove(chunkID) diff --git a/storage/store/my_receipts.go b/storage/store/my_receipts.go index f7a5ce31828..e620e8fb9c9 100644 --- a/storage/store/my_receipts.go +++ b/storage/store/my_receipts.go @@ -103,7 +103,6 @@ func (m *MyExecutionReceipts) RemoveIndexByBlockID(blockID flow.Identifier) erro // BatchRemoveIndexByBlockID removes blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch // No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. func (m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error { return m.cache.RemoveTx(rw, blockID) } diff --git a/storage/store/results.go b/storage/store/results.go index a72f3ad91ee..afeb5792f81 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -109,7 +109,6 @@ func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error { // BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. // No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
func (r *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { return operation.RemoveExecutionResultIndex(batch.Writer(), blockID) } diff --git a/storage/store/seals_test.go b/storage/store/seals_test.go index 02f79fdece2..852a245b3b0 100644 --- a/storage/store/seals_test.go +++ b/storage/store/seals_test.go @@ -47,9 +47,6 @@ func TestSealStoreRetrieve(t *testing.T) { // TestSealIndexAndRetrieve verifies that: // - for a block, we can s (aka index) the latest sealed block along this fork. -// -// Note: indexing the seal for a block is currently implemented only through a direct -// Badger operation. The Seals mempool only supports retrieving the latest sealed block. func TestSealIndexAndRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() From 1cadc17526e344108cf418c2cd39acdef5ae51e6 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 15:59:25 -0700 Subject: [PATCH 66/87] update lock policy --- .../indexer/indexer_core.go | 44 ++++++++----------- storage/locks.go | 2 +- 2 files changed, 19 insertions(+), 27 deletions(-) diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index e642da5fdb9..1e77d70756c 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -148,32 +148,24 @@ func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEnti results = append(results, chunk.TransactionResults...) } - lctx := c.lockManager.NewContext() - defer lctx.Release() - err := lctx.AcquireLock(storage.LockInsertEvent) - if err != nil { - return fmt.Errorf("could not acquire LockInsertEvent for indexing block data: %w", err) - } - - err = lctx.AcquireLock(storage.LockInsertLightTransactionResult) - if err != nil { - return fmt.Errorf("could not acquire LockInsertLightTransactionResult for indexing block data: %w", err) - } - - err = c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - // Needs storage.LockInsertEvent - err := c.events.BatchStore(lctx, data.BlockID, []flow.EventsList{events}, rw) - if err != nil { - return fmt.Errorf("could not index events at height %d: %w", header.Height, err) - } - - // Needs storage.LockInsertLightTransactionResult - err = c.results.BatchStore(lctx, data.BlockID, results, rw) - if err != nil { - return fmt.Errorf("could not index transaction results at height %d: %w", header.Height, err) - } - return nil - }) + err := storage.WithLocks(c.lockManager, + []string{storage.LockInsertEvent, storage.LockInsertLightTransactionResult}, + func(lctx lockctx.Context) error { + return c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // Needs storage.LockInsertEvent + err := c.events.BatchStore(lctx, data.BlockID, []flow.EventsList{events}, rw) + if err != nil { + return fmt.Errorf("could not index events at height %d: %w", header.Height, err) + } + + // Needs storage.LockInsertLightTransactionResult + err = c.results.BatchStore(lctx, data.BlockID, results, rw) + if err != nil { + return fmt.Errorf("could not index transaction results at height %d: %w", header.Height, err) + } + return nil + }) + }) if err != nil { return fmt.Errorf("could not commit block data: %w", err) diff --git a/storage/locks.go b/storage/locks.go index 814254cb62c..98b72a6cd2d 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -101,7 
+101,7 @@ func makeLockPolicy() lockctx.Policy { // AN state sync to IndexBlockData Add(LockInsertCollection, LockInsertEvent). Add(LockInsertEvent, LockInsertServiceEvent). - Add(LockInsertServiceEvent, LockInsertLightTransactionResult). + Add(LockInsertEvent, LockInsertLightTransactionResult). Build() } From 9dafeccb5e8c94f6f76384c2a6e43dddc1da4deb Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 16:05:25 -0700 Subject: [PATCH 67/87] add comments and remove unused methods --- storage/store/my_receipts.go | 6 ------ storage/store/results.go | 20 +++++++++++--------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/storage/store/my_receipts.go b/storage/store/my_receipts.go index e620e8fb9c9..352bf7877a2 100644 --- a/storage/store/my_receipts.go +++ b/storage/store/my_receipts.go @@ -95,12 +95,6 @@ func (m *MyExecutionReceipts) MyReceipt(blockID flow.Identifier) (*flow.Executio return m.myReceipt(blockID) } -func (m *MyExecutionReceipts) RemoveIndexByBlockID(blockID flow.Identifier) error { - return m.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return m.BatchRemoveIndexByBlockID(blockID, rw) - }) -} - // BatchRemoveIndexByBlockID removes blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch // No errors are expected during normal operation, even if no entries are matched. func (m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error { diff --git a/storage/store/results.go b/storage/store/results.go index afeb5792f81..c9966765224 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -78,35 +78,37 @@ func (r *ExecutionResults) byBlockID(blockID flow.Identifier) (*flow.ExecutionRe return r.byID(resultID) } +// BatchIndex indexes an execution result by block ID in a given batch +// The caller must acquire [storage.LockIndexExecutionResult] +// It returns [storage.ErrDataMismatch] if there is already an indexed result for the given blockID, +// but it is different from the given resultID. func (r *ExecutionResults) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error { return r.indexCache.PutWithLockTx(lctx, rw, blockID, resultID) } +// BatchStore stores an execution result in a given batch +// No error is expected during normal operation. func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.ReaderBatchWriter) error { return r.store(batch, result) } +// ByID retrieves an execution result by its ID. Returns `ErrNotFound` if `resultID` is unknown. func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { return r.byID(resultID) } +// ByBlockID retrieves an execution result by block ID. +// It returns [storage.ErrNotFound] if `blockID` does not refer to a block executed by this node func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { return r.byBlockID(blockID) } +// IDByBlockID retrieves an execution result ID by block ID. 
+// It returns [storage.ErrNotFound] if `blockID` does not refer to a block executed by this node func (r *ExecutionResults) IDByBlockID(blockID flow.Identifier) (flow.Identifier, error) { return r.indexCache.Get(r.db.Reader(), blockID) } -func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error { - return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - storage.OnCommitSucceed(rw, func() { - r.indexCache.Remove(blockID) - }) - return operation.RemoveExecutionResultIndex(rw.Writer(), blockID) - }) -} - // BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. // No errors are expected during normal operation, even if no entries are matched. func (r *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { From 03c159b8140529a0ff7670afaabfd7d60b441371 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 16:22:56 -0700 Subject: [PATCH 68/87] add existence check when inserting events --- module/pruner/pruners/chunk_data_pack_test.go | 4 +-- .../indexer/in_memory_indexer.go | 1 - storage/events.go | 5 +-- storage/operation/events.go | 36 +++++++++++++++---- storage/store/events.go | 4 +-- 5 files changed, 36 insertions(+), 14 deletions(-) diff --git a/module/pruner/pruners/chunk_data_pack_test.go b/module/pruner/pruners/chunk_data_pack_test.go index 9dc4d63e1ee..6c4f0b0885a 100644 --- a/module/pruner/pruners/chunk_data_pack_test.go +++ b/module/pruner/pruners/chunk_data_pack_test.go @@ -32,9 +32,7 @@ func TestChunkDataPackPruner(t *testing.T) { cdp1, result1 := unittest.ChunkDataPacksFixtureAndResult() require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - err := results.BatchStore(result1, rw) - require.NoError(t, err) - return nil + return results.BatchStore(result1, rw) }) })) require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { diff --git a/module/state_synchronization/indexer/in_memory_indexer.go b/module/state_synchronization/indexer/in_memory_indexer.go index 1fa53bfa620..c87b3e1eee3 100644 --- a/module/state_synchronization/indexer/in_memory_indexer.go +++ b/module/state_synchronization/indexer/in_memory_indexer.go @@ -107,7 +107,6 @@ func (i *InMemoryIndexer) IndexBlockData(data *execution_data.BlockExecutionData transactions := make([]*flow.TransactionBody, 0) registerUpdates := make(map[ledger.Path]*ledger.Payload) - // Process all chunk data in a single pass for idx, chunk := range data.ChunkExecutionDatas { events = append(events, chunk.Events...) results = append(results, chunk.TransactionResults...) diff --git a/storage/events.go b/storage/events.go index a4c5c9e1e15..041dcf3c64a 100644 --- a/storage/events.go +++ b/storage/events.go @@ -26,6 +26,7 @@ type Events interface { // BatchStore will store events for the given block ID in a given batch // it requires the caller to hold [storage.LockInsertEvent] + // It returns [storage.ErrAlreadyExists] if events for the block already exist. 
BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.EventsList, batch ReaderBatchWriter) error // BatchRemoveByBlockID removes events keyed by a blockID in provided batch @@ -37,8 +38,8 @@ type Events interface { type ServiceEvents interface { // BatchStore stores service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. - // it requires the caller to hold [storage.LockInsertEvent] - // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. + // it requires the caller to hold [storage.LockInsertServiceEvent] + // It returns [storage.ErrAlreadyExists] if events for the block already exist. BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.Event, batch ReaderBatchWriter) error // ByBlockID returns the events for the given block ID diff --git a/storage/operation/events.go b/storage/operation/events.go index 08ad9e11ea2..ffb6b9fd1c9 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -16,14 +16,26 @@ func eventPrefix(prefix byte, blockID flow.Identifier, event flow.Event) []byte // InsertBlockEvents stores all events for a given block in the database. // Requires LockInsertEvent to be held for thread safety. // This function iterates through all events in the provided EventsList and stores each event individually. -// -// Error returns: -// - generic error in case of unexpected failure from the database layer or encoding failure +// It returns [storage.ErrAlreadyExists] if events for the block already exist. func InsertBlockEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, events []flow.EventsList) error { if !lctx.HoldsLock(storage.LockInsertEvent) { return fmt.Errorf("InsertBlockEvents requires LockInsertEvent to be held") } + // Check if events for the block already exist + // We can exit early as soon as one existing entry is found, + // because regardless of whether the given list is empty or not, + // we don't want to overwrite existing events indexed by the block + prefix := MakePrefix(codeEvent, blockID) + checkExists := func(key []byte) error { + return fmt.Errorf("event with key %x already exists under prefix %x: %w", key, prefix, storage.ErrAlreadyExists) + } + + err := IterateKeysByPrefixRange(rw.GlobalReader(), prefix, prefix, checkExists) + if err != nil { + return err + } + writer := rw.Writer() for _, eventsList := range events { @@ -48,14 +60,26 @@ func insertEvent(w storage.Writer, blockID flow.Identifier, event flow.Event) er // Requires LockInsertServiceEvent to be held for thread safety. // This function iterates through all service events in the provided list and stores each event individually. // Service events are special events generated by the system (e.g., account creation, contract deployment). -// -// Error returns: -// - generic error in case of unexpected failure from the database layer or encoding failure +// It returns [storage.ErrAlreadyExists] if events for the block already exist. 
func InsertBlockServiceEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, events []flow.Event) error { if !lctx.HoldsLock(storage.LockInsertServiceEvent) { return fmt.Errorf("InsertBlockServiceEvents requires LockInsertServiceEvent to be held") } + // Check if service events for the block already exist + // We can exit early as soon as one existing entry is found, + // because regardless of whether the given list is empty or not, + // we don't want to overwrite existing service events indexed by the block + prefix := MakePrefix(codeServiceEvent, blockID) + checkExists := func(key []byte) error { + return fmt.Errorf("service event with key %x already exists under prefix %x: %w", key, prefix, storage.ErrAlreadyExists) + } + + err := IterateKeysByPrefixRange(rw.GlobalReader(), prefix, prefix, checkExists) + if err != nil { + return err + } + writer := rw.Writer() for _, event := range events { diff --git a/storage/store/events.go b/storage/store/events.go index 83aee5ba0d2..8cc9ccb207b 100644 --- a/storage/store/events.go +++ b/storage/store/events.go @@ -39,8 +39,8 @@ func NewEvents(collector module.CacheMetrics, db storage.DB) *Events { )} } -// BatchStore stores events keyed by a blockID in provided batch -// No errors are expected during normal operation, but it may return generic error +// BatchStore will store events for the given block ID in a given batch +// it requires the caller to hold [storage.LockInsertEvent] func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList, batch storage.ReaderBatchWriter) error { // Use the new InsertBlockEvents operation to store all events err := operation.InsertBlockEvents(lctx, batch, blockID, blockEvents) From 7eca395e551cdf1db98ee4ed776837d51ba5598f Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 16:24:41 -0700 Subject: [PATCH 69/87] fix results tests --- storage/store/results_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/storage/store/results_test.go b/storage/store/results_test.go index d84ebcba68c..3afef928316 100644 --- a/storage/store/results_test.go +++ b/storage/store/results_test.go @@ -23,7 +23,7 @@ func TestResultStoreAndRetrieve(t *testing.T) { result := unittest.ExecutionResultFixture() blockID := unittest.IdentifierFixture() - err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error 
{ err := store1.BatchStore(result, rw) require.NoError(t, err) @@ -86,7 +86,7 @@ func TestResultBatchStoreTwice(t *testing.T) { result := unittest.ExecutionResultFixture() blockID := unittest.IdentifierFixture() - err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { err := store1.BatchStore(result, batch) require.NoError(t, err) @@ -98,7 +98,7 @@ func TestResultBatchStoreTwice(t *testing.T) { }) require.NoError(t, err) - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { err := store1.BatchStore(result, batch) require.NoError(t, err) @@ -123,7 +123,7 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { result2 := unittest.ExecutionResultFixture() blockID := unittest.IdentifierFixture() - err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := unittest.WithLock(t, lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := store1.BatchStore(result1, rw) require.NoError(t, err) @@ -138,7 +138,7 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { // we can store a different result, but we can't index // a different result for that block, because it will mean // one block has two different results. 
- err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := store1.BatchStore(result2, rw) require.NoError(t, err) @@ -148,7 +148,7 @@ func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { require.NoError(t, err) var indexErr error - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { indexErr = store1.BatchIndex(lctx, rw, blockID, result2.ID()) return nil From 7313207c94974f9a779b5cd31d4b04b3f8340eaa Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 16:28:40 -0700 Subject: [PATCH 70/87] update events tests --- storage/operation/events_test.go | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/storage/operation/events_test.go b/storage/operation/events_test.go index f1db5f6f4ac..b8f35b39eb7 100644 --- a/storage/operation/events_test.go +++ b/storage/operation/events_test.go @@ -36,6 +36,7 @@ func TestRetrieveEventByBlockIDTxID(t *testing.T) { for _, b := range blockIDs { bEvents := make([]flow.Event, 0) + allEventsForBlock := make([]flow.Event, 0) // all blocks share the same transactions for i, tx := range txIDs { @@ -54,15 +55,8 @@ func TestRetrieveEventByBlockIDTxID(t *testing.T) { unittest.Event.WithTransactionID(tx), ) - // insert event into the db - err := unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { - return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return operation.InsertBlockEvents(lctx, rw, b, []flow.EventsList{[]flow.Event{event}}) - }) - }) - require.NoError(t, err) - - // update event arrays in the maps + // collect events for batch insertion + allEventsForBlock = append(allEventsForBlock, event) bEvents = append(bEvents, event) tEvents = append(tEvents, event) eEvents = append(eEvents, event) @@ -76,6 +70,15 @@ func TestRetrieveEventByBlockIDTxID(t *testing.T) { } txMap[b.String()+"_"+tx.String()] = tEvents } + + // insert all events for this block in one batch + err := unittest.WithLock(t, lockManager, storage.LockInsertEvent, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertBlockEvents(lctx, rw, b, []flow.EventsList{allEventsForBlock}) + }) + }) + require.NoError(t, err) + blockMap[b.String()] = bEvents } From f863089b31f3984d4ec2f923df40dc2cdd6a7b0c Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 16:47:00 -0700 Subject: [PATCH 71/87] fix tests --- engine/access/ingestion2/finalized_block_processor.go | 4 ++-- engine/execution/pruner/core_test.go | 2 +- engine/execution/state/state.go | 2 +- .../optimistic_sync/core_impl_test.go | 2 +- .../optimistic_sync/pipeline_functional_test.go | 2 +- module/pruner/pruners/chunk_data_pack_test.go | 10 ++++------ 6 files changed, 10 insertions(+), 12 deletions(-) diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go index eb5bc2eaa9f..cb352c0d75b 100644 --- a/engine/access/ingestion2/finalized_block_processor.go +++ 
b/engine/access/ingestion2/finalized_block_processor.go @@ -154,7 +154,7 @@ func (p *FinalizedBlockProcessor) processFinalizedBlockJobCallback( // No errors are expected during normal operations. func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { err := storage.WithLocks(p.lockManager, - []string{storage.LockIndexCollectionsByBlock, storage.LockInsertOwnReceipt}, func(lctx lockctx.Context) error { + []string{storage.LockIndexCollectionsByBlock, storage.LockIndexExecutionResult}, func(lctx lockctx.Context) error { return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // require storage.LockIndexCollectionsByBlock err := p.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) @@ -164,7 +164,7 @@ func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { // loop through seals and index ID -> result ID for _, seal := range block.Payload.Seals { - // require storage.LockInsertOwnReceipt + // require storage.LockIndexExecutionResult err := p.executionResults.BatchIndex(lctx, rw, seal.BlockID, seal.ResultID) if err != nil { return fmt.Errorf("could not index block for execution result: %w", err) diff --git a/engine/execution/pruner/core_test.go b/engine/execution/pruner/core_test.go index 827006f9fca..36cfd6195ef 100644 --- a/engine/execution/pruner/core_test.go +++ b/engine/execution/pruner/core_test.go @@ -73,7 +73,7 @@ func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { }) }) require.NoError(t, err) - err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := results.BatchStore(chunk.Result, rw) require.NoError(t, err) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 21041c04ba1..967ad471511 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -476,7 +476,7 @@ func (s *state) saveExecutionResults( return fmt.Errorf("could not persist execution result: %w", err) } - // require [storage.LockInsertOwnReceipt] lock + // require [storage.LockIndexExecutionResult] lock err = s.results.BatchIndex(lctx, batch, blockID, executionResult.ID()) if err != nil { return fmt.Errorf("cannot index execution result: %w", err) diff --git a/module/executiondatasync/optimistic_sync/core_impl_test.go b/module/executiondatasync/optimistic_sync/core_impl_test.go index 14576deaffa..008fc9aa51b 100644 --- a/module/executiondatasync/optimistic_sync/core_impl_test.go +++ b/module/executiondatasync/optimistic_sync/core_impl_test.go @@ -447,7 +447,7 @@ func (c *CoreImplSuite) TestCoreImpl_Persist() { c.persistentRegisters.On("Store", flow.RegisterEntries(indexerData.Registers), tf.block.Height).Return(nil) c.persistentEvents.On("BatchStore", mock.Anything, blockID, []flow.EventsList{indexerData.Events}, mock.Anything).Return(nil) c.persistentCollections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) - c.persistentResults.On("BatchStore", blockID, indexerData.Results, mock.Anything).Return(nil) + c.persistentResults.On("BatchStore", mock.Anything, blockID, indexerData.Results, mock.Anything).Return(nil) c.persistentTxResultErrMsg.On("BatchStore", blockID, core.workingData.txResultErrMsgsData, mock.Anything).Return(nil) 
c.latestPersistedSealedResult.On("BatchSet", tf.exeResult.ID(), tf.block.Height, mock.Anything).Return(nil) diff --git a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go index 2d3c9ab88fb..9d9fd48c315 100644 --- a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go +++ b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go @@ -154,7 +154,7 @@ func (p *PipelineFunctionalSuite) SetupTest() { require.NoError(t, err) // Store and index sealed block execution result - err = unittest.WithLock(t, p.lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, p.lockManager, storage.LockIndexExecutionResult, func(lctx lockctx.Context) error { return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := p.results.BatchStore(sealedExecutionResult, rw) p.Require().NoError(err) diff --git a/module/pruner/pruners/chunk_data_pack_test.go b/module/pruner/pruners/chunk_data_pack_test.go index 6c4f0b0885a..ee333ae702c 100644 --- a/module/pruner/pruners/chunk_data_pack_test.go +++ b/module/pruner/pruners/chunk_data_pack_test.go @@ -30,11 +30,9 @@ func TestChunkDataPackPruner(t *testing.T) { // store the chunks cdp1, result1 := unittest.ChunkDataPacksFixtureAndResult() - require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { - return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return results.BatchStore(result1, rw) - }) - })) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return results.BatchStore(result1, rw) + }) require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { return chunks.StoreByChunkID(lctx, cdp1) })) @@ -47,7 +45,7 @@ func TestChunkDataPackPruner(t *testing.T) { })) // verify they are pruned - _, err := chunks.ByChunkID(cdp1[0].ChunkID) + _, err = chunks.ByChunkID(cdp1[0].ChunkID) require.True(t, errors.Is(err, storage.ErrNotFound), fmt.Errorf("expected ErrNotFound but got %v", err)) // prune again should not return error From bf3187ddd178ede98bdd047f6d89d3e026be1fde Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 16:58:20 -0700 Subject: [PATCH 72/87] fix persist block test --- .../executiondatasync/optimistic_sync/persisters/block_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/executiondatasync/optimistic_sync/persisters/block_test.go b/module/executiondatasync/optimistic_sync/persisters/block_test.go index c09b7bbb13c..4c49f1128f2 100644 --- a/module/executiondatasync/optimistic_sync/persisters/block_test.go +++ b/module/executiondatasync/optimistic_sync/persisters/block_test.go @@ -182,7 +182,7 @@ func (p *PersisterSuite) TestPersister_ErrorHandling() { Times(len(p.indexerData.Collections)) events := storagemock.NewEvents(p.T()) - events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(expectedErr).Once() + events.On("BatchStore", mock.Anything, p.executionResult.BlockID, mock.Anything, mock.Anything).Return(expectedErr).Once() persister := NewBlockPersister( unittest.Logger(), From e7e4059507b591a5026b836e37737b8125675967 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 8 Oct 2025 16:59:25 -0700 Subject: [PATCH 73/87] fix lint --- module/pruner/pruners/chunk_data_pack_test.go | 1 + 1 file changed, 1 
insertion(+) diff --git a/module/pruner/pruners/chunk_data_pack_test.go b/module/pruner/pruners/chunk_data_pack_test.go index ee333ae702c..d8ab773c6bd 100644 --- a/module/pruner/pruners/chunk_data_pack_test.go +++ b/module/pruner/pruners/chunk_data_pack_test.go @@ -33,6 +33,7 @@ func TestChunkDataPackPruner(t *testing.T) { err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return results.BatchStore(result1, rw) }) + require.NoError(t, err) require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { return chunks.StoreByChunkID(lctx, cdp1) })) From d725fedb69b233fe5d773f354e3632ce12fd52e9 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 9 Oct 2025 07:24:04 -0700 Subject: [PATCH 74/87] add comment to lock policy --- storage/locks.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/storage/locks.go b/storage/locks.go index 98b72a6cd2d..1b8fc25c617 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -83,11 +83,16 @@ func makeLockPolicy() lockctx.Policy { return lockctx.NewDAGPolicyBuilder(). // for protocol to Bootstrap, during bootstrapping, // we need to insert and finalize + // state/protocol/badger/state.go#Bootstrap Add(LockBootstrapping, LockIndexExecutionResult). + + // EN to bootstrap + // engine/execution/state/bootstrap/bootstrap.go#Bootstrapper.BootstrapExecutionDatabase Add(LockIndexExecutionResult, LockInsertBlock). Add(LockInsertBlock, LockFinalizeBlock). // EN to save execution result + // engine/execution/state/state.go#state.saveExecutionResults Add(LockInsertChunkDataPack, LockInsertEvent). Add(LockInsertEvent, LockInsertServiceEvent). Add(LockInsertServiceEvent, LockInsertAndIndexTxResult). @@ -96,11 +101,12 @@ func makeLockPolicy() lockctx.Policy { Add(LockIndexExecutionResult, LockIndexStateCommitment). // AN ingestion engine processing finalized block + // engine/access/ingestion/engine.go#Engine.processFinalizedBlock Add(LockIndexCollectionsByBlock, LockIndexExecutionResult). - // AN state sync to IndexBlockData + // AN optimistic syncing + // module/executiondatasync/optimistic_sync/persisters/block.go#BlockPersister.Persist Add(LockInsertCollection, LockInsertEvent). - Add(LockInsertEvent, LockInsertServiceEvent). Add(LockInsertEvent, LockInsertLightTransactionResult). 
Build() } From a0a2985048bc1dae900f0901a3eaf3a756fb23cb Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 9 Oct 2025 07:28:32 -0700 Subject: [PATCH 75/87] rename LockBootstrapping to LockInsertInstanceParams --- module/builder/consensus/builder_test.go | 2 +- state/protocol/badger/state.go | 2 +- storage/locks.go | 8 ++++---- storage/locks_test.go | 2 +- storage/operation/instance_params.go | 6 +++--- storage/operation/instance_params_test.go | 10 +++++----- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index c0b1a2ddc9f..3c372c7d149 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -256,7 +256,7 @@ func (bs *BuilderSuite) SetupTest() { // insert finalized height and root height db := bs.db - err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockBootstrapping, storage.LockIndexExecutionResult, storage.LockInsertBlock, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error { + err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockInsertInstanceParams, storage.LockIndexExecutionResult, storage.LockInsertBlock, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { enc, err := datastore.NewVersionedInstanceParams( datastore.DefaultInstanceParamsVersion, diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index c6dee6aa097..877cde310cd 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -112,7 +112,7 @@ func Bootstrap( // trusted root snapshot are presumed to be finalized) lctx := lockManager.NewContext() defer lctx.Release() - err := lctx.AcquireLock(storage.LockBootstrapping) + err := lctx.AcquireLock(storage.LockInsertInstanceParams) if err != nil { return nil, err } diff --git a/storage/locks.go b/storage/locks.go index 1b8fc25c617..3fb808ade98 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -35,8 +35,8 @@ const ( LockInsertAndIndexTxResult = "lock_insert_and_index_tx_result" // LockInsertCollection protects the insertion of collections. LockInsertCollection = "lock_insert_collection" - // LockBootstrapping protects data that is *exclusively* written during bootstrapping. - LockBootstrapping = "lock_bootstrapping" + // LockInsertInstanceParams protects data that is *exclusively* written during bootstrapping. + LockInsertInstanceParams = "lock_insert_instance_params" // LockInsertChunkDataPack protects the insertion of chunk data packs (not yet used anywhere LockInsertChunkDataPack = "lock_insert_chunk_data_pack" LockIndexCollectionsByBlock = "lock_index_collections_by_block" @@ -57,7 +57,7 @@ func Locks() []string { LockInsertAndIndexTxResult, LockInsertCollection, LockInsertLightTransactionResult, - LockBootstrapping, + LockInsertInstanceParams, LockInsertChunkDataPack, LockIndexCollectionsByBlock, } @@ -84,7 +84,7 @@ func makeLockPolicy() lockctx.Policy { // for protocol to Bootstrap, during bootstrapping, // we need to insert and finalize // state/protocol/badger/state.go#Bootstrap - Add(LockBootstrapping, LockIndexExecutionResult). + Add(LockInsertInstanceParams, LockIndexExecutionResult). 
// EN to bootstrap // engine/execution/state/bootstrap/bootstrap.go#Bootstrapper.BootstrapExecutionDatabase diff --git a/storage/locks_test.go b/storage/locks_test.go index ee1907a61ea..f919e9667dc 100644 --- a/storage/locks_test.go +++ b/storage/locks_test.go @@ -62,7 +62,7 @@ func TestHeldOneLock(t *testing.T) { t.Run("holds different lock", func(t *testing.T) { lctx := lockManager.NewContext() defer lctx.Release() - err := lctx.AcquireLock(LockBootstrapping) + err := lctx.AcquireLock(LockInsertInstanceParams) require.NoError(t, err) held, msg := HeldOneLock(lctx, LockInsertBlock, LockFinalizeBlock) diff --git a/storage/operation/instance_params.go b/storage/operation/instance_params.go index 9fb67289d79..591faddc199 100644 --- a/storage/operation/instance_params.go +++ b/storage/operation/instance_params.go @@ -15,13 +15,13 @@ import ( // - This function is intended to be called exactly once during bootstrapping. // Overwrites are prevented by an explicit existence check; if data is already present, error is returned. // - To guarantee atomicity of existence-check plus database write, we require the caller to acquire -// the [storage.LockBootstrapping] lock and hold it until the database write has been committed. +// the [storage.LockInsertInstanceParams] lock and hold it until the database write has been committed. // // Expected errors during normal operations: // - [storage.ErrAlreadyExists] if instance params have already been stored. func InsertInstanceParams(lctx lockctx.Proof, rw storage.ReaderBatchWriter, params flow.VersionedInstanceParams) error { - if !lctx.HoldsLock(storage.LockBootstrapping) { - return fmt.Errorf("missing required lock: %s", storage.LockBootstrapping) + if !lctx.HoldsLock(storage.LockInsertInstanceParams) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertInstanceParams) } key := MakePrefix(codeInstanceParams) exist, err := KeyExists(rw.GlobalReader(), key) diff --git a/storage/operation/instance_params_test.go b/storage/operation/instance_params_test.go index 26b0c88fdd2..76be031b2ad 100644 --- a/storage/operation/instance_params_test.go +++ b/storage/operation/instance_params_test.go @@ -20,7 +20,7 @@ import ( // 1. InstanceParams can be inserted and retrieved successfully. // 2. Overwrite attempts return storage.ErrAlreadyExists and do not change the // persisted value. -// 3. Writes without holding LockBootstrapping are denied. +// 3. Writes without holding LockInsertInstanceParams are denied. 
func TestInstanceParams_InsertRetrieve(t *testing.T) { lockManager := storage.NewTestingLockManager() enc, err := datastore.NewVersionedInstanceParams( @@ -34,7 +34,7 @@ func TestInstanceParams_InsertRetrieve(t *testing.T) { t.Run("happy path: insert and retrieve", func(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lctx := lockManager.NewContext() - require.NoError(t, lctx.AcquireLock(storage.LockBootstrapping)) + require.NoError(t, lctx.AcquireLock(storage.LockInsertInstanceParams)) defer lctx.Release() err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { @@ -52,7 +52,7 @@ func TestInstanceParams_InsertRetrieve(t *testing.T) { t.Run("overwrite returns ErrAlreadyExists", func(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lctx := lockManager.NewContext() - require.NoError(t, lctx.AcquireLock(storage.LockBootstrapping)) + require.NoError(t, lctx.AcquireLock(storage.LockInsertInstanceParams)) err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.InsertInstanceParams(lctx, rw, *enc) @@ -70,7 +70,7 @@ func TestInstanceParams_InsertRetrieve(t *testing.T) { require.NoError(t, err) lctx2 := lockManager.NewContext() - require.NoError(t, lctx2.AcquireLock(storage.LockBootstrapping)) + require.NoError(t, lctx2.AcquireLock(storage.LockInsertInstanceParams)) defer lctx2.Release() err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { @@ -94,7 +94,7 @@ func TestInstanceParams_InsertRetrieve(t *testing.T) { err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.InsertInstanceParams(lctx, rw, *enc) }) - require.ErrorContains(t, err, storage.LockBootstrapping) + require.ErrorContains(t, err, storage.LockInsertInstanceParams) }) }) } From 41a54b8bd6db9d3499cfa5d1b6f1db39fb0a1226 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 15 Oct 2025 16:05:50 -0700 Subject: [PATCH 76/87] fix lint --- module/pruner/pruners/chunk_data_pack_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/module/pruner/pruners/chunk_data_pack_test.go b/module/pruner/pruners/chunk_data_pack_test.go index 5be26748f02..23d8e9a2b12 100644 --- a/module/pruner/pruners/chunk_data_pack_test.go +++ b/module/pruner/pruners/chunk_data_pack_test.go @@ -31,9 +31,9 @@ func TestChunkDataPackPruner(t *testing.T) { // store the chunks cdp1, result1 := unittest.ChunkDataPacksFixtureAndResult() - err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return results.BatchStore(result1, rw) - }) + })) require.NoError(t, unittest.WithLock(t, lockManager, storage.LockIndexChunkDataPackByChunkID, func(lctx lockctx.Context) error { storeFunc, err := chunks.Store(cdp1) if err != nil { @@ -52,7 +52,7 @@ func TestChunkDataPackPruner(t *testing.T) { })) // verify they are pruned - _, err = chunks.ByChunkID(cdp1[0].ChunkID) + _, err := chunks.ByChunkID(cdp1[0].ChunkID) require.True(t, errors.Is(err, storage.ErrNotFound), fmt.Errorf("expected ErrNotFound but got %v", err)) // prune again should not return error From a35a911dde4c21af7fe3cab4f1080630955ecad0 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 15 Oct 2025 16:19:53 -0700 Subject: [PATCH 77/87] remove LockBootstrapping --- state/protocol/badger/state.go | 5 ----- storage/locks.go | 6 +----- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git 
a/state/protocol/badger/state.go b/state/protocol/badger/state.go index 19e4ad9c423..94a0651905f 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -128,11 +128,6 @@ func Bootstrap( if err != nil { return nil, err } - - err = lctx.AcquireLock(storage.LockBootstrapping) - if err != nil { - return nil, err - } err = lctx.AcquireLock(storage.LockInsertSafetyData) if err != nil { return nil, err diff --git a/storage/locks.go b/storage/locks.go index 18dfdd9bfe0..ce5fd50a661 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -38,8 +38,6 @@ const ( // LockInsertChunkDataPack protects the insertion of chunk data packs (not yet used anywhere LockInsertChunkDataPack = "lock_insert_chunk_data_pack" LockIndexCollectionsByBlock = "lock_index_collections_by_block" - // LockBootstrapping protects data that is *exclusively* written during bootstrapping. - LockBootstrapping = "lock_bootstrapping" // LockIndexChunkDataPackByChunkID protects the insertion of chunk data packs LockIndexChunkDataPackByChunkID = "lock_index_chunk_data_pack_by_chunk_id" // LockInsertTransactionResultErrMessage protects the insertion of transaction result error messages @@ -72,7 +70,6 @@ func Locks() []string { LockInsertInstanceParams, LockInsertChunkDataPack, LockIndexCollectionsByBlock, - LockBootstrapping, LockIndexChunkDataPackByChunkID, LockInsertTransactionResultErrMessage, LockInsertLightTransactionResult, @@ -129,8 +126,7 @@ func makeLockPolicy() lockctx.Policy { // module/executiondatasync/optimistic_sync/persisters/block.go#BlockPersister.Persist Add(LockInsertCollection, LockInsertEvent). Add(LockInsertEvent, LockInsertLightTransactionResult). - Add(LockFinalizeBlock, LockBootstrapping). - Add(LockBootstrapping, LockInsertSafetyData). + Add(LockFinalizeBlock, LockInsertSafetyData). Add(LockInsertSafetyData, LockInsertLivenessData). Add(LockInsertOrFinalizeClusterBlock, LockInsertSafetyData). From 6dd240132c5e9ba2ee02dcd5f5040f30202223f6 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 15 Oct 2025 16:27:54 -0700 Subject: [PATCH 78/87] fix tests and locks policy --- .../indexer/indexer_core.go | 1 - storage/chunk_data_packs.go | 2 +- storage/locks.go | 19 ++++++------- storage/operation/stats_test.go | 28 +++++++++---------- storage/store/chunk_data_packs.go | 2 +- 5 files changed, 23 insertions(+), 29 deletions(-) diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index f00a40726dd..1525145732e 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -173,7 +173,6 @@ func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEnti err = storage.WithLocks(c.lockManager, []string{ storage.LockInsertEvent, storage.LockInsertLightTransactionResult, - storage.LockInsertLightTransactionResult, storage.LockIndexScheduledTransaction, }, func(lctx lockctx.Context) error { diff --git a/storage/chunk_data_packs.go b/storage/chunk_data_packs.go index 01b8e368b27..3dc94cf4e72 100644 --- a/storage/chunk_data_packs.go +++ b/storage/chunk_data_packs.go @@ -41,7 +41,7 @@ type ChunkDataPacks interface { // to chunk data pack ID in the protocol database. This mapping persists that the Execution Node committed to the result // represented by this chunk data pack. 
This function returns [storage.ErrDataMismatch] when a _different_ chunk data pack // ID for the same chunk ID has already been stored (changing which result an execution Node committed to would be a - // slashable protocol violation). The caller must acquire [storage.LockInsertChunkDataPack] and hold it until the database + // slashable protocol violation). The caller must acquire [storage.LockIndexChunkDataPackByChunkID] and hold it until the database // write has been committed. // - error: No error should be returned during normal operation. Any error indicates a failure in the first phase. Store(cs []*flow.ChunkDataPack) (func(lctx lockctx.Proof, protocolDBBatch ReaderBatchWriter) error, error) diff --git a/storage/locks.go b/storage/locks.go index ce5fd50a661..64e956a1e14 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -34,9 +34,7 @@ const ( // LockInsertCollection protects the insertion of collections. LockInsertCollection = "lock_insert_collection" // LockInsertInstanceParams protects data that is *exclusively* written during bootstrapping. - LockInsertInstanceParams = "lock_insert_instance_params" - // LockInsertChunkDataPack protects the insertion of chunk data packs (not yet used anywhere - LockInsertChunkDataPack = "lock_insert_chunk_data_pack" + LockInsertInstanceParams = "lock_insert_instance_params" LockIndexCollectionsByBlock = "lock_index_collections_by_block" // LockIndexChunkDataPackByChunkID protects the insertion of chunk data packs LockIndexChunkDataPackByChunkID = "lock_index_chunk_data_pack_by_chunk_id" @@ -68,7 +66,6 @@ func Locks() []string { LockInsertCollection, LockInsertLightTransactionResult, LockInsertInstanceParams, - LockInsertChunkDataPack, LockIndexCollectionsByBlock, LockIndexChunkDataPackByChunkID, LockInsertTransactionResultErrMessage, @@ -110,8 +107,7 @@ func makeLockPolicy() lockctx.Policy { // EN to save execution result // engine/execution/state/state.go#state.saveExecutionResults - Add(LockIndexChunkDataPackByChunkID, LockInsertOwnReceipt). - Add(LockInsertChunkDataPack, LockInsertEvent). + Add(LockIndexChunkDataPackByChunkID, LockInsertEvent). Add(LockInsertEvent, LockInsertServiceEvent). Add(LockInsertServiceEvent, LockInsertAndIndexTxResult). Add(LockInsertAndIndexTxResult, LockInsertOwnReceipt). @@ -126,14 +122,15 @@ func makeLockPolicy() lockctx.Policy { // module/executiondatasync/optimistic_sync/persisters/block.go#BlockPersister.Persist Add(LockInsertCollection, LockInsertEvent). Add(LockInsertEvent, LockInsertLightTransactionResult). + Add(LockInsertLightTransactionResult, LockInsertTransactionResultErrMessage). + + // AN execution state sync + // module/state_synchronization/indexer/indexer_core.go#IndexerCore.IndexBlockData + Add(LockInsertEvent, LockInsertLightTransactionResult). // it's ok to have duplication + Add(LockInsertLightTransactionResult, LockIndexScheduledTransaction). Add(LockFinalizeBlock, LockInsertSafetyData). Add(LockInsertSafetyData, LockInsertLivenessData). Add(LockInsertOrFinalizeClusterBlock, LockInsertSafetyData). - - // module/executiondatasync/optimistic_sync/persisters/block.go#Persist - Add(LockInsertCollection, LockInsertLightTransactionResult). - Add(LockInsertLightTransactionResult, LockInsertTransactionResultErrMessage). - Add(LockInsertLightTransactionResult, LockIndexScheduledTransaction). 
Build() } diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go index a07ddcc2b70..95ccab8cee2 100644 --- a/storage/operation/stats_test.go +++ b/storage/operation/stats_test.go @@ -28,22 +28,20 @@ func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { } // insert 100 chunk data packs - return unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { - for i := 0; i < 100; i++ { - collectionID := unittest.IdentifierFixture() - cdp := &storage.StoredChunkDataPack{ - ChunkID: unittest.IdentifierFixture(), - StartState: unittest.StateCommitmentFixture(), - Proof: []byte{'p'}, - CollectionID: collectionID, - } - err := operation.InsertChunkDataPack(rw, cdp.ID(), cdp) - if err != nil { - return err - } + for i := 0; i < 100; i++ { + collectionID := unittest.IdentifierFixture() + cdp := &storage.StoredChunkDataPack{ + ChunkID: unittest.IdentifierFixture(), + StartState: unittest.StateCommitmentFixture(), + Proof: []byte{'p'}, + CollectionID: collectionID, } - return nil - }) + err := operation.InsertChunkDataPack(rw, cdp.ID(), cdp) + if err != nil { + return err + } + } + return nil }) }) require.NoError(t, err) diff --git a/storage/store/chunk_data_packs.go b/storage/store/chunk_data_packs.go index 1aa7adf5b9f..f622e8e9a57 100644 --- a/storage/store/chunk_data_packs.go +++ b/storage/store/chunk_data_packs.go @@ -116,7 +116,7 @@ func NewChunkDataPacks(collector module.CacheMetrics, db storage.DB, cdpStorage // to chunk data pack ID in the protocol database. This mapping persists that the Execution Node committed to the result // represented by this chunk data pack. This function returns [storage.ErrDataMismatch] when a _different_ chunk data pack // ID for the same chunk ID has already been stored (changing which result an execution Node committed to would be a -// slashable protocol violation). The caller must acquire [storage.LockInsertChunkDataPack] and hold it until the database +// slashable protocol violation). The caller must acquire [storage.LockIndexChunkDataPackByChunkID] and hold it until the database // write has been committed. // - error: No error should be returned during normal operation. Any error indicates a failure in the first phase. func (ch *ChunkDataPacks) Store(cs []*flow.ChunkDataPack) ( From 6a91a9e52bb7024c0a3eb796b46b716d1b420ef5 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 15 Oct 2025 16:34:46 -0700 Subject: [PATCH 79/87] fix tests --- module/state_synchronization/indexer/indexer_core_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/module/state_synchronization/indexer/indexer_core_test.go b/module/state_synchronization/indexer/indexer_core_test.go index 4b0da2b03f5..b5e9a44fc47 100644 --- a/module/state_synchronization/indexer/indexer_core_test.go +++ b/module/state_synchronization/indexer/indexer_core_test.go @@ -257,10 +257,10 @@ func TestExecutionState_IndexBlockData(t *testing.T) { t.Run("Index AllTheThings", func(t *testing.T) { test := newIndexCoreTest(t, g, blocks, tf.ExecutionDataEntity()).initIndexer() - test.events.On("BatchStore", mock.Anything, []flow.EventsList{tf.ExpectedEvents}, mock.Anything). - Return(func(blockID flow.Identifier, events []flow.EventsList, batch storage.ReaderBatchWriter) error { + test.events.On("BatchStore", mock.Anything, blockID, []flow.EventsList{tf.ExpectedEvents}, mock.Anything). 
+ Return(func(lctx lockctx.Proof, blockID flow.Identifier, events []flow.EventsList, batch storage.ReaderBatchWriter) error { + require.True(t, lctx.HoldsLock(storage.LockInsertEvent)) require.NotNil(t, batch) - // Events BatchStore doesn't require specific locks, but we validate the batch is provided return nil }) test.results.On("BatchStore", mock.Anything, mock.Anything, blockID, tf.ExpectedResults). From 87758a95dc4db6a51a18ea9ff8c1ea9f21872629 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 15 Oct 2025 17:10:34 -0700 Subject: [PATCH 80/87] refactor lock policys --- engine/access/ingestion/engine.go | 5 +- .../ingestion2/finalized_block_processor.go | 4 +- engine/execution/state/bootstrap/bootstrap.go | 71 ++++------ engine/execution/state/state.go | 11 +- .../optimistic_sync/persisters/block.go | 7 +- .../indexer/indexer_core.go | 6 +- state/cluster/badger/state.go | 134 ++++++++---------- state/protocol/badger/state.go | 120 +++++++--------- storage/locks.go | 105 +++++++++----- 9 files changed, 217 insertions(+), 246 deletions(-) diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 2e2a25b4cfc..ca4de4d4850 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -379,10 +379,7 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { // TODO: substitute an indexer module as layer between engine and storage // index the block storage with each of the collection guarantee - err := storage.WithLocks(e.lockManager, []string{ - storage.LockIndexCollectionsByBlock, - storage.LockIndexExecutionResult, - }, func(lctx lockctx.Context) error { + err := storage.WithLocks(e.lockManager, storage.LockGroupAccessFinalizingBlock, func(lctx lockctx.Context) error { return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // requires [storage.LockIndexCollectionsByBlock] lock err := e.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go index cb352c0d75b..b139dd05035 100644 --- a/engine/access/ingestion2/finalized_block_processor.go +++ b/engine/access/ingestion2/finalized_block_processor.go @@ -153,8 +153,8 @@ func (p *FinalizedBlockProcessor) processFinalizedBlockJobCallback( // // No errors are expected during normal operations. 
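The WithLocks helper adopted across these call sites is not itself part of the excerpted diffs, so the following is a rough, assumed sketch of the behaviour the callers above rely on: acquire each lock of the group in order on a fresh context, run the callback while holding them, and release on return.

	func withLocksSketch(manager lockctx.Manager, locks []string, fn func(lockctx.Context) error) error {
		lctx := manager.NewContext()
		defer lctx.Release() // locks are released when the callback returns, on success or error
		for _, lock := range locks {
			// acquisition order must respect the DAG policy, which is why the lock groups
			// in storage/locks.go are declared in policy order
			if err := lctx.AcquireLock(lock); err != nil {
				return fmt.Errorf("failed to acquire lock %s: %w", lock, err)
			}
		}
		return fn(lctx)
	}

A lock group such as storage.LockGroupAccessFinalizingBlock is then just an ordered []string, so indexFinalizedBlock below reduces to one WithLocks call wrapping a single WithReaderBatchWriter batch.
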
func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { - err := storage.WithLocks(p.lockManager, - []string{storage.LockIndexCollectionsByBlock, storage.LockIndexExecutionResult}, func(lctx lockctx.Context) error { + err := storage.WithLocks(p.lockManager, storage.LockGroupAccessFinalizingBlock, + func(lctx lockctx.Context) error { return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // require storage.LockIndexCollectionsByBlock err := p.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees)) diff --git a/engine/execution/state/bootstrap/bootstrap.go b/engine/execution/state/bootstrap/bootstrap.go index b129edefaa2..098726aee37 100644 --- a/engine/execution/state/bootstrap/bootstrap.go +++ b/engine/execution/state/bootstrap/bootstrap.go @@ -98,47 +98,38 @@ func (b *Bootstrapper) BootstrapExecutionDatabase( rootSeal *flow.Seal, ) error { - lctx := manager.NewContext() - defer lctx.Release() - err := lctx.AcquireLock(storage.LockIndexExecutionResult) - if err != nil { - return err - } - err = lctx.AcquireLock(storage.LockIndexStateCommitment) - if err != nil { - return err - } - commit := rootSeal.FinalState - return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - w := rw.Writer() - err := operation.UpdateExecutedBlock(w, rootSeal.BlockID) - if err != nil { - return fmt.Errorf("could not index initial genesis execution block: %w", err) - } - - err = operation.IndexOwnOrSealedExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID) - if err != nil { - return fmt.Errorf("could not index result for root result: %w", err) - } - - err = operation.IndexStateCommitment(lctx, rw, flow.ZeroID, commit) - if err != nil { - return fmt.Errorf("could not index void state commitment: %w", err) - } - - err = operation.IndexStateCommitment(lctx, rw, rootSeal.BlockID, commit) - if err != nil { - return fmt.Errorf("could not index genesis state commitment: %w", err) - } - - snapshots := make([]*snapshot.ExecutionSnapshot, 0) - err = operation.InsertExecutionStateInteractions(w, rootSeal.BlockID, snapshots) - if err != nil { - return fmt.Errorf("could not bootstrap execution state interactions: %w", err) - } - - return nil + return storage.WithLocks(manager, storage.LockGroupExecutionBootstrap, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + w := rw.Writer() + err := operation.UpdateExecutedBlock(w, rootSeal.BlockID) + if err != nil { + return fmt.Errorf("could not index initial genesis execution block: %w", err) + } + + err = operation.IndexOwnOrSealedExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID) + if err != nil { + return fmt.Errorf("could not index result for root result: %w", err) + } + + err = operation.IndexStateCommitment(lctx, rw, flow.ZeroID, commit) + if err != nil { + return fmt.Errorf("could not index void state commitment: %w", err) + } + + err = operation.IndexStateCommitment(lctx, rw, rootSeal.BlockID, commit) + if err != nil { + return fmt.Errorf("could not index genesis state commitment: %w", err) + } + + snapshots := make([]*snapshot.ExecutionSnapshot, 0) + err = operation.InsertExecutionStateInteractions(w, rootSeal.BlockID, snapshots) + if err != nil { + return fmt.Errorf("could not bootstrap execution state interactions: %w", err) + } + + return nil + }) }) } diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 73068a90f24..3904d251cfc 100644 
--- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -424,16 +424,7 @@ func (s *state) saveExecutionResults( return fmt.Errorf("can not store chunk data packs for block ID: %v: %w", blockID, err) } - locks := []string{ - storage.LockIndexChunkDataPackByChunkID, - storage.LockInsertEvent, - storage.LockInsertServiceEvent, - storage.LockInsertAndIndexTxResult, - storage.LockInsertOwnReceipt, - storage.LockIndexExecutionResult, - storage.LockIndexStateCommitment, - } - return storage.WithLocks(s.lockManager, locks, func(lctx lockctx.Context) error { + return storage.WithLocks(s.lockManager, storage.LockGroupExecutionSaveExecutionResult, func(lctx lockctx.Context) error { // The batch update writes all execution result data (except chunk data pack!) atomically. // Since the chunk data pack itself was already stored in a separate database (s.chunkDataPacks) // during the previous step, this step stores only the mapping between chunk ID diff --git a/module/executiondatasync/optimistic_sync/persisters/block.go b/module/executiondatasync/optimistic_sync/persisters/block.go index 61916c22f9d..65f58cfab4b 100644 --- a/module/executiondatasync/optimistic_sync/persisters/block.go +++ b/module/executiondatasync/optimistic_sync/persisters/block.go @@ -60,12 +60,7 @@ func (p *BlockPersister) Persist() error { p.log.Debug().Msg("started to persist execution data") start := time.Now() - err := storage.WithLocks(p.lockManager, []string{ - storage.LockInsertCollection, - storage.LockInsertEvent, - storage.LockInsertLightTransactionResult, - storage.LockInsertTransactionResultErrMessage, - }, func(lctx lockctx.Context) error { + err := storage.WithLocks(p.lockManager, storage.LockGroupAccessOptimisticSyncBlockPersist, func(lctx lockctx.Context) error { return p.protocolDB.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { for _, persister := range p.persisterStores { if err := persister.Persist(lctx, batch); err != nil { diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index 1525145732e..d27b59dd107 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -170,11 +170,7 @@ func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEnti return fmt.Errorf("could not collect scheduled transaction data: %w", err) } - err = storage.WithLocks(c.lockManager, []string{ - storage.LockInsertEvent, - storage.LockInsertLightTransactionResult, - storage.LockIndexScheduledTransaction, - }, + err = storage.WithLocks(c.lockManager, storage.LockGroupAccessStateSyncIndexBlockData, func(lctx lockctx.Context) error { return c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := c.events.BatchStore(lctx, data.BlockID, []flow.EventsList{events}, rw) diff --git a/state/cluster/badger/state.go b/state/cluster/badger/state.go index 10562e2ae7e..3c47f28407a 100644 --- a/state/cluster/badger/state.go +++ b/state/cluster/badger/state.go @@ -29,86 +29,76 @@ var _ cluster.State = (*State)(nil) // The genesis block must have height 0, a parent hash of 32 zero bytes, // and an empty collection as payload. 
func Bootstrap(db storage.DB, lockManager lockctx.Manager, stateRoot *StateRoot) (*State, error) { - lctx := lockManager.NewContext() - defer lctx.Release() - err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock `storage.LockInsertOrFinalizeClusterBlock` for inserting cluster block: %w", err) - } - err = lctx.AcquireLock(storage.LockInsertSafetyData) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock `storage.LockInsertSafetyData` for inserting safety data: %w", err) - } - err = lctx.AcquireLock(storage.LockInsertLivenessData) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock `storage.LockInsertLivenessData` for inserting liveness data: %w", err) - } - isBootstrapped, err := IsBootstrapped(db, stateRoot.ClusterID()) - if err != nil { - return nil, fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err) - } - if isBootstrapped { - return nil, fmt.Errorf("expected empty cluster state for cluster ID %s", stateRoot.ClusterID()) - } - state := newState(db, stateRoot.ClusterID(), stateRoot.EpochCounter()) - - genesis := stateRoot.Block() - rootQC := stateRoot.QC() - - // bootstrap cluster state - err = state.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - chainID := genesis.ChainID - // insert the block - by protocol convention, the genesis block does not have a proposer signature, which must be handled by the implementation - proposal, err := clustermodel.NewRootProposal( - clustermodel.UntrustedProposal{ - Block: *genesis, - ProposerSigData: nil, - }, - ) - if err != nil { - return fmt.Errorf("could not build root cluster proposal: %w", err) - } - err = operation.InsertClusterBlock(lctx, rw, proposal) - if err != nil { - return fmt.Errorf("could not insert genesis block: %w", err) - } - // insert block height -> ID mapping - err = operation.IndexClusterBlockHeight(lctx, rw, chainID, genesis.Height, genesis.ID()) - if err != nil { - return fmt.Errorf("failed to map genesis block height to block: %w", err) - } - // insert boundary - err = operation.BootstrapClusterFinalizedHeight(lctx, rw, chainID, genesis.Height) - if err != nil { - return fmt.Errorf("could not insert genesis boundary: %w", err) - } - - safetyData := &hotstuff.SafetyData{ - LockedOneChainView: genesis.View, - HighestAcknowledgedView: genesis.View, - } + var state *State - livenessData := &hotstuff.LivenessData{ - CurrentView: genesis.View + 1, // starting view for hotstuff - NewestQC: rootQC, - } - // insert safety data - err = operation.UpsertSafetyData(lctx, rw, chainID, safetyData) + err := storage.WithLocks(lockManager, storage.LockGroupCollectionBootstrapClusterState, func(lctx lockctx.Context) error { + isBootstrapped, err := IsBootstrapped(db, stateRoot.ClusterID()) if err != nil { - return fmt.Errorf("could not insert safety data: %w", err) + return fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err) } - // insert liveness data - err = operation.UpsertLivenessData(lctx, rw, chainID, livenessData) - if err != nil { - return fmt.Errorf("could not insert liveness data: %w", err) + if isBootstrapped { + return fmt.Errorf("expected empty cluster state for cluster ID %s", stateRoot.ClusterID()) } - - return nil + state = newState(db, stateRoot.ClusterID(), stateRoot.EpochCounter()) + + genesis := stateRoot.Block() + rootQC := stateRoot.QC() + + // bootstrap cluster state + return state.db.WithReaderBatchWriter(func(rw 
storage.ReaderBatchWriter) error { + chainID := genesis.ChainID + // insert the block - by protocol convention, the genesis block does not have a proposer signature, which must be handled by the implementation + proposal, err := clustermodel.NewRootProposal( + clustermodel.UntrustedProposal{ + Block: *genesis, + ProposerSigData: nil, + }, + ) + if err != nil { + return fmt.Errorf("could not build root cluster proposal: %w", err) + } + err = operation.InsertClusterBlock(lctx, rw, proposal) + if err != nil { + return fmt.Errorf("could not insert genesis block: %w", err) + } + // insert block height -> ID mapping + err = operation.IndexClusterBlockHeight(lctx, rw, chainID, genesis.Height, genesis.ID()) + if err != nil { + return fmt.Errorf("failed to map genesis block height to block: %w", err) + } + // insert boundary + err = operation.BootstrapClusterFinalizedHeight(lctx, rw, chainID, genesis.Height) + if err != nil { + return fmt.Errorf("could not insert genesis boundary: %w", err) + } + + safetyData := &hotstuff.SafetyData{ + LockedOneChainView: genesis.View, + HighestAcknowledgedView: genesis.View, + } + + livenessData := &hotstuff.LivenessData{ + CurrentView: genesis.View + 1, // starting view for hotstuff + NewestQC: rootQC, + } + // insert safety data + err = operation.UpsertSafetyData(lctx, rw, chainID, safetyData) + if err != nil { + return fmt.Errorf("could not insert safety data: %w", err) + } + // insert liveness data + err = operation.UpsertLivenessData(lctx, rw, chainID, livenessData) + if err != nil { + return fmt.Errorf("could not insert liveness data: %w", err) + } + + return nil + }) }) + if err != nil { return nil, fmt.Errorf("bootstrapping failed: %w", err) } - return state, nil } diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index 94a0651905f..7fecea527e0 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -107,36 +107,6 @@ func Bootstrap( root protocol.Snapshot, options ...BootstrapConfigOptions, ) (*State, error) { - // we acquire both [storage.LockInsertBlock] and [storage.LockFinalizeBlock] because - // the bootstrapping process inserts and finalizes blocks (all blocks within the - // trusted root snapshot are presumed to be finalized) - lctx := lockManager.NewContext() - defer lctx.Release() - err := lctx.AcquireLock(storage.LockInsertInstanceParams) - if err != nil { - return nil, err - } - err = lctx.AcquireLock(storage.LockIndexExecutionResult) - if err != nil { - return nil, err - } - err = lctx.AcquireLock(storage.LockInsertBlock) - if err != nil { - return nil, err - } - err = lctx.AcquireLock(storage.LockFinalizeBlock) - if err != nil { - return nil, err - } - err = lctx.AcquireLock(storage.LockInsertSafetyData) - if err != nil { - return nil, err - } - err = lctx.AcquireLock(storage.LockInsertLivenessData) - if err != nil { - return nil, err - } - config := defaultBootstrapConfig() for _, opt := range options { opt(config) @@ -171,55 +141,67 @@ func Bootstrap( // (The lowest block in sealing segment is the last sealed block, but we don't use that here.) 
lastFinalized := segment.Finalized() // highest block in sealing segment; finalized by protocol convention - // bootstrap the sealing segment - // creating sealed root block with the rootResult - // creating finalized root block with lastFinalized - err = bootstrapSealingSegment(lctx, db, blocks, qcs, segment, lastFinalized, rootSeal) - if err != nil { - return nil, fmt.Errorf("could not bootstrap sealing chain segment blocks: %w", err) - } + var state *State - err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - // bootstrap dynamic protocol state - err = bootstrapProtocolState(lctx, rw, segment, root.Params(), epochProtocolStateSnapshots, protocolKVStoreSnapshots, setups, commits, !config.SkipNetworkAddressValidation) + // we acquire both [storage.LockInsertBlock] and [storage.LockFinalizeBlock] because + // the bootstrapping process inserts and finalizes blocks (all blocks within the + // trusted root snapshot are presumed to be finalized) + err = storage.WithLocks(lockManager, storage.LockGroupProtocolStateBootstrap, func(lctx lockctx.Context) error { + + // bootstrap the sealing segment + // creating sealed root block with the rootResult + // creating finalized root block with lastFinalized + err = bootstrapSealingSegment(lctx, db, blocks, qcs, segment, lastFinalized, rootSeal) + if err != nil { + return fmt.Errorf("could not bootstrap sealing chain segment blocks: %w", err) + } + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // bootstrap dynamic protocol state + err = bootstrapProtocolState(lctx, rw, segment, root.Params(), epochProtocolStateSnapshots, protocolKVStoreSnapshots, setups, commits, !config.SkipNetworkAddressValidation) + if err != nil { + return fmt.Errorf("could not bootstrap protocol state: %w", err) + } + + // initialize version beacon + err = boostrapVersionBeacon(rw, root) + if err != nil { + return fmt.Errorf("could not bootstrap version beacon: %w", err) + } + + return nil + }) if err != nil { - return fmt.Errorf("could not bootstrap protocol state: %w", err) + return fmt.Errorf("bootstrapping failed: %w", err) } - // initialize version beacon - err = boostrapVersionBeacon(rw, root) + // CAUTION: INSERT FINALIZED HEIGHT must be LAST, because we use its existence in the database + // as indicator that the protocol database has been bootstrapped successfully. Before we write the + // final piece of data to complete the bootstrapping, we query the current state of the database + // (sanity check) to ensure that it is still considered as not properly bootstrapped. 
+ isBootstrapped, err = IsBootstrapped(db) + if err != nil { + return fmt.Errorf("determining whether database is successfully bootstrapped failed with unexpected exception: %w", err) + } + if isBootstrapped { // we haven't written the latest finalized height yet, so this vaule must be false + return fmt.Errorf("sanity check failed: while bootstrapping has not yet completed, the implementation already considers the protocol state as successfully bootstrapped") + } + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // initialize the current protocol state height/view pointers + return bootstrapStatePointers(lctx, rw, root) + }) if err != nil { - return fmt.Errorf("could not bootstrap version beacon: %w", err) + return fmt.Errorf("could not bootstrap height/view pointers: %w", err) } + state, err = OpenState(metrics, db, lockManager, headers, seals, results, blocks, qcs, setups, commits, epochProtocolStateSnapshots, protocolKVStoreSnapshots, versionBeacons) + if err != nil { + return fmt.Errorf("bootstrapping failed, because the resulting database state is rejected: %w", err) + } return nil }) if err != nil { - return nil, fmt.Errorf("bootstrapping failed: %w", err) - } - - // CAUTION: INSERT FINALIZED HEIGHT must be LAST, because we use its existence in the database - // as indicator that the protocol database has been bootstrapped successfully. Before we write the - // final piece of data to complete the bootstrapping, we query the current state of the database - // (sanity check) to ensure that it is still considered as not properly bootstrapped. - isBootstrapped, err = IsBootstrapped(db) - if err != nil { - return nil, fmt.Errorf("determining whether database is successfully bootstrapped failed with unexpected exception: %w", err) - } - if isBootstrapped { // we haven't written the latest finalized height yet, so this vaule must be false - return nil, fmt.Errorf("sanity check failed: while bootstrapping has not yet completed, the implementation already considers the protocol state as successfully bootstrapped") - } - err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - // initialize the current protocol state height/view pointers - return bootstrapStatePointers(lctx, rw, root) - }) - if err != nil { - return nil, fmt.Errorf("could not bootstrap height/view pointers: %w", err) - } - - state, err := OpenState(metrics, db, lockManager, headers, seals, results, blocks, qcs, setups, commits, epochProtocolStateSnapshots, protocolKVStoreSnapshots, versionBeacons) - if err != nil { - return nil, fmt.Errorf("bootstrapping failed, because the resulting database state is rejected: %w", err) + return nil, err } return state, nil } diff --git a/storage/locks.go b/storage/locks.go index 64e956a1e14..91a21a0cf4d 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -79,6 +79,62 @@ func Locks() []string { type LockManager = lockctx.Manager +var LockGroupAccessStateSyncIndexBlockData = []string{ + LockInsertEvent, + LockInsertLightTransactionResult, + LockIndexScheduledTransaction, +} + +var LockGroupExecutionBootstrap = []string{ + LockIndexExecutionResult, + LockIndexStateCommitment, +} + +var LockGroupExecutionSaveExecutionResult = []string{ + LockIndexChunkDataPackByChunkID, + LockInsertEvent, + LockInsertServiceEvent, + LockInsertAndIndexTxResult, + LockInsertOwnReceipt, + LockIndexExecutionResult, + LockIndexStateCommitment, +} + +var LockGroupAccessFinalizingBlock = []string{ + LockIndexCollectionsByBlock, + LockIndexExecutionResult, +} + +var 
LockGroupAccessOptimisticSyncBlockPersist = []string{ + LockInsertCollection, + LockInsertEvent, + LockInsertLightTransactionResult, + LockInsertTransactionResultErrMessage, +} + +var LockGroupCollectionBootstrapClusterState = []string{ + LockInsertOrFinalizeClusterBlock, + LockInsertSafetyData, + LockInsertLivenessData, +} + +var LockGroupProtocolStateBootstrap = []string{ + LockInsertInstanceParams, + LockIndexExecutionResult, + LockInsertBlock, + LockFinalizeBlock, + LockInsertSafetyData, + LockInsertLivenessData, +} + +// addLocks adds a chain of locks to the builder in the order they appear in the locks slice. +// This creates a directed acyclic graph where each lock can be acquired after the previous one. +func addLocks(builder lockctx.DAGPolicyBuilder, locks []string) { + for i := 0; i < len(locks)-1; i++ { + builder.Add(locks[i], locks[i+1]) + } +} + // makeLockPolicy constructs the policy used by the storage layer to prevent deadlocks. // We use a policy defined by a directed acyclic graph, where vertices represent named locks. // A directed edge between two vertices A, B means: I can acquire B next after acquiring A. @@ -94,44 +150,17 @@ type LockManager = lockctx.Manager // // This function will panic if a policy is created which does not prevent deadlocks. func makeLockPolicy() lockctx.Policy { - return lockctx.NewDAGPolicyBuilder(). - // for protocol to Bootstrap, during bootstrapping, - // we need to insert and finalize - // state/protocol/badger/state.go#Bootstrap - Add(LockInsertInstanceParams, LockIndexExecutionResult). - - // EN to bootstrap - // engine/execution/state/bootstrap/bootstrap.go#Bootstrapper.BootstrapExecutionDatabase - Add(LockIndexExecutionResult, LockInsertBlock). - Add(LockInsertBlock, LockFinalizeBlock). - - // EN to save execution result - // engine/execution/state/state.go#state.saveExecutionResults - Add(LockIndexChunkDataPackByChunkID, LockInsertEvent). - Add(LockInsertEvent, LockInsertServiceEvent). - Add(LockInsertServiceEvent, LockInsertAndIndexTxResult). - Add(LockInsertAndIndexTxResult, LockInsertOwnReceipt). - Add(LockInsertOwnReceipt, LockIndexExecutionResult). - Add(LockIndexExecutionResult, LockIndexStateCommitment). - - // AN ingestion engine processing finalized block - // engine/access/ingestion/engine.go#Engine.processFinalizedBlock - Add(LockIndexCollectionsByBlock, LockIndexExecutionResult). - - // AN optimistic syncing - // module/executiondatasync/optimistic_sync/persisters/block.go#BlockPersister.Persist - Add(LockInsertCollection, LockInsertEvent). - Add(LockInsertEvent, LockInsertLightTransactionResult). - Add(LockInsertLightTransactionResult, LockInsertTransactionResultErrMessage). - - // AN execution state sync - // module/state_synchronization/indexer/indexer_core.go#IndexerCore.IndexBlockData - Add(LockInsertEvent, LockInsertLightTransactionResult). // it's ok to have duplication - Add(LockInsertLightTransactionResult, LockIndexScheduledTransaction). - Add(LockFinalizeBlock, LockInsertSafetyData). - Add(LockInsertSafetyData, LockInsertLivenessData). - Add(LockInsertOrFinalizeClusterBlock, LockInsertSafetyData). 
- Build() + builder := lockctx.NewDAGPolicyBuilder() + + addLocks(builder, LockGroupAccessFinalizingBlock) + addLocks(builder, LockGroupAccessStateSyncIndexBlockData) + addLocks(builder, LockGroupAccessOptimisticSyncBlockPersist) + addLocks(builder, LockGroupExecutionBootstrap) + addLocks(builder, LockGroupExecutionSaveExecutionResult) + addLocks(builder, LockGroupCollectionBootstrapClusterState) + addLocks(builder, LockGroupProtocolStateBootstrap) + + return builder.Build() } var makeLockManagerOnce sync.Once From bcf1655b89edb78c63595b4f958d2e8f4a3f6c0f Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Fri, 17 Oct 2025 08:53:30 -0700 Subject: [PATCH 81/87] Apply suggestions from code review Co-authored-by: Jordan Schalm --- engine/execution/state/state.go | 1 - state/cluster/badger/state.go | 3 +-- state/protocol/badger/state.go | 2 +- storage/blocks.go | 2 +- storage/errors.go | 10 ---------- storage/events.go | 2 +- storage/operation/events.go | 14 +++++++------- storage/operation/transaction_results.go | 7 ++++--- storage/store/events.go | 11 +++++++---- storage/store/results.go | 2 +- storage/store/seals_test.go | 2 +- 11 files changed, 24 insertions(+), 32 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 3904d251cfc..1652bbcdd05 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -499,7 +499,6 @@ func (s *state) saveExecutionResults( return nil }) - if errors.Is(err, storage.ErrAlreadyExists) { return nil } diff --git a/state/cluster/badger/state.go b/state/cluster/badger/state.go index 3c47f28407a..fc2234ec668 100644 --- a/state/cluster/badger/state.go +++ b/state/cluster/badger/state.go @@ -95,8 +95,7 @@ func Bootstrap(db storage.DB, lockManager lockctx.Manager, stateRoot *StateRoot) return nil }) }) - - if err != nil { +if err != nil { return nil, fmt.Errorf("bootstrapping failed: %w", err) } return state, nil diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index 7fecea527e0..0cdfcbc10e3 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -183,7 +183,7 @@ func Bootstrap( if err != nil { return fmt.Errorf("determining whether database is successfully bootstrapped failed with unexpected exception: %w", err) } - if isBootstrapped { // we haven't written the latest finalized height yet, so this vaule must be false + if isBootstrapped { // we haven't written the latest finalized height yet, so this value must be false return fmt.Errorf("sanity check failed: while bootstrapping has not yet completed, the implementation already considers the protocol state as successfully bootstrapped") } err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { diff --git a/storage/blocks.go b/storage/blocks.go index 714a481ddc3..52fd67b1e85 100644 --- a/storage/blocks.go +++ b/storage/blocks.go @@ -99,5 +99,5 @@ type Blocks interface { // Error returns: // - storage.ErrAlreadyExists if any collection guarantee is already indexed // - generic error in case of unexpected failure from the database layer or encoding failure. 
- BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error + BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, guaranteeIDs []flow.Identifier) error } diff --git a/storage/errors.go b/storage/errors.go index a31d1b6f74c..b3d81d9709c 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -57,13 +57,3 @@ func NewInvalidDKGStateTransitionErrorf(from, to flow.DKGState, msg string, args err: fmt.Errorf(msg, args...), } } - -// SkipAlreadyExistsError returns nil if the provided error is ErrAlreadyExists, otherwise returns the original error. -// It usually means the storage operation to insert a record was skipped because the key of the record already exists. -// CAUTION : it does NOT check the equality of the value of the record. -func SkipAlreadyExistsError(err error) error { - if errors.Is(err, ErrAlreadyExists) { - return nil - } - return err -} diff --git a/storage/events.go b/storage/events.go index 041dcf3c64a..1c3791ee2b8 100644 --- a/storage/events.go +++ b/storage/events.go @@ -39,7 +39,7 @@ type ServiceEvents interface { // BatchStore stores service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. // it requires the caller to hold [storage.LockInsertServiceEvent] - // It returns [storage.ErrAlreadyExists] if events for the block already exist. + // It returns [storage.ErrAlreadyExists] if any service events for the block already exist. BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.Event, batch ReaderBatchWriter) error // ByBlockID returns the events for the given block ID diff --git a/storage/operation/events.go b/storage/operation/events.go index ffb6b9fd1c9..0f01dc9d2a0 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -23,9 +23,9 @@ func InsertBlockEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID } // Check if events for the block already exist - // We can exit early if we found one exist - // Because regardless the list is empty, or has one, or more, - // we don't want to overwrite existing events indexed by the block + // We can exit early if we find one existing event E, assuming that the process which wrote E in the past + // correctly inserted all other events for the block containing E. + // This function only inserts new events; it does not sanity check existing events or ever overwrite events. prefix := MakePrefix(codeEvent, blockID) checkExists := func(key []byte) error { return fmt.Errorf("event with key %x already exists under prefix %x: %w", key, prefix, storage.ErrAlreadyExists) @@ -67,10 +67,10 @@ func InsertBlockServiceEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter, } // Check if events for the block already exist - // We can exit early if we found one exist - // Because regardless the list is empty, or has one, or more, - // we don't want to overwrite existing events indexed by the block - prefix := MakePrefix(codeEvent, blockID) + // We can exit early if we find one existing event E, assuming that the process which wrote E in the past + // correctly inserted all other events for the block containing E. + // This function only inserts new events; it does not sanity check existing events or ever overwrite events. 
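This check-then-insert shape recurs for events, service events, and (below) transaction results. Condensed into one sketch for regular events; the service-event and transaction-result variants differ only in the key prefix. Here existsWithPrefix is a hypothetical stand-in for the prefix scan performed behind the checkExists callback in the real code, everything else mirrors helpers visible in this series.

	func insertBlockEventsSketch(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, blockEvents []flow.EventsList) error {
		if !lctx.HoldsLock(storage.LockInsertEvent) {
			return fmt.Errorf("missing required lock: %s", storage.LockInsertEvent)
		}
		// any key under the block's prefix means a previous writer already stored the full set
		prefix := MakePrefix(codeEvent, blockID)
		exists, err := existsWithPrefix(rw.GlobalReader(), prefix) // hypothetical helper
		if err != nil {
			return fmt.Errorf("could not check for existing events: %w", err)
		}
		if exists {
			return storage.ErrAlreadyExists // never sanity-check or overwrite what is already stored
		}
		for _, txEvents := range blockEvents {
			for _, event := range txEvents {
				if err := UpsertByKey(rw.Writer(), eventPrefix(codeEvent, blockID, event), event); err != nil {
					return fmt.Errorf("could not insert event: %w", err)
				}
			}
		}
		return nil
	}
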
+ prefix := MakePrefix(codeServiceEvent, blockID) checkExists := func(key []byte) error { return fmt.Errorf("event with key %x already exists under prefix %x: %w", key, prefix, storage.ErrAlreadyExists) } diff --git a/storage/operation/transaction_results.go b/storage/operation/transaction_results.go index 83ab1b75a33..bf464c3f8af 100644 --- a/storage/operation/transaction_results.go +++ b/storage/operation/transaction_results.go @@ -18,9 +18,10 @@ func InsertAndIndexTransactionResults(lctx lockctx.Proof, rw storage.ReaderBatch } // Check if transaction results for the block already exist - // We can exit early if we found one result exist - // Because regardless transactionResults is empty, or has one, or more results, - // we don't want to overwrite existing results + // We can exit early if we find one existing transaction result R, assuming that the process which wrote R in the past + // correctly inserted all other results for the block containing R. + // This function only inserts new transaction results; it does not sanity check existing results or ever overwrite results. + prefix := MakePrefix(codeTransactionResult, blockID) checkExists := func(key []byte) error { return fmt.Errorf("transaction results for block %v already exist: %w", blockID, storage.ErrAlreadyExists) diff --git a/storage/store/events.go b/storage/store/events.go index 8cc9ccb207b..8866b3174f0 100644 --- a/storage/store/events.go +++ b/storage/store/events.go @@ -40,7 +40,9 @@ func NewEvents(collector module.CacheMetrics, db storage.DB) *Events { } // BatchStore will store events for the given block ID in a given batch -// it requires the caller to hold [storage.LockInsertEvent] +// It requires the caller to hold [storage.LockInsertEvent] +// Expected error returns: +// - [storage.ErrAlreadyExists] if events for the block already exist. func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEvents []flow.EventsList, batch storage.ReaderBatchWriter) error { // Use the new InsertBlockEvents operation to store all events err := operation.InsertBlockEvents(lctx, batch, blockID, blockEvents) @@ -57,8 +59,8 @@ func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEv combinedEvents := make([]flow.Event, sliceSize) eventIndex := 0 - for _, events := range blockEvents { - for _, event := range events { +for _, txEvents := range blockEvents { +for _, event := range txEvents { combinedEvents[eventIndex] = event eventIndex++ } @@ -170,7 +172,8 @@ func NewServiceEvents(collector module.CacheMetrics, db storage.DB) *ServiceEven } // BatchStore stores service events keyed by a blockID in provided batch -// No errors are expected during normal operation, even if no entries are matched. +// Expected error returns: +// - [storage.ErrAlreadyExists] if events for the block already exist. func (e *ServiceEvents) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, events []flow.Event, rw storage.ReaderBatchWriter) error { // Use the new InsertBlockServiceEvents operation to store all service events err := operation.InsertBlockServiceEvents(lctx, rw, blockID, events) diff --git a/storage/store/results.go b/storage/store/results.go index c9966765224..b15166b3549 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -98,7 +98,7 @@ func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult } // ByBlockID retrieves an execution result by block ID. 
-// It returns [storage.ErrNotFound] if `blockID` does not refer to a block executed by this node +// It returns [storage.ErrNotFound] if `blockID` refers to a block which is unknown, or for which a trusted (sealed or executed by this node) execution result does not exist. func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { return r.byBlockID(blockID) } diff --git a/storage/store/seals_test.go b/storage/store/seals_test.go index 852a245b3b0..62300985515 100644 --- a/storage/store/seals_test.go +++ b/storage/store/seals_test.go @@ -46,7 +46,7 @@ func TestSealStoreRetrieve(t *testing.T) { } // TestSealIndexAndRetrieve verifies that: -// - for a block, we can s (aka index) the latest sealed block along this fork. +// - for a block, we can seal (aka index) the latest sealed block along this fork. func TestSealIndexAndRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() From 65c3ceb844e05bcdafcf69a29e92470cf0c7cab7 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 17 Oct 2025 09:01:59 -0700 Subject: [PATCH 82/87] rename to LockInsertMyReceipt --- engine/execution/state/state.go | 2 +- .../block_iterator/executor/executor_test.go | 4 ++-- storage/locks.go | 8 ++++---- storage/locks_test.go | 4 ++-- storage/operation/Documentation-Guidelines.md | 6 +++--- storage/operation/receipts.go | 4 ++-- storage/operation/receipts_test.go | 2 +- storage/store/my_receipts.go | 4 ++-- storage/store/my_receipts_test.go | 20 +++++++++---------- 9 files changed, 27 insertions(+), 27 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 1652bbcdd05..98dfa10bfed 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -475,7 +475,7 @@ func (s *state) saveExecutionResults( } executionResult := &result.ExecutionReceipt.ExecutionResult - // require [storage.LockInsertOwnReceipt] lock + // require [storage.LockInsertMyReceipt] lock // saving my receipts will also save the execution result err = s.myReceipts.BatchStoreMyReceipt(lctx, result.ExecutionReceipt, batch) if err != nil { diff --git a/module/block_iterator/executor/executor_test.go b/module/block_iterator/executor/executor_test.go index ae72b55ea6b..3274e6658d0 100644 --- a/module/block_iterator/executor/executor_test.go +++ b/module/block_iterator/executor/executor_test.go @@ -47,7 +47,7 @@ func TestExecute(t *testing.T) { // store the execution receipts to be pruned later for _, receipt := range receipts { - require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error { return pdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return myReceipts.BatchStoreMyReceipt(lctx, receipt, rw) }) @@ -104,7 +104,7 @@ func TestExecuteCanBeResumed(t *testing.T) { // store the execution receipts to be pruned later for _, receipt := range receipts { - require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error { return pdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return myReceipts.BatchStoreMyReceipt(lctx, receipt, rw) }) diff --git a/storage/locks.go b/storage/locks.go index 
91a21a0cf4d..95e38303d97 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -25,9 +25,9 @@ const ( LockInsertEvent = "lock_insert_event" // LockInsertServiceEvent protects the insertion of service events. LockInsertServiceEvent = "lock_insert_service_event" - // LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block. + // LockInsertMyReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block. // Specifically, with this lock we prevent accidental overwrites of the index `executed block ID` ➜ `Receipt ID`. - LockInsertOwnReceipt = "lock_insert_own_receipt" + LockInsertMyReceipt = "lock_insert_my_receipt" LockIndexExecutionResult = "lock_index_execution_result" LockIndexStateCommitment = "lock_index_state_commitment" LockInsertAndIndexTxResult = "lock_insert_and_index_tx_result" @@ -59,7 +59,7 @@ func Locks() []string { LockInsertOrFinalizeClusterBlock, LockInsertEvent, LockInsertServiceEvent, - LockInsertOwnReceipt, + LockInsertMyReceipt, LockIndexExecutionResult, LockIndexStateCommitment, LockInsertAndIndexTxResult, @@ -95,7 +95,7 @@ var LockGroupExecutionSaveExecutionResult = []string{ LockInsertEvent, LockInsertServiceEvent, LockInsertAndIndexTxResult, - LockInsertOwnReceipt, + LockInsertMyReceipt, LockIndexExecutionResult, LockIndexStateCommitment, } diff --git a/storage/locks_test.go b/storage/locks_test.go index f919e9667dc..12586bbb4a0 100644 --- a/storage/locks_test.go +++ b/storage/locks_test.go @@ -75,10 +75,10 @@ func TestHeldOneLock(t *testing.T) { t.Run("with different lock combinations", func(t *testing.T) { lctx := lockManager.NewContext() defer lctx.Release() - err := lctx.AcquireLock(LockInsertOwnReceipt) + err := lctx.AcquireLock(LockInsertMyReceipt) require.NoError(t, err) - held, msg := HeldOneLock(lctx, LockInsertOwnReceipt, LockInsertCollection) + held, msg := HeldOneLock(lctx, LockInsertMyReceipt, LockInsertCollection) assert.True(t, held) assert.Empty(t, msg) }) diff --git a/storage/operation/Documentation-Guidelines.md b/storage/operation/Documentation-Guidelines.md index 396d0476ba7..b823dfc8dc7 100644 --- a/storage/operation/Documentation-Guidelines.md +++ b/storage/operation/Documentation-Guidelines.md @@ -141,13 +141,13 @@ As an example for functions of type (ii.a), consider `operation.IndexStateCommit // // CAUTION: // - Confirming that no value is already stored and the subsequent write must be atomic to prevent data corruption. -// The caller must acquire the [storage.LockInsertOwnReceipt] and hold it until the database write has been committed. +// The caller must acquire the [storage.LockInsertMyReceipt] and hold it until the database write has been committed. 
// // Expected error returns during normal operations: // - [storage.ErrDataMismatch] if a *different* state commitment is already indexed for the same block ID func IndexStateCommitment(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, commit flow.StateCommitment) error { - if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("cannot index state commitment without holding lock %s", storage.LockInsertOwnReceipt) + if !lctx.HoldsLock(storage.LockInsertMyReceipt) { + return fmt.Errorf("cannot index state commitment without holding lock %s", storage.LockInsertMyReceipt) } var existingCommit flow.StateCommitment diff --git a/storage/operation/receipts.go b/storage/operation/receipts.go index a5b31a4f8a0..57fe3e7e1cc 100644 --- a/storage/operation/receipts.go +++ b/storage/operation/receipts.go @@ -32,8 +32,8 @@ func RetrieveExecutionReceiptStub(r storage.Reader, receiptID flow.Identifier, m // Error returns: // - [storage.ErrDataMismatch] if a *different* receipt has already been indexed for the same block func IndexOwnExecutionReceipt(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, receiptID flow.Identifier) error { - if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { - return fmt.Errorf("cannot index own execution receipt without holding lock %s", storage.LockInsertOwnReceipt) + if !lctx.HoldsLock(storage.LockInsertMyReceipt) { + return fmt.Errorf("cannot index own execution receipt without holding lock %s", storage.LockInsertMyReceipt) } key := MakePrefix(codeOwnBlockReceipt, blockID) diff --git a/storage/operation/receipts_test.go b/storage/operation/receipts_test.go index 6b094961c4f..31b06fcc0d9 100644 --- a/storage/operation/receipts_test.go +++ b/storage/operation/receipts_test.go @@ -39,7 +39,7 @@ func TestReceipts_Index(t *testing.T) { expected := receipt.ID() blockID := receipt.ExecutionResult.BlockID - err := storage.WithLock(lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + err := storage.WithLock(lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.IndexOwnExecutionReceipt(lctx, rw, blockID, expected) }) diff --git a/storage/store/my_receipts.go b/storage/store/my_receipts.go index 352bf7877a2..57480b24583 100644 --- a/storage/store/my_receipts.go +++ b/storage/store/my_receipts.go @@ -62,7 +62,7 @@ func (m *MyExecutionReceipts) myReceipt(blockID flow.Identifier) (*flow.Executio // // If entity fails marshalling, the error is wrapped in a generic error and returned. // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -// It requires [storage.LockInsertOwnReceipt] to be held. +// It requires [storage.LockInsertMyReceipt] to be held. 
 //
 // Expected error returns during *normal* operations:
 //   - `storage.ErrDataMismatch` if a *different* receipt has already been indexed for the same block
@@ -76,7 +76,7 @@ func (m *MyExecutionReceipts) BatchStoreMyReceipt(lctx lockctx.Proof, receipt *f
 			return err
 		}
 
-		// require [storage.LockInsertOwnReceipt] to be held
+		// require [storage.LockInsertMyReceipt] to be held
 		err = operation.IndexOwnExecutionReceipt(lctx, rw, blockID, receiptID)
 		if err != nil {
 			return err
diff --git a/storage/store/my_receipts_test.go b/storage/store/my_receipts_test.go
index 9760440b6c6..29f00b603fe 100644
--- a/storage/store/my_receipts_test.go
+++ b/storage/store/my_receipts_test.go
@@ -34,7 +34,7 @@ func TestMyExecutionReceiptsStorage(t *testing.T) {
 		receipt1 := unittest.ReceiptForBlockFixture(block)
 
 		// STEP 1: Store receipt
-		err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+		err := unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 				return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
 			})
@@ -64,14 +64,14 @@ func TestMyExecutionReceiptsStorage(t *testing.T) {
 		block := unittest.BlockFixture()
 		receipt1 := unittest.ReceiptForBlockFixture(block)
 
-		err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+		err := unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 				return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
 			})
 		})
 		require.NoError(t, err)
 
-		err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+		err = unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 				return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
 			})
@@ -90,14 +90,14 @@ func TestMyExecutionReceiptsStorage(t *testing.T) {
 		receipt1 := unittest.ReceiptForBlockExecutorFixture(block, executor1)
 		receipt2 := unittest.ReceiptForBlockExecutorFixture(block, executor2)
 
-		err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+		err := unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 				return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
 			})
 		})
 		require.NoError(t, err)
 
-		err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+		err = unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 				return myReceipts.BatchStoreMyReceipt(lctx, receipt2, rw)
 			})
@@ -125,7 +125,7 @@ func TestMyExecutionReceiptsStorage(t *testing.T) {
 		go func() {
 			startSignal.Wait()
 
-			err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+			err := unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 					return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
 				})
@@ -136,7 +136,7 @@ func TestMyExecutionReceiptsStorage(t *testing.T) {
 		go func() {
 			startSignal.Wait()
 
-			err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+			err := unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 					return myReceipts.BatchStoreMyReceipt(lctx, receipt2, rw)
 				})
@@ -177,7 +177,7 @@ func TestMyExecutionReceiptsStorage(t *testing.T) {
 			receipt := unittest.ReceiptForBlockExecutorFixture(block, executor)
 
 			startSignal.Wait()
-			err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+			err := unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 					return myReceipts.BatchStoreMyReceipt(lctx, receipt, rw)
 				})
@@ -204,7 +204,7 @@ func TestMyExecutionReceiptsStorage(t *testing.T) {
 		block := unittest.BlockFixture()
 		receipt1 := unittest.ReceiptForBlockFixture(block)
 
-		err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+		err := unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 				return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
 			})
@@ -248,7 +248,7 @@ func TestMyExecutionReceiptsStorageMultipleStoreInSameBatch(t *testing.T) {
 		receipt1 := unittest.ReceiptForBlockFixture(block)
 		receipt2 := unittest.ReceiptForBlockFixture(block)
 
-		err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+		err := unittest.WithLock(t, lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 				err := myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
 				if err != nil {
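The hunks above all exercise the same calling convention, which the rename from LockInsertOwnReceipt to LockInsertMyReceipt preserves: acquire storage.LockInsertMyReceipt from the lock manager, open a batch, and pass the resulting lock proof into BatchStoreMyReceipt. A minimal sketch of that pattern outside the test helpers follows; the helper name and wiring are illustrative, and it assumes storage.WithLock and DB.WithReaderBatchWriter behave as used in the diffs above.

package example

import (
	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/store"
)

// storeOwnReceipt is a hypothetical helper (not part of this patch set). It stores the
// node's own execution receipt while holding storage.LockInsertMyReceipt, mirroring the
// pattern exercised by TestMyExecutionReceiptsStorage above.
func storeOwnReceipt(db storage.DB, lockManager lockctx.Manager, myReceipts *store.MyExecutionReceipts, receipt *flow.ExecutionReceipt) error {
	return storage.WithLock(lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
		return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
			// BatchStoreMyReceipt requires the lock proof; indexing a *different* receipt
			// for the same block surfaces as storage.ErrDataMismatch (see doc comment above).
			return myReceipts.BatchStoreMyReceipt(lctx, receipt, rw)
		})
	})
}

The concurrent test cases rely on exactly this discipline: two goroutines storing receipts for the same block serialize on the lock, so the second writer either stores the identical receipt again or fails with storage.ErrDataMismatch instead of silently overwriting the index.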
From 8ece7012cf7490ba56b9061a18e3fd82868b3ff3 Mon Sep 17 00:00:00 2001
From: "Leo Zhang (zhangchiqing)"
Date: Fri, 17 Oct 2025 09:02:56 -0700
Subject: [PATCH 83/87] rename to LockIndexBlockByPayloadGuarantees

---
 engine/access/ingestion/engine.go                     | 2 +-
 engine/access/ingestion2/finalized_block_processor.go | 2 +-
 storage/blocks.go                                     | 2 +-
 storage/locks.go                                      | 8 ++++----
 storage/operation/headers.go                          | 6 +++---
 storage/operation/headers_test.go                     | 2 +-
 6 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go
index ca4de4d4850..583e6e788ca 100644
--- a/engine/access/ingestion/engine.go
+++ b/engine/access/ingestion/engine.go
@@ -381,7 +381,7 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error {
 	// index the block storage with each of the collection guarantee
 	err := storage.WithLocks(e.lockManager, storage.LockGroupAccessFinalizingBlock, func(lctx lockctx.Context) error {
 		return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
-			// requires [storage.LockIndexCollectionsByBlock] lock
+			// requires [storage.LockIndexBlockByPayloadGuarantees] lock
 			err := e.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees))
 			if err != nil {
 				return fmt.Errorf("could not index block for collections: %w", err)
diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go
index b139dd05035..fef36b44077 100644
--- a/engine/access/ingestion2/finalized_block_processor.go
+++ b/engine/access/ingestion2/finalized_block_processor.go
@@ -156,7 +156,7 @@ func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error {
 	err := storage.WithLocks(p.lockManager, storage.LockGroupAccessFinalizingBlock, func(lctx lockctx.Context) error {
 		return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
-			// require storage.LockIndexCollectionsByBlock
+			// require storage.LockIndexBlockByPayloadGuarantees
 			err := p.blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees))
 			if err != nil {
 				return fmt.Errorf("could not index block for collections: %w", err)
diff --git a/storage/blocks.go b/storage/blocks.go
index 52fd67b1e85..bc852c61ee6 100644
--- a/storage/blocks.go
+++ b/storage/blocks.go
@@ -88,7 +88,7 @@ type Blocks interface {
 	ByCollectionID(collID flow.Identifier) (*flow.Block, error)
 
 	// BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees.
-	// The caller must acquire a storage.LockIndexCollectionsByBlock lock.
+	// The caller must acquire a storage.LockIndexBlockByPayloadGuarantees lock.
 	//
 	// CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation
 	// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY
diff --git a/storage/locks.go b/storage/locks.go
index 95e38303d97..87f8228e9c0 100644
--- a/storage/locks.go
+++ b/storage/locks.go
@@ -34,8 +34,8 @@ const (
 	// LockInsertCollection protects the insertion of collections.
 	LockInsertCollection = "lock_insert_collection"
 	// LockInsertInstanceParams protects data that is *exclusively* written during bootstrapping.
-	LockInsertInstanceParams    = "lock_insert_instance_params"
-	LockIndexCollectionsByBlock = "lock_index_collections_by_block"
+	LockInsertInstanceParams          = "lock_insert_instance_params"
+	LockIndexBlockByPayloadGuarantees = "lock_index_block_by_payload_guarantees"
 	// LockIndexChunkDataPackByChunkID protects the insertion of chunk data packs
 	LockIndexChunkDataPackByChunkID = "lock_index_chunk_data_pack_by_chunk_id"
 	// LockInsertTransactionResultErrMessage protects the insertion of transaction result error messages
@@ -66,7 +66,7 @@ func Locks() []string {
 		LockInsertCollection,
 		LockInsertLightTransactionResult,
 		LockInsertInstanceParams,
-		LockIndexCollectionsByBlock,
+		LockIndexBlockByPayloadGuarantees,
 		LockIndexChunkDataPackByChunkID,
 		LockInsertTransactionResultErrMessage,
 		LockInsertLightTransactionResult,
@@ -101,7 +101,7 @@ var LockGroupExecutionSaveExecutionResult = []string{
 }
 
 var LockGroupAccessFinalizingBlock = []string{
-	LockIndexCollectionsByBlock,
+	LockIndexBlockByPayloadGuarantees,
 	LockIndexExecutionResult,
 }
 
diff --git a/storage/operation/headers.go b/storage/operation/headers.go
index d2b8fffe7f0..5254c50dd38 100644
--- a/storage/operation/headers.go
+++ b/storage/operation/headers.go
@@ -125,7 +125,7 @@ func BlockExists(r storage.Reader, blockID flow.Identifier) (bool, error) {
 }
 
 // BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees.
-// The caller must acquire a storage.LockIndexCollectionsByBlock lock.
+// The caller must acquire a storage.LockIndexBlockByPayloadGuarantees lock.
 //
 // CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation
 // assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY
@@ -136,8 +136,8 @@ func BlockExists(r storage.Reader, blockID flow.Identifier) (bool, error) {
 // Expected errors during normal operations:
 //   - [storage.ErrAlreadyExists] if any collection guarantee is already indexed
 func BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error {
-	if !lctx.HoldsLock(storage.LockIndexCollectionsByBlock) {
-		return fmt.Errorf("BatchIndexBlockContainingCollectionGuarantees requires %v", storage.LockIndexCollectionsByBlock)
+	if !lctx.HoldsLock(storage.LockIndexBlockByPayloadGuarantees) {
+		return fmt.Errorf("BatchIndexBlockContainingCollectionGuarantees requires %v", storage.LockIndexBlockByPayloadGuarantees)
 	}
 
 	// Check if any keys already exist
diff --git a/storage/operation/headers_test.go b/storage/operation/headers_test.go
index 61e9dad90b4..763675e5fab 100644
--- a/storage/operation/headers_test.go
+++ b/storage/operation/headers_test.go
@@ -55,7 +55,7 @@ func TestHeaderIDIndexByCollectionID(t *testing.T) {
 		lockManager := storage.NewTestingLockManager()
 
-		err := unittest.WithLock(t, lockManager, storage.LockIndexCollectionsByBlock, func(lctx lockctx.Context) error {
+		err := unittest.WithLock(t, lockManager, storage.LockIndexBlockByPayloadGuarantees, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 				return operation.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, headerID, []flow.Identifier{collectionGuaranteeID})
 			})
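Patch 83 is a pure rename, but it also shows where the lock now sits: LockIndexBlockByPayloadGuarantees is a member of LockGroupAccessFinalizingBlock, and both Access-node ingestion paths acquire the whole group before indexing. The sketch below condenses that call shape into one function; the name indexFinalizedGuarantees is illustrative, and the storage.WithLocks / BatchIndexBlockContainingCollectionGuarantees signatures are taken from the hunks above.

package example

import (
	"fmt"

	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// indexFinalizedGuarantees is an illustrative condensation (not part of the patch set) of the
// Access-node indexing path shown above: the whole LockGroupAccessFinalizingBlock group is
// acquired, then the guarantee-to-block mapping is written in a single batch.
func indexFinalizedGuarantees(db storage.DB, lockManager lockctx.Manager, blocks storage.Blocks, block *flow.Block) error {
	return storage.WithLocks(lockManager, storage.LockGroupAccessFinalizingBlock, func(lctx lockctx.Context) error {
		return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
			// requires [storage.LockIndexBlockByPayloadGuarantees], which is part of the group
			err := blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, block.ID(), flow.GetIDs(block.Payload.Guarantees))
			if err != nil {
				return fmt.Errorf("could not index block for collections: %w", err)
			}
			return nil
		})
	})
}

Acquiring the group rather than a single lock matches how LockGroupAccessFinalizingBlock is declared in locks.go above, which pairs this lock with LockIndexExecutionResult for the block-finalization path.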
From 55e86da90050f598c26b76736f1e0b46c4356234 Mon Sep 17 00:00:00 2001
From: "Leo Zhang (zhangchiqing)"
Date: Fri, 17 Oct 2025 09:03:45 -0700
Subject: [PATCH 84/87] rename variables in BatchIndexBlockContainingCollectionGuarantees

---
 storage/mock/blocks.go       |  8 ++++----
 storage/operation/headers.go | 14 +++++++-------
 storage/store/blocks.go      |  4 ++--
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/storage/mock/blocks.go b/storage/mock/blocks.go
index b6807aaa983..9beee2b80d7 100644
--- a/storage/mock/blocks.go
+++ b/storage/mock/blocks.go
@@ -16,9 +16,9 @@ type Blocks struct {
 	mock.Mock
 }
 
-// BatchIndexBlockContainingCollectionGuarantees provides a mock function with given fields: lctx, rw, blockID, collIDs
-func (_m *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error {
-	ret := _m.Called(lctx, rw, blockID, collIDs)
+// BatchIndexBlockContainingCollectionGuarantees provides a mock function with given fields: lctx, rw, blockID, guaranteeIDs
+func (_m *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, guaranteeIDs []flow.Identifier) error {
+	ret := _m.Called(lctx, rw, blockID, guaranteeIDs)
 
 	if len(ret) == 0 {
 		panic("no return value specified for BatchIndexBlockContainingCollectionGuarantees")
@@ -26,7 +26,7 @@ func (_m *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Pro
 
 	var r0 error
 	if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, []flow.Identifier) error); ok {
-		r0 = rf(lctx, rw, blockID, collIDs)
+		r0 = rf(lctx, rw, blockID, guaranteeIDs)
 	} else {
 		r0 = ret.Error(0)
 	}
diff --git a/storage/operation/headers.go b/storage/operation/headers.go
index 5254c50dd38..808e7b88406 100644
--- a/storage/operation/headers.go
+++ b/storage/operation/headers.go
@@ -135,29 +135,29 @@ func BlockExists(r storage.Reader, blockID flow.Identifier) (bool, error) {
 //
 // Expected errors during normal operations:
 //   - [storage.ErrAlreadyExists] if any collection guarantee is already indexed
-func BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error {
+func BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, guaranteeIDs []flow.Identifier) error {
 	if !lctx.HoldsLock(storage.LockIndexBlockByPayloadGuarantees) {
 		return fmt.Errorf("BatchIndexBlockContainingCollectionGuarantees requires %v", storage.LockIndexBlockByPayloadGuarantees)
 	}
 
 	// Check if any keys already exist
-	for _, collID := range collIDs {
-		key := MakePrefix(codeCollectionBlock, collID)
+	for _, guaranteeID := range guaranteeIDs {
+		key := MakePrefix(codeCollectionBlock, guaranteeID)
 		exists, err := KeyExists(rw.GlobalReader(), key)
 		if err != nil {
 			return fmt.Errorf("could not check if collection guarantee is already indexed: %w", err)
 		}
 		if exists {
-			return fmt.Errorf("collection guarantee (%x) is already indexed: %w", collID, storage.ErrAlreadyExists)
+			return fmt.Errorf("collection guarantee (%x) is already indexed: %w", guaranteeID, storage.ErrAlreadyExists)
 		}
 	}
 
 	// Index all collection guarantees
-	for _, collID := range collIDs {
-		key := MakePrefix(codeCollectionBlock, collID)
+	for _, guaranteeID := range guaranteeIDs {
+		key := MakePrefix(codeCollectionBlock, guaranteeID)
 		err := UpsertByKey(rw.Writer(), key, blockID)
 		if err != nil {
-			return fmt.Errorf("could not index collection guarantee (%x): %w", collID, err)
+			return fmt.Errorf("could not index collection guarantee (%x): %w", guaranteeID, err)
 		}
 	}
 
diff --git a/storage/store/blocks.go b/storage/store/blocks.go
index f501380c199..b2b69868460 100644
--- a/storage/store/blocks.go
+++ b/storage/store/blocks.go
@@ -219,6 +219,6 @@ func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) {
 // The caller must acquire a storage.LockIndexCollectionByBlock lock.
 // Error returns:
 //   - storage.ErrAlreadyExists if any collection ID has already been indexed
-func (b *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, collIDs []flow.Identifier) error {
-	return operation.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, blockID, collIDs)
+func (b *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, guaranteeIDs []flow.Identifier) error {
+	return operation.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, blockID, guaranteeIDs)
 }
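Beyond the variable rename, the headers.go hunk above is the clearest instance of the guarded-index pattern these patches standardize on: check for an existing key through rw.GlobalReader(), return a wrapped storage.ErrAlreadyExists on a hit, and only then stage the writes in the batch. A condensed sketch of that pattern follows; it assumes placement in the operation package (so KeyExists and UpsertByKey are in scope), and indexBlockIDUnique is a hypothetical helper, not code from the patch.

package operation

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// indexBlockIDUnique is a hypothetical distillation of the guarded-index pattern used by
// BatchIndexBlockContainingCollectionGuarantees: never overwrite an existing mapping.
func indexBlockIDUnique(rw storage.ReaderBatchWriter, key []byte, blockID flow.Identifier) error {
	exists, err := KeyExists(rw.GlobalReader(), key)
	if err != nil {
		return fmt.Errorf("could not check existing index: %w", err)
	}
	if exists {
		// the key is already mapped; report it with the sentinel callers test for
		return fmt.Errorf("key %x is already indexed: %w", key, storage.ErrAlreadyExists)
	}
	// stage the write; it becomes visible once the surrounding batch commits
	return UpsertByKey(rw.Writer(), key, blockID)
}

Note that the existence check reads committed state via GlobalReader(), so two concurrently prepared batches would not see each other's pending writes - which is why the real operation first insists on lctx.HoldsLock(storage.LockIndexBlockByPayloadGuarantees) before doing anything.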
From 5bd55818b35b2f01100d714fb5ff699e982c3c92 Mon Sep 17 00:00:00 2001
From: "Leo Zhang (zhangchiqing)"
Date: Fri, 17 Oct 2025 09:10:19 -0700
Subject: [PATCH 85/87] fix lint

---
 state/cluster/badger/state.go            | 2 +-
 storage/operation/events.go              | 4 ++--
 storage/operation/transaction_results.go | 2 +-
 storage/store/events.go                  | 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/state/cluster/badger/state.go b/state/cluster/badger/state.go
index fc2234ec668..e8cba051aa6 100644
--- a/state/cluster/badger/state.go
+++ b/state/cluster/badger/state.go
@@ -95,7 +95,7 @@ func Bootstrap(db storage.DB, lockManager lockctx.Manager, stateRoot *StateRoot)
 			return nil
 		})
 	})
-if err != nil {
+	if err != nil {
 		return nil, fmt.Errorf("bootstrapping failed: %w", err)
 	}
 	return state, nil
diff --git a/storage/operation/events.go b/storage/operation/events.go
index 0f01dc9d2a0..b809f2bad10 100644
--- a/storage/operation/events.go
+++ b/storage/operation/events.go
@@ -23,7 +23,7 @@ func InsertBlockEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID
 	}
 
 	// Check if events for the block already exist
-	// We can exit early if we find one existing event E, assuming that the process which wrote E in the past 
+	// We can exit early if we find one existing event E, assuming that the process which wrote E in the past
 	// correctly inserted all other events for the block containing E.
 	// This function only inserts new events; it does not sanity check existing events or ever overwrite events.
 	prefix := MakePrefix(codeEvent, blockID)
@@ -67,7 +67,7 @@ func InsertBlockServiceEvents(lctx lockctx.Proof, rw storage.ReaderBatchWriter,
 	}
 
 	// Check if events for the block already exist
-	// We can exit early if we find one existing event E, assuming that the process which wrote E in the past 
+	// We can exit early if we find one existing event E, assuming that the process which wrote E in the past
 	// correctly inserted all other events for the block containing E.
 	// This function only inserts new events; it does not sanity check existing events or ever overwrite events.
 	prefix := MakePrefix(codeServiceEvent, blockID)
diff --git a/storage/operation/transaction_results.go b/storage/operation/transaction_results.go
index bf464c3f8af..cc76e57c25f 100644
--- a/storage/operation/transaction_results.go
+++ b/storage/operation/transaction_results.go
@@ -18,7 +18,7 @@ func InsertAndIndexTransactionResults(lctx lockctx.Proof, rw storage.ReaderBatch
 	}
 
 	// Check if transaction results for the block already exist
-	// We can exit early if we find one existing transaction result R, assuming that the process which wrote R in the past 
+	// We can exit early if we find one existing transaction result R, assuming that the process which wrote R in the past
 	// correctly inserted all other results for the block containing R.
 	// This function only inserts new transaction results; it does not sanity check existing results or ever overwrite results.
diff --git a/storage/store/events.go b/storage/store/events.go
index 8866b3174f0..bc2d432de27 100644
--- a/storage/store/events.go
+++ b/storage/store/events.go
@@ -59,8 +59,8 @@ func (e *Events) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, blockEv
 	combinedEvents := make([]flow.Event, sliceSize)
 	eventIndex := 0
 
-for _, txEvents := range blockEvents {
-for _, event := range txEvents {
+	for _, txEvents := range blockEvents {
+		for _, event := range txEvents {
 			combinedEvents[eventIndex] = event
 			eventIndex++
 		}

From 50b4ba44c396fc4823f018b640b7ea21d9b304be Mon Sep 17 00:00:00 2001
From: "Leo Zhang (zhangchiqing)"
Date: Fri, 17 Oct 2025 09:24:24 -0700
Subject: [PATCH 86/87] rename results operations

---
 engine/execution/state/bootstrap/bootstrap.go | 2 +-
 state/protocol/badger/state.go                | 4 ++--
 storage/operation/receipts.go                 | 4 ++--
 storage/operation/receipts_test.go            | 2 +-
 storage/operation/results.go                  | 4 ++--
 storage/store/my_receipts.go                  | 2 +-
 storage/store/results.go                      | 2 +-
 7 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/engine/execution/state/bootstrap/bootstrap.go b/engine/execution/state/bootstrap/bootstrap.go
index 098726aee37..dc4267bc3f9 100644
--- a/engine/execution/state/bootstrap/bootstrap.go
+++ b/engine/execution/state/bootstrap/bootstrap.go
@@ -107,7 +107,7 @@ func (b *Bootstrapper) BootstrapExecutionDatabase(
 			return fmt.Errorf("could not index initial genesis execution block: %w", err)
 		}
 
-		err = operation.IndexOwnOrSealedExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID)
+		err = operation.IndexTrustedExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID)
 		if err != nil {
 			return fmt.Errorf("could not index result for root result: %w", err)
 		}
diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go
index 0cdfcbc10e3..316fe9bcefd 100644
--- a/state/protocol/badger/state.go
+++ b/state/protocol/badger/state.go
@@ -298,7 +298,7 @@ func bootstrapSealingSegment(
 			if err != nil {
 				return fmt.Errorf("could not insert execution result: %w", err)
 			}
-			err = operation.IndexOwnOrSealedExecutionResult(lctx, rw, result.BlockID, result.ID())
+			err = operation.IndexTrustedExecutionResult(lctx, rw, result.BlockID, result.ID())
 			if err != nil {
 				return fmt.Errorf("could not index execution result: %w", err)
 			}
@@ -457,7 +457,7 @@ func bootstrapSealingSegment(
 		// If the sealed root block is different from the finalized root block, then it means the node dynamically
 		// bootstrapped. In that case, we index the result of the latest sealed result, so that the EN is able
 		// to confirm that it is loading the correct state to execute the next block.
-		err = operation.IndexOwnOrSealedExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID)
+		err = operation.IndexTrustedExecutionResult(lctx, rw, rootSeal.BlockID, rootSeal.ResultID)
 		if err != nil {
 			return fmt.Errorf("could not index root result: %w", err)
 		}
diff --git a/storage/operation/receipts.go b/storage/operation/receipts.go
index 57fe3e7e1cc..3b38b131c82 100644
--- a/storage/operation/receipts.go
+++ b/storage/operation/receipts.go
@@ -27,11 +27,11 @@ func RetrieveExecutionReceiptStub(r storage.Reader, receiptID flow.Identifier, m
 	return RetrieveByKey(r, MakePrefix(codeExecutionReceiptMeta, receiptID), meta)
 }
 
-// IndexOwnExecutionReceipt indexes the Execution Node's OWN execution receipt by the executed block ID.
+// IndexMyExecutionReceipt indexes the Execution Node's OWN execution receipt by the executed block ID.
 //
 // Error returns:
 //   - [storage.ErrDataMismatch] if a *different* receipt has already been indexed for the same block
-func IndexOwnExecutionReceipt(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, receiptID flow.Identifier) error {
+func IndexMyExecutionReceipt(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, receiptID flow.Identifier) error {
 	if !lctx.HoldsLock(storage.LockInsertMyReceipt) {
 		return fmt.Errorf("cannot index own execution receipt without holding lock %s", storage.LockInsertMyReceipt)
 	}
diff --git a/storage/operation/receipts_test.go b/storage/operation/receipts_test.go
index 31b06fcc0d9..75ea0aa4ae7 100644
--- a/storage/operation/receipts_test.go
+++ b/storage/operation/receipts_test.go
@@ -41,7 +41,7 @@ func TestReceipts_Index(t *testing.T) {
 
 		err := storage.WithLock(lockManager, storage.LockInsertMyReceipt, func(lctx lockctx.Context) error {
 			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
-				return operation.IndexOwnExecutionReceipt(lctx, rw, blockID, expected)
+				return operation.IndexMyExecutionReceipt(lctx, rw, blockID, expected)
 			})
 		})
 		require.Nil(t, err)
diff --git a/storage/operation/results.go b/storage/operation/results.go
index cc3d49af025..fd7b697359b 100644
--- a/storage/operation/results.go
+++ b/storage/operation/results.go
@@ -27,7 +27,7 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result
 	return RetrieveByKey(r, MakePrefix(codeExecutionResult, resultID), result)
 }
 
-// IndexOwnOrSealedExecutionResult indexes the result of the given block.
+// IndexTrustedExecutionResult indexes the result of the given block.
 // It is used by the following scenarios:
 //  1. Execution Node indexes its own executed block's result when finish executing a block
 //  2. Execution Node indexes the sealed root block's result during bootstrapping
@@ -36,7 +36,7 @@ func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result
 //
 // It returns [storage.ErrDataMismatch] if there is already an indexed result for the given blockID,
 // but it is different from the given resultID.
-func IndexOwnOrSealedExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error {
+func IndexTrustedExecutionResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, resultID flow.Identifier) error {
 	// during bootstrapping, we index the sealed root block or the spork root block, which is not
 	// produced by the node itself, but we still need to index its execution result to be able to
 	// execute next block
diff --git a/storage/store/my_receipts.go b/storage/store/my_receipts.go
index 57480b24583..82ef0ec37e7 100644
--- a/storage/store/my_receipts.go
+++ b/storage/store/my_receipts.go
@@ -77,7 +77,7 @@ func (m *MyExecutionReceipts) BatchStoreMyReceipt(lctx lockctx.Proof, receipt *f
 		}
 
 		// require [storage.LockInsertMyReceipt] to be held
-		err = operation.IndexOwnExecutionReceipt(lctx, rw, blockID, receiptID)
+		err = operation.IndexMyExecutionReceipt(lctx, rw, blockID, receiptID)
 		if err != nil {
 			return err
 		}
diff --git a/storage/store/results.go b/storage/store/results.go
index b15166b3549..cca00e3e702 100644
--- a/storage/store/results.go
+++ b/storage/store/results.go
@@ -50,7 +50,7 @@ func NewExecutionResults(collector module.CacheMetrics, db storage.DB) *Executio
 			// this API is only used to fetch result for last executed block, so in happy case, it only need to be 1,
 			// we use 100 here to be more resilient to forks
 			withLimit[flow.Identifier, flow.Identifier](100),
-			withStoreWithLock(operation.IndexOwnOrSealedExecutionResult),
+			withStoreWithLock(operation.IndexTrustedExecutionResult),
 			withRetrieve(retrieveByBlockID),
 		),
 	}
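The rename to IndexTrustedExecutionResult keeps the contract spelled out in its doc comment: re-indexing the same block with the same result ID is acceptable, while a conflicting result ID is reported as storage.ErrDataMismatch. A hedged sketch of a caller that distinguishes the two failure modes follows; indexTrustedResult is an illustrative wrapper, and the lock the operation checks internally is not shown in these hunks, so the proof is simply passed through from whichever write path already holds it.

package example

import (
	"errors"
	"fmt"

	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/operation"
)

// indexTrustedResult is a hypothetical wrapper (not part of the patch set) around
// operation.IndexTrustedExecutionResult that separates the benign "already indexed with the
// same ID" case from a genuine mismatch, which would indicate inconsistent state.
func indexTrustedResult(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID, resultID flow.Identifier) error {
	err := operation.IndexTrustedExecutionResult(lctx, rw, blockID, resultID)
	if err == nil {
		return nil
	}
	if errors.Is(err, storage.ErrDataMismatch) {
		// a *different* result is already indexed for this block - do not continue
		return fmt.Errorf("conflicting execution result already indexed for block %x: %w", blockID, err)
	}
	return fmt.Errorf("could not index execution result: %w", err)
}

The same call now serves both bootstrapping (indexing the sealed or spork root result) and the Execution Node's own results, which is what the "trusted" in the new name signals.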
From 8d8d964852ac6bd18dc419affe8d4f0c55cced3b Mon Sep 17 00:00:00 2001
From: "Leo Zhang (zhangchiqing)"
Date: Fri, 17 Oct 2025 09:24:48 -0700
Subject: [PATCH 87/87] add comments

---
 storage/store/results.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/storage/store/results.go b/storage/store/results.go
index cca00e3e702..00a6e475c13 100644
--- a/storage/store/results.go
+++ b/storage/store/results.go
@@ -16,7 +16,7 @@ type ExecutionResults struct {
 	db         storage.DB
 	cache      *Cache[flow.Identifier, *flow.ExecutionResult]
-	indexCache *Cache[flow.Identifier, flow.Identifier]
+	indexCache *Cache[flow.Identifier, flow.Identifier] // blockID -> resultID
 }
 
 var _ storage.ExecutionResults = (*ExecutionResults)(nil)
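The final patch only documents what indexCache holds: a mapping from block ID to the ID of the result indexed for that block, while cache maps result IDs to full ExecutionResult values. How the two compose on the read path is not part of these hunks, so the sketch below is an assumption about that flow; cacheGetter and its Get method are hypothetical stand-ins for whatever accessor the repo's generic Cache actually exposes, and byBlockID is not the real retrieveByBlockID implementation.

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// cacheGetter is a hypothetical stand-in for the generic Cache API used by ExecutionResults.
type cacheGetter[K comparable, V any] interface {
	Get(r storage.Reader, key K) (V, error)
}

// byBlockID is a hedged sketch (not the real implementation) of the two-step lookup the new
// comment implies: blockID -> resultID via indexCache, then resultID -> result via cache.
func byBlockID(r storage.Reader, indexCache cacheGetter[flow.Identifier, flow.Identifier], cache cacheGetter[flow.Identifier, *flow.ExecutionResult], blockID flow.Identifier) (*flow.ExecutionResult, error) {
	resultID, err := indexCache.Get(r, blockID) // hypothetical accessor
	if err != nil {
		return nil, fmt.Errorf("could not look up result ID for block %x: %w", blockID, err)
	}
	result, err := cache.Get(r, resultID) // hypothetical accessor
	if err != nil {
		return nil, fmt.Errorf("could not retrieve result %x: %w", resultID, err)
	}
	return result, nil
}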