diff --git a/cli/operator/node.go b/cli/operator/node.go index ebd6b8ac3c..dfe9e77793 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -1066,7 +1066,7 @@ func syncContractEvents( ctx context.Context, logger *zap.Logger, executionClient executionclient.Provider, - validatorCtrl validator.Controller, + validatorCtrl *validator.Controller, networkConfig *networkconfig.Network, nodeStorage operatorstorage.Storage, operatorDataStore operatordatastore.OperatorDataStore, diff --git a/eth/ethtest/common_test.go b/eth/ethtest/common_test.go index 437e69581e..bdd28611e4 100644 --- a/eth/ethtest/common_test.go +++ b/eth/ethtest/common_test.go @@ -15,12 +15,12 @@ import ( "go.uber.org/mock/gomock" "go.uber.org/zap/zaptest" + "github.com/ssvlabs/ssv/eth/eventhandler/mocks" "github.com/ssvlabs/ssv/eth/eventsyncer" "github.com/ssvlabs/ssv/eth/executionclient" "github.com/ssvlabs/ssv/eth/simulator" "github.com/ssvlabs/ssv/eth/simulator/simcontract" "github.com/ssvlabs/ssv/operator/storage" - "github.com/ssvlabs/ssv/operator/validator/mocks" ) type CommonTestInput struct { @@ -62,7 +62,7 @@ type TestEnv struct { execClient *executionclient.ExecutionClient rpcServer *rpc.Server httpSrv *httptest.Server - validatorCtrl *mocks.MockController + taskExecutor *mocks.MockTaskExecutor mockCtrl *gomock.Controller followDistance *uint64 } @@ -116,15 +116,15 @@ func (e *TestEnv) setup( } } - eh, validatorCtrl, mockCtrl, nodeStorage, err := setupEventHandler(t, ctx, logger, ops[0], &testAddrAlice, true) + eh, taskExecutor, mockCtrl, nodeStorage, err := setupEventHandler(t, ctx, logger, ops[0], &testAddrAlice, true) e.mockCtrl = mockCtrl e.nodeStorage = nodeStorage if err != nil { return err } - if validatorCtrl == nil { - return fmt.Errorf("validatorCtrl is empty") + if taskExecutor == nil { + return fmt.Errorf("taskExecutor is empty") } // Adding testAddresses to the genesis block mostly to specify some balances for them @@ -196,7 +196,7 @@ func (e *TestEnv) setup( 
eventsyncer.WithLogger(logger), ) - e.validatorCtrl = validatorCtrl + e.taskExecutor = taskExecutor e.sim = sim e.auth = auth e.validators = validators diff --git a/eth/ethtest/eth_e2e_test.go b/eth/ethtest/eth_e2e_test.go index cd360e5716..26a0d79556 100644 --- a/eth/ethtest/eth_e2e_test.go +++ b/eth/ethtest/eth_e2e_test.go @@ -60,7 +60,7 @@ func TestEthExecLayer(t *testing.T) { validators = testEnv.validators eventSyncer = testEnv.eventSyncer shares = testEnv.shares - validatorCtrl = testEnv.validatorCtrl + taskExecutor = testEnv.taskExecutor ) blockNum := uint64(0x1) @@ -171,7 +171,7 @@ func TestEthExecLayer(t *testing.T) { // Step 2: Exit validator { - validatorCtrl.EXPECT().ExitValidator(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + taskExecutor.EXPECT().ExitValidator(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() shares := nodeStorage.Shares().List(nil) require.Equal(t, 7, len(shares)) @@ -196,7 +196,7 @@ func TestEthExecLayer(t *testing.T) { // Step 3: Remove validator { - validatorCtrl.EXPECT().StopValidator(gomock.Any()).AnyTimes() + taskExecutor.EXPECT().StopValidator(gomock.Any()).AnyTimes() shares := nodeStorage.Shares().List(nil) require.Equal(t, 7, len(shares)) @@ -228,7 +228,7 @@ func TestEthExecLayer(t *testing.T) { // Step 4 Liquidate Cluster { - validatorCtrl.EXPECT().LiquidateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + taskExecutor.EXPECT().LiquidateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() clusterLiquidate := NewTestClusterLiquidatedInput(common) clusterLiquidate.prepare([]*ClusterLiquidatedEventInput{ @@ -258,7 +258,7 @@ func TestEthExecLayer(t *testing.T) { // Step 5 Reactivate Cluster { - validatorCtrl.EXPECT().ReactivateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + taskExecutor.EXPECT().ReactivateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() clusterID := ssvtypes.ComputeClusterIDHash(testAddrAlice, []uint64{1, 2, 3, 4}) @@ -310,7 
+310,7 @@ func TestEthExecLayer(t *testing.T) { // Step 7 Update Fee Recipient { - validatorCtrl.EXPECT().UpdateFeeRecipient(gomock.Any(), gomock.Any(), gomock.Any()).Times(1) + taskExecutor.EXPECT().UpdateFeeRecipient(gomock.Any(), gomock.Any(), gomock.Any()).Times(1) setFeeRecipient := NewSetFeeRecipientAddressInput(common) setFeeRecipient.prepare([]*SetFeeRecipientAddressEventInput{ diff --git a/eth/ethtest/utils_test.go b/eth/ethtest/utils_test.go index f7d9b3c2b0..bf45dd9b45 100644 --- a/eth/ethtest/utils_test.go +++ b/eth/ethtest/utils_test.go @@ -21,6 +21,7 @@ import ( "github.com/ssvlabs/ssv/doppelganger" "github.com/ssvlabs/ssv/eth/contract" "github.com/ssvlabs/ssv/eth/eventhandler" + "github.com/ssvlabs/ssv/eth/eventhandler/mocks" "github.com/ssvlabs/ssv/eth/eventparser" "github.com/ssvlabs/ssv/eth/simulator" "github.com/ssvlabs/ssv/exporter" @@ -29,7 +30,6 @@ import ( operatordatastore "github.com/ssvlabs/ssv/operator/datastore" operatorstorage "github.com/ssvlabs/ssv/operator/storage" "github.com/ssvlabs/ssv/operator/validator" - "github.com/ssvlabs/ssv/operator/validator/mocks" registrystorage "github.com/ssvlabs/ssv/registry/storage" kv "github.com/ssvlabs/ssv/storage/badger" "github.com/ssvlabs/ssv/storage/basedb" @@ -152,7 +152,7 @@ func setupEventHandler( operator *testOperator, ownerAddress *ethcommon.Address, useMockCtrl bool, -) (*eventhandler.EventHandler, *mocks.MockController, *gomock.Controller, operatorstorage.Storage, error) { +) (*eventhandler.EventHandler, *mocks.MockTaskExecutor, *gomock.Controller, operatorstorage.Storage, error) { db, err := kv.NewInMemory(logger, basedb.Options{ Ctx: ctx, }) @@ -180,14 +180,14 @@ func setupEventHandler( dgHandler := doppelganger.NoOpHandler{} if useMockCtrl { - validatorCtrl := mocks.NewMockController(ctrl) + tExecutor := mocks.NewMockTaskExecutor(ctrl) parser := eventparser.New(contractFilterer) eh, err := eventhandler.New( nodeStorage, parser, - validatorCtrl, + tExecutor, testNetworkConfig, 
operatorDataStore, operator.privateKey, @@ -201,7 +201,7 @@ func setupEventHandler( return nil, nil, nil, nil, err } - return eh, validatorCtrl, ctrl, nodeStorage, nil + return eh, tExecutor, ctrl, nodeStorage, nil } validatorCtrl := validator.NewController(logger, validator.ControllerOptions{ diff --git a/eth/eventhandler/event_handler.go b/eth/eventhandler/event_handler.go index 6ec54e94c6..89b036ffa5 100644 --- a/eth/eventhandler/event_handler.go +++ b/eth/eventhandler/event_handler.go @@ -32,7 +32,7 @@ import ( "github.com/ssvlabs/ssv/storage/basedb" ) -// Event names +// Ethereum SSV network contract event names. const ( OperatorAdded = "OperatorAdded" OperatorRemoved = "OperatorRemoved" diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index 25413adc54..0aa3806728 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -32,6 +32,7 @@ import ( "github.com/ssvlabs/ssv/beacon/goclient" "github.com/ssvlabs/ssv/doppelganger" "github.com/ssvlabs/ssv/eth/contract" + "github.com/ssvlabs/ssv/eth/eventhandler/mocks" "github.com/ssvlabs/ssv/eth/eventparser" "github.com/ssvlabs/ssv/eth/executionclient" "github.com/ssvlabs/ssv/eth/simulator" @@ -42,7 +43,6 @@ import ( operatordatastore "github.com/ssvlabs/ssv/operator/datastore" operatorstorage "github.com/ssvlabs/ssv/operator/storage" "github.com/ssvlabs/ssv/operator/validator" - "github.com/ssvlabs/ssv/operator/validator/mocks" "github.com/ssvlabs/ssv/operator/validators" registrystorage "github.com/ssvlabs/ssv/registry/storage" kv "github.com/ssvlabs/ssv/storage/badger" @@ -1359,7 +1359,7 @@ func setupEventHandler( network *networkconfig.Network, operator *testOperator, useMockCtrl bool, -) (*EventHandler, *mocks.MockController, error) { +) (*EventHandler, *mocks.MockTaskExecutor, error) { db, err := kv.NewInMemory(logger, basedb.Options{ Ctx: ctx, }) @@ -1381,7 +1381,7 @@ func setupEventHandler( ctrl := gomock.NewController(t) defer 
ctrl.Finish() - validatorCtrl := mocks.NewMockController(ctrl) + tExecutor := mocks.NewMockTaskExecutor(ctrl) contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) require.NoError(t, err) @@ -1391,7 +1391,7 @@ func setupEventHandler( eh, err := New( nodeStorage, parser, - validatorCtrl, + tExecutor, network, operatorDataStore, operator.privateKey, @@ -1404,7 +1404,7 @@ func setupEventHandler( return nil, nil, err } - return eh, validatorCtrl, nil + return eh, tExecutor, nil } validatorCtrl := validator.NewController(logger, validator.ControllerOptions{ diff --git a/eth/eventhandler/mockgen.go b/eth/eventhandler/mockgen.go new file mode 100644 index 0000000000..2284c9ed4f --- /dev/null +++ b/eth/eventhandler/mockgen.go @@ -0,0 +1,8 @@ +package eventhandler + +//go:generate go tool -modfile=../../tool.mod mockgen -package=mocks -destination=./mocks/task_executor.go github.com/ssvlabs/ssv/eth/eventhandler TaskExecutor + +// TaskExecutor adapts package-private interface for mockgen to generate properly capitalized mock-name. +// The mockgen.go cannot be a mockgen_test.go for mockgen to work (if we want Golang to ignore mockgen.go file +// during builds, we probably need to do it with build-tags). +type TaskExecutor = taskExecutor diff --git a/eth/eventhandler/mocks/task_executor.go b/eth/eventhandler/mocks/task_executor.go new file mode 100644 index 0000000000..07f31c9314 --- /dev/null +++ b/eth/eventhandler/mocks/task_executor.go @@ -0,0 +1,114 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ssvlabs/ssv/eth/eventhandler (interfaces: TaskExecutor) +// +// Generated by this command: +// +// mockgen -package=mocks -destination=./mocks/task_executor.go github.com/ssvlabs/ssv/eth/eventhandler TaskExecutor +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + reflect "reflect" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + common "github.com/ethereum/go-ethereum/common" + types "github.com/ssvlabs/ssv-spec/types" + types0 "github.com/ssvlabs/ssv/protocol/v2/types" + gomock "go.uber.org/mock/gomock" +) + +// MockTaskExecutor is a mock of TaskExecutor interface. +type MockTaskExecutor struct { + ctrl *gomock.Controller + recorder *MockTaskExecutorMockRecorder + isgomock struct{} +} + +// MockTaskExecutorMockRecorder is the mock recorder for MockTaskExecutor. +type MockTaskExecutorMockRecorder struct { + mock *MockTaskExecutor +} + +// NewMockTaskExecutor creates a new mock instance. +func NewMockTaskExecutor(ctrl *gomock.Controller) *MockTaskExecutor { + mock := &MockTaskExecutor{ctrl: ctrl} + mock.recorder = &MockTaskExecutorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTaskExecutor) EXPECT() *MockTaskExecutorMockRecorder { + return m.recorder +} + +// ExitValidator mocks base method. +func (m *MockTaskExecutor) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExitValidator", pubKey, blockNumber, validatorIndex, ownValidator) + ret0, _ := ret[0].(error) + return ret0 +} + +// ExitValidator indicates an expected call of ExitValidator. +func (mr *MockTaskExecutorMockRecorder) ExitValidator(pubKey, blockNumber, validatorIndex, ownValidator any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExitValidator", reflect.TypeOf((*MockTaskExecutor)(nil).ExitValidator), pubKey, blockNumber, validatorIndex, ownValidator) +} + +// LiquidateCluster mocks base method. 
+func (m *MockTaskExecutor) LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*types0.SSVShare) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LiquidateCluster", owner, operatorIDs, toLiquidate) + ret0, _ := ret[0].(error) + return ret0 +} + +// LiquidateCluster indicates an expected call of LiquidateCluster. +func (mr *MockTaskExecutorMockRecorder) LiquidateCluster(owner, operatorIDs, toLiquidate any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LiquidateCluster", reflect.TypeOf((*MockTaskExecutor)(nil).LiquidateCluster), owner, operatorIDs, toLiquidate) +} + +// ReactivateCluster mocks base method. +func (m *MockTaskExecutor) ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*types0.SSVShare) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReactivateCluster", owner, operatorIDs, toReactivate) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReactivateCluster indicates an expected call of ReactivateCluster. +func (mr *MockTaskExecutorMockRecorder) ReactivateCluster(owner, operatorIDs, toReactivate any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReactivateCluster", reflect.TypeOf((*MockTaskExecutor)(nil).ReactivateCluster), owner, operatorIDs, toReactivate) +} + +// StopValidator mocks base method. +func (m *MockTaskExecutor) StopValidator(pubKey types.ValidatorPK) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StopValidator", pubKey) + ret0, _ := ret[0].(error) + return ret0 +} + +// StopValidator indicates an expected call of StopValidator. +func (mr *MockTaskExecutorMockRecorder) StopValidator(pubKey any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockTaskExecutor)(nil).StopValidator), pubKey) +} + +// UpdateFeeRecipient mocks base method. 
+func (m *MockTaskExecutor) UpdateFeeRecipient(owner, recipient common.Address, blockNumber uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateFeeRecipient", owner, recipient, blockNumber) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateFeeRecipient indicates an expected call of UpdateFeeRecipient. +func (mr *MockTaskExecutorMockRecorder) UpdateFeeRecipient(owner, recipient, blockNumber any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFeeRecipient", reflect.TypeOf((*MockTaskExecutor)(nil).UpdateFeeRecipient), owner, recipient, blockNumber) +} diff --git a/operator/node.go b/operator/node.go index f46c2cda47..21cd907397 100644 --- a/operator/node.go +++ b/operator/node.go @@ -36,7 +36,7 @@ type Options struct { P2PNetwork network.P2PNetwork Context context.Context DB basedb.Database - ValidatorController validator.Controller + ValidatorController *validator.Controller ValidatorStore storage2.ValidatorStore ValidatorOptions validator.ControllerOptions `yaml:"ValidatorOptions"` DutyStore *dutystore.Store @@ -48,7 +48,7 @@ type Node struct { logger *zap.Logger network *networkconfig.Network context context.Context - validatorsCtrl validator.Controller + validatorsCtrl *validator.Controller validatorOptions validator.ControllerOptions exporterOptions exporter.Options consensusClient beaconprotocol.BeaconNode diff --git a/operator/validator/controller.go b/operator/validator/controller.go index f383542bd7..b4d8ba23e3 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -8,7 +8,6 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/ethereum/go-ethereum/common" "github.com/jellydator/ttlcache/v3" "github.com/pkg/errors" specqbft "github.com/ssvlabs/ssv-spec/qbft" @@ -96,33 +95,6 @@ type ControllerOptions struct { GasLimit uint64 `yaml:"ExperimentalGasLimit" env:"EXPERIMENTAL_GAS_LIMIT" env-description:"Gas limit for MEV 
block proposals (must match across committee, otherwise MEV fails). Do not change unless you know what you're doing"` } -// Controller represent the validators controller, -// it takes care of bootstrapping, updating and managing existing validators and their shares -type Controller interface { - StartValidators(ctx context.Context) error - HandleMetadataUpdates(ctx context.Context) - FilterIndices(afterInit bool, filter func(*ssvtypes.SSVShare) bool) []phase0.ValidatorIndex - GetValidator(pubKey spectypes.ValidatorPK) (*validator.Validator, bool) - StartNetworkHandlers() - // GetValidatorStats returns stats of validators, including the following: - // - the amount of validators in the network - // - the amount of active validators (i.e. not slashed or existed) - // - the amount of validators assigned to this operator - GetValidatorStats() (uint64, uint64, uint64, error) - IndicesChangeChan() chan struct{} - ValidatorRegistrationChan() <-chan duties.RegistrationDescriptor - ValidatorExitChan() <-chan duties.ExitDescriptor - FeeRecipientChangeChan() <-chan struct{} - - StopValidator(pubKey spectypes.ValidatorPK) error - LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error - ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error - UpdateFeeRecipient(owner, recipient common.Address, blockNumber uint64) error - ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error - ReportValidatorStatuses(ctx context.Context) - duties.DutyExecutor -} - type Nonce uint16 type SharesStorage interface { @@ -139,8 +111,8 @@ type P2PNetwork interface { FixedSubnets() commons.Subnets } -// controller implements Controller -type controller struct { +// Controller manages SSV node validators (their shares). 
+type Controller struct { ctx context.Context logger *zap.Logger @@ -173,7 +145,7 @@ type controller struct { historySyncBatchSize int messageValidator validation.MessageValidator - // nonCommittees is a cache of initialized committeeObserver instances + // committeesObservers is a cache of initialized committeeObserver instances committeesObservers *ttlcache.Cache[spectypes.MessageID, *validator.CommitteeObserver] committeesObserversMutex sync.Mutex @@ -191,8 +163,8 @@ type controller struct { traceCollector *dutytracer.Collector } -// NewController creates a new validator controller instance -func NewController(logger *zap.Logger, options ControllerOptions, exporterOptions exporter.Options) Controller { +// NewController creates a new validator controller instance. +func NewController(logger *zap.Logger, options ControllerOptions, exporterOptions exporter.Options) *Controller { logger.Debug("setting up validator controller") // lookup in a map that holds all relevant operators @@ -224,7 +196,7 @@ func NewController(logger *zap.Logger, options ControllerOptions, exporterOption cacheTTL := 2 * options.NetworkConfig.EpochDuration() // #nosec G115 - ctrl := controller{ + ctrl := &Controller{ logger: logger.Named(log.NameController), networkConfig: options.NetworkConfig, sharesStorage: options.RegistryStorage.Shares(), @@ -282,26 +254,30 @@ func NewController(logger *zap.Logger, options ControllerOptions, exporterOption go ctrl.domainCache.Start() go ctrl.beaconVoteRoots.Start() - return &ctrl + return ctrl } -func (c *controller) IndicesChangeChan() chan struct{} { +func (c *Controller) IndicesChangeChan() chan struct{} { return c.indicesChangeCh } -func (c *controller) ValidatorRegistrationChan() <-chan duties.RegistrationDescriptor { +func (c *Controller) ValidatorRegistrationChan() <-chan duties.RegistrationDescriptor { return c.validatorRegistrationCh } -func (c *controller) ValidatorExitChan() <-chan duties.ExitDescriptor { +func (c *Controller) 
ValidatorExitChan() <-chan duties.ExitDescriptor { return c.validatorExitCh } -func (c *controller) FeeRecipientChangeChan() <-chan struct{} { +func (c *Controller) FeeRecipientChangeChan() <-chan struct{} { return c.feeRecipientChangeCh } -func (c *controller) GetValidatorStats() (uint64, uint64, uint64, error) { +// GetValidatorStats returns stats of validators, including the following: +// - the amount of validators in the network +// - the amount of active validators (i.e. not slashed or exited) +// - the amount of validators assigned to this operator +func (c *Controller) GetValidatorStats() (uint64, uint64, uint64, error) { operatorShares := uint64(0) active, total := uint64(0), uint64(0) c.sharesStorage.Range(nil, func(s *ssvtypes.SSVShare) bool { @@ -317,7 +293,7 @@ func (c *controller) GetValidatorStats() (uint64, uint64, uint64, error) { return total, active, operatorShares, nil } -func (c *controller) handleRouterMessages() { +func (c *Controller) handleRouterMessages() { ctx, cancel := context.WithCancel(c.ctx) defer cancel() ch := c.messageRouter.GetMessageChan() @@ -368,7 +344,7 @@ var nonCommitteeValidatorTTLs = map[spectypes.RunnerRole]int{ spectypes.RoleSyncCommitteeContribution: 4, } -func (c *controller) handleWorkerMessages(ctx context.Context, msg network.DecodedSSVMessage) error { +func (c *Controller) handleWorkerMessages(ctx context.Context, msg network.DecodedSSVMessage) error { ssvMsg := msg.(*queue.SSVMessage) var ncv *validator.CommitteeObserver @@ -409,7 +385,7 @@ func (c *controller) handleWorkerMessages(ctx context.Context, msg network.Decod return c.handleNonCommitteeMessages(ctx, ssvMsg, ncv) } -func (c *controller) handleNonCommitteeMessages( +func (c *Controller) handleNonCommitteeMessages( ctx context.Context, msg *queue.SSVMessage, ncv *validator.CommitteeObserver, @@ -444,7 +420,7 @@ func (c *controller) handleNonCommitteeMessages( } // StartValidators loads all persisted shares and sets up the corresponding validators -func 
(c *controller) StartValidators(ctx context.Context) error { +func (c *Controller) StartValidators(ctx context.Context) error { // TODO: Pass context wherever the execution flow may be blocked. if c.validatorCommonOpts.ExporterOptions.Enabled { @@ -501,7 +477,7 @@ func (c *controller) StartValidators(ctx context.Context) error { // setupValidators initializes validators for the provided shares. // Share w/o validator's metadata won't start, but the metadata will be fetched and the validator will start afterward. -func (c *controller) setupValidators(shares []*ssvtypes.SSVShare) ([]*validator.Validator, []*validator.Committee) { +func (c *Controller) setupValidators(shares []*ssvtypes.SSVShare) ([]*validator.Validator, []*validator.Committee) { c.logger.Info("initializing validators ...", zap.Int("shares count", len(shares))) var errs []error var fetchMetadata [][]byte @@ -538,7 +514,7 @@ func (c *controller) setupValidators(shares []*ssvtypes.SSVShare) ([]*validator. return validators, committees } -func (c *controller) startValidators(validators []*validator.Validator, committees []*validator.Committee) int { +func (c *Controller) startValidators(validators []*validator.Validator, committees []*validator.Committee) int { var started int var errs []error for _, v := range validators { @@ -562,7 +538,7 @@ func (c *controller) startValidators(validators []*validator.Validator, committe } // StartNetworkHandlers init msg worker that handles network messages -func (c *controller) StartNetworkHandlers() { +func (c *Controller) StartNetworkHandlers() { c.network.UseMessageRouter(c.messageRouter) for i := 0; i < networkRouterConcurrency; i++ { go c.handleRouterMessages() @@ -571,7 +547,7 @@ func (c *controller) StartNetworkHandlers() { } // startEligibleValidators starts validators that transitioned to eligible to start due to a metadata update. 
-func (c *controller) startEligibleValidators(ctx context.Context, pubKeys []spectypes.ValidatorPK) (count int) { +func (c *Controller) startEligibleValidators(ctx context.Context, pubKeys []spectypes.ValidatorPK) (count int) { // Build a map for quick lookup to ensure only explicitly listed validators start. validatorsSet := make(map[spectypes.ValidatorPK]struct{}, len(pubKeys)) for _, v := range pubKeys { @@ -639,11 +615,11 @@ func (c *controller) startEligibleValidators(ctx context.Context, pubKeys []spec } // GetValidator returns a validator instance from ValidatorsMap -func (c *controller) GetValidator(pubKey spectypes.ValidatorPK) (*validator.Validator, bool) { +func (c *Controller) GetValidator(pubKey spectypes.ValidatorPK) (*validator.Validator, bool) { return c.validatorsMap.GetValidator(pubKey) } -func (c *controller) ExecuteDuty(ctx context.Context, duty *spectypes.ValidatorDuty) { +func (c *Controller) ExecuteDuty(ctx context.Context, duty *spectypes.ValidatorDuty) { dutyEpoch := c.networkConfig.EstimatedEpochAtSlot(duty.Slot) dutyID := fields.BuildDutyID(c.networkConfig.EstimatedEpochAtSlot(duty.Slot), duty.Slot, duty.RunnerRole(), duty.ValidatorIndex) ctx, span := tracer.Start(traces.Context(ctx, dutyID), @@ -688,7 +664,7 @@ func (c *controller) ExecuteDuty(ctx context.Context, duty *spectypes.ValidatorD span.SetStatus(codes.Ok, "") } -func (c *controller) ExecuteCommitteeDuty(ctx context.Context, committeeID spectypes.CommitteeID, duty *spectypes.CommitteeDuty) { +func (c *Controller) ExecuteCommitteeDuty(ctx context.Context, committeeID spectypes.CommitteeID, duty *spectypes.CommitteeDuty) { cm, ok := c.validatorsMap.GetCommittee(committeeID) if !ok { const eventMsg = "could not find committee" @@ -732,7 +708,7 @@ func (c *controller) ExecuteCommitteeDuty(ctx context.Context, committeeID spect span.SetStatus(codes.Ok, "") } -func (c *controller) FilterIndices(afterInit bool, filter func(*ssvtypes.SSVShare) bool) []phase0.ValidatorIndex { +func (c 
*Controller) FilterIndices(afterInit bool, filter func(*ssvtypes.SSVShare) bool) []phase0.ValidatorIndex { if afterInit { <-c.committeeValidatorSetup } @@ -747,7 +723,7 @@ func (c *controller) FilterIndices(afterInit bool, filter func(*ssvtypes.SSVShar } // onShareStop is called when a validator was removed or liquidated -func (c *controller) onShareStop(pubKey spectypes.ValidatorPK) { +func (c *Controller) onShareStop(pubKey spectypes.ValidatorPK) { // remove from ValidatorsMap v := c.validatorsMap.RemoveValidator(pubKey) @@ -776,7 +752,7 @@ func (c *controller) onShareStop(pubKey spectypes.ValidatorPK) { } } -func (c *controller) onShareInit(share *ssvtypes.SSVShare) (*validator.Validator, *validator.Committee, error) { +func (c *Controller) onShareInit(share *ssvtypes.SSVShare) (*validator.Validator, *validator.Committee, error) { if !share.HasBeaconMetadata() { // fetching index and status in case not exist c.logger.Warn("skipping validator until it becomes active", fields.PubKey(share.ValidatorPubKey[:])) return nil, nil, nil @@ -842,7 +818,7 @@ func (c *controller) onShareInit(share *ssvtypes.SSVShare) (*validator.Validator return v, vc, nil } -func (c *controller) committeeMemberFromShare(share *ssvtypes.SSVShare) (*spectypes.CommitteeMember, error) { +func (c *Controller) committeeMemberFromShare(share *ssvtypes.SSVShare) (*spectypes.CommitteeMember, error) { operators := make([]*spectypes.Operator, 0, len(share.Committee)) var activeOperators uint64 @@ -900,7 +876,7 @@ func (c *controller) committeeMemberFromShare(share *ssvtypes.SSVShare) (*specty }, nil } -func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { +func (c *Controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { v, _, err := c.onShareInit(share) if err != nil || v == nil { return false, err @@ -914,7 +890,7 @@ func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { return started, nil } -func (c *controller) printShare(s *ssvtypes.SSVShare, 
msg string) { +func (c *Controller) printShare(s *ssvtypes.SSVShare, msg string) { committee := make([]string, len(s.Committee)) for i, c := range s.Committee { committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.Signer, c.SharePubKey) @@ -927,7 +903,7 @@ func (c *controller) printShare(s *ssvtypes.SSVShare, msg string) { ) } -func (c *controller) validatorStart(validator *validator.Validator) (bool, error) { +func (c *Controller) validatorStart(validator *validator.Validator) (bool, error) { if c.validatorStartFunc == nil { return validator.Start() } @@ -935,7 +911,7 @@ func (c *controller) validatorStart(validator *validator.Validator) (bool, error } // startValidator will start the given validator if applicable -func (c *controller) startValidator(v *validator.Validator) (bool, error) { +func (c *Controller) startValidator(v *validator.Validator) (bool, error) { c.reportValidatorStatus(v.Share) if v.Share.ValidatorIndex == 0 { return false, errors.New("validator index not found") @@ -949,7 +925,7 @@ func (c *controller) startValidator(v *validator.Validator) (bool, error) { return started, nil } -func (c *controller) HandleMetadataUpdates(ctx context.Context) { +func (c *Controller) HandleMetadataUpdates(ctx context.Context) { // TODO: Consider getting rid of `Stream` method because it adds complexity. // Instead, validatorSyncer could return the next batch, which would be passed to handleMetadataUpdate afterwards. // There doesn't seem to exist any logic that requires these processes to be parallel. @@ -961,7 +937,7 @@ func (c *controller) HandleMetadataUpdates(ctx context.Context) { } // handleMetadataUpdate processes metadata changes for validators. 
-func (c *controller) handleMetadataUpdate(ctx context.Context, syncBatch metadata.SyncBatch) error { +func (c *Controller) handleMetadataUpdate(ctx context.Context, syncBatch metadata.SyncBatch) error { // Skip processing for full nodes (exporters) and operators that are still syncing // (i.e., haven't received their OperatorAdded event yet). if !c.operatorDataStore.OperatorIDReady() { @@ -1002,7 +978,7 @@ func (c *controller) handleMetadataUpdate(ctx context.Context, syncBatch metadat return nil } -func (c *controller) reportIndicesChange(ctx context.Context) bool { +func (c *Controller) reportIndicesChange(ctx context.Context) bool { timeoutCtx, cancel := context.WithTimeout(ctx, 2*c.networkConfig.SlotDuration) defer cancel() @@ -1014,7 +990,7 @@ func (c *controller) reportIndicesChange(ctx context.Context) bool { } } -func (c *controller) reportFeeRecipientChange(ctx context.Context) bool { +func (c *Controller) reportFeeRecipientChange(ctx context.Context) bool { timeoutCtx, cancel := context.WithTimeout(ctx, 2*c.networkConfig.SlotDuration) defer cancel() @@ -1026,7 +1002,7 @@ func (c *controller) reportFeeRecipientChange(ctx context.Context) bool { } } -func (c *controller) ReportValidatorStatuses(ctx context.Context) { +func (c *Controller) ReportValidatorStatuses(ctx context.Context) { ticker := time.NewTicker(time.Second * 30) defer ticker.Stop() @@ -1166,7 +1142,7 @@ func SetupRunners( return qbftCtrl } - shareMap := make(map[phase0.ValidatorIndex]*spectypes.Share) // TODO: fill the map + shareMap := make(map[phase0.ValidatorIndex]*spectypes.Share) shareMap[share.ValidatorIndex] = &share.Share runners := runner.ValidatorDutyRunners{} diff --git a/operator/validator/controller_test.go b/operator/validator/controller_test.go index a146252869..c370138291 100644 --- a/operator/validator/controller_test.go +++ b/operator/validator/controller_test.go @@ -94,7 +94,7 @@ func TestNewController(t *testing.T) { Context: t.Context(), } control := 
NewController(logger, controllerOptions, exporter.Options{}) - require.IsType(t, &controller{}, control) + require.IsType(t, &Controller{}, control) } func TestSetupValidatorsExporter(t *testing.T) { @@ -769,13 +769,13 @@ func TestUpdateFeeRecipient(t *testing.T) { }) } -func setupController(t *testing.T, logger *zap.Logger, opts MockControllerOptions) controller { +func setupController(t *testing.T, logger *zap.Logger, opts MockControllerOptions) Controller { // Default to test network config if not provided. if opts.networkConfig == nil { opts.networkConfig = networkconfig.TestNetwork } - return controller{ + return Controller{ logger: logger, beacon: opts.beacon, network: opts.network, @@ -880,10 +880,8 @@ func setupTestValidator(validatorPk spectypes.ValidatorPK, ownerAddressBytes []b OwnerAddress: common.BytesToAddress(ownerAddressBytes), }, - Queues: map[spectypes.RunnerRole]validator.QueueContainer{ - spectypes.RoleValidatorRegistration: { - Q: queue.New(1000), - }, + Queues: map[spectypes.RunnerRole]queue.Queue{ + spectypes.RoleValidatorRegistration: queue.New(1000), }, } } @@ -1166,7 +1164,7 @@ func waitForNoAction(logger *zap.Logger, indicesChange chan struct{}, timeout ti return done } -func prepareController(t *testing.T) (*controller, *mocks.MockSharesStorage) { +func prepareController(t *testing.T) (*Controller, *mocks.MockSharesStorage) { ctrl := gomock.NewController(t) t.Cleanup(ctrl.Finish) // Ensures gomock is properly cleaned up after the test @@ -1189,7 +1187,7 @@ func prepareController(t *testing.T) (*controller, *mocks.MockSharesStorage) { mockBeaconNode := beacon.NewMockBeaconNode(ctrl) mockValidatorsMap := validators.New(ctx) - validatorCtrl := &controller{ + validatorCtrl := &Controller{ ctx: ctx, beacon: mockBeaconNode, logger: logger, diff --git a/operator/validator/metrics.go b/operator/validator/metrics.go index 75d3638c60..9a5935a134 100644 --- a/operator/validator/metrics.go +++ b/operator/validator/metrics.go @@ -7,7 +7,7 @@ import ( 
"github.com/ssvlabs/ssv/protocol/v2/types" ) -func (c *controller) reportValidatorStatus(share *types.SSVShare) { +func (c *Controller) reportValidatorStatus(share *types.SSVShare) { if share == nil { c.logger.Debug("checking validator: validator share not found") return diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index 6976f02daa..89ea0b7ddb 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -10,292 +10,17 @@ package mocks import ( - context "context" reflect "reflect" - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" - common "github.com/ethereum/go-ethereum/common" types "github.com/ssvlabs/ssv-spec/types" network "github.com/ssvlabs/ssv/network" commons "github.com/ssvlabs/ssv/network/commons" - duties "github.com/ssvlabs/ssv/operator/duties" - validator "github.com/ssvlabs/ssv/protocol/v2/ssv/validator" types0 "github.com/ssvlabs/ssv/protocol/v2/types" storage "github.com/ssvlabs/ssv/registry/storage" basedb "github.com/ssvlabs/ssv/storage/basedb" gomock "go.uber.org/mock/gomock" ) -// MockController is a mock of Controller interface. -type MockController struct { - ctrl *gomock.Controller - recorder *MockControllerMockRecorder - isgomock struct{} -} - -// MockControllerMockRecorder is the mock recorder for MockController. -type MockControllerMockRecorder struct { - mock *MockController -} - -// NewMockController creates a new mock instance. -func NewMockController(ctrl *gomock.Controller) *MockController { - mock := &MockController{ctrl: ctrl} - mock.recorder = &MockControllerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockController) EXPECT() *MockControllerMockRecorder { - return m.recorder -} - -// ExecuteCommitteeDuty mocks base method. 
-func (m *MockController) ExecuteCommitteeDuty(ctx context.Context, committeeID types.CommitteeID, duty *types.CommitteeDuty) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ExecuteCommitteeDuty", ctx, committeeID, duty) -} - -// ExecuteCommitteeDuty indicates an expected call of ExecuteCommitteeDuty. -func (mr *MockControllerMockRecorder) ExecuteCommitteeDuty(ctx, committeeID, duty any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteCommitteeDuty", reflect.TypeOf((*MockController)(nil).ExecuteCommitteeDuty), ctx, committeeID, duty) -} - -// ExecuteDuty mocks base method. -func (m *MockController) ExecuteDuty(ctx context.Context, duty *types.ValidatorDuty) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ExecuteDuty", ctx, duty) -} - -// ExecuteDuty indicates an expected call of ExecuteDuty. -func (mr *MockControllerMockRecorder) ExecuteDuty(ctx, duty any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteDuty", reflect.TypeOf((*MockController)(nil).ExecuteDuty), ctx, duty) -} - -// ExitValidator mocks base method. -func (m *MockController) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExitValidator", pubKey, blockNumber, validatorIndex, ownValidator) - ret0, _ := ret[0].(error) - return ret0 -} - -// ExitValidator indicates an expected call of ExitValidator. -func (mr *MockControllerMockRecorder) ExitValidator(pubKey, blockNumber, validatorIndex, ownValidator any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExitValidator", reflect.TypeOf((*MockController)(nil).ExitValidator), pubKey, blockNumber, validatorIndex, ownValidator) -} - -// FeeRecipientChangeChan mocks base method. 
-func (m *MockController) FeeRecipientChangeChan() <-chan struct{} { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FeeRecipientChangeChan") - ret0, _ := ret[0].(<-chan struct{}) - return ret0 -} - -// FeeRecipientChangeChan indicates an expected call of FeeRecipientChangeChan. -func (mr *MockControllerMockRecorder) FeeRecipientChangeChan() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FeeRecipientChangeChan", reflect.TypeOf((*MockController)(nil).FeeRecipientChangeChan)) -} - -// FilterIndices mocks base method. -func (m *MockController) FilterIndices(afterInit bool, filter func(*types0.SSVShare) bool) []phase0.ValidatorIndex { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FilterIndices", afterInit, filter) - ret0, _ := ret[0].([]phase0.ValidatorIndex) - return ret0 -} - -// FilterIndices indicates an expected call of FilterIndices. -func (mr *MockControllerMockRecorder) FilterIndices(afterInit, filter any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterIndices", reflect.TypeOf((*MockController)(nil).FilterIndices), afterInit, filter) -} - -// GetValidator mocks base method. -func (m *MockController) GetValidator(pubKey types.ValidatorPK) (*validator.Validator, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidator", pubKey) - ret0, _ := ret[0].(*validator.Validator) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetValidator indicates an expected call of GetValidator. -func (mr *MockControllerMockRecorder) GetValidator(pubKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidator", reflect.TypeOf((*MockController)(nil).GetValidator), pubKey) -} - -// GetValidatorStats mocks base method. 
-func (m *MockController) GetValidatorStats() (uint64, uint64, uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidatorStats") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(uint64) - ret2, _ := ret[2].(uint64) - ret3, _ := ret[3].(error) - return ret0, ret1, ret2, ret3 -} - -// GetValidatorStats indicates an expected call of GetValidatorStats. -func (mr *MockControllerMockRecorder) GetValidatorStats() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorStats", reflect.TypeOf((*MockController)(nil).GetValidatorStats)) -} - -// HandleMetadataUpdates mocks base method. -func (m *MockController) HandleMetadataUpdates(ctx context.Context) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "HandleMetadataUpdates", ctx) -} - -// HandleMetadataUpdates indicates an expected call of HandleMetadataUpdates. -func (mr *MockControllerMockRecorder) HandleMetadataUpdates(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleMetadataUpdates", reflect.TypeOf((*MockController)(nil).HandleMetadataUpdates), ctx) -} - -// IndicesChangeChan mocks base method. -func (m *MockController) IndicesChangeChan() chan struct{} { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IndicesChangeChan") - ret0, _ := ret[0].(chan struct{}) - return ret0 -} - -// IndicesChangeChan indicates an expected call of IndicesChangeChan. -func (mr *MockControllerMockRecorder) IndicesChangeChan() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndicesChangeChan", reflect.TypeOf((*MockController)(nil).IndicesChangeChan)) -} - -// LiquidateCluster mocks base method. 
-func (m *MockController) LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*types0.SSVShare) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LiquidateCluster", owner, operatorIDs, toLiquidate) - ret0, _ := ret[0].(error) - return ret0 -} - -// LiquidateCluster indicates an expected call of LiquidateCluster. -func (mr *MockControllerMockRecorder) LiquidateCluster(owner, operatorIDs, toLiquidate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LiquidateCluster", reflect.TypeOf((*MockController)(nil).LiquidateCluster), owner, operatorIDs, toLiquidate) -} - -// ReactivateCluster mocks base method. -func (m *MockController) ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*types0.SSVShare) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReactivateCluster", owner, operatorIDs, toReactivate) - ret0, _ := ret[0].(error) - return ret0 -} - -// ReactivateCluster indicates an expected call of ReactivateCluster. -func (mr *MockControllerMockRecorder) ReactivateCluster(owner, operatorIDs, toReactivate any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReactivateCluster", reflect.TypeOf((*MockController)(nil).ReactivateCluster), owner, operatorIDs, toReactivate) -} - -// ReportValidatorStatuses mocks base method. -func (m *MockController) ReportValidatorStatuses(ctx context.Context) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ReportValidatorStatuses", ctx) -} - -// ReportValidatorStatuses indicates an expected call of ReportValidatorStatuses. -func (mr *MockControllerMockRecorder) ReportValidatorStatuses(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportValidatorStatuses", reflect.TypeOf((*MockController)(nil).ReportValidatorStatuses), ctx) -} - -// StartNetworkHandlers mocks base method. 
-func (m *MockController) StartNetworkHandlers() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "StartNetworkHandlers") -} - -// StartNetworkHandlers indicates an expected call of StartNetworkHandlers. -func (mr *MockControllerMockRecorder) StartNetworkHandlers() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartNetworkHandlers", reflect.TypeOf((*MockController)(nil).StartNetworkHandlers)) -} - -// StartValidators mocks base method. -func (m *MockController) StartValidators(ctx context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartValidators", ctx) - ret0, _ := ret[0].(error) - return ret0 -} - -// StartValidators indicates an expected call of StartValidators. -func (mr *MockControllerMockRecorder) StartValidators(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartValidators", reflect.TypeOf((*MockController)(nil).StartValidators), ctx) -} - -// StopValidator mocks base method. -func (m *MockController) StopValidator(pubKey types.ValidatorPK) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopValidator", pubKey) - ret0, _ := ret[0].(error) - return ret0 -} - -// StopValidator indicates an expected call of StopValidator. -func (mr *MockControllerMockRecorder) StopValidator(pubKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), pubKey) -} - -// UpdateFeeRecipient mocks base method. -func (m *MockController) UpdateFeeRecipient(owner, recipient common.Address, blockNumber uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateFeeRecipient", owner, recipient, blockNumber) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateFeeRecipient indicates an expected call of UpdateFeeRecipient. 
-func (mr *MockControllerMockRecorder) UpdateFeeRecipient(owner, recipient, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFeeRecipient", reflect.TypeOf((*MockController)(nil).UpdateFeeRecipient), owner, recipient, blockNumber) -} - -// ValidatorExitChan mocks base method. -func (m *MockController) ValidatorExitChan() <-chan duties.ExitDescriptor { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidatorExitChan") - ret0, _ := ret[0].(<-chan duties.ExitDescriptor) - return ret0 -} - -// ValidatorExitChan indicates an expected call of ValidatorExitChan. -func (mr *MockControllerMockRecorder) ValidatorExitChan() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatorExitChan", reflect.TypeOf((*MockController)(nil).ValidatorExitChan)) -} - -// ValidatorRegistrationChan mocks base method. -func (m *MockController) ValidatorRegistrationChan() <-chan duties.RegistrationDescriptor { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidatorRegistrationChan") - ret0, _ := ret[0].(<-chan duties.RegistrationDescriptor) - return ret0 -} - -// ValidatorRegistrationChan indicates an expected call of ValidatorRegistrationChan. -func (mr *MockControllerMockRecorder) ValidatorRegistrationChan() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatorRegistrationChan", reflect.TypeOf((*MockController)(nil).ValidatorRegistrationChan)) -} - // MockSharesStorage is a mock of SharesStorage interface. type MockSharesStorage struct { ctrl *gomock.Controller diff --git a/operator/validator/mocks/validator_map.go b/operator/validator/mocks/validator_map.go deleted file mode 100644 index e72b9255a1..0000000000 --- a/operator/validator/mocks/validator_map.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./validators_map.go - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - reflect "reflect" - - types "github.com/ssvlabs/ssv-spec/types" - queue "github.com/ssvlabs/ssv/protocol/v2/ssv/queue" - types0 "github.com/ssvlabs/ssv/protocol/v2/types" - gomock "go.uber.org/mock/gomock" - zap "go.uber.org/zap" -) - -// MockValidator is a mock of Validator interface. -type MockValidator struct { - ctrl *gomock.Controller - recorder *MockValidatorMockRecorder -} - -// MockValidatorMockRecorder is the mock recorder for MockValidator. -type MockValidatorMockRecorder struct { - mock *MockValidator -} - -// NewMockValidator creates a new mock instance. -func NewMockValidator(ctrl *gomock.Controller) *MockValidator { - mock := &MockValidator{ctrl: ctrl} - mock.recorder = &MockValidatorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockValidator) EXPECT() *MockValidatorMockRecorder { - return m.recorder -} - -// GetShare mocks base method. -func (m *MockValidator) GetShare() *types0.SSVShare { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetShare") - ret0, _ := ret[0].(*types0.SSVShare) - return ret0 -} - -// GetShare indicates an expected call of GetShare. -func (mr *MockValidatorMockRecorder) GetShare() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShare", reflect.TypeOf((*MockValidator)(nil).GetShare)) -} - -// ProcessMessage mocks base method. -func (m *MockValidator) ProcessMessage(logger *zap.Logger, msg *queue.SSVMessage) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ProcessMessage", logger, msg) - ret0, _ := ret[0].(error) - return ret0 -} - -// ProcessMessage indicates an expected call of ProcessMessage. 
-func (mr *MockValidatorMockRecorder) ProcessMessage(logger, msg interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessMessage", reflect.TypeOf((*MockValidator)(nil).ProcessMessage), logger, msg) -} - -// StartDuty mocks base method. -func (m *MockValidator) StartDuty(logger *zap.Logger, duty *types.Duty) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartDuty", logger, duty) - ret0, _ := ret[0].(error) - return ret0 -} - -// StartDuty indicates an expected call of StartDuty. -func (mr *MockValidatorMockRecorder) StartDuty(logger, duty interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartDuty", reflect.TypeOf((*MockValidator)(nil).StartDuty), logger, duty) -} diff --git a/operator/validator/task_executor.go b/operator/validator/task_executor.go index 516c15385a..38762bfbd1 100644 --- a/operator/validator/task_executor.go +++ b/operator/validator/task_executor.go @@ -17,13 +17,13 @@ import ( "github.com/ssvlabs/ssv/protocol/v2/types" ) -func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logger { +func (c *Controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logger { return c.logger.Named(log.NameControllerTaskExecutor). With(zap.String("task", taskName)). With(fields...) 
} -func (c *controller) StopValidator(pubKey spectypes.ValidatorPK) error { +func (c *Controller) StopValidator(pubKey spectypes.ValidatorPK) error { logger := c.taskLogger("StopValidator", fields.PubKey(pubKey[:])) validatorsRemovedCounter.Add(c.ctx, 1) @@ -34,7 +34,7 @@ func (c *controller) StopValidator(pubKey spectypes.ValidatorPK) error { return nil } -func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error { +func (c *Controller) LiquidateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error { logger := c.taskLogger("LiquidateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs)) for _, share := range toLiquidate { @@ -45,7 +45,7 @@ func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []specty return nil } -func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error { +func (c *Controller) ReactivateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error { logger := c.taskLogger("ReactivateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs)) var startedValidators int var errs error @@ -80,7 +80,7 @@ func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []spect return errs } -func (c *controller) UpdateFeeRecipient(owner, recipient common.Address, blockNumber uint64) error { +func (c *Controller) UpdateFeeRecipient(owner, recipient common.Address, blockNumber uint64) error { logger := c.taskLogger("UpdateFeeRecipient", zap.String("owner", owner.String()), zap.String("fee_recipient", recipient.String())) @@ -125,7 +125,7 @@ func (c *controller) UpdateFeeRecipient(owner, recipient common.Address, blockNu return nil } -func (c *controller) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error { 
+func (c *Controller) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error { logger := c.taskLogger("ExitValidator", fields.PubKey(pubKey[:]), fields.BlockNumber(blockNumber), diff --git a/protocol/v2/ssv/runner/aggregator.go b/protocol/v2/ssv/runner/aggregator.go index d1b53cd4b3..78b877c6e9 100644 --- a/protocol/v2/ssv/runner/aggregator.go +++ b/protocol/v2/ssv/runner/aggregator.go @@ -204,7 +204,7 @@ func (r *AggregatorRunner) ProcessConsensus(ctx context.Context, logger *zap.Log defer span.End() span.AddEvent("checking if instance is decided") - decided, encDecidedValue, err := r.BaseRunner.baseConsensusMsgProcessing(ctx, logger, r, signedMsg, &spectypes.ValidatorConsensusData{}) + decided, encDecidedValue, err := r.BaseRunner.baseConsensusMsgProcessing(ctx, logger, r.GetValCheckF(), signedMsg, &spectypes.ValidatorConsensusData{}) if err != nil { return traces.Errorf(span, "failed processing consensus message: %w", err) } @@ -235,7 +235,7 @@ func (r *AggregatorRunner) ProcessConsensus(ctx context.Context, logger *zap.Log span.AddEvent("signing post consensus") // specific duty sig - msg, err := r.BaseRunner.signBeaconObject( + msg, err := signBeaconObject( ctx, r, r.BaseRunner.State.StartingDuty.(*spectypes.ValidatorDuty), @@ -369,6 +369,10 @@ func (r *AggregatorRunner) ProcessPostConsensus(ctx context.Context, logger *zap return nil } +func (r *AggregatorRunner) OnTimeoutQBFT(ctx context.Context, logger *zap.Logger, msg ssvtypes.EventMsg) error { + return r.BaseRunner.OnTimeoutQBFT(ctx, logger, msg) +} + func (r *AggregatorRunner) expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { return []ssz.HashRoot{spectypes.SSZUint64(r.GetState().StartingDuty.DutySlot())}, spectypes.DomainSelectionProof, nil } @@ -407,7 +411,7 @@ func (r *AggregatorRunner) executeDuty(ctx context.Context, logger *zap.Logger, // sign selection proof span.AddEvent("signing beacon object") - 
msg, err := r.BaseRunner.signBeaconObject( + msg, err := signBeaconObject( ctx, r, duty.(*spectypes.ValidatorDuty), @@ -459,14 +463,14 @@ func (r *AggregatorRunner) executeDuty(ctx context.Context, logger *zap.Logger, return nil } -func (r *AggregatorRunner) GetBaseRunner() *BaseRunner { - return r.BaseRunner -} - func (r *AggregatorRunner) GetNetwork() specqbft.Network { return r.network } +func (r *AggregatorRunner) GetNetworkConfig() *networkconfig.Network { + return r.BaseRunner.NetworkConfig +} + func (r *AggregatorRunner) GetBeaconNode() beacon.BeaconNode { return r.beacon } @@ -494,6 +498,38 @@ func (r *AggregatorRunner) GetOperatorSigner() ssvtypes.OperatorSigner { return r.operatorSigner } +func (r *AggregatorRunner) HasRunningQBFTInstance() bool { + return r.BaseRunner.HasRunningQBFTInstance() +} + +func (r *AggregatorRunner) HasAcceptedProposalForCurrentRound() bool { + return r.BaseRunner.HasAcceptedProposalForCurrentRound() +} + +func (r *AggregatorRunner) GetShares() map[phase0.ValidatorIndex]*spectypes.Share { + return r.BaseRunner.GetShares() +} + +func (r *AggregatorRunner) GetRole() spectypes.RunnerRole { + return r.BaseRunner.GetRole() +} + +func (r *AggregatorRunner) GetLastHeight() specqbft.Height { + return r.BaseRunner.GetLastHeight() +} + +func (r *AggregatorRunner) GetLastRound() specqbft.Round { + return r.BaseRunner.GetLastRound() +} + +func (r *AggregatorRunner) GetStateRoot() ([32]byte, error) { + return r.BaseRunner.GetStateRoot() +} + +func (r *AggregatorRunner) SetTimeoutFunc(fn TimeoutF) { + r.BaseRunner.SetTimeoutFunc(fn) +} + // Encode returns the encoded struct in bytes or error func (r *AggregatorRunner) Encode() ([]byte, error) { return json.Marshal(r) diff --git a/protocol/v2/ssv/runner/committee.go b/protocol/v2/ssv/runner/committee.go index ffd04a79ef..fac458c0b1 100644 --- a/protocol/v2/ssv/runner/committee.go +++ b/protocol/v2/ssv/runner/committee.go @@ -196,8 +196,36 @@ func (cr *CommitteeRunner) UnmarshalJSON(data 
[]byte) error { return nil } -func (cr *CommitteeRunner) GetBaseRunner() *BaseRunner { - return cr.BaseRunner +func (cr *CommitteeRunner) HasRunningQBFTInstance() bool { + return cr.BaseRunner.HasRunningQBFTInstance() +} + +func (cr *CommitteeRunner) HasAcceptedProposalForCurrentRound() bool { + return cr.BaseRunner.HasAcceptedProposalForCurrentRound() +} + +func (cr *CommitteeRunner) GetShares() map[phase0.ValidatorIndex]*spectypes.Share { + return cr.BaseRunner.GetShares() +} + +func (cr *CommitteeRunner) GetRole() spectypes.RunnerRole { + return cr.BaseRunner.GetRole() +} + +func (cr *CommitteeRunner) GetLastHeight() specqbft.Height { + return cr.BaseRunner.GetLastHeight() +} + +func (cr *CommitteeRunner) GetLastRound() specqbft.Round { + return cr.BaseRunner.GetLastRound() +} + +func (cr *CommitteeRunner) GetStateRoot() ([32]byte, error) { + return cr.BaseRunner.GetStateRoot() +} + +func (cr *CommitteeRunner) SetTimeoutFunc(fn TimeoutF) { + cr.BaseRunner.SetTimeoutFunc(fn) } func (cr *CommitteeRunner) GetBeaconNode() beacon.BeaconNode { @@ -212,6 +240,10 @@ func (cr *CommitteeRunner) GetNetwork() specqbft.Network { return cr.network } +func (cr *CommitteeRunner) GetNetworkConfig() *networkconfig.Network { + return cr.BaseRunner.NetworkConfig +} + func (cr *CommitteeRunner) GetBeaconSigner() ekm.BeaconSigner { return cr.signer } @@ -235,7 +267,7 @@ func (cr *CommitteeRunner) ProcessConsensus(ctx context.Context, logger *zap.Log defer span.End() span.AddEvent("checking if instance is decided") - decided, decidedValue, err := cr.BaseRunner.baseConsensusMsgProcessing(ctx, logger, cr, msg, &spectypes.BeaconVote{}) + decided, decidedValue, err := cr.BaseRunner.baseConsensusMsgProcessing(ctx, logger, cr.GetValCheckF(), msg, &spectypes.BeaconVote{}) if err != nil { return traces.Errorf(span, "failed processing consensus message: %w", err) } @@ -347,7 +379,7 @@ func (cr *CommitteeRunner) ProcessConsensus(ctx context.Context, logger *zap.Log case 
spectypes.BNRoleSyncCommittee: totalSyncCommitteeDuties.Add(1) - partialSigMsg, err := cr.BaseRunner.signBeaconObject( + partialSigMsg, err := signBeaconObject( ctx, cr, validatorDuty, @@ -420,7 +452,7 @@ listener: MsgType: spectypes.SSVPartialSignatureMsgType, MsgID: spectypes.NewMsgID( cr.BaseRunner.NetworkConfig.DomainType, - cr.GetBaseRunner().QBFTController.CommitteeMember.CommitteeID[:], + cr.BaseRunner.QBFTController.CommitteeMember.CommitteeID[:], cr.BaseRunner.RunnerRoleType, ), } @@ -480,7 +512,7 @@ func (cr *CommitteeRunner) signAttesterDuty( attestationData := constructAttestationData(beaconVote, validatorDuty, version) span.AddEvent("signing beacon object") - partialMsg, err := cr.BaseRunner.signBeaconObject( + partialMsg, err := signBeaconObject( ctx, cr, validatorDuty, @@ -742,7 +774,7 @@ func (cr *CommitteeRunner) ProcessPostConsensus(ctx context.Context, logger *zap recordSuccessfulSubmission( ctx, uint32(attestationsCount), - cr.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(cr.GetBaseRunner().State.StartingDuty.DutySlot()), + cr.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(cr.BaseRunner.State.StartingDuty.DutySlot()), spectypes.BNRoleAttester, ) } @@ -759,7 +791,7 @@ func (cr *CommitteeRunner) ProcessPostConsensus(ctx context.Context, logger *zap )) logger.Info(eventMsg, - fields.Epoch(cr.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(cr.GetBaseRunner().State.StartingDuty.DutySlot())), + fields.Epoch(cr.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(cr.BaseRunner.State.StartingDuty.DutySlot())), fields.QBFTHeight(cr.BaseRunner.QBFTController.Height), fields.QBFTRound(cr.BaseRunner.State.RunningInstance.State.Round), fields.BlockRoot(attData.BeaconBlockRoot), @@ -799,7 +831,7 @@ func (cr *CommitteeRunner) ProcessPostConsensus(ctx context.Context, logger *zap recordSuccessfulSubmission( ctx, uint32(syncMsgsCount), - cr.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(cr.GetBaseRunner().State.StartingDuty.DutySlot()), + 
cr.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(cr.BaseRunner.State.StartingDuty.DutySlot()), spectypes.BNRoleSyncCommittee, ) } @@ -842,6 +874,10 @@ func (cr *CommitteeRunner) ProcessPostConsensus(ctx context.Context, logger *zap return nil } +func (cr *CommitteeRunner) OnTimeoutQBFT(ctx context.Context, logger *zap.Logger, msg ssvtypes.EventMsg) error { + return cr.BaseRunner.OnTimeoutQBFT(ctx, logger, msg) +} + // HasSubmittedAllValidatorDuties -- Returns true if the runner has done submissions for all validators for the given slot func (cr *CommitteeRunner) HasSubmittedAllValidatorDuties(attestationMap map[phase0.ValidatorIndex][32]byte, syncCommitteeMap map[phase0.ValidatorIndex][32]byte) bool { // Expected total @@ -910,7 +946,7 @@ func findValidators( // Unneeded since no preconsensus phase func (cr *CommitteeRunner) expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { - return nil, spectypes.DomainError, errors.New("no pre consensus root for committee runner") + return nil, spectypes.DomainError, errors.New("no pre consensus roots for committee runner") } // This function signature returns only one domain type... 
but we can have mixed domains @@ -936,9 +972,8 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects(ctx contex } slot := duty.DutySlot() - epoch := cr.GetBaseRunner().NetworkConfig.EstimatedEpochAtSlot(slot) - - dataVersion, _ := cr.GetBaseRunner().NetworkConfig.ForkAtEpoch(epoch) + epoch := cr.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(slot) + dataVersion, _ := cr.BaseRunner.NetworkConfig.ForkAtEpoch(epoch) for _, validatorDuty := range duty.(*spectypes.CommitteeDuty).ValidatorDuties { if validatorDuty == nil { @@ -950,7 +985,7 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects(ctx contex } logger := logger.With(fields.Validator(validatorDuty.PubKey[:])) slot := validatorDuty.DutySlot() - epoch := cr.GetBaseRunner().NetworkConfig.EstimatedEpochAtSlot(slot) + epoch := cr.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(slot) switch validatorDuty.Type { case spectypes.BNRoleAttester: // Attestation object diff --git a/protocol/v2/ssv/runner/duty_runners.go b/protocol/v2/ssv/runner/duty_runners.go index b51ec268f3..ccd96adeeb 100644 --- a/protocol/v2/ssv/runner/duty_runners.go +++ b/protocol/v2/ssv/runner/duty_runners.go @@ -6,7 +6,7 @@ import spectypes "github.com/ssvlabs/ssv-spec/types" type ValidatorDutyRunners map[spectypes.RunnerRole]Runner // DutyRunnerForMsgID returns a Runner from the provided msg ID, or nil if not found -func (ci ValidatorDutyRunners) DutyRunnerForMsgID(msgID spectypes.MessageID) Runner { +func (r ValidatorDutyRunners) DutyRunnerForMsgID(msgID spectypes.MessageID) Runner { role := msgID.GetRoleType() - return ci[role] + return r[role] } diff --git a/protocol/v2/ssv/runner/proposer.go b/protocol/v2/ssv/runner/proposer.go index 59fc0a2793..e6dd2b15c7 100644 --- a/protocol/v2/ssv/runner/proposer.go +++ b/protocol/v2/ssv/runner/proposer.go @@ -224,7 +224,7 @@ func (r *ProposerRunner) ProcessConsensus(ctx context.Context, logger *zap.Logge defer span.End() span.AddEvent("checking if instance is decided") 
- decided, decidedValue, err := r.BaseRunner.baseConsensusMsgProcessing(ctx, logger, r, signedMsg, &spectypes.ValidatorConsensusData{}) + decided, decidedValue, err := r.BaseRunner.baseConsensusMsgProcessing(ctx, logger, r.GetValCheckF(), signedMsg, &spectypes.ValidatorConsensusData{}) if err != nil { return traces.Errorf(span, "failed processing consensus message: %w", err) } @@ -272,7 +272,14 @@ func (r *ProposerRunner) ProcessConsensus(ctx context.Context, logger *zap.Logge } span.AddEvent("signing beacon object") - msg, err := r.BaseRunner.signBeaconObject(ctx, r, duty, blkToSign, cd.Duty.Slot, spectypes.DomainProposer) + msg, err := signBeaconObject( + ctx, + r, + duty, + blkToSign, + cd.Duty.Slot, + spectypes.DomainProposer, + ) if err != nil { return traces.Errorf(span, "failed signing block: %w", err) } @@ -316,6 +323,10 @@ func (r *ProposerRunner) ProcessConsensus(ctx context.Context, logger *zap.Logge return nil } +func (r *ProposerRunner) OnTimeoutQBFT(ctx context.Context, logger *zap.Logger, msg ssvtypes.EventMsg) error { + return r.BaseRunner.OnTimeoutQBFT(ctx, logger, msg) +} + func (r *ProposerRunner) ProcessPostConsensus(ctx context.Context, logger *zap.Logger, signedMsg *spectypes.PartialSignatureMessages) error { ctx, span := tracer.Start(ctx, observability.InstrumentName(observabilityNamespace, "runner.process_post_consensus"), @@ -518,7 +529,7 @@ func (r *ProposerRunner) executeDuty(ctx context.Context, logger *zap.Logger, du // sign partial randao span.AddEvent("signing beacon object") epoch := r.BaseRunner.NetworkConfig.EstimatedEpochAtSlot(duty.DutySlot()) - msg, err := r.BaseRunner.signBeaconObject( + msg, err := signBeaconObject( ctx, r, proposerDuty, @@ -571,14 +582,46 @@ func (r *ProposerRunner) executeDuty(ctx context.Context, logger *zap.Logger, du return nil } -func (r *ProposerRunner) GetBaseRunner() *BaseRunner { - return r.BaseRunner +func (r *ProposerRunner) HasRunningQBFTInstance() bool { + return 
r.BaseRunner.HasRunningQBFTInstance() +} + +func (r *ProposerRunner) HasAcceptedProposalForCurrentRound() bool { + return r.BaseRunner.HasAcceptedProposalForCurrentRound() +} + +func (r *ProposerRunner) GetShares() map[phase0.ValidatorIndex]*spectypes.Share { + return r.BaseRunner.GetShares() +} + +func (r *ProposerRunner) GetRole() spectypes.RunnerRole { + return r.BaseRunner.GetRole() +} + +func (r *ProposerRunner) GetLastHeight() specqbft.Height { + return r.BaseRunner.GetLastHeight() +} + +func (r *ProposerRunner) GetLastRound() specqbft.Round { + return r.BaseRunner.GetLastRound() +} + +func (r *ProposerRunner) GetStateRoot() ([32]byte, error) { + return r.BaseRunner.GetStateRoot() +} + +func (r *ProposerRunner) SetTimeoutFunc(fn TimeoutF) { + r.BaseRunner.SetTimeoutFunc(fn) } func (r *ProposerRunner) GetNetwork() specqbft.Network { return r.network } +func (r *ProposerRunner) GetNetworkConfig() *networkconfig.Network { + return r.BaseRunner.NetworkConfig +} + func (r *ProposerRunner) GetBeaconNode() beacon.BeaconNode { return r.beacon } diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index ccbc9c0a13..5a27cd5b22 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -8,13 +8,12 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" ssz "github.com/ferranbt/fastssz" "github.com/pkg/errors" + specqbft "github.com/ssvlabs/ssv-spec/qbft" + spectypes "github.com/ssvlabs/ssv-spec/types" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" - specqbft "github.com/ssvlabs/ssv-spec/qbft" - spectypes "github.com/ssvlabs/ssv-spec/types" - "github.com/ssvlabs/ssv/ssvsigner/ekm" "github.com/ssvlabs/ssv/networkconfig" @@ -22,23 +21,37 @@ import ( "github.com/ssvlabs/ssv/observability/traces" "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" + "github.com/ssvlabs/ssv/protocol/v2/qbft/instance" 
"github.com/ssvlabs/ssv/protocol/v2/ssv" ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" ) type Getters interface { - GetBaseRunner() *BaseRunner + HasRunningQBFTInstance() bool + HasAcceptedProposalForCurrentRound() bool + GetShares() map[phase0.ValidatorIndex]*spectypes.Share + GetRole() spectypes.RunnerRole + GetLastHeight() specqbft.Height + GetLastRound() specqbft.Round + GetStateRoot() ([32]byte, error) GetBeaconNode() beacon.BeaconNode GetValCheckF() specqbft.ProposedValueCheckF GetSigner() ekm.BeaconSigner GetOperatorSigner() ssvtypes.OperatorSigner GetNetwork() specqbft.Network + GetNetworkConfig() *networkconfig.Network +} + +type Setters interface { + SetTimeoutFunc(TimeoutF) } type Runner interface { spectypes.Encoder spectypes.Root + Getters + Setters // StartNewDuty starts a new duty for the runner, returns error if can't StartNewDuty(ctx context.Context, logger *zap.Logger, duty spectypes.Duty, quorum uint64) error @@ -50,6 +63,9 @@ type Runner interface { ProcessConsensus(ctx context.Context, logger *zap.Logger, msg *spectypes.SignedSSVMessage) error // ProcessPostConsensus processes all post-consensus msgs, returns error if can't process ProcessPostConsensus(ctx context.Context, logger *zap.Logger, signedMsg *spectypes.PartialSignatureMessages) error + // OnTimeoutQBFT processes timeout event that can arrive during QBFT consensus phase + OnTimeoutQBFT(ctx context.Context, logger *zap.Logger, msg ssvtypes.EventMsg) error + // expectedPreConsensusRootsAndDomain an INTERNAL function, returns the expected pre-consensus roots to sign expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) // expectedPostConsensusRootsAndDomain an INTERNAL function, returns the expected post-consensus roots to sign @@ -81,6 +97,62 @@ type BaseRunner struct { highestDecidedSlot phase0.Slot } +func (b *BaseRunner) HasRunningQBFTInstance() bool { + var runningInstance *instance.Instance + if b.hasRunningDuty() { + runningInstance = 
b.State.RunningInstance + if runningInstance != nil { + decided, _ := runningInstance.IsDecided() + return !decided + } + } + return false +} + +func (b *BaseRunner) HasAcceptedProposalForCurrentRound() bool { + var runningInstance *instance.Instance + if b.hasRunningDuty() { + runningInstance = b.State.RunningInstance + if runningInstance != nil { + return runningInstance.State.ProposalAcceptedForCurrentRound != nil + } + } + return false +} + +func (b *BaseRunner) GetShares() map[phase0.ValidatorIndex]*spectypes.Share { + return b.Share +} + +func (b *BaseRunner) GetRole() spectypes.RunnerRole { + return b.RunnerRoleType +} + +func (b *BaseRunner) GetLastHeight() specqbft.Height { + if ctrl := b.QBFTController; ctrl != nil { + return ctrl.Height + } + return specqbft.Height(0) +} + +func (b *BaseRunner) GetLastRound() specqbft.Round { + if b.hasRunningDuty() { + inst := b.State.RunningInstance + if inst != nil { + return inst.State.Round + } + } + return specqbft.Round(1) +} + +func (b *BaseRunner) GetStateRoot() ([32]byte, error) { + return b.State.GetRoot() +} + +func (b *BaseRunner) SetTimeoutFunc(fn TimeoutF) { + b.TimeoutF = fn +} + func (b *BaseRunner) Encode() ([]byte, error) { return json.Marshal(b) } @@ -179,7 +251,7 @@ func (b *BaseRunner) basePreConsensusMsgProcessing( } // baseConsensusMsgProcessing is a base func that all runner implementation can call for processing a consensus msg -func (b *BaseRunner) baseConsensusMsgProcessing(ctx context.Context, logger *zap.Logger, runner Runner, msg *spectypes.SignedSSVMessage, decidedValue spectypes.Encoder) (bool, spectypes.Encoder, error) { +func (b *BaseRunner) baseConsensusMsgProcessing(ctx context.Context, logger *zap.Logger, valueCheckFn specqbft.ProposedValueCheckF, msg *spectypes.SignedSSVMessage, decidedValue spectypes.Encoder) (bool, spectypes.Encoder, error) { prevDecided := false if b.hasRunningDuty() && b.State != nil && b.State.RunningInstance != nil { prevDecided, _ = 
b.State.RunningInstance.IsDecided() @@ -193,8 +265,9 @@ func (b *BaseRunner) baseConsensusMsgProcessing(ctx context.Context, logger *zap return false, nil, err } - // we allow all consensus msgs to be processed, once the process finishes we check if there is an actual running duty - // do not return error if no running duty + // we allow all consensus msgs to be processed, once the process finishes, we check if there is + // an actual running duty - we consider this messaged "processed" (we might or might not get another + // message, hopefully we are running the duty by that time to finish the processing below) if !b.hasRunningDuty() { logger.Debug("no running duty") return false, nil, nil @@ -208,16 +281,17 @@ func (b *BaseRunner) baseConsensusMsgProcessing(ctx context.Context, logger *zap return true, nil, errors.Wrap(err, "failed to parse decided value to ValidatorConsensusData") } - if err := b.validateDecidedConsensusData(runner, decidedValue); err != nil { + if err := b.validateDecidedConsensusData(valueCheckFn, decidedValue); err != nil { return true, nil, errors.Wrap(err, "decided ValidatorConsensusData invalid") } - runner.GetBaseRunner().State.DecidedValue, err = decidedValue.Encode() + decidedValueEncoded, err := decidedValue.Encode() if err != nil { return true, nil, errors.Wrap(err, "could not encode decided value") } - // update the highest decided slot + // update the decided and the highest decided slot + b.State.DecidedValue = decidedValueEncoded b.highestDecidedSlot = b.State.StartingDuty.DutySlot() return true, decidedValue, nil @@ -302,7 +376,7 @@ func (b *BaseRunner) decide(ctx context.Context, logger *zap.Logger, runner Runn ctx, span := tracer.Start(ctx, observability.InstrumentName(observabilityNamespace, "base_runner.decide"), trace.WithAttributes( - observability.RunnerRoleAttribute(runner.GetBaseRunner().RunnerRoleType), + observability.RunnerRoleAttribute(runner.GetRole()), observability.BeaconSlotAttribute(slot))) defer span.End() @@ 
-316,7 +390,7 @@ func (b *BaseRunner) decide(ctx context.Context, logger *zap.Logger, runner Runn } span.AddEvent("start new instance") - if err := runner.GetBaseRunner().QBFTController.StartNewInstance( + if err := b.QBFTController.StartNewInstance( ctx, logger, specqbft.Height(slot), @@ -324,16 +398,15 @@ func (b *BaseRunner) decide(ctx context.Context, logger *zap.Logger, runner Runn ); err != nil { return traces.Errorf(span, "could not start new QBFT instance: %w", err) } - - newInstance := runner.GetBaseRunner().QBFTController.StoredInstances.FindInstance(runner.GetBaseRunner().QBFTController.Height) + newInstance := b.QBFTController.StoredInstances.FindInstance(b.QBFTController.Height) if newInstance == nil { return traces.Errorf(span, "could not find newly created QBFT instance") } - runner.GetBaseRunner().State.RunningInstance = newInstance + b.State.RunningInstance = newInstance span.AddEvent("register timeout handler") - b.registerTimeoutHandler(ctx, logger, newInstance, runner.GetBaseRunner().QBFTController.Height) + b.registerTimeoutHandler(ctx, logger, newInstance, b.QBFTController.Height) span.SetStatus(codes.Ok, "") return nil @@ -366,3 +439,7 @@ func (b *BaseRunner) ShouldProcessNonBeaconDuty(duty spectypes.Duty) error { } return nil } + +func (b *BaseRunner) OnTimeoutQBFT(ctx context.Context, logger *zap.Logger, msg ssvtypes.EventMsg) error { + return b.QBFTController.OnTimeout(ctx, logger, msg) +} diff --git a/protocol/v2/ssv/runner/runner_signatures.go b/protocol/v2/ssv/runner/runner_signatures.go index 9e21b8ff4c..d77dfebe96 100644 --- a/protocol/v2/ssv/runner/runner_signatures.go +++ b/protocol/v2/ssv/runner/runner_signatures.go @@ -15,27 +15,40 @@ import ( "github.com/ssvlabs/ssv/protocol/v2/types" ) -func (b *BaseRunner) signBeaconObject( +func signBeaconObject( ctx context.Context, runner Runner, duty *spectypes.ValidatorDuty, - obj ssz.HashRoot, + root ssz.HashRoot, slot spec.Slot, signatureDomain spec.DomainType, ) 
(*spectypes.PartialSignatureMessage, error) { - epoch := runner.GetBaseRunner().NetworkConfig.EstimatedEpochAtSlot(slot) + epoch := runner.GetNetworkConfig().EstimatedEpochAtSlot(slot) domain, err := runner.GetBeaconNode().DomainData(ctx, epoch, signatureDomain) if err != nil { - return nil, errors.Wrap(err, "could not get beacon domain") + return nil, fmt.Errorf("failed to fetch beacon domain: %w", err) } - if _, ok := runner.GetBaseRunner().Share[duty.ValidatorIndex]; !ok { - return nil, fmt.Errorf("unknown validator index %d", duty.ValidatorIndex) + return signAsValidator(ctx, runner, duty.ValidatorIndex, root, slot, signatureDomain, domain) +} + +func signAsValidator( + ctx context.Context, + runner Runner, + validatorIndex spec.ValidatorIndex, + root ssz.HashRoot, + slot spec.Slot, + signatureDomain spec.DomainType, + domain spec.Domain, +) (*spectypes.PartialSignatureMessage, error) { + share, ok := runner.GetShares()[validatorIndex] + if !ok { + return nil, fmt.Errorf("unknown validator index %d", validatorIndex) } sig, r, err := runner.GetSigner().SignBeaconObject( ctx, - obj, + root, domain, - spec.BLSPubKey(runner.GetBaseRunner().Share[duty.ValidatorIndex].SharePubKey), + spec.BLSPubKey(share.SharePubKey), slot, signatureDomain, ) @@ -47,23 +60,10 @@ func (b *BaseRunner) signBeaconObject( PartialSignature: sig, SigningRoot: r, Signer: runner.GetOperatorSigner().GetOperatorID(), - ValidatorIndex: duty.ValidatorIndex, + ValidatorIndex: validatorIndex, }, nil } -//func (b *BaseRunner) signPostConsensusMsg(runner Runner, msg *spectypes.PartialSignatureMessages) (*spectypes.SignedPartialSignatureMessage, error) { -// signature, err := runner.GetSigner().SignBeaconObject(msg, spectypes.PartialSignatureType, b.Share.SharePubKey) -// if err != nil { -// return nil, errors.Wrap(err, "could not sign PartialSignatureMessage for PostConsensusContainer") -// } -// -// return &spectypes.SignedPartialSignatureMessage{ -// Message: *msg, -// Signature: signature, -// 
Signer: b.Share.OperatorID, -// }, nil -//} - // Validate message content without verifying signatures func (b *BaseRunner) validatePartialSigMsgForSlot( psigMsgs *spectypes.PartialSignatureMessages, diff --git a/protocol/v2/ssv/runner/runner_validations.go b/protocol/v2/ssv/runner/runner_validations.go index 21250c034b..ff832083fe 100644 --- a/protocol/v2/ssv/runner/runner_validations.go +++ b/protocol/v2/ssv/runner/runner_validations.go @@ -8,6 +8,7 @@ import ( spec "github.com/attestantio/go-eth2-client/spec/phase0" ssz "github.com/ferranbt/fastssz" "github.com/pkg/errors" + specqbft "github.com/ssvlabs/ssv-spec/qbft" spectypes "github.com/ssvlabs/ssv-spec/types" @@ -97,12 +98,12 @@ func (b *BaseRunner) ValidatePostConsensusMsg(ctx context.Context, runner Runner } } -func (b *BaseRunner) validateDecidedConsensusData(runner Runner, val spectypes.Encoder) error { +func (b *BaseRunner) validateDecidedConsensusData(valueCheckFn specqbft.ProposedValueCheckF, val spectypes.Encoder) error { byts, err := val.Encode() if err != nil { return errors.Wrap(err, "could not encode decided value") } - if err := runner.GetValCheckF()(byts); err != nil { + if err := valueCheckFn(byts); err != nil { return errors.Wrap(err, "decided value is invalid") } diff --git a/protocol/v2/ssv/runner/sync_committee_contribution.go b/protocol/v2/ssv/runner/sync_committee_contribution.go index 811992458b..f8fd3c1cdf 100644 --- a/protocol/v2/ssv/runner/sync_committee_contribution.go +++ b/protocol/v2/ssv/runner/sync_committee_contribution.go @@ -11,12 +11,13 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" ssz "github.com/ferranbt/fastssz" "github.com/pkg/errors" + specqbft "github.com/ssvlabs/ssv-spec/qbft" + spectypes "github.com/ssvlabs/ssv-spec/types" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" - specqbft "github.com/ssvlabs/ssv-spec/qbft" - spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/ssvsigner/ekm" 
"github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/observability" @@ -25,7 +26,6 @@ import ( "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" - "github.com/ssvlabs/ssv/ssvsigner/ekm" ) type SyncCommitteeAggregatorRunner struct { @@ -193,7 +193,7 @@ func (r *SyncCommitteeAggregatorRunner) ProcessConsensus(ctx context.Context, lo defer span.End() span.AddEvent("checking if instance is decided") - decided, decidedValue, err := r.BaseRunner.baseConsensusMsgProcessing(ctx, logger, r, signedMsg, &spectypes.ValidatorConsensusData{}) + decided, decidedValue, err := r.BaseRunner.baseConsensusMsgProcessing(ctx, logger, r.GetValCheckF(), signedMsg, &spectypes.ValidatorConsensusData{}) if err != nil { return traces.Errorf(span, "failed processing consensus message: %w", err) } @@ -230,7 +230,7 @@ func (r *SyncCommitteeAggregatorRunner) ProcessConsensus(ctx context.Context, lo return traces.Errorf(span, "could not generate contribution and proof: %w", err) } - signed, err := r.BaseRunner.signBeaconObject( + signed, err := signBeaconObject( ctx, r, r.BaseRunner.State.StartingDuty.(*spectypes.ValidatorDuty), @@ -285,6 +285,10 @@ func (r *SyncCommitteeAggregatorRunner) ProcessConsensus(ctx context.Context, lo return nil } +func (r *SyncCommitteeAggregatorRunner) OnTimeoutQBFT(ctx context.Context, logger *zap.Logger, msg ssvtypes.EventMsg) error { + return r.BaseRunner.OnTimeoutQBFT(ctx, logger, msg) +} + func (r *SyncCommitteeAggregatorRunner) ProcessPostConsensus(ctx context.Context, logger *zap.Logger, signedMsg *spectypes.PartialSignatureMessages) error { ctx, span := tracer.Start(ctx, observability.InstrumentName(observabilityNamespace, "runner.process_post_consensus"), @@ -483,7 +487,7 @@ func (r *SyncCommitteeAggregatorRunner) executeDuty(ctx context.Context, logger SubcommitteeIndex: subnet, } span.AddEvent("signing beacon object") - msg, err := 
r.BaseRunner.signBeaconObject( + msg, err := signBeaconObject( ctx, r, duty.(*spectypes.ValidatorDuty), @@ -531,14 +535,46 @@ func (r *SyncCommitteeAggregatorRunner) executeDuty(ctx context.Context, logger return nil } -func (r *SyncCommitteeAggregatorRunner) GetBaseRunner() *BaseRunner { - return r.BaseRunner +func (r *SyncCommitteeAggregatorRunner) HasRunningQBFTInstance() bool { + return r.BaseRunner.HasRunningQBFTInstance() +} + +func (r *SyncCommitteeAggregatorRunner) HasAcceptedProposalForCurrentRound() bool { + return r.BaseRunner.HasAcceptedProposalForCurrentRound() +} + +func (r *SyncCommitteeAggregatorRunner) GetShares() map[phase0.ValidatorIndex]*spectypes.Share { + return r.BaseRunner.GetShares() +} + +func (r *SyncCommitteeAggregatorRunner) GetRole() spectypes.RunnerRole { + return r.BaseRunner.GetRole() +} + +func (r *SyncCommitteeAggregatorRunner) GetLastHeight() specqbft.Height { + return r.BaseRunner.GetLastHeight() +} + +func (r *SyncCommitteeAggregatorRunner) GetLastRound() specqbft.Round { + return r.BaseRunner.GetLastRound() +} + +func (r *SyncCommitteeAggregatorRunner) GetStateRoot() ([32]byte, error) { + return r.BaseRunner.GetStateRoot() +} + +func (r *SyncCommitteeAggregatorRunner) SetTimeoutFunc(fn TimeoutF) { + r.BaseRunner.SetTimeoutFunc(fn) } func (r *SyncCommitteeAggregatorRunner) GetNetwork() specqbft.Network { return r.network } +func (r *SyncCommitteeAggregatorRunner) GetNetworkConfig() *networkconfig.Network { + return r.BaseRunner.NetworkConfig +} + func (r *SyncCommitteeAggregatorRunner) GetBeaconNode() beacon.BeaconNode { return r.beacon } diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index ed7bf73ce5..b1f9274bfc 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -166,6 +166,10 @@ func (r *ValidatorRegistrationRunner) ProcessConsensus(ctx context.Context, logg return fmt.Errorf("no consensus 
phase for validator registration") } +func (r *ValidatorRegistrationRunner) OnTimeoutQBFT(ctx context.Context, logger *zap.Logger, msg ssvtypes.EventMsg) error { + return r.BaseRunner.OnTimeoutQBFT(ctx, logger, msg) +} + func (r *ValidatorRegistrationRunner) ProcessPostConsensus(ctx context.Context, logger *zap.Logger, signedMsg *spectypes.PartialSignatureMessages) error { return fmt.Errorf("no post consensus phase for validator registration") } @@ -201,7 +205,7 @@ func (r *ValidatorRegistrationRunner) executeDuty(ctx context.Context, logger *z // sign partial randao span.AddEvent("signing beacon object") - msg, err := r.BaseRunner.signBeaconObject( + msg, err := signBeaconObject( ctx, r, duty.(*spectypes.ValidatorDuty), @@ -281,14 +285,46 @@ func (r *ValidatorRegistrationRunner) buildValidatorRegistration(slot phase0.Slo }, nil } -func (r *ValidatorRegistrationRunner) GetBaseRunner() *BaseRunner { - return r.BaseRunner +func (r *ValidatorRegistrationRunner) HasRunningQBFTInstance() bool { + return r.BaseRunner.HasRunningQBFTInstance() +} + +func (r *ValidatorRegistrationRunner) HasAcceptedProposalForCurrentRound() bool { + return r.BaseRunner.HasAcceptedProposalForCurrentRound() +} + +func (r *ValidatorRegistrationRunner) GetShares() map[phase0.ValidatorIndex]*spectypes.Share { + return r.BaseRunner.GetShares() +} + +func (r *ValidatorRegistrationRunner) GetRole() spectypes.RunnerRole { + return r.BaseRunner.GetRole() +} + +func (r *ValidatorRegistrationRunner) GetLastHeight() specqbft.Height { + return r.BaseRunner.GetLastHeight() +} + +func (r *ValidatorRegistrationRunner) GetLastRound() specqbft.Round { + return r.BaseRunner.GetLastRound() +} + +func (r *ValidatorRegistrationRunner) GetStateRoot() ([32]byte, error) { + return r.BaseRunner.GetStateRoot() +} + +func (r *ValidatorRegistrationRunner) SetTimeoutFunc(fn TimeoutF) { + r.BaseRunner.SetTimeoutFunc(fn) } func (r *ValidatorRegistrationRunner) GetNetwork() specqbft.Network { return r.network } +func (r 
*ValidatorRegistrationRunner) GetNetworkConfig() *networkconfig.Network { + return r.BaseRunner.NetworkConfig +} + func (r *ValidatorRegistrationRunner) GetBeaconNode() beacon.BeaconNode { return r.beacon } diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go index 0b43742e36..6d385f86fd 100644 --- a/protocol/v2/ssv/runner/voluntary_exit.go +++ b/protocol/v2/ssv/runner/voluntary_exit.go @@ -9,12 +9,13 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" ssz "github.com/ferranbt/fastssz" "github.com/pkg/errors" + specqbft "github.com/ssvlabs/ssv-spec/qbft" + spectypes "github.com/ssvlabs/ssv-spec/types" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" - specqbft "github.com/ssvlabs/ssv-spec/qbft" - spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/ssvsigner/ekm" "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/observability" @@ -22,7 +23,6 @@ import ( "github.com/ssvlabs/ssv/observability/traces" "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" - "github.com/ssvlabs/ssv/ssvsigner/ekm" ) // VoluntaryExitRunner implements validator voluntary exit duty - this duty doesn't @@ -145,6 +145,10 @@ func (r *VoluntaryExitRunner) ProcessConsensus(ctx context.Context, logger *zap. 
return errors.New("no consensus phase for voluntary exit") } +func (r *VoluntaryExitRunner) OnTimeoutQBFT(ctx context.Context, logger *zap.Logger, msg ssvtypes.EventMsg) error { + return r.BaseRunner.OnTimeoutQBFT(ctx, logger, msg) +} + func (r *VoluntaryExitRunner) ProcessPostConsensus(ctx context.Context, logger *zap.Logger, signedMsg *spectypes.PartialSignatureMessages) error { return errors.New("no post consensus phase for voluntary exit") } @@ -177,7 +181,7 @@ func (r *VoluntaryExitRunner) executeDuty(ctx context.Context, logger *zap.Logge // get PartialSignatureMessage with voluntaryExit root and signature span.AddEvent("signing beacon object") - msg, err := r.BaseRunner.signBeaconObject( + msg, err := signBeaconObject( ctx, r, duty.(*spectypes.ValidatorDuty), @@ -241,14 +245,46 @@ func (r *VoluntaryExitRunner) calculateVoluntaryExit() (*phase0.VoluntaryExit, e }, nil } -func (r *VoluntaryExitRunner) GetBaseRunner() *BaseRunner { - return r.BaseRunner +func (r *VoluntaryExitRunner) HasRunningQBFTInstance() bool { + return r.BaseRunner.HasRunningQBFTInstance() +} + +func (r *VoluntaryExitRunner) HasAcceptedProposalForCurrentRound() bool { + return r.BaseRunner.HasAcceptedProposalForCurrentRound() +} + +func (r *VoluntaryExitRunner) GetShares() map[phase0.ValidatorIndex]*spectypes.Share { + return r.BaseRunner.GetShares() +} + +func (r *VoluntaryExitRunner) GetRole() spectypes.RunnerRole { + return r.BaseRunner.GetRole() +} + +func (r *VoluntaryExitRunner) GetLastHeight() specqbft.Height { + return r.BaseRunner.GetLastHeight() +} + +func (r *VoluntaryExitRunner) GetLastRound() specqbft.Round { + return r.BaseRunner.GetLastRound() +} + +func (r *VoluntaryExitRunner) GetStateRoot() ([32]byte, error) { + return r.BaseRunner.GetStateRoot() +} + +func (r *VoluntaryExitRunner) SetTimeoutFunc(fn TimeoutF) { + r.BaseRunner.SetTimeoutFunc(fn) } func (r *VoluntaryExitRunner) GetNetwork() specqbft.Network { return r.network } +func (r *VoluntaryExitRunner) 
GetNetworkConfig() *networkconfig.Network { + return r.BaseRunner.NetworkConfig +} + func (r *VoluntaryExitRunner) GetBeaconNode() beacon.BeaconNode { return r.beacon } diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go b/protocol/v2/ssv/spectest/msg_processing_type.go index 0cc5fa4e43..5c020dc121 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -4,20 +4,17 @@ import ( "context" "encoding/hex" "encoding/json" - "path/filepath" "reflect" "strings" "testing" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - specqbft "github.com/ssvlabs/ssv-spec/qbft" spectypes "github.com/ssvlabs/ssv-spec/types" spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils" - typescomparable "github.com/ssvlabs/ssv-spec/types/testingutils/comparable" + "github.com/stretchr/testify/require" + "go.uber.org/zap" "github.com/ssvlabs/ssv/ssvsigner/ekm" @@ -64,15 +61,15 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { func (test *MsgProcessingSpecTest) runPreTesting(ctx context.Context, logger *zap.Logger) (*validator.Validator, *validator.Committee, error) { var share *spectypes.Share ketSetMap := make(map[phase0.ValidatorIndex]*spectestingutils.TestKeySet) - if len(test.Runner.GetBaseRunner().Share) == 0 { + if len(test.Runner.GetShares()) == 0 { panic("No share in base runner for tests") } - for _, validatorShare := range test.Runner.GetBaseRunner().Share { + for _, validatorShare := range test.Runner.GetShares() { share = validatorShare break } - for valIdx, validatorShare := range test.Runner.GetBaseRunner().Share { + for valIdx, validatorShare := range test.Runner.GetShares() { ketSetMap[valIdx] = spectestingutils.KeySetForShare(validatorShare) } @@ -121,14 +118,14 @@ func (test *MsgProcessingSpecTest) runPreTesting(ctx context.Context, logger *za lastErr = err } if test.DecidedSlashable && 
IsQBFTProposalMessage(msg) { - for _, validatorShare := range test.Runner.GetBaseRunner().Share { + for _, validatorShare := range test.Runner.GetShares() { test.Runner.GetSigner().(*ekm.TestingKeyManagerAdapter).AddSlashableSlot(validatorShare.SharePubKey, spectestingutils.TestingDutySlot) } } } default: v = ssvprotocoltesting.BaseValidator(logger, spectestingutils.KeySetForShare(share)) - v.DutyRunners[test.Runner.GetBaseRunner().RunnerRoleType] = test.Runner + v.DutyRunners[test.Runner.GetRole()] = test.Runner v.Network = test.Runner.GetNetwork() if !test.DontStartDuty { @@ -202,31 +199,8 @@ func (test *MsgProcessingSpecTest) overrideStateComparison(t *testing.T) { } func overrideStateComparison(t *testing.T, test *MsgProcessingSpecTest, name string, testType string) { - var r runner.Runner - switch test.Runner.(type) { - case *runner.CommitteeRunner: - r = &runner.CommitteeRunner{} - case *runner.AggregatorRunner: - r = &runner.AggregatorRunner{} - case *runner.ProposerRunner: - r = &runner.ProposerRunner{} - case *runner.SyncCommitteeAggregatorRunner: - r = &runner.SyncCommitteeAggregatorRunner{} - case *runner.ValidatorRegistrationRunner: - r = &runner.ValidatorRegistrationRunner{} - case *runner.VoluntaryExitRunner: - r = &runner.VoluntaryExitRunner{} - default: - t.Fatalf("unknown runner type") - } - specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) - require.NoError(t, err) - r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) - require.NoError(t, err) - - r.GetBaseRunner().NetworkConfig = networkconfig.TestNetwork + r := runnerForTest(t, test.Runner, name, testType) - // override test.PostDutyRunnerState = r root, err := r.GetRoot() @@ -281,7 +255,7 @@ var baseCommitteeWithRunnerSample = func( ctx, cancel, logger, - runnerSample.GetBaseRunner().NetworkConfig, + runnerSample.BaseRunner.NetworkConfig, spectestingutils.TestingCommitteeMember(keySetSample), createRunnerF, shareMap, diff --git 
a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go index 95e42d33ed..2786274a64 100644 --- a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go +++ b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go @@ -14,11 +14,8 @@ import ( spectypes "github.com/ssvlabs/ssv-spec/types" spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils" - typescomparable "github.com/ssvlabs/ssv-spec/types/testingutils/comparable" - "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/protocol/v2/ssv/runner" - protocoltesting "github.com/ssvlabs/ssv/protocol/v2/testing" ) type StartNewRunnerDutySpecTest struct { @@ -143,31 +140,8 @@ func (tests *MultiStartNewRunnerDutySpecTest) overrideStateComparison(t *testing } func overrideStateComparisonForStartNewRunnerDutySpecTest(t *testing.T, test *StartNewRunnerDutySpecTest, name string, testType string) { - var r runner.Runner - switch test.Runner.(type) { - case *runner.CommitteeRunner: - r = &runner.CommitteeRunner{} - case *runner.AggregatorRunner: - r = &runner.AggregatorRunner{} - case *runner.ProposerRunner: - r = &runner.ProposerRunner{} - case *runner.SyncCommitteeAggregatorRunner: - r = &runner.SyncCommitteeAggregatorRunner{} - case *runner.ValidatorRegistrationRunner: - r = &runner.ValidatorRegistrationRunner{} - case *runner.VoluntaryExitRunner: - r = &runner.VoluntaryExitRunner{} - default: - t.Fatalf("unknown runner type") - } - specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) - require.NoError(t, err) - r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) - require.NoError(t, err) - - r.GetBaseRunner().NetworkConfig = networkconfig.TestNetwork + r := runnerForTest(t, test.Runner, name, testType) - // override test.PostDutyRunnerState = r root, err := r.GetRoot() diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go 
b/protocol/v2/ssv/spectest/ssv_mapping_test.go index 1240607674..a80a02f040 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -360,29 +360,27 @@ func msgProcessingSpecTestFromMap(t *testing.T, m map[string]interface{}) *MsgPr } func fixRunnerForRun(t *testing.T, runnerMap map[string]interface{}, ks *spectestingutils.TestKeySet) runner.Runner { + logger := log.TestLogger(t) + baseRunnerMap := runnerMap["BaseRunner"].(map[string]interface{}) - base := &runner.BaseRunner{} + baseRunner := &runner.BaseRunner{} byts, _ := json.Marshal(baseRunnerMap) - require.NoError(t, json.Unmarshal(byts, &base)) - base.NetworkConfig = networkconfig.TestNetwork + require.NoError(t, json.Unmarshal(byts, &baseRunner)) + baseRunner.NetworkConfig = networkconfig.TestNetwork - logger := log.TestLogger(t) + ret := createRunnerWithBaseRunner(logger, baseRunner.RunnerRoleType, baseRunner, ks) - ret := baseRunnerForRole(logger, base.RunnerRoleType, base, ks) - - if ret.GetBaseRunner().QBFTController != nil { - ret.GetBaseRunner().QBFTController = fixControllerForRun(t, logger, ret, ret.GetBaseRunner().QBFTController, ks) - if ret.GetBaseRunner().State != nil { - if ret.GetBaseRunner().State.RunningInstance != nil { + if baseRunner.QBFTController != nil { + baseRunner.QBFTController = fixControllerForRun(t, logger, ret, baseRunner.QBFTController, ks) + if baseRunner.State != nil { + if baseRunner.State.RunningInstance != nil { operator := spectestingutils.TestingCommitteeMember(ks) - ret.GetBaseRunner().State.RunningInstance = fixInstanceForRun(t, ks, ret.GetBaseRunner().State.RunningInstance, ret.GetBaseRunner().QBFTController, operator) + baseRunner.State.RunningInstance = fixInstanceForRun(t, ks, baseRunner.State.RunningInstance, baseRunner.QBFTController, operator) } } } - ret.GetBaseRunner().NetworkConfig = networkconfig.TestNetwork - return ret } @@ -436,7 +434,7 @@ func fixInstanceForRun(t *testing.T, ks 
*spectestingutils.TestKeySet, inst *inst return newInst } -func baseRunnerForRole(logger *zap.Logger, role spectypes.RunnerRole, base *runner.BaseRunner, ks *spectestingutils.TestKeySet) runner.Runner { +func createRunnerWithBaseRunner(logger *zap.Logger, role spectypes.RunnerRole, base *runner.BaseRunner, ks *spectestingutils.TestKeySet) runner.Runner { switch role { case spectypes.RoleCommittee: ret := ssvtesting.CommitteeRunner(logger, ks) diff --git a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go index 60d0192524..e57c1a1c88 100644 --- a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go +++ b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go @@ -50,7 +50,7 @@ func RunSyncCommitteeAggProof(t *testing.T, test *synccommitteeaggregator.SyncCo } // post root - postRoot, err := r.GetBaseRunner().State.GetRoot() + postRoot, err := r.GetStateRoot() require.NoError(t, err) require.EqualValues(t, test.PostDutyRunnerStateRoot, hex.EncodeToString(postRoot[:])) } diff --git a/protocol/v2/ssv/spectest/util.go b/protocol/v2/ssv/spectest/util.go new file mode 100644 index 0000000000..b75bd4fd4f --- /dev/null +++ b/protocol/v2/ssv/spectest/util.go @@ -0,0 +1,58 @@ +package spectest + +import ( + "path/filepath" + "testing" + + typescomparable "github.com/ssvlabs/ssv-spec/types/testingutils/comparable" + "github.com/stretchr/testify/require" + + "github.com/ssvlabs/ssv/networkconfig" + "github.com/ssvlabs/ssv/protocol/v2/ssv/runner" + protocoltesting "github.com/ssvlabs/ssv/protocol/v2/testing" +) + +func runnerForTest(t *testing.T, runnerType runner.Runner, name string, testType string) runner.Runner { + var r runner.Runner + + switch runnerType.(type) { + case *runner.CommitteeRunner: + r = &runner.CommitteeRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case 
*runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + // override base-runner NetworkConfig now + switch runnerType.(type) { + case *runner.CommitteeRunner: + r.(*runner.CommitteeRunner).BaseRunner.NetworkConfig = networkconfig.TestNetwork + case *runner.AggregatorRunner: + r.(*runner.AggregatorRunner).BaseRunner.NetworkConfig = networkconfig.TestNetwork + case *runner.ProposerRunner: + r.(*runner.ProposerRunner).BaseRunner.NetworkConfig = networkconfig.TestNetwork + case *runner.SyncCommitteeAggregatorRunner: + r.(*runner.SyncCommitteeAggregatorRunner).BaseRunner.NetworkConfig = networkconfig.TestNetwork + case *runner.ValidatorRegistrationRunner: + r.(*runner.ValidatorRegistrationRunner).BaseRunner.NetworkConfig = networkconfig.TestNetwork + case *runner.VoluntaryExitRunner: + r.(*runner.VoluntaryExitRunner).BaseRunner.NetworkConfig = networkconfig.TestNetwork + default: + t.Fatalf("unknown runner type") + } + + return r +} diff --git a/protocol/v2/ssv/validator/committee.go b/protocol/v2/ssv/validator/committee.go index 2997e9a77a..cb0c622dbf 100644 --- a/protocol/v2/ssv/validator/committee.go +++ b/protocol/v2/ssv/validator/committee.go @@ -43,7 +43,7 @@ type Committee struct { // mtx syncs access to Queues, Runners, Shares. 
mtx sync.RWMutex - Queues map[phase0.Slot]QueueContainer + Queues map[phase0.Slot]queueContainer Runners map[phase0.Slot]*runner.CommitteeRunner Shares map[phase0.ValidatorIndex]*spectypes.Share @@ -77,7 +77,7 @@ func NewCommittee( networkConfig: networkConfig, ctx: ctx, cancel: cancel, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), Shares: shares, CommitteeMember: operator, @@ -156,13 +156,12 @@ func (c *Committee) prepareDutyAndRunner(ctx context.Context, logger *zap.Logger if err != nil { return nil, nil, traces.Errorf(span, "could not create CommitteeRunner: %w", err) } - - // Set timeout function. - r.GetBaseRunner().TimeoutF = c.onTimeout + r.SetTimeoutFunc(c.onTimeout) c.Runners[duty.Slot] = r + _, queueExists := c.Queues[duty.Slot] if !queueExists { - c.Queues[duty.Slot] = QueueContainer{ + c.Queues[duty.Slot] = queueContainer{ Q: queue.New(1000), // TODO alan: get queue opts from options queueState: &queue.State{ HasRunningInstance: false, diff --git a/protocol/v2/ssv/validator/non_committee_validator.go b/protocol/v2/ssv/validator/committee_observer.go similarity index 100% rename from protocol/v2/ssv/validator/non_committee_validator.go rename to protocol/v2/ssv/validator/committee_observer.go diff --git a/protocol/v2/ssv/validator/committee_queue.go b/protocol/v2/ssv/validator/committee_queue.go index 25da0b7319..573d149fec 100644 --- a/protocol/v2/ssv/validator/committee_queue.go +++ b/protocol/v2/ssv/validator/committee_queue.go @@ -15,12 +15,17 @@ import ( "github.com/ssvlabs/ssv/observability/log/fields" "github.com/ssvlabs/ssv/observability/traces" "github.com/ssvlabs/ssv/protocol/v2/message" - "github.com/ssvlabs/ssv/protocol/v2/qbft/instance" "github.com/ssvlabs/ssv/protocol/v2/ssv/queue" "github.com/ssvlabs/ssv/protocol/v2/ssv/runner" "github.com/ssvlabs/ssv/protocol/v2/types" ) +// queueContainer wraps a queue with its corresponding state 
+type queueContainer struct { + Q queue.Queue + queueState *queue.State +} + // EnqueueMessage enqueues a spectypes.SSVMessage for processing. // TODO: accept DecodedSSVMessage once p2p is upgraded to decode messages during validation. func (c *Committee) EnqueueMessage(ctx context.Context, msg *queue.SSVMessage) { @@ -60,7 +65,7 @@ func (c *Committee) EnqueueMessage(ctx context.Context, msg *queue.SSVMessage) { c.mtx.Lock() q, ok := c.Queues[slot] if !ok { - q = QueueContainer{ + q = queueContainer{ Q: queue.New(1000), // TODO alan: get queue opts from options queueState: &queue.State{ HasRunningInstance: false, @@ -121,29 +126,22 @@ func (c *Committee) StartConsumeQueue(ctx context.Context, logger *zap.Logger, d // it checks for current state func (c *Committee) ConsumeQueue( ctx context.Context, - q QueueContainer, + q queueContainer, logger *zap.Logger, handler MessageHandler, rnr *runner.CommitteeRunner, ) error { + // Construct a representation of the current state. state := *q.queueState logger.Debug("📬 queue consumer is running") lens := make([]int, 0, 10) for ctx.Err() == nil { - // Construct a representation of the current state. - var runningInstance *instance.Instance - if rnr.HasRunningDuty() { - runningInstance = rnr.GetBaseRunner().State.RunningInstance - if runningInstance != nil { - decided, _ := runningInstance.IsDecided() - state.HasRunningInstance = !decided - } - } + state.HasRunningInstance = rnr.HasRunningQBFTInstance() filter := queue.FilterAny - if runningInstance != nil && runningInstance.State.ProposalAcceptedForCurrentRound == nil { + if state.HasRunningInstance && !rnr.HasAcceptedProposalForCurrentRound() { // If no proposal was accepted for the current round, skip prepare & commit messages // for the current round. 
filter = func(m *queue.SSVMessage) bool { @@ -158,7 +156,7 @@ func (c *Committee) ConsumeQueue( return sm.MsgType != specqbft.PrepareMsgType && sm.MsgType != specqbft.CommitMsgType } - } else if runningInstance != nil && !runningInstance.State.Decided { + } else if state.HasRunningInstance { filter = func(ssvMessage *queue.SSVMessage) bool { // don't read post consensus until decided return ssvMessage.MsgType != spectypes.SSVPartialSignatureMsgType diff --git a/protocol/v2/ssv/validator/committee_queue_test.go b/protocol/v2/ssv/validator/committee_queue_test.go index 128622171d..a3c2d61b87 100644 --- a/protocol/v2/ssv/validator/committee_queue_test.go +++ b/protocol/v2/ssv/validator/committee_queue_test.go @@ -79,7 +79,7 @@ func makeTestSSVMessage(t *testing.T, msgType spectypes.MsgType, msgID spectypes } // runConsumeQueueAsync wraps ConsumeQueue execution in a goroutine. -func runConsumeQueueAsync(t *testing.T, ctx context.Context, committee *Committee, q QueueContainer, +func runConsumeQueueAsync(t *testing.T, ctx context.Context, committee *Committee, q queueContainer, logger *zap.Logger, handler func(context.Context, *queue.SSVMessage) error, committeeRunner *runner.CommitteeRunner) { t.Helper() @@ -180,7 +180,7 @@ func TestHandleMessageCreatesQueue(t *testing.T) { committee := &Committee{ logger: logger, ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), networkConfig: networkconfig.TestNetwork, CommitteeMember: &spectypes.CommitteeMember{}, @@ -233,7 +233,7 @@ func TestConsumeQueueBasic(t *testing.T) { committee := &Committee{ logger: logger, ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), networkConfig: networkconfig.TestNetwork, } @@ -256,7 +256,7 @@ func TestConsumeQueueBasic(t *testing.T) { } testMsg2 := makeTestSSVMessage(t, 
spectypes.SSVConsensusMsgType, msgID2, qbftMsg2) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(1000), queueState: &queue.State{ HasRunningInstance: true, @@ -317,14 +317,14 @@ func TestStartConsumeQueue(t *testing.T) { committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), networkConfig: networkconfig.TestNetwork, } slot := phase0.Slot(123) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(1000), queueState: &queue.State{ HasRunningInstance: false, @@ -379,7 +379,7 @@ func TestFilterNoProposalAccepted(t *testing.T) { committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), } @@ -422,7 +422,7 @@ func TestFilterNoProposalAccepted(t *testing.T) { combinedMessages[i], combinedMessages[j] = combinedMessages[j], combinedMessages[i] }) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(1000), queueState: &queue.State{ HasRunningInstance: true, @@ -498,7 +498,7 @@ func TestFilterNotDecidedSkipsPartialSignatures(t *testing.T) { committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), } @@ -528,7 +528,7 @@ func TestFilterNotDecidedSkipsPartialSignatures(t *testing.T) { testMsg1 := makeTestSSVMessage(t, spectypes.SSVConsensusMsgType, msgID1, qbftMsg) testMsg2 := makeTestSSVMessage(t, spectypes.SSVPartialSignatureMsgType, msgID2, partialSigMsg) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(1000), queueState: &queue.State{ HasRunningInstance: true, @@ -577,7 +577,7 @@ func TestFilterDecidedAllowsAll(t *testing.T) { committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), 
Runners: make(map[phase0.Slot]*runner.CommitteeRunner), } @@ -607,7 +607,7 @@ func TestFilterDecidedAllowsAll(t *testing.T) { testMsg1 := makeTestSSVMessage(t, spectypes.SSVConsensusMsgType, msgID1, qbftMsg) testMsg2 := makeTestSSVMessage(t, spectypes.SSVPartialSignatureMsgType, msgID2, partialSigMsg) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(1000), queueState: &queue.State{ HasRunningInstance: true, @@ -688,7 +688,7 @@ func TestChangingFilterState(t *testing.T) { return fmt.Errorf("intentionally stopping ConsumeQueue after first message") } - q := QueueContainer{ + q := queueContainer{ Q: queue.New(1), queueState: &queue.State{ HasRunningInstance: true, @@ -806,13 +806,13 @@ func TestCommitteeQueueFilteringScenarios(t *testing.T) { committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), } slot := phase0.Slot(123) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(10), queueState: &queue.State{ HasRunningInstance: tc.hasRunningDuty, @@ -967,13 +967,13 @@ func TestFilterPartialSignatureMessages(t *testing.T) { committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), } slot := phase0.Slot(123) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(10), queueState: &queue.State{ HasRunningInstance: true, @@ -1053,7 +1053,7 @@ func TestConsumeQueuePrioritization(t *testing.T) { committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), } @@ -1081,7 +1081,7 @@ func TestConsumeQueuePrioritization(t *testing.T) { makeTestSSVMessage(t, message.SSVEventMsgType, spectypes.MessageID{5}, eventMsgBody), } - q := QueueContainer{ + q := queueContainer{ Q: queue.New(10), 
queueState: &queue.State{ HasRunningInstance: true, @@ -1179,13 +1179,13 @@ func TestHandleMessageQueueFullAndDropping(t *testing.T) { committee := &Committee{ logger: logger, ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), CommitteeMember: &spectypes.CommitteeMember{}, networkConfig: networkconfig.TestNetwork, } // Step 0: Create the queue container with the desired small capacity and add it to the committee - qContainer := QueueContainer{ + qContainer := queueContainer{ Q: queue.New(queueCapacity), queueState: &queue.State{ HasRunningInstance: false, @@ -1283,7 +1283,7 @@ func TestConsumeQueueStopsOnErrNoValidDuties(t *testing.T) { } slot := phase0.Slot(123) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(10), queueState: &queue.State{ HasRunningInstance: true, @@ -1347,10 +1347,10 @@ func TestConsumeQueueBurstTraffic(t *testing.T) { slot := phase0.Slot(42) committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), } - qc := QueueContainer{ + qc := queueContainer{ Q: queue.New(1000), queueState: &queue.State{ HasRunningInstance: true, @@ -1563,7 +1563,7 @@ func TestQueueLoadAndSaturationScenarios(t *testing.T) { committee := &Committee{ logger: mainLogger.Named("DropWhenInboxStrictlyFull"), ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), CommitteeMember: &spectypes.CommitteeMember{}, networkConfig: networkconfig.TestNetwork, @@ -1573,7 +1573,7 @@ func TestQueueLoadAndSaturationScenarios(t *testing.T) { nextRound := specqbft.Round(2) queueCapacity := 3 - qContainer := QueueContainer{ + qContainer := queueContainer{ Q: queue.New(queueCapacity), queueState: &queue.State{ HasRunningInstance: true, @@ -1638,7 +1638,7 @@ func TestQueueLoadAndSaturationScenarios(t 
*testing.T) { committee := &Committee{ logger: mainLogger.Named("HighPriorityDropping"), ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), CommitteeMember: &spectypes.CommitteeMember{}, networkConfig: networkconfig.TestNetwork, @@ -1647,7 +1647,7 @@ func TestQueueLoadAndSaturationScenarios(t *testing.T) { currentRound := specqbft.Round(1) queueCapacity := 3 - qContainer := QueueContainer{ + qContainer := queueContainer{ Q: queue.New(queueCapacity), queueState: &queue.State{ HasRunningInstance: true, @@ -1732,7 +1732,7 @@ func TestQueueLoadAndSaturationScenarios(t *testing.T) { committee := &Committee{ ctx: ctx, - Queues: make(map[phase0.Slot]QueueContainer), + Queues: make(map[phase0.Slot]queueContainer), Runners: make(map[phase0.Slot]*runner.CommitteeRunner), } @@ -1755,7 +1755,7 @@ func TestQueueLoadAndSaturationScenarios(t *testing.T) { slot := phase0.Slot(456) - q := QueueContainer{ + q := queueContainer{ Q: queue.New(queueCapacity), queueState: &queue.State{ HasRunningInstance: true, diff --git a/protocol/v2/ssv/validator/duty_executor.go b/protocol/v2/ssv/validator/duty_executor.go index 39ab49c103..a0040d9891 100644 --- a/protocol/v2/ssv/validator/duty_executor.go +++ b/protocol/v2/ssv/validator/duty_executor.go @@ -31,7 +31,7 @@ func (v *Validator) ExecuteDuty(ctx context.Context, duty *spectypes.ValidatorDu dec.TraceContext = ctx - if pushed := v.Queues[duty.RunnerRole()].Q.TryPush(dec); !pushed { + if pushed := v.Queues[duty.RunnerRole()].TryPush(dec); !pushed { return fmt.Errorf("dropping ExecuteDuty message for validator %s because the queue is full", duty.PubKey.String()) } diff --git a/protocol/v2/ssv/validator/events.go b/protocol/v2/ssv/validator/events.go index ba09b22142..0e865258bc 100644 --- a/protocol/v2/ssv/validator/events.go +++ b/protocol/v2/ssv/validator/events.go @@ -27,7 +27,7 @@ func (v *Validator) handleEventMessage(ctx 
context.Context, logger *zap.Logger, switch eventMsg.Type { case types.Timeout: - if err := dutyRunner.GetBaseRunner().QBFTController.OnTimeout(ctx, logger, *eventMsg); err != nil { + if err := dutyRunner.OnTimeoutQBFT(ctx, logger, *eventMsg); err != nil { return traces.Errorf(span, "timeout event: %w", err) } span.SetStatus(codes.Ok, "") @@ -71,7 +71,7 @@ func (c *Committee) handleEventMessage(ctx context.Context, logger *zap.Logger, return nil } - if err := dutyRunner.GetBaseRunner().QBFTController.OnTimeout(ctx, logger, *eventMsg); err != nil { + if err := dutyRunner.OnTimeoutQBFT(ctx, logger, *eventMsg); err != nil { return traces.Errorf(span, "timeout event: %w", err) } span.SetStatus(codes.Ok, "") diff --git a/protocol/v2/ssv/validator/opts.go b/protocol/v2/ssv/validator/opts.go index 8a8ca11007..1995a06bd7 100644 --- a/protocol/v2/ssv/validator/opts.go +++ b/protocol/v2/ssv/validator/opts.go @@ -106,13 +106,3 @@ func (o *CommonOptions) NewOptions( DutyRunners: dutyRunners, } } - -// State of the validator -type State uint32 - -const ( - // NotStarted the validator hasn't started - NotStarted State = iota - // Started validator is running - Started -) diff --git a/protocol/v2/ssv/validator/validator_queue.go b/protocol/v2/ssv/validator/queue_validator.go similarity index 79% rename from protocol/v2/ssv/validator/validator_queue.go rename to protocol/v2/ssv/validator/queue_validator.go index 3e44fd3ada..75eb6d226b 100644 --- a/protocol/v2/ssv/validator/validator_queue.go +++ b/protocol/v2/ssv/validator/queue_validator.go @@ -15,7 +15,6 @@ import ( "github.com/ssvlabs/ssv/observability/log/fields" "github.com/ssvlabs/ssv/observability/traces" "github.com/ssvlabs/ssv/protocol/v2/message" - "github.com/ssvlabs/ssv/protocol/v2/qbft/instance" "github.com/ssvlabs/ssv/protocol/v2/ssv/queue" "github.com/ssvlabs/ssv/protocol/v2/types" ) @@ -23,12 +22,6 @@ import ( // MessageHandler process the msg. 
return error if exist type MessageHandler func(ctx context.Context, msg *queue.SSVMessage) error -// QueueContainer wraps a queue with its corresponding state -type QueueContainer struct { - Q queue.Queue - queueState *queue.State -} - // EnqueueMessage enqueues a spectypes.SSVMessage for processing. // TODO: accept DecodedSSVMessage once p2p is upgraded to decode messages during validation. func (v *Validator) EnqueueMessage(ctx context.Context, msg *queue.SSVMessage) { @@ -67,7 +60,7 @@ func (v *Validator) EnqueueMessage(ctx context.Context, msg *queue.SSVMessage) { defer v.mtx.RUnlock() if q, ok := v.Queues[msg.MsgID.GetRoleType()]; ok { span.AddEvent("pushing message to queue") - if pushed := q.Q.TryPush(msg); !pushed { + if pushed := q.TryPush(msg); !pushed { const eventMsg = "❗ dropping message because the queue is full" logger.Warn(eventMsg, zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), @@ -103,7 +96,7 @@ func (v *Validator) ConsumeQueue(msgID spectypes.MessageID, handler MessageHandl ctx, cancel := context.WithCancel(v.ctx) defer cancel() - var q QueueContainer + var q queue.Queue err := func() error { v.mtx.RLock() // read v.Queues defer v.mtx.RUnlock() @@ -124,21 +117,14 @@ func (v *Validator) ConsumeQueue(msgID spectypes.MessageID, handler MessageHandl for ctx.Err() == nil { // Construct a representation of the current state. 
- state := *q.queueState + state := queue.State{} runner := v.DutyRunners.DutyRunnerForMsgID(msgID) if runner == nil { return fmt.Errorf("could not get duty runner for msg ID %v", msgID) } - var runningInstance *instance.Instance - if runner.HasRunningDuty() { - runningInstance = runner.GetBaseRunner().State.RunningInstance - if runningInstance != nil { - decided, _ := runningInstance.IsDecided() - state.HasRunningInstance = !decided - } - } - state.Height = v.GetLastHeight(msgID) - state.Round = v.GetLastRound(msgID) + state.HasRunningInstance = runner.HasRunningQBFTInstance() + state.Height = runner.GetLastHeight() + state.Round = runner.GetLastRound() state.Quorum = v.Operator.GetQuorum() filter := queue.FilterAny @@ -151,7 +137,7 @@ func (v *Validator) ConsumeQueue(msgID spectypes.MessageID, handler MessageHandl } return e.Type == types.ExecuteDuty } - } else if runningInstance != nil && runningInstance.State.ProposalAcceptedForCurrentRound == nil { + } else if state.HasRunningInstance && !runner.HasAcceptedProposalForCurrentRound() { // If no proposal was accepted for the current round, skip prepare & commit messages // for the current height and round. filter = func(m *queue.SSVMessage) bool { @@ -168,15 +154,12 @@ func (v *Validator) ConsumeQueue(msgID spectypes.MessageID, handler MessageHandl } // Pop the highest priority message for the current state. 
- msg := q.Q.Pop(ctx, queue.NewMessagePrioritizer(&state), filter) - if ctx.Err() != nil { - break - } + msg := q.Pop(ctx, queue.NewMessagePrioritizer(&state), filter) if msg == nil { v.logger.Error("❗ got nil message from queue, but context is not done!") break } - lens = append(lens, q.Q.Len()) + lens = append(lens, q.Len()) if len(lens) >= 10 { v.logger.Debug("📬 [TEMPORARY] queue statistics", fields.MessageID(msg.MsgID), fields.MessageType(msg.MsgType), @@ -184,6 +167,10 @@ func (v *Validator) ConsumeQueue(msgID spectypes.MessageID, handler MessageHandl lens = lens[:0] } + // Handle the message, but only if ctx hasn't been canceled (so we can exit fast) + if ctx.Err() != nil { + break + } // Handle the message. if err := handler(ctx, msg); err != nil { v.logMsg(msg, "❗ could not handle message", @@ -218,30 +205,3 @@ func (v *Validator) logMsg(msg *queue.SSVMessage, logMsg string, withFields ...z } v.logger.Debug(logMsg, append(baseFields, withFields...)...) } - -// GetLastHeight returns the last height for the given identifier -func (v *Validator) GetLastHeight(identifier spectypes.MessageID) specqbft.Height { - r := v.DutyRunners.DutyRunnerForMsgID(identifier) - if r == nil { - return specqbft.Height(0) - } - if ctrl := r.GetBaseRunner().QBFTController; ctrl != nil { - return ctrl.Height - } - return specqbft.Height(0) -} - -// GetLastRound returns the last height for the given identifier -func (v *Validator) GetLastRound(identifier spectypes.MessageID) specqbft.Round { - r := v.DutyRunners.DutyRunnerForMsgID(identifier) - if r == nil { - return specqbft.Round(1) - } - if r.HasRunningDuty() { - inst := r.GetBaseRunner().State.RunningInstance - if inst != nil { - return inst.State.Round - } - } - return specqbft.Round(1) -} diff --git a/protocol/v2/ssv/validator/validator_queue_test.go b/protocol/v2/ssv/validator/queue_validator_test.go similarity index 100% rename from protocol/v2/ssv/validator/validator_queue_test.go rename to 
protocol/v2/ssv/validator/queue_validator_test.go diff --git a/protocol/v2/ssv/validator/startup.go b/protocol/v2/ssv/validator/startup.go index 2a60b0bd65..d9a46d27c0 100644 --- a/protocol/v2/ssv/validator/startup.go +++ b/protocol/v2/ssv/validator/startup.go @@ -2,16 +2,22 @@ package validator import ( "fmt" - "sync/atomic" "github.com/ssvlabs/ssv-spec/p2p" spectypes "github.com/ssvlabs/ssv-spec/types" ) -// Start starts a Validator. +// Start starts Validator. func (v *Validator) Start() (started bool, err error) { - if !atomic.CompareAndSwapUint32(&v.state, uint32(NotStarted), uint32(Started)) { - return false, nil + v.mtx.Lock() + defer v.mtx.Unlock() + + if v.stopped { + return false, fmt.Errorf("stopped validator cannot be restarted") + } + + if v.started { + return false, nil // nothing to do } n, ok := v.Network.(p2p.Subscriber) @@ -19,26 +25,28 @@ func (v *Validator) Start() (started bool, err error) { return false, fmt.Errorf("network does not support subscription") } for role := range v.DutyRunners { - identifier := spectypes.NewMsgID(v.NetworkConfig.DomainType, v.Share.ValidatorPubKey[:], role) - if err := n.Subscribe(v.Share.ValidatorPubKey); err != nil { - atomic.StoreUint32(&v.state, uint32(NotStarted)) return false, err } + identifier := spectypes.NewMsgID(v.NetworkConfig.DomainType, v.Share.ValidatorPubKey[:], role) go v.StartQueueConsumer(identifier, v.ProcessMessage) } + + v.started = true + return true, nil } -// Stop stops a Validator. +// Stop stops Validator. 
func (v *Validator) Stop() { - if atomic.CompareAndSwapUint32(&v.state, uint32(Started), uint32(NotStarted)) { - v.cancel() - - v.mtx.Lock() // write-lock for v.Queues - defer v.mtx.Unlock() + v.mtx.Lock() + defer v.mtx.Unlock() - // clear the msg q - v.Queues = make(map[spectypes.RunnerRole]QueueContainer) + if v.stopped || !v.started { + return // nothing to do } + + v.cancel() + + v.stopped = true } diff --git a/protocol/v2/ssv/validator/timer.go b/protocol/v2/ssv/validator/timer.go index 10ab3527f7..8b3f4eb092 100644 --- a/protocol/v2/ssv/validator/timer.go +++ b/protocol/v2/ssv/validator/timer.go @@ -19,11 +19,12 @@ import ( func (v *Validator) onTimeout(ctx context.Context, logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) roundtimer.OnRoundTimeoutF { return func(round specqbft.Round) { - v.mtx.RLock() // read-lock for v.Queues, v.state + v.mtx.RLock() // read-lock for v.Queues defer v.mtx.RUnlock() - // only run if the validator is started - if v.state != uint32(Started) { + // The relevant queue might not have been initialized yet, hence we need to check for nil here + q := v.Queues[identifier.GetRoleType()] + if q == nil { return } @@ -50,11 +51,12 @@ func (v *Validator) onTimeout(ctx context.Context, logger *zap.Logger, identifie return } - if pushed := v.Queues[identifier.GetRoleType()].Q.TryPush(dec); !pushed { + if pushed := q.TryPush(dec); !pushed { logger.Warn("❗️ dropping timeout message because the queue is full", - fields.RunnerRole(identifier.GetRoleType())) + fields.RunnerRole(identifier.GetRoleType()), + ) + return } - // logger.Debug("📬 queue: pushed message", fields.PubKey(identifier.GetPubKey()), fields.MessageID(dec.MsgID), fields.MessageType(dec.MsgType)) } } diff --git a/protocol/v2/ssv/validator/validator.go b/protocol/v2/ssv/validator/validator.go index fb94204314..0bfe373458 100644 --- a/protocol/v2/ssv/validator/validator.go +++ b/protocol/v2/ssv/validator/validator.go @@ -28,17 +28,24 @@ import ( // 
Validator represents an SSV ETH consensus validator Share assigned, coordinates duty execution and more. // Every validator has a validatorID which is validator's public key. -// Each validator has multiple DutyRunners, for each duty type. +// Each validator has multiple DutyRunners - one per duty-type. type Validator struct { logger *zap.Logger - mtx *sync.RWMutex + // mtx ensures the consistent Validator lifecycle (the correct usage of Start and Stop methods), + // as well as syncs access to validator-managed data (such as Queues) across go-routines. + mtx sync.RWMutex + + // Started reflects whether this validator has already been started. Once the Validator has been stopped, it + // cannot be restarted. + started bool + // Stopped reflects whether this validator has already been stopped. + stopped bool ctx context.Context cancel context.CancelFunc NetworkConfig *networkconfig.Network - DutyRunners runner.ValidatorDutyRunners Network specqbft.Network Operator *spectypes.CommitteeMember @@ -46,9 +53,9 @@ type Validator struct { Signer ekm.BeaconSigner OperatorSigner ssvtypes.OperatorSigner - Queues map[spectypes.RunnerRole]QueueContainer + Queues map[spectypes.RunnerRole]queue.Queue - state uint32 + DutyRunners runner.ValidatorDutyRunners messageValidator validation.MessageValidator } @@ -57,7 +64,6 @@ type Validator struct { func NewValidator(pctx context.Context, cancel func(), logger *zap.Logger, options *Options) *Validator { v := &Validator{ logger: logger.Named(log.NameValidator).With(fields.PubKey(options.SSVShare.ValidatorPubKey[:])), - mtx: &sync.RWMutex{}, ctx: pctx, cancel: cancel, NetworkConfig: options.NetworkConfig, @@ -67,27 +73,14 @@ func NewValidator(pctx context.Context, cancel func(), logger *zap.Logger, optio Share: options.SSVShare, Signer: options.Signer, OperatorSigner: options.OperatorSigner, - Queues: make(map[spectypes.RunnerRole]QueueContainer), - state: uint32(NotStarted), + Queues: make(map[spectypes.RunnerRole]queue.Queue), 
messageValidator: options.MessageValidator, } + // some additional steps to prepare duty runners for handling duties for _, dutyRunner := range options.DutyRunners { - // Set timeout function. - dutyRunner.GetBaseRunner().TimeoutF = v.onTimeout - - //Setup the queue. - role := dutyRunner.GetBaseRunner().RunnerRoleType - - v.Queues[role] = QueueContainer{ - Q: queue.New(options.QueueSize), - queueState: &queue.State{ - HasRunningInstance: false, - Height: 0, - Slot: 0, - //Quorum: options.SSVShare.Share,// TODO - }, - } + dutyRunner.SetTimeoutFunc(v.onTimeout) + v.Queues[dutyRunner.GetRole()] = queue.New(options.QueueSize) } return v diff --git a/scripts/differ/parser.go b/scripts/differ/parser.go index e300989ccd..e8adf4719f 100644 --- a/scripts/differ/parser.go +++ b/scripts/differ/parser.go @@ -149,7 +149,7 @@ func (p *Parser) transformFuncDecl(funcDecl *ast.FuncDecl) { if len(field.Names) == 0 { continue } - if !slices.Contains(p.RemoveIdentifiers, field.Names[0].Name) { + if len(field.Names) > 0 && !slices.Contains(p.RemoveIdentifiers, field.Names[0].Name) { newList = append(newList, field) } } @@ -164,7 +164,7 @@ func (p *Parser) transformTypeSpec(typeSpec *ast.TypeSpec) { if funcType, ok := field.Type.(*ast.FuncType); ok { newList := []*ast.Field{} for _, param := range funcType.Params.List { - if !slices.Contains(p.RemoveIdentifiers, param.Names[0].Name) { + if len(param.Names) > 0 && !slices.Contains(p.RemoveIdentifiers, param.Names[0].Name) { newList = append(newList, param) } } diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index 3f9f64466a..687ba4432c 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -1,4 +1,4 @@ -ApprovedChanges: 
["34e8b7999dfb14b5","56ceb03cd44ff702","ccc48c575f7ab4eb","262a19d18869cedd","e710070bfb15d3ab","1c0da45940bfa76f","881fc8fbb6a4edb1","31bc6100f3d4db08","c7ce0261a1b6014f","c4c4caa5d0938b85","60f1d510640499ac","91073f39440ab37f","d308bd7c553ccdcf","bdaf172971637cbe","396137c9cb425893","d4fef6512374c1f5","1bbcbe4e8194370d","59b2375130aef5df","1bd46741a98043b0","417e005d18c1f7d4","14dd874e7df81b37","50cea598241b0bc9","3ddc92a39c324d69","b0934079dcd986cc","e27b9c1ea1c64357","fe62097e8814c106","74a928f5dcb2fdd9","cbbfdb5e68cdac80","6577bf381d78bae2","39ea06bfd1477d2d","7e2550bab51f22b2","87ebd29bd49fc52f","ef39dd5223e0d080","aebb8e4348b6d667","6146023d4d5708a2","fe14e7f0503ea188","fb4cac598a68c592","257c7eb81d6eb245","62547d1b32ce44e3","5be5c4ce7d3212da","960a9c64cd4ec93c","df3771e2589008f9","c8f122c9fb83793e","2c6054db7088bcef","5714652b88e2d44f","7a53b3b037c56325","11587bf00d05a4e2","601862214795ec47","b523eed171e5b6bf","5797f56a84b7ebef","81a60407a3a0ba80","38faed802f80914b","bcb381e843ace421","1067c9a5622c3508","4b58762c0b433442","d293ec1bc61bb707","3e88c3b49d093605","4890ff80c88cc41d","9b70726386e38170","8bbf7eba3fa0cf7e","8cc38593ebe049b6","ef7b63d9848dc185","7f816e2f33da3186","2314d7fa0b387cff","267886c6b07733d7","f9480b2e48319466","98c46ae55d1d4e39","edfd442b4d725fbb","122f053573538a32","d720d714a20833e1","a81c092c985de728","143c7821cd65bcac","9e53c73ee60b1cc2","9d265e99dd31d4f5","70d29c9e7a4ef797","f2972b2906912d3a","c411b0f9f069c032","a037c260bd97c7d2","96631f77414215f5","389037ae98d33f26","b3e7d9382b320d70","3426003abbd5ebf0","22b9026cb0221ce0","e07da5b414afe6a6","50e62cbde34516ee","57c1885b43dd8d19","e8a49856a5edd893","9273478f6feccd62","1cfd3a3660a62879","859884a25fccea80","8651bdc20c1946f3","5a44c960cc4178dd","108b9575f7c1d4bc","5499865be87b7e00","159536003eeddac8","a20ea51df8b7d65b","9133b167cc5a601d","a748ad5c74850749","16ebe47404323cc1","48bfe5cf1e578b47","4366899a2cb05197","730c3e5e59393b7d","5b44a4b425ecc397","df5debc50ec8babc","92a41554b2910bb8","c36
c680554dde59f","f265acf7423fa9b1","447feaa5cdc1a010","8ac6baea1b755066","2bc023c7e062f24b","274336ec1127e6c0","ddf404c2905d3b26","8c43f241aeac6d75","832fc9a9a2b71d00","8850900b5d9bcc65","cc22f28953b787ea","3bad6ae11596a574","8f84422a240d889c","82b392ba39c6c594","7975821460ebe1e7","173c505e12aabb8f","47ee0d148148a56f","6707ecfefa5fec21","d5a7389d730464f1","8e4ec8debe331b36","875393173d59b0f2","943be3ce709a99d3","4e22a08543b079b","f66dbe09241b5a4","fd6796ea6d131c3","634abeefa604ee2","e939b25394581c4","6ccbcb02e457e66","e1d82f0619360a5","5a7ad98296703f6","f4ce01b385c68a2","90aac1feca751b0","678c366df2dd8302","902874f1439b6cbe","6fc8a4fe6e10380e","9a904d77eccac8c4","3c68f86d18282872","4cbea5ccb3ba96f9","ad53997a7af4a476","43ca8640bc136b04","b7a9dc3af09265f2","9e33f2c164150c03","8c8fdd2a8066a8a7","78ac98e0a065deb2","3cd8818a0517abfb","80b8aa081b93ec8b","e781436db8f6a346","21b8868ce00f6261","c4f7f025e7a437ff","1656e3e33948c0d3","a2aab6b86cefe217","97df26235b1eabee","a5202339e0e86850","261d0458cc8b956f","a1efe4ea2b85117","3b53ec55fa55f1df","e92e2c9af55c0a91","db32c6c8ef7b0cf6","1f578f6799e515c1","4fb205310012cc98","9031993dbbed4646","12dd40a163e41548","547bf66f36d25667","1961f69f1787dc5d","3e1e15e5707ad393","1a0e96bf6922218d","63091d22982d70a9","cbafb1c3b188dbd9", "5d954981612100e0","5eb948ff56039ff1","56276561b0c3d933","2ba698695e4fcb6","12dd40a163e41548","e933dbb89bc159ff","56276561b0c3d933","adfc83ab2b7529e2","e397d10800b51844","510f812787530ce5","79d524c526c3202e","615382c9e22d5772","862816d5dd5f9650","76c7a4b27d6dd1ab", "a89f217d08ceef40", "9698d9b7425e5e21", "c217ff38383363c8", "8ce8a758b93cfd3f", "181c96f033f99b2b", "d59992d5de732920", "b0de1d86ee844b43", "32e5196e8623d4e0", "40db73ae9746149f", "93192e1f7dd7f8ab", 
"3d9d307db16b7a1f","9b18244fb96341a4","7b6444cbaf195b71","2428357c5baa54e3","2f42d05f686ceac5","c5af65609c08034e","48621d3cb9e3daf3","57c5a5c8ea9ceb9f","814f92d8184047d7","6c8e69e1d13f60d1","6335ffb5fa113156","f859d9d766479837","127e999402838373","a9aad499f5b9eb97","37abf0629001274","4365987e0c443305","f4dd26ea320f18e8","2bc49a0c4a8bcb7","b1a95069de2239a1","130eef566ad2f006","ab215fcfe80d7128","f68f4023d039d119","6ed13432ac58e15b","7e891d2b17f5c66f","2ca2d4de6ef5ff01","377232614c285ce8","b420f6affd5d4998","1f7cc61e2724678c","155c6ec3e2d9d3ce","40cc95280c0322e","da96f6d8b26acd87","7d8ab33d0989448c","a059d1cffddd1806","b5068b26ac6ba39a","c89772b6c110dc14","780c3281d914a06a","2153cdb8ff8acb14","24c638d05acc6258","37cdbf3fe0d2c0be","debb273257b832ac","b9f863429c5bd028","622dd7837dafba8e","bcacf2c847388455","2b6e432e076cde11","666f04b7d5e29eb","bec26aa6892bbca1","b006e20b3cb4f8bc","b6012c4075285778","a74d021f7b35fdb2","ae46fff3a49f773d","42214aa8ab8e4ab9","2ecfd0e26e23264c","af3b5d02752960e7","5e26dc96bd3c8e75","b7982786a04cc064","a6688aa594abdde6","fd1de2bf3d1bb8ea","b10c72c36fa90666","1c4a85421b52e46a","5ab7b7a9107c354f","f88e0383750294a6","6fc7c41b42b4e3e5","f28f3a41bc8c741d","b49179c36c111cca","5df9f55a706e5291","4994abbc55e58278","2c02276fa72f61b9","937ce36b0949796a","3f6f707d5ecf3903","11a340273beaf077","2630b7d3f7463316","33dd6bfa496bc889","a9685eff5edb9118","2fd0165ccc59ddf","de0733313f796fc9","a5458fa616bc6d6d","73244bf5a8b49e23","42052e9213a5780d","26672cc9a6ae0af0","b7ca13196a824fc5","801f7d8a8151c73e","20b4765e446c4803","25c4b276cfa24926","cebe87e9824d213e","be65f009ecd0d4b8","4d1a0519b660e5f","ad8332e881c48041","fbcd6b75d8baa5c1","787162ea04b0c4ed","c97a6f1c933535b","b5b9b0a45f6df79e","e9569ea2dc0f1406","5a3c840d9fc7bec9","7cf54a264ab481f3","cbbb16ca9342dc4c","46636a126e9d32ad","c90ffdb18233f0ea","e5e2698cc827129b","b31079cc52d70820","99ebbf611546fd06","a6c82644fe7b2bf3","c110b52f3fc04964","b2ba608989ac677","373a9525ae8811a8","44a2015641f07e3","d6aafafb59a8d
d6e","b7f2e4dec9742e39","bb626b923fe3850","b2315a5d4749f93","f1b684011e369981","fb2604dba9677890","b6908fa26415a154","70247f4b2e200f5b","d3ac29b25686ae94","636c42279c683d53","523ff663791a047a","cee19c2663445d45","819aa7ef21eb1f94","6e7d2035244a01b4","5a73c0f03029acba","96abe9e4636a4d4a","16f41dbe0a2ff32a","e16b89eb783f91bd","c0c8738f2d82c5bd","96c004530466d7ed","ecda760b0ddbdb11","db9d242dc4f80e0a","37176fea3b21eb63","7031f7740a5c0152","8cfc723c9abfd67e","9404f35d52cd90bc","f2eaa816c7adbbd1","db35cde6aead1669","7256a65d415b844b","d1a754e80a79a384"] +ApprovedChanges: ["34e8b7999dfb14b5","56ceb03cd44ff702","ccc48c575f7ab4eb","262a19d18869cedd","e710070bfb15d3ab","1c0da45940bfa76f","881fc8fbb6a4edb1","31bc6100f3d4db08","c7ce0261a1b6014f","c4c4caa5d0938b85","60f1d510640499ac","91073f39440ab37f","d308bd7c553ccdcf","bdaf172971637cbe","396137c9cb425893","d4fef6512374c1f5","1bbcbe4e8194370d","59b2375130aef5df","1bd46741a98043b0","417e005d18c1f7d4","14dd874e7df81b37","50cea598241b0bc9","3ddc92a39c324d69","b0934079dcd986cc","e27b9c1ea1c64357","fe62097e8814c106","74a928f5dcb2fdd9","cbbfdb5e68cdac80","6577bf381d78bae2","39ea06bfd1477d2d","7e2550bab51f22b2","87ebd29bd49fc52f","ef39dd5223e0d080","aebb8e4348b6d667","6146023d4d5708a2","fe14e7f0503ea188","fb4cac598a68c592","257c7eb81d6eb245","62547d1b32ce44e3","5be5c4ce7d3212da","960a9c64cd4ec93c","df3771e2589008f9","c8f122c9fb83793e","2c6054db7088bcef","5714652b88e2d44f","7a53b3b037c56325","11587bf00d05a4e2","601862214795ec47","b523eed171e5b6bf","5797f56a84b7ebef","81a60407a3a0ba80","38faed802f80914b","bcb381e843ace421","1067c9a5622c3508","4b58762c0b433442","d293ec1bc61bb707","3e88c3b49d093605","4890ff80c88cc41d","9b70726386e38170","8bbf7eba3fa0cf7e","8cc38593ebe049b6","ef7b63d9848dc185","7f816e2f33da3186","2314d7fa0b387cff","267886c6b07733d7","f9480b2e48319466","98c46ae55d1d4e39","edfd442b4d725fbb","122f053573538a32","d720d714a20833e1","a81c092c985de728","143c7821cd65bcac","9e53c73ee60b1cc2","9d265e99dd31d4f5","70d29c9e7a4ef797","
f2972b2906912d3a","c411b0f9f069c032","a037c260bd97c7d2","96631f77414215f5","389037ae98d33f26","b3e7d9382b320d70","3426003abbd5ebf0","22b9026cb0221ce0","e07da5b414afe6a6","50e62cbde34516ee","57c1885b43dd8d19","e8a49856a5edd893","9273478f6feccd62","1cfd3a3660a62879","859884a25fccea80","8651bdc20c1946f3","5a44c960cc4178dd","108b9575f7c1d4bc","5499865be87b7e00","159536003eeddac8","a20ea51df8b7d65b","9133b167cc5a601d","a748ad5c74850749","16ebe47404323cc1","48bfe5cf1e578b47","4366899a2cb05197","730c3e5e59393b7d","5b44a4b425ecc397","df5debc50ec8babc","92a41554b2910bb8","c36c680554dde59f","f265acf7423fa9b1","447feaa5cdc1a010","8ac6baea1b755066","2bc023c7e062f24b","274336ec1127e6c0","ddf404c2905d3b26","8c43f241aeac6d75","832fc9a9a2b71d00","8850900b5d9bcc65","cc22f28953b787ea","3bad6ae11596a574","8f84422a240d889c","82b392ba39c6c594","7975821460ebe1e7","173c505e12aabb8f","47ee0d148148a56f","6707ecfefa5fec21","d5a7389d730464f1","8e4ec8debe331b36","875393173d59b0f2","943be3ce709a99d3","4e22a08543b079b","f66dbe09241b5a4","fd6796ea6d131c3","634abeefa604ee2","e939b25394581c4","6ccbcb02e457e66","e1d82f0619360a5","5a7ad98296703f6","f4ce01b385c68a2","90aac1feca751b0","678c366df2dd8302","902874f1439b6cbe","6fc8a4fe6e10380e","9a904d77eccac8c4","3c68f86d18282872","4cbea5ccb3ba96f9","ad53997a7af4a476","43ca8640bc136b04","b7a9dc3af09265f2","9e33f2c164150c03","8c8fdd2a8066a8a7","78ac98e0a065deb2","3cd8818a0517abfb","80b8aa081b93ec8b","e781436db8f6a346","21b8868ce00f6261","c4f7f025e7a437ff","1656e3e33948c0d3","a2aab6b86cefe217","97df26235b1eabee","a5202339e0e86850","261d0458cc8b956f","a1efe4ea2b85117","3b53ec55fa55f1df","e92e2c9af55c0a91","db32c6c8ef7b0cf6","1f578f6799e515c1","4fb205310012cc98","9031993dbbed4646","12dd40a163e41548","547bf66f36d25667","1961f69f1787dc5d","3e1e15e5707ad393","1a0e96bf6922218d","63091d22982d70a9","cbafb1c3b188dbd9", 
"5d954981612100e0","5eb948ff56039ff1","56276561b0c3d933","2ba698695e4fcb6","12dd40a163e41548","e933dbb89bc159ff","56276561b0c3d933","adfc83ab2b7529e2","e397d10800b51844","510f812787530ce5","79d524c526c3202e","615382c9e22d5772","862816d5dd5f9650","76c7a4b27d6dd1ab", "a89f217d08ceef40", "9698d9b7425e5e21", "c217ff38383363c8", "8ce8a758b93cfd3f", "181c96f033f99b2b", "d59992d5de732920", "b0de1d86ee844b43", "32e5196e8623d4e0", "40db73ae9746149f", "93192e1f7dd7f8ab", "3d9d307db16b7a1f","9b18244fb96341a4","7b6444cbaf195b71","2428357c5baa54e3","2f42d05f686ceac5","c5af65609c08034e","48621d3cb9e3daf3","57c5a5c8ea9ceb9f","814f92d8184047d7","6c8e69e1d13f60d1","6335ffb5fa113156","f859d9d766479837","127e999402838373","a9aad499f5b9eb97","37abf0629001274","4365987e0c443305","f4dd26ea320f18e8","2bc49a0c4a8bcb7","b1a95069de2239a1","130eef566ad2f006","ab215fcfe80d7128","f68f4023d039d119","6ed13432ac58e15b","7e891d2b17f5c66f","2ca2d4de6ef5ff01","377232614c285ce8","b420f6affd5d4998","1f7cc61e2724678c","155c6ec3e2d9d3ce","40cc95280c0322e","da96f6d8b26acd87","7d8ab33d0989448c","a059d1cffddd1806","b5068b26ac6ba39a","c89772b6c110dc14","780c3281d914a06a","2153cdb8ff8acb14","24c638d05acc6258","37cdbf3fe0d2c0be","debb273257b832ac","b9f863429c5bd028","622dd7837dafba8e","bcacf2c847388455","2b6e432e076cde11","666f04b7d5e29eb","bec26aa6892bbca1","b006e20b3cb4f8bc","b6012c4075285778","a74d021f7b35fdb2","ae46fff3a49f773d","42214aa8ab8e4ab9","2ecfd0e26e23264c","af3b5d02752960e7","5e26dc96bd3c8e75","b7982786a04cc064","a6688aa594abdde6","fd1de2bf3d1bb8ea","b10c72c36fa90666","1c4a85421b52e46a","5ab7b7a9107c354f","f88e0383750294a6","6fc7c41b42b4e3e5","f28f3a41bc8c741d","b49179c36c111cca","5df9f55a706e5291","4994abbc55e58278","2c02276fa72f61b9","937ce36b0949796a","3f6f707d5ecf3903","11a340273beaf077","2630b7d3f7463316","33dd6bfa496bc889","a9685eff5edb9118","2fd0165ccc59ddf","de0733313f796fc9","a5458fa616bc6d6d","73244bf5a8b49e23","42052e9213a5780d","26672cc9a6ae0af0","b7ca13196a824fc5","801f7d8a8151c73e",
"20b4765e446c4803","25c4b276cfa24926","cebe87e9824d213e","be65f009ecd0d4b8","4d1a0519b660e5f","ad8332e881c48041","fbcd6b75d8baa5c1","787162ea04b0c4ed","c97a6f1c933535b","b5b9b0a45f6df79e","e9569ea2dc0f1406","5a3c840d9fc7bec9","7cf54a264ab481f3","cbbb16ca9342dc4c","46636a126e9d32ad","c90ffdb18233f0ea","e5e2698cc827129b","b31079cc52d70820","99ebbf611546fd06","a6c82644fe7b2bf3","c110b52f3fc04964","b2ba608989ac677","373a9525ae8811a8","44a2015641f07e3","d6aafafb59a8dd6e","b7f2e4dec9742e39","bb626b923fe3850","b2315a5d4749f93","f1b684011e369981","fb2604dba9677890","b6908fa26415a154","70247f4b2e200f5b","d3ac29b25686ae94","636c42279c683d53","523ff663791a047a","cee19c2663445d45","819aa7ef21eb1f94","6e7d2035244a01b4","5a73c0f03029acba","96abe9e4636a4d4a","16f41dbe0a2ff32a","e16b89eb783f91bd","c0c8738f2d82c5bd","96c004530466d7ed","ecda760b0ddbdb11","db9d242dc4f80e0a","37176fea3b21eb63","7031f7740a5c0152","8cfc723c9abfd67e","9404f35d52cd90bc","f2eaa816c7adbbd1","db35cde6aead1669","7256a65d415b844b","d1a754e80a79a384","88f2419e7e4868c5","6ca2a43c169f0804","aff9d34fa74fd11c","d0d58b90fcf8a9aa","8ac707d59bb84bd","373eafb5b2fd0a2a","1aa6ccfbf1c0f89d","7363c65da2bdb691","666d332e21855590","136ec79d97235c2","2dcab1785bda75da","a6bf3c0cb687265f","9c09a06a698ba4ec","faea264ed3f24053","c14a03bb67ce3b9d","3f84e3c962fceddf","d60b7f4ec3c0eaaa","713978f4e6c97dff","4f8ef5c65d1e03d0","8776a7a362bb7054","baf57f86f81b9778","a26828947dd80dd2","c1980c7a36e1326d","bad102976bc7a727","5927893bad4eafa","d20c88af9c714d6f","e6586bef5d91a976","15648639ecc149fc","fd26b82adf7ed0ec"] IgnoredIdentifiers: - logger