diff --git a/.avalanche-golangci.yml b/.avalanche-golangci.yml
index e0aed8d2ad..ac24ac70be 100644
--- a/.avalanche-golangci.yml
+++ b/.avalanche-golangci.yml
@@ -55,7 +55,7 @@ linters:
     - bodyclose
     - copyloopvar
    - depguard
-    # - errcheck
+    - errcheck
     - errorlint
     - forbidigo
     - goconst
diff --git a/core/fifo_cache.go b/core/fifo_cache.go
index 9f1eb8cf1e..06e9de1cbe 100644
--- a/core/fifo_cache.go
+++ b/core/fifo_cache.go
@@ -44,7 +44,9 @@ func (f *BufferFIFOCache[K, V]) Put(key K, val V) {
 	f.l.Lock()
 	defer f.l.Unlock()
 
-	f.buffer.Insert(key) // Insert will remove the oldest [K] if we are at the [limit]
+	// Insert will remove the oldest [K] if we are at the [limit] -
+	// remove always returns nil, so it is safe to ignore this error.
+	_ = f.buffer.Insert(key)
 	f.m[key] = val
 }
diff --git a/core/state/trie_prefetcher_extra_test.go b/core/state/trie_prefetcher_extra_test.go
index d312b98d4d..8b60d49cb3 100644
--- a/core/state/trie_prefetcher_extra_test.go
+++ b/core/state/trie_prefetcher_extra_test.go
@@ -178,8 +178,9 @@ func addKVs(
 	for i := 0; i < count/2; i++ {
 		key := make([]byte, 32)
 		value := make([]byte, 32)
-		rand.Read(key)
-		rand.Read(value)
+		// rand.Read never returns an error
+		_, _ = rand.Read(key)
+		_, _ = rand.Read(value)
 		statedb.SetState(address, common.BytesToHash(key), common.BytesToHash(value))
 	}
diff --git a/core/state_manager_test.go b/core/state_manager_test.go
index 03bb4a6ac6..f2d1c3894d 100644
--- a/core/state_manager_test.go
+++ b/core/state_manager_test.go
@@ -55,8 +55,8 @@ func TestCappedMemoryTrieWriter(t *testing.T) {
 		require.NoError(t, w.InsertTrie(block))
 		require.Zero(t, m.LastDereference, "should not have dereferenced block on insert")
 		require.Zero(t, m.LastCommit, "should not have committed block on insert")
+		require.NoError(t, w.AcceptTrie(block))
 
-		w.AcceptTrie(block)
 		if i <= tipBufferSize {
 			require.Zero(t, m.LastDereference, "should not have dereferenced block on accept")
 		} else {
@@ -70,7 +70,7 @@ func TestCappedMemoryTrieWriter(t *testing.T) {
 			m.LastCommit = common.Hash{}
 		}
 
-		w.RejectTrie(block)
+		require.NoError(t, w.RejectTrie(block))
 		require.Equal(t, block.Root(), m.LastDereference, "should have dereferenced block on reject")
 		require.Zero(t, m.LastCommit, "should not have committed block on reject")
 		m.LastDereference = common.Hash{}
@@ -94,12 +94,12 @@ func TestNoPruningTrieWriter(t *testing.T) {
 		require.Zero(t, m.LastDereference, "should not have dereferenced block on insert")
 		require.Zero(t, m.LastCommit, "should not have committed block on insert")
 
-		w.AcceptTrie(block)
+		require.NoError(t, w.AcceptTrie(block))
 		require.Zero(t, m.LastDereference, "should not have dereferenced block on accept")
 		require.Equal(t, block.Root(), m.LastCommit, "should have committed block on accept")
 		m.LastCommit = common.Hash{}
 
-		w.RejectTrie(block)
+		require.NoError(t, w.RejectTrie(block))
 		require.Equal(t, block.Root(), m.LastDereference, "should have dereferenced block on reject")
 		require.Zero(t, m.LastCommit, "should not have committed block on reject")
 		m.LastDereference = common.Hash{}
diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go
index ba71af2f32..f887b925ea 100644
--- a/plugin/evm/message/codec.go
+++ b/plugin/evm/message/codec.go
@@ -40,7 +40,7 @@ func init() {
 	// See https://github.com/ava-labs/coreth/pull/999
 	c.SkipRegistrations(3)
 
-	Codec.RegisterCodec(Version, c)
+	errs.Add(Codec.RegisterCodec(Version, c))
 
 	if errs.Errored() {
 		panic(errs.Err)
diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go
index 8054fdbae6..4848243206 100644
--- a/plugin/evm/syncervm_test.go
+++ b/plugin/evm/syncervm_test.go
@@ -19,6 +19,7 @@ import (
 	"github.com/ava-labs/avalanchego/snow/engine/enginetest"
 	"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
 	"github.com/ava-labs/avalanchego/upgrade/upgradetest"
+	"github.com/ava-labs/avalanchego/utils"
 	"github.com/ava-labs/avalanchego/utils/set"
 	"github.com/ava-labs/avalanchego/vms/evm/database"
 	"github.com/ava-labs/avalanchego/vms/evm/predicate"
@@ -104,7 +105,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
 				require.NoError(t, syncerVM.AppRequestFailed(t.Context(), nodeID, requestID, commonEng.ErrTimeout))
 				require.NoError(t, syncerVM.Client.Shutdown())
 			} else {
-				syncerVM.AppResponse(t.Context(), nodeID, requestID, response)
+				require.NoError(t, syncerVM.AppResponse(t.Context(), nodeID, requestID, response))
 			}
 		},
 		expectedErr: context.Canceled,
@@ -118,13 +119,18 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
 	test.responseIntercept = nil
 	test.expectedErr = nil
 
+	var atomicErr utils.Atomic[error]
 	syncDisabledVM := &VM{}
 	appSender := &enginetest.Sender{T: t}
 	appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil }
 	appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error {
 		nodeID, hasItem := nodeSet.Pop()
 		require.True(t, hasItem, "expected nodeSet to contain at least 1 nodeID")
-		go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request)
+		go func() {
+			if err := vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request); err != nil {
+				atomicErr.Set(err)
+			}
+		}()
 		return nil
 	}
 	// Reset metrics to allow re-initialization
@@ -191,7 +197,11 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
 	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
 	vmSetup.serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
 		if test.responseIntercept == nil {
-			go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response)
+			go func() {
+				if err := syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response); err != nil {
+					atomicErr.Set(err)
+				}
+			}()
 		} else {
 			go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response)
 		}
@@ -212,6 +222,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
 	vmSetup.syncerVM = syncReEnabledVM
 
 	testSyncerVM(t, vmSetup, test)
+	require.NoError(t, atomicErr.Get())
 }
 
 func TestVMShutdownWhileSyncing(t *testing.T) {
@@ -311,15 +322,23 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s
 	require.True(enabled)
 
 	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
+	var atomicErr utils.Atomic[error]
 	serverVM.appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
 		if test.responseIntercept == nil {
-			go syncerVM.vm.AppResponse(ctx, nodeID, requestID, response)
+			go func() {
+				if err := syncerVM.vm.AppResponse(ctx, nodeID, requestID, response); err != nil {
+					atomicErr.Set(err)
+				}
+			}()
 		} else {
 			go test.responseIntercept(syncerVM.vm, nodeID, requestID, response)
 		}
 
 		return nil
 	}
+	t.Cleanup(func() {
+		require.NoError(atomicErr.Get())
+	})
 
 	// connect peer to [syncerVM]
 	require.NoError(
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 9730d31981..f84550a60a 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -497,7 +497,9 @@ func (vm *VM) Initialize(
 	// Add p2p warp message warpHandler
 	warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner)
-	vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler)
+	if err = vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler); err != nil {
+		return err
+	}
 
 	vm.stateSyncDone = make(chan struct{})
@@ -813,7 +815,9 @@ func (vm *VM) onNormalOperationsStarted() error {
 	// Initially sync the uptime tracker so that APIs expose recent data even if
 	// called immediately after bootstrapping.
-	vm.uptimeTracker.Sync(ctx)
+	if err := vm.uptimeTracker.Sync(ctx); err != nil {
+		return err
+	}
 
 	vm.shutdownWg.Add(1)
 	go func() {
@@ -986,8 +990,9 @@ func (vm *VM) Shutdown(context.Context) error {
 	for _, handler := range vm.rpcHandlers {
 		handler.Stop()
 	}
-	vm.eth.Stop()
-	log.Info("Ethereum backend stop completed")
+	if err := vm.eth.Stop(); err != nil {
+		log.Error("failed stopping eth", "err", err)
+	}
 	if vm.usingStandaloneDB {
 		if err := vm.db.Close(); err != nil {
 			log.Error("failed to close database: %w", err)
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index 6c1dbcae7c..88d012b4ba 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -2719,7 +2719,9 @@ func TestStandaloneDB(t *testing.T) {
 		[]*commonEng.Fx{},
 		appSender,
 	)
-	defer vm.Shutdown(t.Context())
+	defer func() {
+		require.NoError(t, vm.Shutdown(t.Context()))
+	}()
 	require.NoError(t, err, "error initializing VM")
 	require.NoError(t, vm.SetState(t.Context(), snow.Bootstrapping))
 	require.NoError(t, vm.SetState(t.Context(), snow.NormalOp))
@@ -3166,7 +3168,7 @@ func TestWaitForEvent(t *testing.T) {
 				fork:    &fork,
 			}).vm
 			testCase.testCase(t, tvm)
-			tvm.Shutdown(t.Context())
+			require.NoError(t, tvm.Shutdown(t.Context()))
 		})
 	}
 }
@@ -3533,7 +3535,9 @@ func TestDelegatePrecompile_BehaviorAcrossUpgrades(t *testing.T) {
 				genesisJSON: string(genesisJSON),
 				fork:        &tt.fork,
 			}).vm
-			defer vm.Shutdown(ctx)
+			defer func() {
+				require.NoError(t, vm.Shutdown(ctx))
+			}()
 
 			if tt.preDeployTime != 0 {
 				vm.clock.Set(time.Unix(tt.preDeployTime, 0))
@@ -3704,6 +3708,6 @@ func TestInspectDatabases(t *testing.T) {
 		db = memdb.New()
 	)
 
-	vm.initializeDBs(db)
+	require.NoError(t, vm.initializeDBs(db))
 	require.NoError(t, vm.inspectDatabases())
 }
diff --git a/plugin/evm/vm_uptime_test.go b/plugin/evm/vm_uptime_test.go
index 6399b7ed61..9f9d4217a2 100644
--- a/plugin/evm/vm_uptime_test.go
+++ b/plugin/evm/vm_uptime_test.go
@@ -63,7 +63,9 @@ func TestUptimeTracker(t *testing.T) {
 		[]*commonEng.Fx{},
 		appSender,
 	))
-	defer vm.Shutdown(t.Context())
+	t.Cleanup(func() {
+		require.NoError(vm.Shutdown(t.Context()))
+	})
 
 	// Mock the clock to control time progression
 	clock := vm.clock
diff --git a/plugin/runner/runner.go b/plugin/runner/runner.go
index 7ee8d2ab8c..e6fed9be2f 100644
--- a/plugin/runner/runner.go
+++ b/plugin/runner/runner.go
@@ -29,5 +29,8 @@ func Run(versionStr string) {
 		fmt.Printf("failed to set fd limit correctly due to: %s", err)
 		os.Exit(1)
 	}
-	rpcchainvm.Serve(context.Background(), &evm.VM{})
+	if err := rpcchainvm.Serve(context.Background(), &evm.VM{}); err != nil {
+		fmt.Printf("failed to serve VM: %s", err)
+		os.Exit(1)
+	}
 }
diff --git a/precompile/contracts/rewardmanager/config.go b/precompile/contracts/rewardmanager/config.go
index 75069e9cde..f779c54a11 100644
--- a/precompile/contracts/rewardmanager/config.go
+++ b/precompile/contracts/rewardmanager/config.go
@@ -38,7 +38,7 @@ func (i *InitialRewardConfig) Verify() error {
 	}
 }
 
-func (i *InitialRewardConfig) Configure(state contract.StateDB) error {
+func (i *InitialRewardConfig) Configure(state contract.StateDB) {
 	// enable allow fee recipients
 	if i.AllowFeeRecipients {
 		EnableAllowFeeRecipients(state)
@@ -50,7 +50,6 @@ func (i *InitialRewardConfig) Configure(state contract.StateDB) error {
 		// set reward address
 		StoreRewardAddress(state, i.RewardAddress)
 	}
-	return nil
 }
 
 // Config implements the StatefulPrecompileConfig interface while adding in the
diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go
index f4f92d2daf..bd8b48a8d4 100644
--- a/sync/statesync/sync_test.go
+++ b/sync/statesync/sync_test.go
@@ -71,7 +71,7 @@ func testSync(t *testing.T, test syncTest) {
 	})
 	require.NoError(t, err, "failed to create state syncer")
 	// begin sync
-	s.Start(ctx)
+	require.NoError(t, s.Start(ctx))
 	waitFor(t, t.Context(), s.Wait, test.expectedError, testSyncTimeout)
 	if test.expectedError != nil {
 		return
@@ -98,7 +98,7 @@ func waitFor(t *testing.T, ctx context.Context, resultFunc func(context.Context)
 	if ctx.Err() != nil {
 		// print a stack trace to assist with debugging
 		var stackBuf bytes.Buffer
-		pprof.Lookup("goroutine").WriteTo(&stackBuf, 2)
+		require.NoErrorf(t, pprof.Lookup("goroutine").WriteTo(&stackBuf, 2), "error trying to print stack trace for timeout")
 		t.Log(stackBuf.String())
 		// fail the test
 		require.Fail(t, "unexpected timeout waiting for sync result")
diff --git a/sync/statesync/trie_sync_tasks.go b/sync/statesync/trie_sync_tasks.go
index 40f1075f3a..2c7467022d 100644
--- a/sync/statesync/trie_sync_tasks.go
+++ b/sync/statesync/trie_sync_tasks.go
@@ -75,7 +75,9 @@ func (m *mainTrieTask) OnLeafs(db ethdb.KeyValueWriter, keys, vals [][]byte) err
 		// check if this account has storage root that we need to fetch
 		if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
-			m.sync.trieQueue.RegisterStorageTrie(acc.Root, accountHash)
+			if err := m.sync.trieQueue.RegisterStorageTrie(acc.Root, accountHash); err != nil {
+				return err
+			}
 		}
 
 		// check if this account has code and add it to codeHashes to fetch
diff --git a/tests/utils/tmpnet.go b/tests/utils/tmpnet.go
index 3bbfdf4921..5a0a067840 100644
--- a/tests/utils/tmpnet.go
+++ b/tests/utils/tmpnet.go
@@ -24,7 +24,7 @@ func NewTmpnetNodes(count int) []*tmpnet.Node {
 	nodes := make([]*tmpnet.Node, count)
 	for i := range nodes {
 		node := tmpnet.NewNode()
-		node.EnsureKeys()
+		_ = node.EnsureKeys() // guaranteed to be nil for new node
 		nodes[i] = node
 	}
 	return nodes
diff --git a/tests/warp/warp_test.go b/tests/warp/warp_test.go
index e0c22a6d5b..e715a30907 100644
--- a/tests/warp/warp_test.go
+++ b/tests/warp/warp_test.go
@@ -417,7 +417,7 @@ func (w *warpTest) aggregateSignaturesViaAPI() {
 	numSigners, err := parsedWarpMessage.Signature.NumSigners()
 	require.NoError(err)
 	require.Len(warpValidators.Validators, numSigners)
-	parsedWarpMessage.Signature.Verify(&parsedWarpMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
+	err = parsedWarpMessage.Signature.Verify(&parsedWarpMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
 	require.NoError(err)
 
 	w.addressedCallSignedMessage = parsedWarpMessage
@@ -429,7 +429,7 @@ func (w *warpTest) aggregateSignaturesViaAPI() {
 	numSigners, err = parsedWarpBlockMessage.Signature.NumSigners()
 	require.NoError(err)
 	require.Len(warpValidators.Validators, numSigners)
-	parsedWarpBlockMessage.Signature.Verify(&parsedWarpBlockMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
+	err = parsedWarpBlockMessage.Signature.Verify(&parsedWarpBlockMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
 	require.NoError(err)
 	w.blockPayloadSignedMessage = parsedWarpBlockMessage
 }
diff --git a/triedb/firewood/database.go b/triedb/firewood/database.go
index e9ab2da66f..f95104260f 100644
--- a/triedb/firewood/database.go
+++ b/triedb/firewood/database.go
@@ -556,7 +556,11 @@ func (db *Database) getProposalHash(parentRoot common.Hash, keys, values [][]byt
 	ffiHashTimer.Inc(time.Since(start).Milliseconds())
 
 	// We succesffuly created a proposal, so we must drop it after use.
-	defer p.Drop()
+	defer func() {
+		if err := p.Drop(); err != nil {
+			log.Error("firewood: error dropping proposal after hash computation", "parentRoot", parentRoot.Hex(), "error", err)
+		}
+	}()
 
 	root, err := p.Root()
 	if err != nil {
diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go
index 97d679cc76..afd6d0c8bf 100644
--- a/warp/verifier_backend_test.go
+++ b/warp/verifier_backend_test.go
@@ -58,8 +58,7 @@ func TestAddressedCallSignatures(t *testing.T) {
 			require.NoError(t, err)
 			signature, err := snowCtx.WarpSigner.Sign(msg)
 			require.NoError(t, err)
-
-			backend.AddMessage(msg)
+			require.NoError(t, backend.AddMessage(msg))
 			return msg.Bytes(), signature
 		},
 		verifyStats: func(t *testing.T, stats *verifierStats) {
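
Note on the recurring test pattern above (syncervm_test.go): errors from handlers launched with `go` are captured in an avalanchego `utils.Atomic[error]` and asserted on once the asynchronous work has finished, because `require` assertions must not fail a test from a non-test goroutine. Below is a minimal, self-contained sketch of that pattern; `failingCall` and `TestAsyncErrorIsSurfaced` are illustrative names introduced here, not code from this change.

package example

import (
	"errors"
	"testing"

	"github.com/ava-labs/avalanchego/utils"
	"github.com/stretchr/testify/require"
)

// failingCall stands in for an asynchronous handler (e.g. AppResponse) whose
// returned error would be silently dropped if invoked as a bare `go` statement.
func failingCall() error {
	return errors.New("boom")
}

func TestAsyncErrorIsSurfaced(t *testing.T) {
	var atomicErr utils.Atomic[error]

	done := make(chan struct{})
	go func() {
		defer close(done)
		// Store the error instead of discarding it; the test goroutine
		// inspects it after the asynchronous work completes.
		if err := failingCall(); err != nil {
			atomicErr.Set(err)
		}
	}()

	<-done
	// The tests in the diff assert require.NoError(t, atomicErr.Get());
	// here failingCall always errors, so we assert the error was captured.
	require.Error(t, atomicErr.Get())
}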