-
Notifications
You must be signed in to change notification settings - Fork 1.2k
perf: improve islock / llmq signing latency #6896
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We'll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: develop
Are you sure you want to change the base?
Changes from all commits
89d1a6e
2f03126
dd56bbc
708fa0d
5177f80
6df1211
dcd1d9a
ae2dc7a
a80a8df
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -162,13 +162,14 @@ MessageProcessingResult CInstantSendManager::ProcessMessage(NodeId from, std::st | |
| } | ||
|
|
||
| LOCK(cs_pendingLocks); | ||
| pendingInstantSendLocks.emplace(hash, std::make_pair(from, islock)); | ||
| pendingInstantSendLocks.emplace(hash, instantsend::PendingISLockFromPeer{from, islock}); | ||
| NotifyWorker(); | ||
| return ret; | ||
| } | ||
|
|
||
| instantsend::PendingState CInstantSendManager::ProcessPendingInstantSendLocks() | ||
| { | ||
| decltype(pendingInstantSendLocks) pend; | ||
| std::vector<std::pair<uint256, instantsend::PendingISLockFromPeer>> pend; | ||
| instantsend::PendingState ret; | ||
|
|
||
| if (!IsInstantSendEnabled()) { | ||
|
|
@@ -183,14 +184,15 @@ instantsend::PendingState CInstantSendManager::ProcessPendingInstantSendLocks() | |
| // The keys of the removed values are temporaily stored here to avoid invalidating an iterator | ||
| std::vector<uint256> removed; | ||
| removed.reserve(maxCount); | ||
| pend.reserve(maxCount); | ||
|
|
||
| for (const auto& [islockHash, nodeid_islptr_pair] : pendingInstantSendLocks) { | ||
| // Check if we've reached max count | ||
| if (pend.size() >= maxCount) { | ||
| ret.m_pending_work = true; | ||
| break; | ||
| } | ||
| pend.emplace(islockHash, std::move(nodeid_islptr_pair)); | ||
| pend.emplace_back(islockHash, std::move(nodeid_islptr_pair)); | ||
| removed.emplace_back(islockHash); | ||
| } | ||
|
|
||
|
|
@@ -216,24 +218,25 @@ instantsend::PendingState CInstantSendManager::ProcessPendingInstantSendLocks() | |
| if (!badISLocks.empty()) { | ||
| LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- doing verification on old active set\n", __func__); | ||
|
|
||
| // filter out valid IS locks from "pend" | ||
| for (auto it = pend.begin(); it != pend.end();) { | ||
| if (!badISLocks.count(it->first)) { | ||
| it = pend.erase(it); | ||
| } else { | ||
| ++it; | ||
| // filter out valid IS locks from "pend" - keep only bad ones | ||
| std::vector<std::pair<uint256, instantsend::PendingISLockFromPeer>> filteredPend; | ||
| filteredPend.reserve(badISLocks.size()); | ||
| for (auto& p : pend) { | ||
| if (badISLocks.contains(p.first)) { | ||
| filteredPend.push_back(std::move(p)); | ||
| } | ||
| } | ||
|
|
||
| // Now check against the previous active set and perform banning if this fails | ||
| ProcessPendingInstantSendLocks(llmq_params, dkgInterval, /*ban=*/true, pend, ret.m_peer_activity); | ||
| ProcessPendingInstantSendLocks(llmq_params, dkgInterval, /*ban=*/true, filteredPend, ret.m_peer_activity); | ||
| } | ||
|
|
||
| return ret; | ||
| } | ||
|
|
||
| Uint256HashSet CInstantSendManager::ProcessPendingInstantSendLocks( | ||
| const Consensus::LLMQParams& llmq_params, int signOffset, bool ban, | ||
| const Uint256HashMap<std::pair<NodeId, instantsend::InstantSendLockPtr>>& pend, | ||
| const std::vector<std::pair<uint256, instantsend::PendingISLockFromPeer>>& pend, | ||
| std::vector<std::pair<NodeId, MessageProcessingResult>>& peer_activity) | ||
| { | ||
| CBLSBatchVerifier<NodeId, uint256> batchVerifier(false, true, 8); | ||
|
|
@@ -243,8 +246,8 @@ Uint256HashSet CInstantSendManager::ProcessPendingInstantSendLocks( | |
| size_t alreadyVerified = 0; | ||
| for (const auto& p : pend) { | ||
| const auto& hash = p.first; | ||
| auto nodeId = p.second.first; | ||
| const auto& islock = p.second.second; | ||
| auto nodeId = p.second.node_id; | ||
| const auto& islock = p.second.islock; | ||
|
|
||
| if (batchVerifier.badSources.count(nodeId)) { | ||
| continue; | ||
|
|
@@ -315,8 +318,8 @@ Uint256HashSet CInstantSendManager::ProcessPendingInstantSendLocks( | |
| } | ||
| for (const auto& p : pend) { | ||
| const auto& hash = p.first; | ||
| auto nodeId = p.second.first; | ||
| const auto& islock = p.second.second; | ||
| auto nodeId = p.second.node_id; | ||
| const auto& islock = p.second.islock; | ||
|
|
||
| if (batchVerifier.badMessages.count(hash)) { | ||
| LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s, islock=%s: invalid sig in islock, peer=%d\n", | ||
|
|
@@ -393,7 +396,7 @@ MessageProcessingResult CInstantSendManager::ProcessInstantSendLock(NodeId from, | |
| } else { | ||
| // put it in a separate pending map and try again later | ||
| LOCK(cs_pendingLocks); | ||
| pendingNoTxInstantSendLocks.try_emplace(hash, std::make_pair(from, islock)); | ||
| pendingNoTxInstantSendLocks.try_emplace(hash, instantsend::PendingISLockFromPeer{from, islock}); | ||
| } | ||
|
|
||
| // This will also add children TXs to pendingRetryTxs | ||
|
|
@@ -436,11 +439,11 @@ void CInstantSendManager::TransactionAddedToMempool(const CTransactionRef& tx) | |
| LOCK(cs_pendingLocks); | ||
| auto it = pendingNoTxInstantSendLocks.begin(); | ||
| while (it != pendingNoTxInstantSendLocks.end()) { | ||
| if (it->second.second->txid == tx->GetHash()) { | ||
| if (it->second.islock->txid == tx->GetHash()) { | ||
| // we received an islock earlier | ||
| LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s, islock=%s\n", __func__, | ||
| tx->GetHash().ToString(), it->first.ToString()); | ||
| islock = it->second.second; | ||
| islock = it->second.islock; | ||
| pendingInstantSendLocks.try_emplace(it->first, it->second); | ||
| pendingNoTxInstantSendLocks.erase(it); | ||
| break; | ||
|
|
@@ -458,6 +461,7 @@ void CInstantSendManager::TransactionAddedToMempool(const CTransactionRef& tx) | |
| } else { | ||
| RemoveMempoolConflictsForLock(::SerializeHash(*islock), *islock); | ||
| } | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::TransactionRemovedFromMempool(const CTransactionRef& tx) | ||
|
|
@@ -475,6 +479,7 @@ void CInstantSendManager::TransactionRemovedFromMempool(const CTransactionRef& t | |
| LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- transaction %s was removed from mempool\n", __func__, | ||
| tx->GetHash().ToString()); | ||
| RemoveConflictingLock(::SerializeHash(*islock), *islock); | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex) | ||
|
|
@@ -505,12 +510,14 @@ void CInstantSendManager::BlockConnected(const std::shared_ptr<const CBlock>& pb | |
| } | ||
|
|
||
| db.WriteBlockInstantSendLocks(pblock, pindex); | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::BlockDisconnected(const std::shared_ptr<const CBlock>& pblock, | ||
| const CBlockIndex* pindexDisconnected) | ||
| { | ||
| db.RemoveBlockInstantSendLocks(pblock, pindexDisconnected); | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::AddNonLockedTx(const CTransactionRef& tx, const CBlockIndex* pindexMined) | ||
|
|
@@ -533,7 +540,7 @@ void CInstantSendManager::AddNonLockedTx(const CTransactionRef& tx, const CBlock | |
| LOCK(cs_pendingLocks); | ||
| auto it = pendingNoTxInstantSendLocks.begin(); | ||
| while (it != pendingNoTxInstantSendLocks.end()) { | ||
| if (it->second.second->txid == tx->GetHash()) { | ||
| if (it->second.islock->txid == tx->GetHash()) { | ||
| // we received an islock earlier, let's put it back into pending and verify/lock | ||
| LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s, islock=%s\n", __func__, | ||
| tx->GetHash().ToString(), it->first.ToString()); | ||
|
|
@@ -553,6 +560,7 @@ void CInstantSendManager::AddNonLockedTx(const CTransactionRef& tx, const CBlock | |
|
|
||
| LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s, pindexMined=%s\n", __func__, | ||
| tx->GetHash().ToString(), pindexMined ? pindexMined->GetBlockHash().ToString() : ""); | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::RemoveNonLockedTx(const uint256& txid, bool retryChildren) | ||
|
|
@@ -593,6 +601,7 @@ void CInstantSendManager::RemoveNonLockedTx(const uint256& txid, bool retryChild | |
|
|
||
| LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s, retryChildren=%d, retryChildrenCount=%d\n", | ||
| __func__, txid.ToString(), retryChildren, retryChildrenCount); | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::RemoveConflictedTx(const CTransaction& tx) | ||
|
|
@@ -601,6 +610,7 @@ void CInstantSendManager::RemoveConflictedTx(const CTransaction& tx) | |
| if (auto signer = m_signer.load(std::memory_order_acquire); signer) { | ||
| signer->ClearInputsFromQueue(GetIdsFromLockable(tx.vin)); | ||
| } | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::TruncateRecoveredSigsForInputs(const instantsend::InstantSendLock& islock) | ||
|
|
@@ -620,13 +630,15 @@ void CInstantSendManager::TryEmplacePendingLock(const uint256& hash, const NodeI | |
| if (db.KnownInstantSendLock(hash)) return; | ||
| LOCK(cs_pendingLocks); | ||
| if (!pendingInstantSendLocks.count(hash)) { | ||
| pendingInstantSendLocks.emplace(hash, std::make_pair(id, islock)); | ||
| pendingInstantSendLocks.emplace(hash, instantsend::PendingISLockFromPeer{id, islock}); | ||
| } | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::NotifyChainLock(const CBlockIndex* pindexChainLock) | ||
| { | ||
| HandleFullyConfirmedBlock(pindexChainLock); | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::UpdatedBlockTip(const CBlockIndex* pindexNew) | ||
|
|
@@ -644,6 +656,7 @@ void CInstantSendManager::UpdatedBlockTip(const CBlockIndex* pindexNew) | |
| if (pindex) { | ||
| HandleFullyConfirmedBlock(pindex); | ||
| } | ||
| NotifyWorker(); | ||
| } | ||
|
|
||
| void CInstantSendManager::HandleFullyConfirmedBlock(const CBlockIndex* pindex) | ||
|
|
@@ -842,11 +855,11 @@ bool CInstantSendManager::GetInstantSendLockByHash(const uint256& hash, instants | |
| LOCK(cs_pendingLocks); | ||
| auto it = pendingInstantSendLocks.find(hash); | ||
| if (it != pendingInstantSendLocks.end()) { | ||
| islock = it->second.second; | ||
| islock = it->second.islock; | ||
| } else { | ||
| auto itNoTx = pendingNoTxInstantSendLocks.find(hash); | ||
| if (itNoTx != pendingNoTxInstantSendLocks.end()) { | ||
| islock = itNoTx->second.second; | ||
| islock = itNoTx->second.islock; | ||
| } else { | ||
| return false; | ||
| } | ||
|
|
@@ -883,7 +896,7 @@ bool CInstantSendManager::IsWaitingForTx(const uint256& txHash) const | |
| LOCK(cs_pendingLocks); | ||
| auto it = pendingNoTxInstantSendLocks.begin(); | ||
| while (it != pendingNoTxInstantSendLocks.end()) { | ||
| if (it->second.second->txid == txHash) { | ||
| if (it->second.islock->txid == txHash) { | ||
| LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s, islock=%s\n", __func__, txHash.ToString(), | ||
| it->first.ToString()); | ||
| return true; | ||
|
|
@@ -920,6 +933,7 @@ size_t CInstantSendManager::GetInstantSendLockCount() const | |
| void CInstantSendManager::WorkThreadMain(PeerManager& peerman) | ||
| { | ||
| while (!workInterrupt) { | ||
| uint64_t startEpoch = workEpoch.load(std::memory_order_acquire); | ||
| bool fMoreWork = [&]() -> bool { | ||
| if (!IsInstantSendEnabled()) return false; | ||
| auto [more_work, peer_activity] = ProcessPendingInstantSendLocks(); | ||
|
|
@@ -947,10 +961,11 @@ void CInstantSendManager::WorkThreadMain(PeerManager& peerman) | |
| signer->ProcessPendingRetryLockTxs(txns); | ||
| return more_work; | ||
| }(); | ||
|
|
||
| if (!fMoreWork && !workInterrupt.sleep_for(std::chrono::milliseconds(100))) { | ||
| return; | ||
| } | ||
| if (fMoreWork) continue; | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Critical: Busy-loop causes 100% CPU utilization. When Under sustained load with >32 pending locks, Apply this diff to add throttling even when there's more work: return more_work;
}();
- if (fMoreWork) continue;
std::unique_lock<Mutex> l(workMutex);
- workCv.wait(l, [this, startEpoch]{
+ workCv.wait_for(l, std::chrono::milliseconds(fMoreWork ? 1 : 100), [this, startEpoch]{
return bool(workInterrupt) || workEpoch.load(std::memory_order_acquire) != startEpoch;
});This change uses a 1ms timeout when there's more work (allowing rapid processing) and 100ms when idle (reducing unnecessary wakeups).
🤖 Prompt for AI Agents |
||
| std::unique_lock<Mutex> l(workMutex); | ||
| workCv.wait(l, [this, startEpoch]{ | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. is there any chance that event could be missing and thread will wait forever? For example, when app is terminating. |
||
| return bool(workInterrupt) || workEpoch.load(std::memory_order_acquire) != startEpoch; | ||
| }); | ||
| } | ||
| } | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
linter complains about extra whitespaces here