From d2a366be9e1db8061be9601c3a2d5ca1546ee249 Mon Sep 17 00:00:00 2001
From: 0xJacobs
Date: Thu, 5 Mar 2026 03:30:22 +0100
Subject: [PATCH] security: mitigate BatchSign DoS with limits and deduplication

---
 grpc/src/service.rs | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/grpc/src/service.rs b/grpc/src/service.rs
index 09a1b71..7f7a841 100644
--- a/grpc/src/service.rs
+++ b/grpc/src/service.rs
@@ -15,6 +15,7 @@ use prost::Message;
 use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
 use signer::{BatchRetrieveReply, BatchRetrieveRequest, Empty, Slices};
+use std::collections::HashSet;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 use storage::blob_status_db::{BlobStatus, BlobStatusDB};
 use storage::quorum_db::{AssignedSlices, QuorumDB};
@@ -76,12 +77,36 @@ impl SignerService {
         *cnt -= 1;
     }
 
     async fn batch_sign_inner(
         &self,
         request: Request<BatchSignRequest>,
     ) -> Result<Response<BatchSignReply>, Status> {
         let remote_addr = request.remote_addr();
         let request_content = request.into_inner();
+
+        // --- DoS mitigation start ---
+        // Cap the batch size so one request cannot trigger unbounded signing work.
+        const MAX_BATCH_SIZE: usize = 100;
+        if request_content.requests.len() > MAX_BATCH_SIZE {
+            return Err(Status::new(
+                Code::InvalidArgument,
+                format!("Batch size exceeds limit of {} items", MAX_BATCH_SIZE),
+            ));
+        }
+
+        // Reject duplicate storage_root entries so the same blob cannot be
+        // signed more than once within a single batch.
+        let mut seen_roots = HashSet::new();
+        for req in &request_content.requests {
+            if !seen_roots.insert(req.storage_root.clone()) {
+                return Err(Status::new(
+                    Code::InvalidArgument,
+                    "Duplicate storage_root detected in batch",
+                ));
+            }
+        }
+        // --- DoS mitigation end ---
+
         metrics::GRPC_REQ_GAUGE.set(request_content.encoded_len() as f64);
 
         let ts = Instant::now();
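
---

Note (sketch, not applied by this patch): the two checks are easy to extract
into a standalone helper so they can be unit-tested without spinning up a
tonic server. A minimal sketch, assuming a simplified `SignRequest` stand-in
that carries only the `storage_root` bytes field the guard inspects; the
real prost-generated `signer` types differ, as may the reconstructed
`BatchSignRequest`/`BatchSignReply` generics in the signature above.

    use std::collections::HashSet;

    const MAX_BATCH_SIZE: usize = 100;

    /// Stand-in for the generated request item; only the field the guard reads.
    struct SignRequest {
        storage_root: Vec<u8>,
    }

    /// Mirrors the patch's checks: bounded batch size, no duplicate roots.
    fn validate_batch(requests: &[SignRequest]) -> Result<(), String> {
        if requests.len() > MAX_BATCH_SIZE {
            return Err(format!("Batch size exceeds limit of {} items", MAX_BATCH_SIZE));
        }
        let mut seen_roots = HashSet::new();
        for req in requests {
            // `insert` returns false when the value was already present.
            if !seen_roots.insert(req.storage_root.clone()) {
                return Err("Duplicate storage_root detected in batch".to_string());
            }
        }
        Ok(())
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn accepts_small_unique_batches() {
            let ok = vec![
                SignRequest { storage_root: vec![1] },
                SignRequest { storage_root: vec![2] },
            ];
            assert!(validate_batch(&ok).is_ok());
        }

        #[test]
        fn rejects_duplicates() {
            let dup = vec![
                SignRequest { storage_root: vec![1] },
                SignRequest { storage_root: vec![1] },
            ];
            assert!(validate_batch(&dup).is_err());
        }

        #[test]
        fn rejects_oversized_batches() {
            // MAX_BATCH_SIZE + 1 distinct roots: size check fires before dedup.
            let oversized: Vec<SignRequest> = (0..=MAX_BATCH_SIZE)
                .map(|i| SignRequest { storage_root: vec![i as u8] })
                .collect();
            assert!(validate_batch(&oversized).is_err());
        }
    }

Rejecting duplicates outright, rather than silently deduplicating, is the
safer default here: batch replies are typically matched to requests by
position, and dropping entries server-side would quietly break that
correspondence for clients.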