diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f3e3106c1a..481383eca6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,11 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Added `result_hex` and `post_condition_aborted` to the block replay RPC endpoint - Added `--epoch ` flag to `clarity-cli` commands to specify the epoch context for evaluation. - In the `/v3/transaction/{txid}` RPC endpoint, added `block_height` and `is_canonical` to the response. +- Improved block validation in `stacks-inspect`. + +### Changed + +- Removed `validate-naka-block` option in `stacks-inspect`, merging it with `validate-block` so that users do not need to differentiate between the two. ### Fixed diff --git a/contrib/stacks-inspect/src/lib.rs b/contrib/stacks-inspect/src/lib.rs index 13026508aca..d5c3ad1a6d6 100644 --- a/contrib/stacks-inspect/src/lib.rs +++ b/contrib/stacks-inspect/src/lib.rs @@ -13,9 +13,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::io::Write; use std::path::PathBuf; use std::time::Instant; -use std::{fs, process}; +use std::{fs, io, process}; use clarity::types::chainstate::SortitionId; use clarity::util::hash::{Sha512Trunc256Sum, to_hex}; @@ -115,86 +116,202 @@ pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts opts } -/// Replay blocks from chainstate database -/// Terminates on error using `process::exit()` -/// -/// Arguments: -/// - `argv`: Args in CLI format: ` [args...]` -pub fn command_validate_block(argv: &[String], conf: Option<&Config>) { - let print_help_and_exit = || -> ! 
{ - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - eprintln!(" {n} prefix "); - eprintln!(" {n} index-range "); - eprintln!(" {n} range "); - eprintln!(" {n} "); - process::exit(1); - }; - let start = Instant::now(); - let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit()); - let mode = argv.get(2).map(String::as_str); - let staging_blocks_db_path = format!("{db_path}/chainstate/vm/index.sqlite"); - let conn = - Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY) - .unwrap(); - - let query = match mode { - Some("prefix") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", - argv[3] - ), - Some("first") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", - argv[3] - ), +#[derive(Clone)] +enum BlockSource { + Nakamoto, + Epoch2, +} + +#[derive(Clone)] +struct BlockScanEntry { + index_block_hash: StacksBlockId, + source: BlockSource, +} + +enum BlockSelection { + All, + Prefix(String), + Last(u64), + HeightRange { start: u64, end: u64 }, + IndexRange { start: u64, end: u64 }, +} + +impl BlockSelection { + fn clause(&self) -> String { + match self { + BlockSelection::All => "WHERE orphaned = 0 ORDER BY height ASC".into(), + BlockSelection::Prefix(prefix) => format!( + "WHERE orphaned = 0 AND index_block_hash LIKE '{prefix}%' ORDER BY height ASC", + ), + BlockSelection::Last(count) => { + format!("WHERE orphaned = 0 ORDER BY height DESC LIMIT {count}") + } + BlockSelection::HeightRange { start, end } => format!( + "WHERE orphaned = 0 AND height BETWEEN {start} AND {end} ORDER BY height ASC" + ), + BlockSelection::IndexRange { start, end } => { + let blocks = end.saturating_sub(*start); + format!("WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") + } + } + } +} + +fn parse_block_selection(mode: Option<&str>, argv: &[String]) -> Result { + match mode { + 
Some("prefix") => { + let prefix = argv + .get(3) + .ok_or_else(|| "Missing ".to_string())? + .clone(); + Ok(BlockSelection::Prefix(prefix)) + } + Some("last") => { + let count = argv + .get(3) + .ok_or_else(|| "Missing ".to_string())? + .parse::() + .map_err(|_| " must be a u64".to_string())?; + Ok(BlockSelection::Last(count)) + } Some("range") => { - let arg4 = argv[3] + let start = argv + .get(3) + .ok_or_else(|| "Missing ".to_string())? .parse::() - .expect(" not a valid u64"); - let arg5 = argv[4].parse::().expect(" not a valid u64"); - let start = arg4.saturating_sub(1); - let blocks = arg5.saturating_sub(arg4); - format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}" - ) + .map_err(|_| " must be a u64".to_string())?; + let end = argv + .get(4) + .ok_or_else(|| "Missing ".to_string())? + .parse::() + .map_err(|_| " must be a u64".to_string())?; + if start > end { + return Err(" must be <= ".into()); + } + Ok(BlockSelection::HeightRange { start, end }) } Some("index-range") => { - let start = argv[3] + let start = argv + .get(3) + .ok_or_else(|| "Missing ".to_string())? .parse::() - .expect(" not a valid u64"); - let end = argv[4].parse::().expect(" not a valid u64"); - let blocks = end.saturating_sub(start); - format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}" - ) + .map_err(|_| " must be a u64".to_string())?; + let end = argv + .get(4) + .ok_or_else(|| "Missing ".to_string())? 
+ .parse::<u64>() + .map_err(|_| "<end_block> must be a u64".to_string())?; + if start > end { + return Err("<start_block> must be <= <end_block>".into()); + } + Ok(BlockSelection::IndexRange { start, end }) } - Some("last") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", - argv[3] - ), - Some(_) => print_help_and_exit(), - // Default to ALL blocks - None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(), - }; + Some(other) => Err(format!("Unrecognized option: {other}")), + None => Ok(BlockSelection::All), + } +} + +fn collect_block_entries_for_selection( + db_path: &str, + selection: &BlockSelection, + chainstate: &StacksChainState, +) -> Vec<BlockScanEntry> { + let mut entries = Vec::new(); + let clause = selection.clause(); + + match selection { + BlockSelection::Last(limit) => { + if collect_nakamoto_entries(&mut entries, &clause, chainstate, Some(*limit)) { + return entries; + } + collect_epoch2_entries(&mut entries, &clause, db_path, Some(*limit)); + } + _ => { + collect_epoch2_entries(&mut entries, &clause, db_path, None); + collect_nakamoto_entries(&mut entries, &clause, chainstate, None); + } + } + + entries +} - let mut stmt = conn.prepare(&query).unwrap(); - let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); +fn limit_reached(limit: Option<u64>, current: usize) -> bool { + limit.is_some_and(|max| current >= max as usize) +} - let mut index_block_hashes: Vec<String> = vec![]; - while let Ok(Some(row)) = hashes_set.next() { - index_block_hashes.push(row.get(0).unwrap()); +fn collect_epoch2_entries( + entries: &mut Vec<BlockScanEntry>, + clause: &str, + db_path: &str, + limit: Option<u64>, +) -> bool { + if limit_reached(limit, entries.len()) { + return true; } - let total = index_block_hashes.len(); - println!("Will check {total} blocks"); - for (i, index_block_hash) in index_block_hashes.iter().enumerate() { - if i % 100 == 0 { - println!("Checked {i}..."); + let staging_blocks_db_path = format!("{db_path}/chainstate/vm/index.sqlite"); + let conn = + 
Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY) + .unwrap_or_else(|e| { + panic!("Failed to open staging blocks DB at {staging_blocks_db_path}: {e}"); + }); + let sql = format!("SELECT index_block_hash FROM staging_blocks {clause}"); + let mut stmt = conn.prepare(&sql).unwrap_or_else(|e| { + panic!("Failed to prepare query over staging_blocks: {e}"); + }); + let mut rows = stmt.query(NO_PARAMS).unwrap_or_else(|e| { + panic!("Failed to query staging_blocks: {e}"); + }); + while let Some(row) = rows.next().unwrap_or_else(|e| { + panic!("Failed to read staging block row: {e}"); + }) { + let index_block_hash: StacksBlockId = row.get(0).unwrap(); + entries.push(BlockScanEntry { + index_block_hash, + source: BlockSource::Epoch2, + }); + + if limit_reached(limit, entries.len()) { + return true; } - replay_staging_block(db_path, index_block_hash, conf); } - println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); + + false +} + +fn collect_nakamoto_entries( + entries: &mut Vec, + clause: &str, + chainstate: &StacksChainState, + limit: Option, +) -> bool { + if limit_reached(limit, entries.len()) { + return true; + } + + let sql = format!("SELECT index_block_hash FROM nakamoto_staging_blocks {clause}"); + let conn = chainstate.nakamoto_blocks_db(); + let mut stmt = conn.prepare(&sql).unwrap_or_else(|e| { + panic!("Failed to prepare query over nakamoto_staging_blocks: {e}"); + }); + let mut rows = stmt.query(NO_PARAMS).unwrap_or_else(|e| { + panic!("Failed to query nakamoto_staging_blocks: {e}"); + }); + while let Some(row) = rows.next().unwrap_or_else(|e| { + panic!("Failed to read Nakamoto staging block row: {e}"); + }) { + let index_block_hash: StacksBlockId = row.get(0).unwrap(); + entries.push(BlockScanEntry { + index_block_hash, + source: BlockSource::Nakamoto, + }); + + if limit_reached(limit, entries.len()) { + return true; + } + } + + false } /// Replay blocks from chainstate database @@ -202,7 +319,7 @@ pub fn 
command_validate_block(argv: &[String], conf: Option<&Config>) { /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_validate_block_nakamoto(argv: &[String], conf: Option<&Config>) { +pub fn command_validate_block(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -210,83 +327,99 @@ pub fn command_validate_block_nakamoto(argv: &[String], conf: Option<&Config>) { eprintln!(" {n} prefix "); eprintln!(" {n} index-range "); eprintln!(" {n} range "); - eprintln!(" {n} "); + eprintln!(" {n} "); + eprintln!(" {n} --early-exit ... # Exit on first error found"); process::exit(1); }; - let start = Instant::now(); - let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit()); - let mode = argv.get(2).map(String::as_str); - let chain_state_path = format!("{db_path}/chainstate/"); + let start = Instant::now(); + let mut args = argv.to_vec(); + let early_exit = if let Some("--early-exit") = args.get(1).map(String::as_str) { + args.remove(1); + true + } else { + false + }; + let db_path = args.get(1).unwrap_or_else(|| print_help_and_exit()); + let mode = args.get(2).map(String::as_str); + let selection = parse_block_selection(mode, &args).unwrap_or_else(|err| { + eprintln!("{err}"); + print_help_and_exit(); + }); let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - + let chain_state_path = format!("{db_path}/chainstate/"); let (chainstate, _) = StacksChainState::open( conf.is_mainnet(), conf.burnchain.chain_id, &chain_state_path, None, ) - .unwrap(); - - let conn = chainstate.nakamoto_blocks_db(); + .unwrap_or_else(|e| { + eprintln!("Failed to open chainstate at {chain_state_path}: {e}"); + process::exit(1); + }); - let query = match mode { - Some("prefix") => format!( - "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", - argv[3] - ), - Some("first") => format!( - "SELECT index_block_hash FROM nakamoto_staging_blocks 
WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", - argv[3] - ), - Some("range") => { - let arg4 = argv[3] - .parse::() - .expect(" not a valid u64"); - let arg5 = argv[4].parse::().expect(" not a valid u64"); - let start = arg4.saturating_sub(1); - let blocks = arg5.saturating_sub(arg4); - format!( - "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}" - ) - } - Some("index-range") => { - let start = argv[3] - .parse::() - .expect(" not a valid u64"); - let end = argv[4].parse::().expect(" not a valid u64"); - let blocks = end.saturating_sub(start); - format!( - "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}" - ) + let work_items = collect_block_entries_for_selection(db_path, &selection, &chainstate); + drop(chainstate); + if work_items.is_empty() { + println!("No blocks matched the requested selection."); + return; + } + let total_blocks = work_items.len(); + let mut completed = 0; + let mut errors = Vec::new(); + + for entry in work_items { + match &entry.source { + BlockSource::Nakamoto => { + if let Err(e) = + replay_naka_staging_block(db_path, &entry.index_block_hash, Some(conf)) + { + println!( + "Failed to validate Nakamoto block {}: {e:?}", + entry.index_block_hash + ); + if early_exit { + process::exit(1); + } + errors.push(entry.index_block_hash.clone()); + } + } + BlockSource::Epoch2 => { + if let Err(e) = replay_staging_block(db_path, &entry.index_block_hash, Some(conf)) { + println!("Failed to validate block {}: {e:?}", entry.index_block_hash); + if early_exit { + process::exit(1); + } + errors.push(entry.index_block_hash.clone()); + } + } } - Some("last") => format!( - "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", - argv[3] - ), - Some(_) => print_help_and_exit(), - // Default to ALL blocks - None => "SELECT index_block_hash FROM 
nakamoto_staging_blocks WHERE orphaned = 0".into(), - }; - - let mut stmt = conn.prepare(&query).unwrap(); - let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); - - let mut index_block_hashes: Vec<String> = vec![]; - while let Ok(Some(row)) = hashes_set.next() { - index_block_hashes.push(row.get(0).unwrap()); + completed += 1; + let pct = ((completed as f32 / total_blocks as f32) * 100.0).floor() as usize; + print!("\rValidating: {:>3}% ({}/{})", pct, completed, total_blocks); + io::stdout().flush().ok(); } - let total = index_block_hashes.len(); - println!("Will check {total} blocks"); - for (i, index_block_hash) in index_block_hashes.iter().enumerate() { - if i % 100 == 0 { - println!("Checked {i}..."); + print!("\rValidating: 100% ({}/{})\n", total_blocks, total_blocks); + + if !errors.is_empty() { + println!( + "\nValidation completed with {} error(s) found in {}s:", + errors.len(), + start.elapsed().as_secs() + ); + for hash in errors.iter() { + println!(" Block {}", hash); } - replay_naka_staging_block(db_path, index_block_hash, conf); + process::exit(1); } - println!("Finished. 
run_time_seconds = {}", start.elapsed().as_secs()); + println!( + "\nFinished validating {} blocks in {}s", + total_blocks, + start.elapsed().as_secs() + ); } /// Replay mock mined blocks from JSON files @@ -583,20 +716,22 @@ pub fn command_contract_hash(argv: &[String], _conf: Option<&Config>) { } /// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option<&Config>) { - let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); +fn replay_staging_block( + db_path: &str, + block_id: &StacksBlockId, + conf: Option<&Config>, +) -> Result<(), String> { + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); - let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let (mut chainstate, _) = StacksChainState::open( conf.is_mainnet(), conf.burnchain.chain_id, &chain_state_path, None, ) - .unwrap(); + .map_err(|e| format!("Failed to open chainstate at {chain_state_path}: {e:?}"))?; let burnchain = conf.get_burnchain(); let epochs = conf.burnchain.get_epoch_list(); @@ -610,35 +745,34 @@ fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option< None, true, ) - .unwrap(); + .map_err(|e| format!("Failed to open sortition DB at {sort_db_path}: {e:?}"))?; + let sort_tx = sortdb.tx_begin_at_tip(); let blocks_path = chainstate.blocks_path.clone(); - let (mut chainstate_tx, clarity_instance) = chainstate + let (chainstate_tx, clarity_instance) = chainstate .chainstate_tx_begin() - .expect("Failed to start chainstate tx"); + .map_err(|e| format!("{e:?}"))?; let mut next_staging_block = - StacksChainState::load_staging_block_info(&chainstate_tx.tx, &block_id) - .expect("Failed to load staging block data") - .expect("No such index block hash in block database"); + StacksChainState::load_staging_block_info(&chainstate_tx.tx, 
block_id) + .map_err(|e| format!("Failed to load staging block info: {e:?}"))? + .ok_or_else(|| "No such index block hash in block database".to_string())?; next_staging_block.block_data = StacksChainState::load_block_bytes( &blocks_path, &next_staging_block.consensus_hash, &next_staging_block.anchored_block_hash, ) - .unwrap() + .map_err(|e| format!("Failed to load block bytes: {e:?}"))? .unwrap_or_default(); - let Some(parent_header_info) = - StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() - else { - println!("Failed to load parent head info for block: {index_block_hash_hex}"); - return; - }; + let parent_header_info = + StacksChainState::get_parent_header_info(&chainstate_tx, &next_staging_block) + .map_err(|e| format!("Failed to get parent header info: {e:?}"))? + .ok_or_else(|| "Missing parent header info".to_string())?; - let block = - StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); + let block = StacksChainState::extract_stacks_block(&next_staging_block) + .map_err(|e| format!("{e:?}"))?; let block_size = next_staging_block.block_data.len() as u64; replay_block( @@ -648,7 +782,7 @@ fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option< &parent_header_info, &next_staging_block.parent_microblock_hash, next_staging_block.parent_microblock_seq, - &block_id, + block_id, &block, block_size, &next_staging_block.consensus_hash, @@ -656,6 +790,7 @@ fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option< next_staging_block.commit_burn, next_staging_block.sortition_burn, ); + Ok(()) } /// Process a mock mined block and call `replay_block()` to validate @@ -867,8 +1002,12 @@ fn replay_block( } /// Fetch and process a NakamotoBlock from database and call `replay_block_nakamoto()` to validate -fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &Config) { - let block_id = 
StacksBlockId::from_hex(index_block_hash_hex).unwrap(); +fn replay_naka_staging_block( + db_path: &str, + block_id: &StacksBlockId, + conf: Option<&Config>, +) -> Result<(), String> { + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); @@ -878,7 +1017,7 @@ fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &C &chain_state_path, None, ) - .unwrap(); + .map_err(|e| format!("Failed to open chainstate: {e:?}"))?; let burnchain = conf.get_burnchain(); let epochs = conf.burnchain.get_epoch_list(); @@ -892,14 +1031,16 @@ fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &C None, true, ) - .unwrap(); + .map_err(|e| format!("Failed to open sortition DB: {e:?}"))?; let (block, block_size) = chainstate .nakamoto_blocks_db() - .get_nakamoto_block(&block_id) - .unwrap() - .unwrap(); - replay_block_nakamoto(&mut sortdb, &mut chainstate, &block, block_size).unwrap(); + .get_nakamoto_block(block_id) + .map_err(|e| format!("Failed to load Nakamoto block: {e:?}"))? 
+ .ok_or_else(|| "No block data found".to_string())?; + + replay_block_nakamoto(&mut sortdb, &mut chainstate, &block, block_size) + .map_err(|e| format!("Failed to validate Nakamoto block: {e:?}")) } #[allow(clippy::result_large_err)] diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs index 032eee97941..ac151a10558 100644 --- a/contrib/stacks-inspect/src/main.rs +++ b/contrib/stacks-inspect/src/main.rs @@ -22,7 +22,7 @@ use clarity::types::chainstate::StacksPrivateKey; use clarity_cli::DEFAULT_CLI_EPOCH; use stacks_inspect::{ command_contract_hash, command_replay_mock_mining, command_try_mine, command_validate_block, - command_validate_block_nakamoto, drain_common_opts, + drain_common_opts, }; use stackslib::chainstate::stacks::miner::BlockBuilderSettings; use stackslib::chainstate::stacks::{ @@ -1590,11 +1590,6 @@ check if the associated microblocks can be downloaded process::exit(0); } - if argv[1] == "validate-naka-block" { - command_validate_block_nakamoto(&argv[1..], common_opts.config.as_ref()); - process::exit(0); - } - if argv[1] == "replay-mock-mining" { command_replay_mock_mining(&argv[1..], common_opts.config.as_ref()); process::exit(0); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index ef2c1457e37..00ce1a4493c 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5857,7 +5857,7 @@ impl StacksChainState { /// parent block has been processed. /// If it's not known, return None. 
pub fn get_parent_header_info( - chainstate_tx: &mut ChainstateTx, + chainstate_tx: &ChainstateTx, next_staging_block: &StagingBlock, ) -> Result<Option<StacksHeaderInfo>, Error> { let parent_block_header_info = match StacksChainState::get_anchored_block_header_info( @@ -6055,13 +6055,11 @@ impl StacksChainState { &next_staging_block.parent_microblock_hash, ); - let parent_header_info = match StacksChainState::get_parent_header_info( - &mut chainstate_tx, - &next_staging_block, - )? { - Some(hinfo) => hinfo, - None => return Ok((None, None)), - }; + let parent_header_info = + match StacksChainState::get_parent_header_info(&chainstate_tx, &next_staging_block)? { + Some(hinfo) => hinfo, + None => return Ok((None, None)), + }; let block = StacksChainState::extract_stacks_block(&next_staging_block)?; let block_size = u64::try_from(next_staging_block.block_data.len())