@@ -1497,10 +1497,7 @@ pub mod tests {
     use core::panic;
     use std::collections::{HashMap, HashSet};
 
-    use bao_tree::{
-        io::{outboard::PreOrderMemOutboard, round_up_to_chunks_groups},
-        ChunkRanges,
-    };
+    use bao_tree::{io::round_up_to_chunks_groups, ChunkRanges};
     use n0_future::{stream, Stream, StreamExt};
     use testresult::TestResult;
     use walkdir::WalkDir;
@@ -1509,7 +1506,7 @@ pub mod tests {
     use crate::{
         api::blobs::Bitfield,
         store::{
-            util::{read_checksummed, SliceInfoExt, Tag},
+            util::{read_checksummed, tests::create_n0_bao, SliceInfoExt, Tag},
             IROH_BLOCK_SIZE,
         },
     };
@@ -1526,17 +1523,6 @@ pub mod tests {
         1024 * 1024 * 8, // data file, outboard file
     ];
 
-    /// Create n0 flavoured bao. Note that this can be used to request ranges below a chunk group size,
-    /// which can not be exported via bao because we don't store hashes below the chunk group level.
-    pub fn create_n0_bao(data: &[u8], ranges: &ChunkRanges) -> anyhow::Result<(Hash, Vec<u8>)> {
-        let outboard = PreOrderMemOutboard::create(data, IROH_BLOCK_SIZE);
-        let mut encoded = Vec::new();
-        let size = data.len() as u64;
-        encoded.extend_from_slice(&size.to_le_bytes());
-        bao_tree::io::sync::encode_ranges_validated(data, &outboard, ranges, &mut encoded)?;
-        Ok((outboard.root.into(), encoded))
-    }
-
     pub fn round_up_request(size: u64, ranges: &ChunkRanges) -> ChunkRanges {
         let last_chunk = ChunkNum::chunks(size);
         let data_range = ChunkRanges::from(..last_chunk);
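For context (not part of the commit): a minimal sketch of how the relocated helper might be called from this test module after the move. It relies only on what the hunks above establish: the new crate::store::util::tests::create_n0_bao path, its (&[u8], &ChunkRanges) -> anyhow::Result<(Hash, Vec<u8>)> signature, and the 8-byte little-endian size prefix the deleted body wrote. The test name and data size are illustrative, and some imports duplicate ones the module already has.

    use bao_tree::{ChunkNum, ChunkRanges};

    use crate::store::util::tests::create_n0_bao;

    // Sketch only: exercises the helper from its new home in store::util::tests.
    #[test]
    fn create_n0_bao_size_prefix_sketch() -> anyhow::Result<()> {
        let data = vec![0u8; 1024 * 64];
        // Request just the first chunk; per the deleted doc comment this may lie
        // below the chunk group size, which is fine for encoding (only export
        // via bao is restricted).
        let ranges = ChunkRanges::from(ChunkNum(0)..ChunkNum(1));
        let (_hash, encoded) = create_n0_bao(&data, &ranges)?;
        // The n0 flavour prepends the content length as 8 little-endian bytes
        // before the validated range encoding.
        let size_prefix = (data.len() as u64).to_le_bytes();
        assert_eq!(&encoded[..8], &size_prefix[..]);
        Ok(())
    }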