@@ -38,7 +38,7 @@ use static_array_rb_tree::*;
 
 
 /// Certain regions are pre-designated for special usage, specifically the kernel's initial identity mapping.
-/// They will be allocated from if an address within them is specifically requested;
+/// They will be allocated from if an address within them is specifically requested;
 /// otherwise, they will only be allocated from as a "last resort" if all other non-designated address ranges are exhausted.
 ///
 /// Any virtual addresses **less than or equal** to this address are considered "designated".
@@ -536,10 +536,15 @@ fn find_specific_chunk(
 /// If no range is specified, this function first attempts to find a suitable chunk
 /// that is **not** within the designated regions,
 /// and only allocates from the designated regions as a backup option.
+///
+/// If an alignment is specified (in terms of number of 4KiB pages), then the starting page
+/// in the allocated range must be aligned to that number of pages.
+/// If no specific alignment is needed, the default alignment of 1 page should be used.
 fn find_any_chunk(
     list: &mut StaticArrayRBTree<Chunk>,
     num_pages: usize,
     within_range: Option<&PageRange>,
+    alignment_4k_pages: usize,
 ) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> {
     let designated_low_end = DESIGNATED_PAGES_LOW_END.get()
         .ok_or(AllocationError::NotInitialized)?;
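Annotation (not part of the diff): the new `alignment_4k_pages` parameter is applied to page numbers rather than byte addresses, so rounding a candidate start page up to the requested alignment is ordinary round-up-to-a-multiple integer arithmetic. A minimal, self-contained sketch of that math, using a bare `usize` page number instead of the crate's `Page` type (`align_page_up` is an illustrative name, not this crate's API):

```rust
/// Round `page_number` up to the nearest multiple of `alignment_in_pages`.
/// Assumes `alignment_in_pages` is nonzero; an alignment of 1 leaves every page unchanged.
fn align_page_up(page_number: usize, alignment_in_pages: usize) -> usize {
    ((page_number + alignment_in_pages - 1) / alignment_in_pages) * alignment_in_pages
}

fn main() {
    // 2MiB alignment on a system with 4KiB pages corresponds to 512 pages.
    assert_eq!(align_page_up(513, 512), 1024);
    assert_eq!(align_page_up(1024, 512), 1024);
    // The default alignment of 1 page is a no-op.
    assert_eq!(align_page_up(777, 1), 777);
}
```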
@@ -555,7 +560,8 @@ fn find_any_chunk(
                 if let Some(chunk) = elem {
                     // Use max and min below to ensure that the range of pages we allocate from
                     // is within *both* the current chunk's bounds and the range's bounds.
-                    let lowest_possible_start_page = *max(chunk.start(), range.start());
+                    let lowest_possible_start_page = max(chunk.start(), range.start())
+                        .align_up(alignment_4k_pages);
                     let highest_possible_end_page = *min(chunk.end(), range.end());
                     if lowest_possible_start_page + num_pages <= highest_possible_end_page {
                         return adjust_chosen_chunk(
@@ -589,7 +595,8 @@ fn find_any_chunk(
             while let Some(chunk) = cursor.get().map(|w| w.deref()) {
                 // Use max and min below to ensure that the range of pages we allocate from
                 // is within *both* the current chunk's bounds and the range's bounds.
-                let lowest_possible_start_page = *max(chunk.start(), range.start());
+                let lowest_possible_start_page = max(chunk.start(), range.start())
+                    .align_up(alignment_4k_pages);
                 let highest_possible_end_page = *min(chunk.end(), range.end());
                 if lowest_possible_start_page + num_pages <= highest_possible_end_page {
                     return adjust_chosen_chunk(
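Annotation (not part of the diff): rounding the start page up can consume the front of a candidate chunk, which is why the fit check against the end bound must follow the alignment step; a chunk that is large enough in raw size may no longer accommodate the request once its start is pushed to the next aligned boundary. A small self-contained check with plain integers standing in for page numbers (the real code operates on the crate's `Page` type and its own end-bound convention):

```rust
fn align_page_up(page: usize, alignment: usize) -> usize {
    ((page + alignment - 1) / alignment) * alignment
}

fn main() {
    // A free chunk covering pages 0x1003 up to (but not including) 0x1200: 509 pages total.
    let (chunk_start, chunk_end_exclusive) = (0x1003usize, 0x1200usize);
    // The caller wants 256 pages whose starting page is aligned to a 256-page boundary.
    let (num_pages, alignment) = (256usize, 256usize);

    let lowest_possible_start_page = align_page_up(chunk_start, alignment);
    assert_eq!(lowest_possible_start_page, 0x1100);

    // Only 256 pages remain after alignment, so this request barely fits;
    // asking for 257 pages would fail even though the chunk holds 509 pages.
    assert!(lowest_possible_start_page + num_pages <= chunk_end_exclusive);
    assert!(lowest_possible_start_page + (num_pages + 1) > chunk_end_exclusive);
}
```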
@@ -621,8 +628,14 @@ fn find_any_chunk(
         Inner::Array(ref mut arr) => {
             for elem in arr.iter_mut() {
                 if let Some(chunk) = elem {
-                    if num_pages <= chunk.size_in_pages() {
-                        return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::Array(elem));
+                    let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
+                    if lowest_possible_start_page + num_pages <= *chunk.end() {
+                        return adjust_chosen_chunk(
+                            lowest_possible_start_page,
+                            num_pages,
+                            &chunk.clone(),
+                            ValueRefMut::Array(elem),
+                        );
                     }
                 }
             }
@@ -644,8 +657,14 @@ fn find_any_chunk(
             // The first iterates over the lower designated region, from higher addresses to lower, down to zero.
             let mut cursor = tree.upper_bound_mut(Bound::Included(designated_low_end));
             while let Some(chunk) = cursor.get().map(|w| w.deref()) {
-                if num_pages < chunk.size_in_pages() {
-                    return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor));
+                let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
+                if lowest_possible_start_page + num_pages <= *chunk.end() {
+                    return adjust_chosen_chunk(
+                        lowest_possible_start_page,
+                        num_pages,
+                        &chunk.clone(),
+                        ValueRefMut::RBTree(cursor),
+                    );
                 }
                 cursor.move_prev();
             }
@@ -657,8 +676,14 @@ fn find_any_chunk(
                     // we already iterated over non-designated pages in the first match statement above, so we're out of memory.
                     break;
                 }
-                if num_pages < chunk.size_in_pages() {
-                    return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor));
+                let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
+                if lowest_possible_start_page + num_pages <= *chunk.end() {
+                    return adjust_chosen_chunk(
+                        lowest_possible_start_page,
+                        num_pages,
+                        &chunk.clone(),
+                        ValueRefMut::RBTree(cursor),
+                    );
                 }
                 cursor.move_prev();
             }
@@ -729,23 +754,31 @@ fn adjust_chosen_chunk(
 }
 
 
-/// Possible options when requested pages from the page allocator.
+/// Possible options when requesting pages from the page allocator.
 pub enum AllocationRequest<'r> {
-    /// The allocated pages can be located at any virtual address.
-    Any,
     /// The allocated pages must start exactly at the given `VirtualAddress`.
     AtVirtualAddress(VirtualAddress),
+    /// The allocated pages may be located at any virtual address,
+    /// but the starting page must be aligned to a multiple of `alignment_4k_pages`.
+    /// An alignment of `1` page is equivalent to specifying no alignment requirement.
+    ///
+    /// Note: alignment is specified in number of 4KiB pages, not number of bytes.
+    AlignedTo { alignment_4k_pages: usize },
     /// The allocated pages can be located anywhere within the given range.
     WithinRange(&'r PageRange),
+    /// The allocated pages can be located at any virtual address
+    /// and have no special alignment requirements beyond a single page.
+    Any,
 }
 
+
 /// The core page allocation routine that allocates the given number of virtual pages,
 /// optionally at the requested starting `VirtualAddress`.
 ///
 /// This simply reserves a range of virtual addresses, it does not allocate
 /// actual physical memory frames nor do any memory mapping.
 /// Thus, the returned `AllocatedPages` aren't directly usable until they are mapped to physical frames.
-/// 
+///
 /// Allocation is based on a red-black tree and is thus `O(log(n))`.
 /// Fragmentation isn't cleaned up until we're out of address space, but that's not really a big deal.
 ///
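Annotation (not part of the diff): a sketch of how a caller might use the new `AlignedTo` variant, assuming the `allocate_pages_deferred(request, num_pages)` entry point whose dispatch appears in the next hunk; the exact signature and the handling of the returned `DeferredAllocAction` are simplified here.

```rust
// Hypothetical caller: reserve 512 pages (2MiB worth of 4KiB pages) whose starting
// page falls on a 512-page boundary, e.g. to back a 2MiB huge-page mapping later.
fn reserve_2mib_aligned_pages() -> Result<AllocatedPages, &'static str> {
    let request = AllocationRequest::AlignedTo { alignment_4k_pages: 512 };
    let (pages, _deferred_action) = allocate_pages_deferred(request, 512)?;
    // `pages` now starts at a page number that is a multiple of 512; the deferred
    // action is bookkeeping returned alongside the pages (dropped here for brevity).
    Ok(pages)
}
```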
@@ -780,11 +813,14 @@ pub fn allocate_pages_deferred(
         AllocationRequest::AtVirtualAddress(vaddr) => {
             find_specific_chunk(&mut locked_list, Page::containing_address(vaddr), num_pages)
         }
-        AllocationRequest::Any => {
-            find_any_chunk(&mut locked_list, num_pages, None)
+        AllocationRequest::AlignedTo { alignment_4k_pages } => {
+            find_any_chunk(&mut locked_list, num_pages, None, alignment_4k_pages)
         }
         AllocationRequest::WithinRange(range) => {
-            find_any_chunk(&mut locked_list, num_pages, Some(range))
+            find_any_chunk(&mut locked_list, num_pages, Some(range), 1)
+        }
+        AllocationRequest::Any => {
+            find_any_chunk(&mut locked_list, num_pages, None, 1)
         }
     };
     res.map_err(From::from) // convert from AllocationError to &str