diff --git a/Cargo.toml b/Cargo.toml index 0c2c550..4d124fe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,10 +11,9 @@ keywords = ["hypervisor", "address-space"] license = "Apache-2.0 OR MIT" name = "axaddrspace" repository = "https://github.com/arceos-hypervisor/axaddrspace" -version = "0.1.3" +version = "0.2.0" [features] -4-level-ept = [] arm-el2 = ["page_table_entry/arm-el2"] default = ["arm-el2"] diff --git a/src/address_space/backend/alloc.rs b/src/address_space/backend/alloc.rs index eb01e11..80c1e12 100644 --- a/src/address_space/backend/alloc.rs +++ b/src/address_space/backend/alloc.rs @@ -62,7 +62,7 @@ impl Backend { ) -> bool { debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size); for addr in PageIter4K::new(start, start + size).unwrap() { - if let Ok((frame, page_size, _)) = pt.unmap(addr) { + if let Ok((frame, page_size)) = pt.unmap(addr) { // Deallocate the physical frame if there is a mapping in the // page table. if page_size.is_huge() { @@ -89,9 +89,10 @@ impl Backend { // Allocate a physical frame lazily and map it to the fault address. // `vaddr` does not need to be aligned. It will be automatically // aligned during `pt.remap` regardless of the page size. - H::alloc_frame() - .and_then(|frame| pt.remap(vaddr, frame, orig_flags).ok()) - .is_some() + let Some(frame) = H::alloc_frame() else { + return false; + }; + pt.remap(vaddr, frame, orig_flags) } } } diff --git a/src/address_space/backend/mod.rs b/src/address_space/backend/mod.rs index e8f7345..0165c82 100644 --- a/src/address_space/backend/mod.rs +++ b/src/address_space/backend/mod.rs @@ -84,12 +84,7 @@ impl MappingBackend for Backend { new_flags: MappingFlags, page_table: &mut PageTable, ) -> bool { - page_table - .protect_region(start, size, new_flags, true) - // If the TLB is refreshed immediately every time, there might be performance issues. - // The TLB refresh is managed uniformly at a higher level. 
- .map(|tlb| tlb.ignore()) - .is_ok() + page_table.protect_region(start, size, new_flags, true) } } diff --git a/src/address_space/mod.rs b/src/address_space/mod.rs index 0ab5797..9144996 100644 --- a/src/address_space/mod.rs +++ b/src/address_space/mod.rs @@ -1,7 +1,7 @@ use alloc::vec::Vec; use core::fmt; -use axerrno::{AxError, AxResult, ax_err}; +use axerrno::ax_err; use memory_addr::{MemoryAddr, PhysAddr, is_aligned_4k}; use memory_set::{MemoryArea, MemorySet}; use page_table_multiarch::PagingHandler; @@ -11,6 +11,7 @@ use crate::{GuestPhysAddr, GuestPhysAddrRange, mapping_err_to_ax_err}; mod backend; +pub use axerrno::{AxError, AxResult}; pub use backend::Backend; pub use page_table_entry::MappingFlags; @@ -43,7 +44,7 @@ impl AddrSpace { } /// Returns the root physical address of the inner page table. - pub const fn page_table_root(&self) -> PhysAddr { + pub fn page_table_root(&self) -> PhysAddr { self.pt.root_paddr() } @@ -53,12 +54,12 @@ impl AddrSpace { .contains_range(GuestPhysAddrRange::from_start_size(start, size)) } - /// Creates a new empty address space. - pub fn new_empty(base: GuestPhysAddr, size: usize) -> AxResult { + /// Creates a new empty address space with the architecture default page table level. 
+ pub fn new_empty(level: usize, base: GuestPhysAddr, size: usize) -> AxResult<Self> { Ok(Self { va_range: GuestPhysAddrRange::from_start_size(base, size), areas: MemorySet::new(), - pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?, + pt: PageTable::<H>::new(level)?, }) } @@ -276,7 +277,7 @@ mod tests { fn setup_test_addr_space() -> (AddrSpace, GuestPhysAddr, usize) { const BASE: GuestPhysAddr = GuestPhysAddr::from_usize(0x10000); const SIZE: usize = 0x10000; - let addr_space = AddrSpace::::new_empty(BASE, SIZE).unwrap(); + let addr_space = AddrSpace::::new_empty(4, BASE, SIZE).unwrap(); (addr_space, BASE, SIZE) } diff --git a/src/lib.rs b/src/lib.rs index 11ed0f0..a1a4daf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -23,7 +23,6 @@ pub use hal::AxMmHal; pub use memory_accessor::GuestMemoryAccessor; -use axerrno::AxError; use memory_set::MappingError; /// Information about nested page faults. diff --git a/src/npt/arch/aarch64.rs b/src/npt/arch/aarch64.rs index fa056ce..0eea435 100644 --- a/src/npt/arch/aarch64.rs +++ b/src/npt/arch/aarch64.rs @@ -210,23 +210,55 @@ impl fmt::Debug for A64PTEHV { } } -/// Metadata of AArch64 hypervisor page tables (ipa to hpa). +/// Metadata of AArch64 hypervisor page tables (ipa to hpa) - Level 3. #[derive(Copy, Clone)] -pub struct A64HVPagingMetaData; +pub struct A64HVPagingMetaDataL3; -impl PagingMetaData for A64HVPagingMetaData { - // The levels of the page table. - #[cfg(not(feature = "4-level-ept"))] +impl PagingMetaData for A64HVPagingMetaDataL3 { + // The levels of the page table for 3-level configuration. const LEVELS: usize = 3; - #[cfg(feature = "4-level-ept")] - const LEVELS: usize = 4; + // The size of the IPA space for 3-level configuration. + const VA_MAX_BITS: usize = 40; + // In Armv8.0-A, the maximum size for a physical address is 48 bits. 
+ const PA_MAX_BITS: usize = 48; - // The size of the IPA space can be configured in the same way as the - #[cfg(not(feature = "4-level-ept"))] - const VA_MAX_BITS: usize = 40; // virtual address space. VTCR_EL2.T0SZ controls the size. - #[cfg(feature = "4-level-ept")] - const VA_MAX_BITS: usize = 48; + type VirtAddr = GuestPhysAddr; + fn flush_tlb(vaddr: Option<Self::VirtAddr>) { + unsafe { + if let Some(vaddr) = vaddr { + #[cfg(not(feature = "arm-el2"))] + { + asm!("tlbi vaae1is, {}; dsb sy; isb", in(reg) vaddr.as_usize()) + } + #[cfg(feature = "arm-el2")] + { + asm!("tlbi vae2is, {}; dsb sy; isb", in(reg) vaddr.as_usize()) + } + } else { + // flush the entire TLB + #[cfg(not(feature = "arm-el2"))] + { + asm!("tlbi vmalle1; dsb sy; isb") + } + #[cfg(feature = "arm-el2")] + { + asm!("tlbi alle2is; dsb sy; isb") + } + } + } + } +} + +/// Metadata of AArch64 hypervisor page tables (ipa to hpa) - Level 4. +#[derive(Copy, Clone)] +pub struct A64HVPagingMetaDataL4; + +impl PagingMetaData for A64HVPagingMetaDataL4 { + // The levels of the page table for 4-level configuration. + const LEVELS: usize = 4; + // The size of the IPA space for 4-level configuration. + const VA_MAX_BITS: usize = 48; // In Armv8.0-A, the maximum size for a physical address is 48 bits. const PA_MAX_BITS: usize = 48; @@ -257,5 +289,3 @@ impl PagingMetaData for A64HVPagingMetaData { } } } -/// According to rust shyper, AArch64 translation table. 
-pub type NestedPageTable<H> = PageTable64<A64HVPagingMetaData, A64PTEHV, H>; diff --git a/src/npt/arch/riscv.rs b/src/npt/arch/riscv.rs index aa190e2..a09fb6f 100644 --- a/src/npt/arch/riscv.rs +++ b/src/npt/arch/riscv.rs @@ -1,6 +1,2 @@ -use page_table_entry::riscv::Rv64PTE; -use page_table_multiarch::{PageTable64, riscv::Sv39MetaData}; - -use crate::GuestPhysAddr; - -pub type NestedPageTable<H> = PageTable64<Sv39MetaData<GuestPhysAddr>, Rv64PTE, H>; +pub use page_table_entry::riscv::Rv64PTE; +pub use page_table_multiarch::riscv::{Sv39MetaData, Sv48MetaData}; diff --git a/src/npt/mod.rs b/src/npt/mod.rs index 14d7d2d..967785e 100644 --- a/src/npt/mod.rs +++ b/src/npt/mod.rs @@ -1,14 +1,211 @@ +use axerrno::{ax_err, ax_err_type}; +use memory_addr::PhysAddr; +use memory_set::MappingError; +use page_table_entry::MappingFlags; +use page_table_multiarch::PagingHandler; + +use crate::GuestPhysAddr; + cfg_if::cfg_if! { if #[cfg(target_arch = "x86_64")] { - /// The architecture-specific nested page table for two-stage address translation. - pub type NestedPageTable<H> = arch::ExtendedPageTable<H>; + pub type NestedPageTableL4<H> = arch::ExtendedPageTable<H>; + } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] { - /// The architecture-specific page table. - pub type NestedPageTable<H> = arch::NestedPageTable<H>; - } else if #[cfg(target_arch = "aarch64")]{ - /// The architecture-specific nested page table for two-stage address translation. - pub type NestedPageTable<H> = arch::NestedPageTable<H>; + pub type NestedPageTableL3<H> = page_table_multiarch::PageTable64<arch::Sv39MetaData<GuestPhysAddr>, arch::Rv64PTE, H>; + pub type NestedPageTableL4<H> = page_table_multiarch::PageTable64<arch::Sv48MetaData<GuestPhysAddr>, arch::Rv64PTE, H>; + + } else if #[cfg(target_arch = "aarch64")] { + /// AArch64 Level 3 nested page table type alias. + pub type NestedPageTableL3<H> = page_table_multiarch::PageTable64<arch::A64HVPagingMetaDataL3, arch::A64PTEHV, H>; + + /// AArch64 Level 4 nested page table type alias. 
+ pub type NestedPageTableL4<H> = page_table_multiarch::PageTable64<arch::A64HVPagingMetaDataL4, arch::A64PTEHV, H>; } } mod arch; + +pub enum NestedPageTable<H: PagingHandler> { + #[cfg(not(target_arch = "x86_64"))] + L3(NestedPageTableL3<H>), + L4(NestedPageTableL4<H>), +} + +impl<H: PagingHandler> NestedPageTable<H> { + pub fn new(level: usize) -> axerrno::AxResult<Self> { + match level { + 3 => { + #[cfg(not(target_arch = "x86_64"))] + { + use axerrno::ax_err_type; + + let res = NestedPageTableL3::try_new().map_err(|_| ax_err_type!(NoMemory))?; + return Ok(NestedPageTable::L3(res)); + } + #[cfg(target_arch = "x86_64")] + { + return ax_err!(InvalidInput, "L3 not supported on x86_64"); + } + } + 4 => { + let res = NestedPageTableL4::try_new().map_err(|_| ax_err_type!(NoMemory))?; + return Ok(NestedPageTable::L4(res)); + } + _ => return ax_err!(InvalidInput, "Invalid page table level"), + } + } + + pub fn root_paddr(&self) -> memory_addr::PhysAddr { + match self { + #[cfg(not(target_arch = "x86_64"))] + NestedPageTable::L3(pt) => pt.root_paddr(), + NestedPageTable::L4(pt) => pt.root_paddr(), + } + } + + /// Maps a virtual address to a physical address. + pub fn map( + &mut self, + vaddr: crate::GuestPhysAddr, + paddr: memory_addr::PhysAddr, + size: page_table_multiarch::PageSize, + flags: page_table_entry::MappingFlags, + ) -> memory_set::MappingResult { + match self { + #[cfg(not(target_arch = "x86_64"))] + NestedPageTable::L3(pt) => { + pt.map(vaddr, paddr, size, flags) + .map_err(|_| MappingError::BadState)? + .flush(); + } + NestedPageTable::L4(pt) => { + let _res = pt + .map(vaddr, paddr, size, flags) + .map_err(|_| MappingError::BadState)? + .flush(); + } + } + Ok(()) + } + + /// Unmaps a virtual address. 
+ pub fn unmap( + &mut self, + vaddr: GuestPhysAddr, + ) -> memory_set::MappingResult<(memory_addr::PhysAddr, page_table_multiarch::PageSize)> { + match self { + #[cfg(not(target_arch = "x86_64"))] + NestedPageTable::L3(pt) => { + let (addr, size, f) = pt.unmap(vaddr).map_err(|_| MappingError::BadState)?; + f.flush(); + Ok((addr, size)) + } + NestedPageTable::L4(pt) => { + let (addr, size, f) = pt.unmap(vaddr).map_err(|_| MappingError::BadState)?; + f.flush(); + Ok((addr, size)) + } + } + } + + /// Maps a region. + pub fn map_region( + &mut self, + vaddr: GuestPhysAddr, + get_paddr: impl Fn(GuestPhysAddr) -> PhysAddr, + size: usize, + flags: MappingFlags, + allow_huge: bool, + flush_tlb_by_page: bool, + ) -> memory_set::MappingResult { + match self { + #[cfg(not(target_arch = "x86_64"))] + NestedPageTable::L3(pt) => { + pt.map_region(vaddr, get_paddr, size, flags, allow_huge, flush_tlb_by_page) + .map_err(|_| MappingError::BadState)? + .flush_all(); + } + NestedPageTable::L4(pt) => { + pt.map_region(vaddr, get_paddr, size, flags, allow_huge, flush_tlb_by_page) + .map_err(|_| MappingError::BadState)? + .flush_all(); + } + } + Ok(()) + } + + /// Unmaps a region. + pub fn unmap_region( + &mut self, + start: GuestPhysAddr, + size: usize, + flush: bool, + ) -> memory_set::MappingResult { + match self { + #[cfg(not(target_arch = "x86_64"))] + NestedPageTable::L3(pt) => { + pt.unmap_region(start, size, flush) + .map_err(|_| MappingError::BadState)? + .ignore(); + } + NestedPageTable::L4(pt) => { + pt.unmap_region(start, size, flush) + .map_err(|_| MappingError::BadState)? + .ignore(); + } + } + Ok(()) + } + + pub fn remap(&mut self, start: GuestPhysAddr, paddr: PhysAddr, flags: MappingFlags) -> bool { + match self { + #[cfg(not(target_arch = "x86_64"))] + NestedPageTable::L3(pt) => pt.remap(start, paddr, flags).is_ok(), + NestedPageTable::L4(pt) => pt.remap(start, paddr, flags).is_ok(), + } + } + + /// Updates protection flags for a region. 
+ pub fn protect_region( + &mut self, + start: GuestPhysAddr, + size: usize, + new_flags: page_table_entry::MappingFlags, + flush: bool, + ) -> bool { + match self { + #[cfg(not(target_arch = "x86_64"))] + NestedPageTable::L3(pt) => pt + .protect_region(start, size, new_flags, flush) // If the TLB is refreshed immediately every time, there might be performance issues. + // The TLB refresh is managed uniformly at a higher level. + .map(|tlb| tlb.ignore()) + .is_ok(), + NestedPageTable::L4(pt) => pt + .protect_region(start, size, new_flags, flush) // If the TLB is refreshed immediately every time, there might be performance issues. + // The TLB refresh is managed uniformly at a higher level. + .map(|tlb| tlb.ignore()) + .is_ok(), + } + } + + /// Queries a virtual address to get physical address and mapping info. + pub fn query( + &self, + vaddr: crate::GuestPhysAddr, + ) -> page_table_multiarch::PagingResult<( + memory_addr::PhysAddr, + page_table_entry::MappingFlags, + page_table_multiarch::PageSize, + )> { + match self { + #[cfg(not(target_arch = "x86_64"))] + NestedPageTable::L3(pt) => pt.query(vaddr), + NestedPageTable::L4(pt) => pt.query(vaddr), + } + } + + /// Translates a virtual address to a physical address. + pub fn translate(&self, vaddr: crate::GuestPhysAddr) -> Option<PhysAddr> { + self.query(vaddr).ok().map(|(paddr, _, _)| paddr) + } +}