Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,9 @@ keywords = ["hypervisor", "address-space"]
license = "Apache-2.0 OR MIT"
name = "axaddrspace"
repository = "https://github.com/arceos-hypervisor/axaddrspace"
version = "0.1.3"
version = "0.2.0"

[features]
4-level-ept = []
arm-el2 = ["page_table_entry/arm-el2"]
default = ["arm-el2"]

Expand Down
9 changes: 5 additions & 4 deletions src/address_space/backend/alloc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ impl<H: PagingHandler> Backend<H> {
) -> bool {
debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
for addr in PageIter4K::new(start, start + size).unwrap() {
if let Ok((frame, page_size, _)) = pt.unmap(addr) {
if let Ok((frame, page_size)) = pt.unmap(addr) {
// Deallocate the physical frame if there is a mapping in the
// page table.
if page_size.is_huge() {
Expand All @@ -89,9 +89,10 @@ impl<H: PagingHandler> Backend<H> {
// Allocate a physical frame lazily and map it to the fault address.
// `vaddr` does not need to be aligned. It will be automatically
// aligned during `pt.remap` regardless of the page size.
H::alloc_frame()
.and_then(|frame| pt.remap(vaddr, frame, orig_flags).ok())
.is_some()
let Some(frame) = H::alloc_frame() else {
return false;
};
pt.remap(vaddr, frame, orig_flags)
}
}
}
7 changes: 1 addition & 6 deletions src/address_space/backend/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -84,12 +84,7 @@ impl<H: PagingHandler> MappingBackend for Backend<H> {
new_flags: MappingFlags,
page_table: &mut PageTable<H>,
) -> bool {
page_table
.protect_region(start, size, new_flags, true)
// If the TLB is refreshed immediately every time, there might be performance issues.
// The TLB refresh is managed uniformly at a higher level.
.map(|tlb| tlb.ignore())
.is_ok()
page_table.protect_region(start, size, new_flags, true)
}
}

Expand Down
13 changes: 7 additions & 6 deletions src/address_space/mod.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
use alloc::vec::Vec;
use core::fmt;

use axerrno::{AxError, AxResult, ax_err};
use axerrno::ax_err;
use memory_addr::{MemoryAddr, PhysAddr, is_aligned_4k};
use memory_set::{MemoryArea, MemorySet};
use page_table_multiarch::PagingHandler;
Expand All @@ -11,6 +11,7 @@ use crate::{GuestPhysAddr, GuestPhysAddrRange, mapping_err_to_ax_err};

mod backend;

pub use axerrno::{AxError, AxResult};
pub use backend::Backend;
pub use page_table_entry::MappingFlags;

Expand Down Expand Up @@ -43,7 +44,7 @@ impl<H: PagingHandler> AddrSpace<H> {
}

/// Returns the root physical address of the inner page table.
pub const fn page_table_root(&self) -> PhysAddr {
pub fn page_table_root(&self) -> PhysAddr {
self.pt.root_paddr()
}

Expand All @@ -53,12 +54,12 @@ impl<H: PagingHandler> AddrSpace<H> {
.contains_range(GuestPhysAddrRange::from_start_size(start, size))
}

/// Creates a new empty address space.
pub fn new_empty(base: GuestPhysAddr, size: usize) -> AxResult<Self> {
/// Creates a new empty address space with the given page table level.
pub fn new_empty(level: usize, base: GuestPhysAddr, size: usize) -> AxResult<Self> {
Ok(Self {
va_range: GuestPhysAddrRange::from_start_size(base, size),
areas: MemorySet::new(),
pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
pt: PageTable::<H>::new(level)?,
})
}

Expand Down Expand Up @@ -276,7 +277,7 @@ mod tests {
fn setup_test_addr_space() -> (AddrSpace<MockHal>, GuestPhysAddr, usize) {
const BASE: GuestPhysAddr = GuestPhysAddr::from_usize(0x10000);
const SIZE: usize = 0x10000;
let addr_space = AddrSpace::<MockHal>::new_empty(BASE, SIZE).unwrap();
let addr_space = AddrSpace::<MockHal>::new_empty(4, BASE, SIZE).unwrap();
(addr_space, BASE, SIZE)
}

Expand Down
1 change: 0 additions & 1 deletion src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ pub use hal::AxMmHal;

pub use memory_accessor::GuestMemoryAccessor;

use axerrno::AxError;
use memory_set::MappingError;

/// Information about nested page faults.
Expand Down
58 changes: 44 additions & 14 deletions src/npt/arch/aarch64.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,23 +210,55 @@ impl fmt::Debug for A64PTEHV {
}
}

/// Metadata of AArch64 hypervisor page tables (ipa to hpa).
/// Metadata of AArch64 hypervisor page tables (ipa to hpa) - Level 3.
#[derive(Copy, Clone)]
pub struct A64HVPagingMetaData;
pub struct A64HVPagingMetaDataL3;

impl PagingMetaData for A64HVPagingMetaData {
// The levels of the page table.
#[cfg(not(feature = "4-level-ept"))]
impl PagingMetaData for A64HVPagingMetaDataL3 {
// The levels of the page table for 3-level configuration.
const LEVELS: usize = 3;
#[cfg(feature = "4-level-ept")]
const LEVELS: usize = 4;
// The size of the IPA space for 3-level configuration.
const VA_MAX_BITS: usize = 40;
// In Armv8.0-A, the maximum size for a physical address is 48 bits.
const PA_MAX_BITS: usize = 48;

// The size of the IPA space can be configured in the same way as the
#[cfg(not(feature = "4-level-ept"))]
const VA_MAX_BITS: usize = 40; // virtual address space. VTCR_EL2.T0SZ controls the size.
#[cfg(feature = "4-level-ept")]
const VA_MAX_BITS: usize = 48;
type VirtAddr = GuestPhysAddr;

fn flush_tlb(vaddr: Option<Self::VirtAddr>) {
unsafe {
if let Some(vaddr) = vaddr {
#[cfg(not(feature = "arm-el2"))]
{
asm!("tlbi vaae1is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
}
#[cfg(feature = "arm-el2")]
{
asm!("tlbi vae2is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
}
} else {
// flush the entire TLB
#[cfg(not(feature = "arm-el2"))]
{
asm!("tlbi vmalle1; dsb sy; isb")
}
#[cfg(feature = "arm-el2")]
{
asm!("tlbi alle2is; dsb sy; isb")
}
}
}
}
}

/// Metadata of AArch64 hypervisor page tables (ipa to hpa) - Level 4.
#[derive(Copy, Clone)]
pub struct A64HVPagingMetaDataL4;

impl PagingMetaData for A64HVPagingMetaDataL4 {
// The levels of the page table for 4-level configuration.
const LEVELS: usize = 4;
// The size of the IPA space for 4-level configuration.
const VA_MAX_BITS: usize = 48;
// In Armv8.0-A, the maximum size for a physical address is 48 bits.
const PA_MAX_BITS: usize = 48;

Expand Down Expand Up @@ -257,5 +289,3 @@ impl PagingMetaData for A64HVPagingMetaData {
}
}
}
/// According to rust shyper, AArch64 translation table.
pub type NestedPageTable<H> = PageTable64<A64HVPagingMetaData, A64PTEHV, H>;
8 changes: 2 additions & 6 deletions src/npt/arch/riscv.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,2 @@
use page_table_entry::riscv::Rv64PTE;
use page_table_multiarch::{PageTable64, riscv::Sv39MetaData};

use crate::GuestPhysAddr;

pub type NestedPageTable<H> = PageTable64<Sv39MetaData<GuestPhysAddr>, Rv64PTE, H>;
pub use page_table_entry::riscv::Rv64PTE;
pub use page_table_multiarch::riscv::{Sv39MetaData, Sv48MetaData};
Loading