Remove `Rights` from VMAR

This commit is contained in:
Ruihan Li 2025-10-24 10:04:38 +08:00 committed by Tate, Hongliang Tian
parent 6a67807fd0
commit b07d06170b
13 changed files with 446 additions and 792 deletions

View File

@ -4,7 +4,6 @@
use core::cell::Ref;
use aster_rights::Full;
use ostd::{
mm::{Fallible, Infallible, PodAtomic, VmReader, VmWriter, MAX_USERSPACE_VADDR},
task::Task,
@ -40,7 +39,7 @@ impl Context<'_> {
/// The user's memory space of the current task.
///
/// It provides methods to read from or write to the user space efficiently.
pub struct CurrentUserSpace<'a>(Ref<'a, Option<Vmar<Full>>>);
pub struct CurrentUserSpace<'a>(Ref<'a, Option<Arc<Vmar>>>);
/// Gets the [`CurrentUserSpace`] from the current task.
///
@ -76,7 +75,7 @@ impl<'a> CurrentUserSpace<'a> {
/// # Panics
///
/// This method will panic if the current process has cleared its `Vmar`.
pub fn vmar(&self) -> &Vmar<Full> {
pub fn vmar(&self) -> &Vmar {
self.0.as_ref().unwrap()
}
@ -84,7 +83,7 @@ impl<'a> CurrentUserSpace<'a> {
pub fn is_vmar_shared(&self) -> bool {
// If the VMAR is not shared, its reference count should be exactly 2:
// one reference is held by `ThreadLocal` and the other by `ProcessVm` in `Process`.
self.vmar().reference_count() != 2
Arc::strong_count(self.0.as_ref().unwrap()) != 2
}
/// Creates a reader to read data from the user space of the current task.

View File

@ -153,13 +153,7 @@ impl PosixThreadBuilder {
let fs =
fs.unwrap_or_else(|| Arc::new(ThreadFsInfo::new(ns_proxy.mnt_ns().new_fs_resolver())));
let vmar = process
.upgrade()
.unwrap()
.lock_vmar()
.unwrap()
.dup()
.unwrap();
let vmar = process.upgrade().unwrap().lock_vmar().dup_vmar().unwrap();
Arc::new_cyclic(|weak_task| {
let posix_thread = {

View File

@ -2,7 +2,6 @@
use core::cell::{Cell, Ref, RefCell, RefMut};
use aster_rights::Full;
use ostd::{arch::cpu::context::FpuContext, mm::Vaddr, sync::RwArc, task::CurrentTask};
use super::RobustListHead;
@ -21,7 +20,7 @@ pub struct ThreadLocal {
clear_child_tid: Cell<Vaddr>,
// Virtual memory address regions.
vmar: RefCell<Option<Vmar<Full>>>,
vmar: RefCell<Option<Arc<Vmar>>>,
page_fault_disabled: Cell<bool>,
// Robust futexes.
@ -56,7 +55,7 @@ impl ThreadLocal {
pub(super) fn new(
set_child_tid: Vaddr,
clear_child_tid: Vaddr,
vmar: Vmar<Full>,
vmar: Arc<Vmar>,
file_table: RwArc<FileTable>,
fs: Arc<ThreadFsInfo>,
fpu_context: FpuContext,
@ -88,7 +87,7 @@ impl ThreadLocal {
&self.clear_child_tid
}
pub fn vmar(&self) -> &RefCell<Option<Vmar<Full>>> {
pub fn vmar(&self) -> &RefCell<Option<Arc<Vmar>>> {
&self.vmar
}

View File

@ -3,7 +3,6 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use align_ext::AlignExt;
use aster_rights::Full;
use crate::{
prelude::*,
@ -35,7 +34,7 @@ impl Heap {
}
/// Initializes and maps the heap virtual memory.
pub(super) fn alloc_and_map(&self, vmar: &Vmar<Full>) -> Result<()> {
pub(super) fn alloc_and_map(&self, vmar: &Vmar) -> Result<()> {
let vmar_map_options = {
let perms = VmPerms::READ | VmPerms::WRITE;
vmar.new_map(PAGE_SIZE, perms).unwrap().offset(self.base)

View File

@ -165,7 +165,7 @@ impl InitStack {
/// Maps the VMO of the init stack and constructs a writer to initialize its content.
pub(super) fn map_and_write(
&self,
vmar: &Vmar<Full>,
vmar: &Vmar,
argv: Vec<CString>,
envp: Vec<CString>,
auxvec: AuxVec,
@ -204,7 +204,7 @@ impl InitStack {
/// Constructs a reader to parse the content of an `InitStack`.
/// The `InitStack` should only be read after it is initialized.
pub(super) fn reader<'a>(&self, vmar: &'a Vmar<Full>) -> InitStackReader<'a> {
pub(super) fn reader<'a>(&self, vmar: &'a Vmar) -> InitStackReader<'a> {
debug_assert!(self.is_initialized());
InitStackReader {
base: self.pos(),
@ -397,7 +397,7 @@ fn generate_random_for_aux_vec() -> [u8; 16] {
/// A reader to parse the content of an `InitStack`.
pub struct InitStackReader<'a> {
base: Vaddr,
vmar: &'a Vmar<Full>,
vmar: &'a Vmar,
/// The mapping address of the `InitStack`.
map_addr: usize,
argv_range: Range<Vaddr>,

View File

@ -15,8 +15,6 @@ mod init_stack;
use core::ops::Range;
use align_ext::AlignExt;
use aster_rights::Full;
pub use heap::Heap;
use ostd::{
mm::{io_util::HasVmReaderWriter, vm_space::VmQueriedItem, PageFlags, UFrame},
sync::MutexGuard,
@ -24,7 +22,7 @@ use ostd::{
};
pub use self::{
heap::USER_HEAP_SIZE_LIMIT,
heap::{Heap, USER_HEAP_SIZE_LIMIT},
init_stack::{
aux_vec::{AuxKey, AuxVec},
InitStack, InitStackReader, INIT_STACK_SIZE, MAX_LEN_STRING_ARG, MAX_NR_STRING_ARGS,
@ -76,14 +74,14 @@ use crate::{
*/
/// The process user space virtual memory.
pub struct ProcessVm(Mutex<Option<Vmar<Full>>>);
pub struct ProcessVm(Mutex<Option<Arc<Vmar>>>);
/// A guard to the [`Vmar`] used by a process.
///
/// It is bound to a [`ProcessVm`] and can only be obtained from
/// the [`ProcessVm::lock_vmar`] method.
pub struct ProcessVmarGuard<'a> {
inner: MutexGuard<'a, Option<Vmar<Full>>>,
inner: MutexGuard<'a, Option<Arc<Vmar>>>,
}
impl ProcessVmarGuard<'_> {
@ -92,37 +90,45 @@ impl ProcessVmarGuard<'_> {
/// # Panics
///
/// This method will panic if the process has exited and its VMAR has been dropped.
pub fn unwrap(&self) -> &Vmar<Full> {
pub fn unwrap(&self) -> &Vmar {
self.inner.as_ref().unwrap()
}
/// Returns a reference to the process VMAR if it exists.
///
/// Returns `None` if the process has exited and its VMAR has been dropped.
pub fn as_ref(&self) -> Option<&Vmar<Full>> {
self.inner.as_ref()
pub fn as_ref(&self) -> Option<&Vmar> {
self.inner.as_ref().map(|v| &**v)
}
/// Sets a new VMAR for the binding process.
///
/// If the `new_vmar` is `None`, this method will remove the
/// current VMAR.
pub(super) fn set_vmar(&mut self, new_vmar: Option<Vmar<Full>>) {
pub(super) fn set_vmar(&mut self, new_vmar: Option<Arc<Vmar>>) {
*self.inner = new_vmar;
}
/// Duplicates a new VMAR from the binding process.
///
/// This method should only be used when creating a process that
/// shares the same VMAR.
pub(super) fn dup_vmar(&self) -> Option<Arc<Vmar>> {
self.inner.as_ref().cloned()
}
}
impl Clone for ProcessVm {
fn clone(&self) -> Self {
let vmar = self.lock_vmar();
Self(Mutex::new(Some(vmar.unwrap().dup().unwrap())))
let vmar = self.lock_vmar().dup_vmar();
Self(Mutex::new(vmar))
}
}
impl ProcessVm {
/// Allocates a new `ProcessVm`
pub fn alloc() -> Self {
let vmar = Vmar::<Full>::new();
let vmar = Vmar::new();
let heap = vmar.heap();
heap.alloc_and_map(&vmar).unwrap();
Self(Mutex::new(Some(vmar)))
@ -133,7 +139,7 @@ impl ProcessVm {
/// The returned `ProcessVm` will have a forked `Vmar`.
pub fn fork_from(other: &ProcessVm) -> Result<Self> {
let process_vmar = other.lock_vmar();
let vmar = Mutex::new(Some(Vmar::<Full>::fork_from(process_vmar.unwrap())?));
let vmar = Mutex::new(Some(Vmar::fork_from(process_vmar.unwrap())?));
Ok(Self(vmar))
}
@ -154,7 +160,7 @@ impl ProcessVm {
}
// TODO: Move the below code to the vm module.
impl Vmar<Full> {
impl Vmar {
/// Returns a reader for reading contents from
/// the `InitStack`.
pub fn init_stack_reader(&self) -> InitStackReader {
@ -172,7 +178,7 @@ impl Vmar<Full> {
}
// TODO: Move the below code to the vm module.
impl Vmar<Full> {
impl Vmar {
/// Reads memory from the process user space.
///
/// This method reads until one of the conditions is met:
@ -317,9 +323,9 @@ impl Vmar<Full> {
/// Unshares and renews the [`Vmar`] of the current process.
pub(super) fn unshare_and_renew_vmar(ctx: &Context, vmar: &mut ProcessVmarGuard) {
let new_vmar = Vmar::<Full>::new();
let new_vmar = Vmar::new();
let guard = disable_preempt();
*ctx.thread_local.vmar().borrow_mut() = Some(new_vmar.dup().unwrap());
*ctx.thread_local.vmar().borrow_mut() = Some(new_vmar.clone());
new_vmar.vm_space().activate();
vmar.set_vmar(Some(new_vmar));
drop(guard);

View File

@ -6,7 +6,6 @@
use core::ops::Range;
use align_ext::AlignExt;
use aster_rights::Full;
use ostd::{
mm::{CachePolicy, PageFlags, PageProperty, VmIo},
task::disable_preempt,
@ -34,7 +33,7 @@ use crate::{
/// This function will map elf segments and
/// initialize process init stack.
pub fn load_elf_to_vmar(
vmar: &Vmar<Full>,
vmar: &Vmar,
elf_file: Path,
fs_resolver: &FsResolver,
elf_headers: ElfHeaders,
@ -100,7 +99,7 @@ fn lookup_and_parse_ldso(
Ok(Some((ldso_file, ldso_elf)))
}
fn load_ldso(vmar: &Vmar<Full>, ldso_file: &Path, ldso_elf: &ElfHeaders) -> Result<LdsoLoadInfo> {
fn load_ldso(vmar: &Vmar, ldso_file: &Path, ldso_elf: &ElfHeaders) -> Result<LdsoLoadInfo> {
let range = map_segment_vmos(ldso_elf, vmar, ldso_file)?;
Ok(LdsoLoadInfo {
entry_point: range
@ -118,7 +117,7 @@ fn load_ldso(vmar: &Vmar<Full>, ldso_file: &Path, ldso_elf: &ElfHeaders) -> Resu
///
/// Returns the mapped range, the entry point and the auxiliary vector.
fn init_and_map_vmos(
vmar: &Vmar<Full>,
vmar: &Vmar,
ldso: Option<(Path, ElfHeaders)>,
parsed_elf: &ElfHeaders,
elf_file: &Path,
@ -179,11 +178,7 @@ pub struct ElfLoadInfo {
/// boundaries may not be page-aligned.
///
/// [`Vmo`]: crate::vm::vmo::Vmo
pub fn map_segment_vmos(
elf: &ElfHeaders,
vmar: &Vmar<Full>,
elf_file: &Path,
) -> Result<RelocatedRange> {
pub fn map_segment_vmos(elf: &ElfHeaders, vmar: &Vmar, elf_file: &Path) -> Result<RelocatedRange> {
let elf_va_range = get_range_for_all_segments(elf)?;
let map_range = if elf.is_shared_object() {
@ -298,7 +293,7 @@ fn get_range_for_all_segments(elf: &ElfHeaders) -> Result<Range<Vaddr>> {
fn map_segment_vmo(
program_header: &ProgramHeader64,
elf_file: &Path,
vmar: &Vmar<Full>,
vmar: &Vmar,
map_at: Vaddr,
) -> Result<()> {
trace!(
@ -485,7 +480,7 @@ pub fn init_aux_vec(
/// Maps the vDSO VMO to the corresponding virtual memory address.
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
fn map_vdso_to_vmar(vmar: &Vmar<Full>) -> Option<Vaddr> {
fn map_vdso_to_vmar(vmar: &Vmar) -> Option<Vaddr> {
use crate::vdso::{vdso_vmo, VDSO_VMO_LAYOUT};
let vdso_vmo = vdso_vmo()?;

View File

@ -3,8 +3,6 @@
pub mod elf;
mod shebang;
use aster_rights::Full;
use self::{
elf::{load_elf_to_vmar, ElfHeaders, ElfLoadInfo},
shebang::parse_shebang_line,
@ -85,7 +83,7 @@ impl ProgramToLoad {
/// Returns a tuple containing:
/// 1. The absolute path of the loaded executable.
/// 2. Information about the ELF loading process.
pub fn load_to_vmar(self, vmar: &Vmar<Full>, fs_resolver: &FsResolver) -> Result<ElfLoadInfo> {
pub fn load_to_vmar(self, vmar: &Vmar, fs_resolver: &FsResolver) -> Result<ElfLoadInfo> {
let elf_headers = ElfHeaders::parse_elf(&*self.file_first_page)?;
let elf_load_info = load_elf_to_vmar(
vmar,

View File

@ -53,7 +53,7 @@ pub fn sys_msync(start: Vaddr, size: usize, flag: i32, ctx: &Context) -> Result<
};
let user_space = ctx.user_space();
let vmar = user_space.vmar().dup()?;
let vmar = user_space.vmar();
let guard = vmar.query(range.clone());
let mut mappings_iter = guard.iter();

View File

@ -2,7 +2,6 @@
#![expect(unused_variables)]
use aster_rights::Full;
#[cfg(target_arch = "x86_64")]
use ostd::arch::cpu::context::CpuException;
#[cfg(target_arch = "riscv64")]
@ -48,7 +47,7 @@ pub fn handle_exception(ctx: &Context, context: &UserContext, exception: CpuExce
/// Handles the page fault occurs in the VMAR.
fn handle_page_fault_from_vmar(
vmar: &Vmar<Full>,
vmar: &Vmar,
page_fault_info: &PageFaultInfo,
) -> core::result::Result<(), ()> {
if let Err(e) = vmar.handle_page_fault(page_fault_info) {

View File

@ -1,137 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use core::ops::Range;
use aster_rights::Rights;
use super::{VmPerms, Vmar, VmarMapOptions, VmarRightsOp, Vmar_};
use crate::{
prelude::*, thread::exception::PageFaultInfo, vm::page_fault_handler::PageFaultHandler,
};
impl Vmar<Rights> {
/// Creates a new VMAR.
#[expect(dead_code)]
pub fn new() -> Self {
let inner = Vmar_::new();
let rights = Rights::all();
Self(inner, rights)
}
/// Creates a mapping into the VMAR through a set of VMAR mapping options.
///
/// # Example
///
/// ```
/// use aster_nix::prelude::*;
/// use aster_nix::vm::{PAGE_SIZE, Vmar, VmoOptions};
///
/// let vmar = Vmar::new().unwrap();
/// let vmo = VmoOptions::new(10 * PAGE_SIZE).alloc().unwrap();
/// let target_vaddr = 0x1234000;
/// let real_vaddr = vmar
/// // Create a 4 * PAGE_SIZE bytes, read-only mapping
/// .new_map(PAGE_SIZE * 4, VmPerms::READ)
/// // Provide an optional offset for the mapping inside the VMAR
/// .offset(target_vaddr)
/// // Specify an optional binding VMO.
/// .vmo(vmo)
/// // Provide an optional offset to indicate the corresponding offset
/// // in the VMO for the mapping
/// .vmo_offset(2 * PAGE_SIZE)
/// .build()
/// .unwrap();
/// assert!(real_vaddr == target_vaddr);
/// ```
///
/// For more details on the available options, see `VmarMapOptions`.
///
/// # Access rights
///
/// This method requires the following access rights:
/// 1. The VMAR contains the rights corresponding to the memory permissions of
/// the mapping. For example, if `perms` contains `VmPerms::WRITE`,
/// then the VMAR must have the Write right.
/// 2. Similarly, the VMO contains the rights corresponding to the memory
/// permissions of the mapping.
///
/// Memory permissions may be changed through the `protect` method,
/// which ensures that any updated memory permissions do not go beyond
/// the access rights of the underlying VMOs.
#[expect(dead_code)]
pub fn new_map(&self, size: usize, perms: VmPerms) -> Result<VmarMapOptions<Rights, Rights>> {
Ok(VmarMapOptions::new(self, size, perms))
}
/// Changes the permissions of the memory mappings in the specified range.
///
/// The range's start and end addresses must be page-aligned.
/// Also, the range must be completely mapped.
///
/// # Access rights
///
/// The VMAR must have the rights corresponding to the specified memory
/// permissions.
#[expect(dead_code)]
pub fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
self.check_rights(perms.into())?;
self.0.protect(perms, range)
}
/// Clears all mappings.
///
/// After being cleared, this vmar will become an empty vmar
#[expect(dead_code)]
pub fn clear(&self) {
self.0.clear_vmar()
}
/// Destroys all mappings that fall within the specified
/// range in bytes.
///
/// The range's start and end addresses must be page-aligned.
///
/// Mappings may fall partially within the range; only the overlapped
/// portions of the mappings are unmapped.
#[expect(dead_code)]
pub fn remove_mapping(&self, range: Range<usize>) -> Result<()> {
self.0.remove_mapping(range)
}
/// Duplicates the capability.
///
/// # Access rights
///
/// The method requires the Dup right.
#[expect(dead_code)]
pub fn dup(&self) -> Result<Self> {
self.check_rights(Rights::DUP)?;
Ok(Vmar(self.0.clone(), self.1))
}
/// Creates a new VMAR whose content is inherited from another
/// using copy-on-write (COW) technique.
///
/// # Access rights
///
/// The method requires the Read right.
#[expect(dead_code)]
pub fn fork_from(vmar: &Self) -> Result<Self> {
vmar.check_rights(Rights::READ)?;
let vmar_ = vmar.0.new_fork()?;
Ok(Vmar(vmar_, Rights::all()))
}
}
impl PageFaultHandler for Vmar<Rights> {
fn handle_page_fault(&self, page_fault_info: &PageFaultInfo) -> Result<()> {
self.check_rights(page_fault_info.required_perms.into())?;
self.0.handle_page_fault(page_fault_info)
}
}
impl VmarRightsOp for Vmar<Rights> {
fn rights(&self) -> Rights {
self.1
}
}

View File

@ -2,10 +2,8 @@
//! Virtual Memory Address Regions (VMARs).
mod dyn_cap;
mod interval_set;
mod static_cap;
pub mod vm_mapping;
mod vm_mapping;
#[cfg(target_arch = "riscv64")]
use core::sync::atomic::{AtomicUsize, Ordering};
@ -24,12 +22,12 @@ use ostd::{
sync::RwMutexReadGuard,
task::disable_preempt,
};
use vm_mapping::{MappedMemory, MappedVmo};
use self::{
interval_set::{Interval, IntervalSet},
vm_mapping::VmMapping,
vm_mapping::{MappedMemory, MappedVmo, VmMapping},
};
use super::page_fault_handler::PageFaultHandler;
use crate::{
fs::file_handle::Mappable,
prelude::*,
@ -43,47 +41,277 @@ use crate::{
/// Virtual Memory Address Regions (VMARs) are a type of capability that manages
/// user address spaces.
///
/// # Capabilities
///
/// As a capability, each VMAR is associated with a set of access rights,
/// whose semantics are explained below.
///
/// The semantics of each access rights for VMARs are described below:
/// * The Dup right allows duplicating a VMAR.
/// * The Read, Write, Exec rights allow creating memory mappings with
/// readable, writable, and executable access permissions, respectively.
/// * The Read and Write rights allow the VMAR to be read from and written to
/// directly.
///
/// VMARs are implemented with two flavors of capabilities:
/// the dynamic one (`Vmar<Rights>`) and the static one (`Vmar<R: TRights>`).
pub struct Vmar<R = Rights>(Arc<Vmar_>, R);
pub struct Vmar {
/// VMAR inner
inner: RwMutex<VmarInner>,
/// The attached `VmSpace`
vm_space: Arc<VmSpace>,
/// The RSS counters.
rss_counters: [PerCpuCounter; NUM_RSS_COUNTERS],
/// The initial portion of the main stack of a process.
init_stack: InitStack,
/// The user heap
heap: Heap,
/// The base address for vDSO segment
#[cfg(target_arch = "riscv64")]
vdso_base: AtomicUsize,
}
pub trait VmarRightsOp {
/// Returns the access rights.
fn rights(&self) -> Rights;
impl Vmar {
/// Creates a new VMAR.
pub fn new() -> Arc<Self> {
let inner = VmarInner::new();
let vm_space = VmSpace::new();
let rss_counters = array::from_fn(|_| PerCpuCounter::new());
Arc::new(Vmar {
inner: RwMutex::new(inner),
vm_space: Arc::new(vm_space),
rss_counters,
init_stack: InitStack::new(),
heap: Heap::new(),
#[cfg(target_arch = "riscv64")]
vdso_base: AtomicUsize::new(0),
})
}
/// Checks whether current rights meet the input `rights`.
fn check_rights(&self, rights: Rights) -> Result<()> {
if self.rights().contains(rights) {
Ok(())
} else {
return_errno_with_message!(Errno::EACCES, "VMAR rights are insufficient");
/// Creates a mapping into the VMAR through a set of VMAR mapping options.
///
/// # Examples
///
/// ```
/// use aster_rights::Rights;
/// use ostd::mm::PAGE_SIZE;
///
/// use crate::vm::{perms::VmPerms, vmar::Vmar, vmo::VmoOptions};
///
/// let vmar = Vmar::new();
/// let vmo = VmoOptions::<Rights>::new(10 * PAGE_SIZE).alloc().unwrap();
/// let target_vaddr = 0x1234000;
/// let real_vaddr = vmar
/// // Create a 4 * PAGE_SIZE bytes, read-only mapping
/// .new_map(PAGE_SIZE * 4, VmPerms::READ).unwrap()
/// // Provide an optional offset for the mapping inside the VMAR
/// .offset(target_vaddr)
/// // Specify an optional binding VMO.
/// .vmo(vmo)
/// // Provide an optional offset to indicate the corresponding offset
/// // in the VMO for the mapping
/// .vmo_offset(2 * PAGE_SIZE)
/// .build()
/// .unwrap();
/// assert!(real_vaddr == target_vaddr);
/// ```
///
/// For more details on the available options, see `VmarMapOptions`.
pub fn new_map(&self, size: usize, perms: VmPerms) -> Result<VmarMapOptions<Rights>> {
Ok(VmarMapOptions::new(self, size, perms))
}
/// Changes the permissions of the memory mappings in the specified range.
///
/// The range's start and end addresses must be page-aligned.
/// Also, the range must be completely mapped.
pub fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
assert!(range.start % PAGE_SIZE == 0);
assert!(range.end % PAGE_SIZE == 0);
let mut inner = self.inner.write();
let vm_space = self.vm_space();
let mut protect_mappings = Vec::new();
for vm_mapping in inner.vm_mappings.find(&range) {
protect_mappings.push((vm_mapping.map_to_addr(), vm_mapping.perms()));
}
for (vm_mapping_addr, vm_mapping_perms) in protect_mappings {
if perms == vm_mapping_perms & VmPerms::ALL_PERMS {
continue;
}
let new_perms = perms | (vm_mapping_perms & VmPerms::ALL_MAY_PERMS);
new_perms.check()?;
let vm_mapping = inner.remove(&vm_mapping_addr).unwrap();
let vm_mapping_range = vm_mapping.range();
let intersected_range = get_intersected_range(&range, &vm_mapping_range);
// Protects part of the taken `VmMapping`.
let (left, taken, right) = vm_mapping.split_range(&intersected_range);
// Puts the rest back.
if let Some(left) = left {
inner.insert_without_try_merge(left);
}
if let Some(right) = right {
inner.insert_without_try_merge(right);
}
// Protects part of the `VmMapping`.
let taken = taken.protect(vm_space.as_ref(), new_perms);
inner.insert_try_merge(taken);
}
Ok(())
}
/// Finds all the mapped regions that intersect with the specified range.
pub fn query(&self, range: Range<usize>) -> VmarQueryGuard<'_> {
VmarQueryGuard {
vmar: self.inner.read(),
range,
}
}
}
impl<R> PartialEq for Vmar<R> {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.0, &other.0)
/// Clears all mappings.
///
/// After being cleared, this VMAR will become an empty VMAR.
pub fn clear(&self) {
let mut inner = self.inner.write();
inner.vm_mappings.clear();
// Keep `inner` locked to avoid race conditions.
let preempt_guard = disable_preempt();
let full_range = 0..MAX_USERSPACE_VADDR;
let mut cursor = self
.vm_space
.cursor_mut(&preempt_guard, &full_range)
.unwrap();
cursor.unmap(full_range.len());
cursor.flusher().sync_tlb_flush();
}
/// Destroys all mappings that fall within the specified
/// range in bytes.
///
/// The range's start and end addresses must be page-aligned.
///
/// Mappings may fall partially within the range; only the overlapped
/// portions of the mappings are unmapped.
pub fn remove_mapping(&self, range: Range<usize>) -> Result<()> {
let mut inner = self.inner.write();
let mut rss_delta = RssDelta::new(self);
inner.alloc_free_region_exact_truncate(
&self.vm_space,
range.start,
range.len(),
&mut rss_delta,
)?;
Ok(())
}
/// Creates a new VMAR whose content is inherited from another
/// using copy-on-write (COW) technique.
pub fn fork_from(vmar: &Self) -> Result<Arc<Self>> {
let new_vmar = Arc::new(Vmar {
inner: RwMutex::new(VmarInner::new()),
vm_space: Arc::new(VmSpace::new()),
rss_counters: array::from_fn(|_| PerCpuCounter::new()),
init_stack: vmar.init_stack.clone(),
heap: vmar.heap.clone(),
#[cfg(target_arch = "riscv64")]
vdso_base: AtomicUsize::new(vmar.vdso_base.load(Ordering::Relaxed)),
});
{
let inner = vmar.inner.read();
let mut new_inner = new_vmar.inner.write();
// Clone mappings.
let preempt_guard = disable_preempt();
let range = VMAR_LOWEST_ADDR..VMAR_CAP_ADDR;
let new_vmspace = new_vmar.vm_space();
let mut new_cursor = new_vmspace.cursor_mut(&preempt_guard, &range).unwrap();
let cur_vmspace = vmar.vm_space();
let mut cur_cursor = cur_vmspace.cursor_mut(&preempt_guard, &range).unwrap();
let mut rss_delta = RssDelta::new(&new_vmar);
for vm_mapping in inner.vm_mappings.iter() {
let base = vm_mapping.map_to_addr();
// Clone the `VmMapping` to the new VMAR.
let new_mapping = vm_mapping.new_fork()?;
new_inner.insert_without_try_merge(new_mapping);
// Protect the mapping and copy to the new page table for COW.
cur_cursor.jump(base).unwrap();
new_cursor.jump(base).unwrap();
let num_copied =
cow_copy_pt(&mut cur_cursor, &mut new_cursor, vm_mapping.map_size());
rss_delta.add(vm_mapping.rss_type(), num_copied as isize);
}
cur_cursor.flusher().issue_tlb_flush(TlbFlushOp::for_all());
cur_cursor.flusher().dispatch_tlb_flush();
cur_cursor.flusher().sync_tlb_flush();
}
Ok(new_vmar)
}
/// Returns the current RSS count for the given RSS type.
pub fn get_rss_counter(&self, rss_type: RssType) -> usize {
self.rss_counters[rss_type as usize].sum_all_cpus()
}
/// Returns the total size of the mappings in bytes.
pub fn get_mappings_total_size(&self) -> usize {
self.inner.read().total_vm
}
fn add_rss_counter(&self, rss_type: RssType, val: isize) {
// There are races but updating a remote counter won't cause any problems.
let cpu_id = CpuId::current_racy();
self.rss_counters[rss_type as usize].add_on_cpu(cpu_id, val);
}
}
impl<R> Vmar<R> {
/// FIXME: This function should require access control
impl PageFaultHandler for Vmar {
fn handle_page_fault(&self, page_fault_info: &PageFaultInfo) -> Result<()> {
let inner = self.inner.read();
let address = page_fault_info.address;
if let Some(vm_mapping) = inner.vm_mappings.find_one(&address) {
debug_assert!(vm_mapping.range().contains(&address));
let mut rss_delta = RssDelta::new(self);
return vm_mapping.handle_page_fault(&self.vm_space, page_fault_info, &mut rss_delta);
}
return_errno_with_message!(
Errno::EACCES,
"no VM mappings contain the page fault address"
);
}
}
impl Vmar {
/// Returns the attached `VmSpace`.
pub fn vm_space(&self) -> &Arc<VmSpace> {
self.0.vm_space()
&self.vm_space
}
/// Returns the initial portion of the main stack of a process.
pub fn init_stack(&self) -> &InitStack {
&self.init_stack
}
/// Returns the user heap.
pub fn heap(&self) -> &Heap {
&self.heap
}
/// Returns the base address for vDSO segment.
#[cfg(target_arch = "riscv64")]
pub fn vdso_base(&self) -> Vaddr {
self.vdso_base.load(Ordering::Relaxed)
}
/// Sets the base address for vDSO segment.
#[cfg(target_arch = "riscv64")]
pub fn set_vdso_base(&self, addr: Vaddr) {
self.vdso_base.store(addr, Ordering::Relaxed);
}
/// Resizes the original mapping.
@ -110,8 +338,19 @@ impl<R> Vmar<R> {
new_size: usize,
check_single_mapping: bool,
) -> Result<()> {
self.0
.resize_mapping(map_addr, old_size, new_size, check_single_mapping)
let mut inner = self.inner.write();
let mut rss_delta = RssDelta::new(self);
if check_single_mapping {
inner.check_lies_in_single_mapping(map_addr, old_size)?;
} else if inner.vm_mappings.find_one(&map_addr).is_none() {
return_errno_with_message!(Errno::EFAULT, "there is no mapping at the old address")
}
// FIXME: We should check whether all existing ranges in
// `map_addr..map_addr + old_size` have a mapping. If not,
// we should return an `Err`.
inner.resize_mapping(&self.vm_space, map_addr, old_size, new_size, &mut rss_delta)
}
/// Remaps the original mapping to a new address and/or size.
@ -132,29 +371,120 @@ impl<R> Vmar<R> {
new_addr: Option<Vaddr>,
new_size: usize,
) -> Result<Vaddr> {
self.0.remap(old_addr, old_size, new_addr, new_size)
}
debug_assert_eq!(old_addr % PAGE_SIZE, 0);
debug_assert_eq!(old_size % PAGE_SIZE, 0);
debug_assert_eq!(new_size % PAGE_SIZE, 0);
/// Returns the reference count of the VMAR.
pub fn reference_count(&self) -> usize {
Arc::strong_count(&self.0)
}
}
let mut inner = self.inner.write();
let mut rss_delta = RssDelta::new(self);
pub(super) struct Vmar_ {
/// VMAR inner
inner: RwMutex<VmarInner>,
/// The attached `VmSpace`
vm_space: Arc<VmSpace>,
/// The RSS counters.
rss_counters: [PerCpuCounter; NUM_RSS_COUNTERS],
/// The initial portion of the main stack of a process.
init_stack: InitStack,
/// The user heap
heap: Heap,
/// The base address for vDSO segment
#[cfg(target_arch = "riscv64")]
vdso_base: AtomicUsize,
let Some(old_mapping) = inner.vm_mappings.find_one(&old_addr) else {
return_errno_with_message!(
Errno::EFAULT,
"remap: there is no mapping at the old address"
)
};
if new_size > old_size && !old_mapping.can_expand() {
return_errno_with_message!(Errno::EFAULT, "remap: device mappings cannot be expanded");
}
// Shrink the old mapping first.
old_addr.checked_add(old_size).ok_or(Errno::EINVAL)?;
let (old_size, old_range) = if new_size < old_size {
inner.alloc_free_region_exact_truncate(
&self.vm_space,
old_addr + new_size,
old_size - new_size,
&mut rss_delta,
)?;
(new_size, old_addr..old_addr + new_size)
} else {
(old_size, old_addr..old_addr + old_size)
};
// Allocate a new free region that does not overlap with the old range.
let new_range = if let Some(new_addr) = new_addr {
let new_range = new_addr..new_addr.checked_add(new_size).ok_or(Errno::EINVAL)?;
if new_addr % PAGE_SIZE != 0
|| !is_userspace_vaddr(new_addr)
|| !is_userspace_vaddr(new_range.end - 1)
{
return_errno_with_message!(Errno::EINVAL, "remap: invalid fixed new address");
}
if is_intersected(&old_range, &new_range) {
return_errno_with_message!(
Errno::EINVAL,
"remap: the new range overlaps with the old one"
);
}
inner.alloc_free_region_exact_truncate(
&self.vm_space,
new_addr,
new_size,
&mut rss_delta,
)?
} else {
inner.alloc_free_region(new_size, PAGE_SIZE)?
};
// Create a new `VmMapping`.
let old_mapping = {
let old_mapping_addr = inner.check_lies_in_single_mapping(old_addr, old_size)?;
let vm_mapping = inner.remove(&old_mapping_addr).unwrap();
let (left, old_mapping, right) = vm_mapping.split_range(&old_range);
if let Some(left) = left {
inner.insert_without_try_merge(left);
}
if let Some(right) = right {
inner.insert_without_try_merge(right);
}
old_mapping
};
// Note that we have ensured that `new_size >= old_size` at the beginning.
let new_mapping = old_mapping.clone_for_remap_at(new_range.start).unwrap();
inner.insert_try_merge(new_mapping.enlarge(new_size - old_size));
let preempt_guard = disable_preempt();
let total_range = old_range.start.min(new_range.start)..old_range.end.max(new_range.end);
let vmspace = self.vm_space();
let mut cursor = vmspace.cursor_mut(&preempt_guard, &total_range).unwrap();
// Move the mapping.
let mut current_offset = 0;
while current_offset < old_size {
cursor.jump(old_range.start + current_offset).unwrap();
let Some(mapped_va) = cursor.find_next(old_size - current_offset) else {
break;
};
let (va, Some(item)) = cursor.query().unwrap() else {
panic!("Found mapped page but query failed");
};
debug_assert_eq!(mapped_va, va.start);
cursor.unmap(PAGE_SIZE);
let offset = mapped_va - old_range.start;
cursor.jump(new_range.start + offset).unwrap();
match item {
VmQueriedItem::MappedRam { frame, prop } => {
cursor.map(frame, prop);
}
VmQueriedItem::MappedIoMem { paddr, prop } => {
// For MMIO pages, find the corresponding `IoMem` and map it
// at the new location
let (iomem, offset) = cursor.find_iomem_by_paddr(paddr).unwrap();
cursor.map_iomem(iomem, prop, PAGE_SIZE, offset);
}
}
current_offset = offset + PAGE_SIZE;
}
cursor.flusher().dispatch_tlb_flush();
cursor.flusher().sync_tlb_flush();
Ok(new_range.start)
}
}
struct VmarInner {
@ -436,334 +766,6 @@ pub fn is_userspace_vaddr(vaddr: Vaddr) -> bool {
(VMAR_LOWEST_ADDR..VMAR_CAP_ADDR).contains(&vaddr)
}
impl Vmar_ {
fn new() -> Arc<Self> {
let inner = VmarInner::new();
let vm_space = VmSpace::new();
let rss_counters = array::from_fn(|_| PerCpuCounter::new());
Arc::new(Vmar_ {
inner: RwMutex::new(inner),
vm_space: Arc::new(vm_space),
rss_counters,
init_stack: InitStack::new(),
heap: Heap::new(),
#[cfg(target_arch = "riscv64")]
vdso_base: AtomicUsize::new(0),
})
}
fn query(&self, range: Range<usize>) -> VmarQueryGuard<'_> {
VmarQueryGuard {
vmar: self.inner.read(),
range,
}
}
fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
assert!(range.start % PAGE_SIZE == 0);
assert!(range.end % PAGE_SIZE == 0);
self.do_protect_inner(perms, range)?;
Ok(())
}
// Do real protect. The protected range is ensured to be mapped.
fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
let mut inner = self.inner.write();
let vm_space = self.vm_space();
let mut protect_mappings = Vec::new();
for vm_mapping in inner.vm_mappings.find(&range) {
protect_mappings.push((vm_mapping.map_to_addr(), vm_mapping.perms()));
}
for (vm_mapping_addr, vm_mapping_perms) in protect_mappings {
if perms == vm_mapping_perms & VmPerms::ALL_PERMS {
continue;
}
let new_perms = perms | (vm_mapping_perms & VmPerms::ALL_MAY_PERMS);
new_perms.check()?;
let vm_mapping = inner.remove(&vm_mapping_addr).unwrap();
let vm_mapping_range = vm_mapping.range();
let intersected_range = get_intersected_range(&range, &vm_mapping_range);
// Protects part of the taken `VmMapping`.
let (left, taken, right) = vm_mapping.split_range(&intersected_range);
// Puts the rest back.
if let Some(left) = left {
inner.insert_without_try_merge(left);
}
if let Some(right) = right {
inner.insert_without_try_merge(right);
}
// Protects part of the `VmMapping`.
let taken = taken.protect(vm_space.as_ref(), new_perms);
inner.insert_try_merge(taken);
}
Ok(())
}
/// Handles user space page fault, if the page fault is successfully handled, return Ok(()).
pub fn handle_page_fault(&self, page_fault_info: &PageFaultInfo) -> Result<()> {
let inner = self.inner.read();
let address = page_fault_info.address;
if let Some(vm_mapping) = inner.vm_mappings.find_one(&address) {
debug_assert!(vm_mapping.range().contains(&address));
let mut rss_delta = RssDelta::new(self);
return vm_mapping.handle_page_fault(&self.vm_space, page_fault_info, &mut rss_delta);
}
return_errno_with_message!(
Errno::EACCES,
"no VM mappings contain the page fault address"
);
}
/// Clears all content of the VMAR.
///
/// This removes every `VmMapping` and unmaps the whole userspace address
/// range in the backing `VmSpace`.
fn clear_vmar(&self) {
let mut inner = self.inner.write();
inner.vm_mappings.clear();
// Keep `inner` locked to avoid race conditions.
let preempt_guard = disable_preempt();
let full_range = 0..MAX_USERSPACE_VADDR;
let mut cursor = self
.vm_space
.cursor_mut(&preempt_guard, &full_range)
.unwrap();
cursor.unmap(full_range.len());
// Wait for the TLB shoot-down to complete before returning, so no CPU
// can still access the old mappings afterwards.
cursor.flusher().sync_tlb_flush();
}
/// Destroys all mappings that fall within the specified byte range.
///
/// Mappings that only partially overlap with the range are split; just
/// the overlapped portions are unmapped.
pub fn remove_mapping(&self, range: Range<usize>) -> Result<()> {
    let mut inner = self.inner.write();
    let mut rss_delta = RssDelta::new(self);
    inner
        .alloc_free_region_exact_truncate(&self.vm_space, range.start, range.len(), &mut rss_delta)
        .map(|_| ())
}
/// Splits and unmaps the found mapping if the new size is smaller.
/// Enlarges the last mapping if the new size is larger.
///
/// If `check_single_mapping` is set, the whole `map_addr..map_addr + old_size`
/// range must lie within one single mapping; otherwise it suffices that a
/// mapping exists at `map_addr`.
fn resize_mapping(
&self,
map_addr: Vaddr,
old_size: usize,
new_size: usize,
check_single_mapping: bool,
) -> Result<()> {
let mut inner = self.inner.write();
let mut rss_delta = RssDelta::new(self);
if check_single_mapping {
inner.check_lies_in_single_mapping(map_addr, old_size)?;
} else if inner.vm_mappings.find_one(&map_addr).is_none() {
return_errno_with_message!(Errno::EFAULT, "there is no mapping at the old address")
}
// FIXME: We should check whether all existing ranges in
// `map_addr..map_addr + old_size` have a mapping. If not,
// we should return an `Err`.
inner.resize_mapping(&self.vm_space, map_addr, old_size, new_size, &mut rss_delta)
}
/// Moves and/or resizes the mapping at `old_addr` (`mremap`-style).
///
/// If `new_addr` is `Some`, the mapping is placed at exactly that address
/// (which must not overlap the old range); otherwise a free region of
/// `new_size` bytes is allocated. Returns the start address of the new
/// mapping.
///
/// All of `old_addr`, `old_size`, and `new_size` must be page-aligned
/// (debug-asserted).
fn remap(
&self,
old_addr: Vaddr,
old_size: usize,
new_addr: Option<Vaddr>,
new_size: usize,
) -> Result<Vaddr> {
debug_assert_eq!(old_addr % PAGE_SIZE, 0);
debug_assert_eq!(old_size % PAGE_SIZE, 0);
debug_assert_eq!(new_size % PAGE_SIZE, 0);
let mut inner = self.inner.write();
let mut rss_delta = RssDelta::new(self);
let Some(old_mapping) = inner.vm_mappings.find_one(&old_addr) else {
return_errno_with_message!(
Errno::EFAULT,
"remap: there is no mapping at the old address"
)
};
if new_size > old_size && !old_mapping.can_expand() {
return_errno_with_message!(Errno::EFAULT, "remap: device mappings cannot be expanded");
}
// Shrink the old mapping first.
// The result is discarded; this only rejects an `old_addr + old_size`
// overflow early.
old_addr.checked_add(old_size).ok_or(Errno::EINVAL)?;
let (old_size, old_range) = if new_size < old_size {
// Unmap the tail of the old mapping so that only `new_size` bytes
// remain to be moved.
inner.alloc_free_region_exact_truncate(
&self.vm_space,
old_addr + new_size,
old_size - new_size,
&mut rss_delta,
)?;
(new_size, old_addr..old_addr + new_size)
} else {
(old_size, old_addr..old_addr + old_size)
};
// Allocate a new free region that does not overlap with the old range.
let new_range = if let Some(new_addr) = new_addr {
let new_range = new_addr..new_addr.checked_add(new_size).ok_or(Errno::EINVAL)?;
if new_addr % PAGE_SIZE != 0
|| !is_userspace_vaddr(new_addr)
|| !is_userspace_vaddr(new_range.end - 1)
{
return_errno_with_message!(Errno::EINVAL, "remap: invalid fixed new address");
}
if is_intersected(&old_range, &new_range) {
return_errno_with_message!(
Errno::EINVAL,
"remap: the new range overlaps with the old one"
);
}
inner.alloc_free_region_exact_truncate(
&self.vm_space,
new_addr,
new_size,
&mut rss_delta,
)?
} else {
inner.alloc_free_region(new_size, PAGE_SIZE)?
};
// Create a new `VmMapping`.
let old_mapping = {
let old_mapping_addr = inner.check_lies_in_single_mapping(old_addr, old_size)?;
// Take the mapping out and keep only the part covering `old_range`;
// the pieces outside it are put back untouched.
let vm_mapping = inner.remove(&old_mapping_addr).unwrap();
let (left, old_mapping, right) = vm_mapping.split_range(&old_range);
if let Some(left) = left {
inner.insert_without_try_merge(left);
}
if let Some(right) = right {
inner.insert_without_try_merge(right);
}
old_mapping
};
// Note that we have ensured that `new_size >= old_size` at the beginning.
let new_mapping = old_mapping.clone_for_remap_at(new_range.start).unwrap();
inner.insert_try_merge(new_mapping.enlarge(new_size - old_size));
let preempt_guard = disable_preempt();
// One cursor spanning both ranges suffices because `old_range` and
// `new_range` are guaranteed disjoint at this point.
let total_range = old_range.start.min(new_range.start)..old_range.end.max(new_range.end);
let vmspace = self.vm_space();
let mut cursor = vmspace.cursor_mut(&preempt_guard, &total_range).unwrap();
// Move the mapping.
// Walk the old range page by page, unmapping each mapped page and
// re-mapping it at the same offset within the new range.
let mut current_offset = 0;
while current_offset < old_size {
cursor.jump(old_range.start + current_offset).unwrap();
let Some(mapped_va) = cursor.find_next(old_size - current_offset) else {
// No more mapped pages in the old range.
break;
};
let (va, Some(item)) = cursor.query().unwrap() else {
panic!("Found mapped page but query failed");
};
debug_assert_eq!(mapped_va, va.start);
cursor.unmap(PAGE_SIZE);
let offset = mapped_va - old_range.start;
cursor.jump(new_range.start + offset).unwrap();
match item {
VmQueriedItem::MappedRam { frame, prop } => {
cursor.map(frame, prop);
}
VmQueriedItem::MappedIoMem { paddr, prop } => {
// For MMIO pages, find the corresponding `IoMem` and map it
// at the new location
let (iomem, offset) = cursor.find_iomem_by_paddr(paddr).unwrap();
cursor.map_iomem(iomem, prop, PAGE_SIZE, offset);
}
}
current_offset = offset + PAGE_SIZE;
}
cursor.flusher().dispatch_tlb_flush();
cursor.flusher().sync_tlb_flush();
Ok(new_range.start)
}
/// Returns the attached `VmSpace`, i.e., the address space in which this
/// VMAR's mappings are installed.
fn vm_space(&self) -> &Arc<VmSpace> {
&self.vm_space
}
/// Creates a new VMAR whose content is inherited from `self` using the
/// copy-on-write (COW) technique.
///
/// The new VMAR gets a fresh, empty `VmSpace`; every mapping of `self` is
/// cloned into it, and the page-table entries are copied with write
/// protection so that the first write on either side triggers COW.
pub(super) fn new_fork(self: &Arc<Self>) -> Result<Arc<Self>> {
let new_vmar_ = Arc::new(Vmar_ {
inner: RwMutex::new(VmarInner::new()),
vm_space: Arc::new(VmSpace::new()),
// RSS counters start at zero; they are populated below as pages are
// copied.
rss_counters: array::from_fn(|_| PerCpuCounter::new()),
init_stack: self.init_stack.clone(),
heap: self.heap.clone(),
#[cfg(target_arch = "riscv64")]
vdso_base: AtomicUsize::new(self.vdso_base.load(Ordering::Relaxed)),
});
{
let inner = self.inner.read();
let mut new_inner = new_vmar_.inner.write();
// Clone mappings.
let preempt_guard = disable_preempt();
let range = VMAR_LOWEST_ADDR..VMAR_CAP_ADDR;
let new_vmspace = new_vmar_.vm_space();
let mut new_cursor = new_vmspace.cursor_mut(&preempt_guard, &range).unwrap();
let cur_vmspace = self.vm_space();
let mut cur_cursor = cur_vmspace.cursor_mut(&preempt_guard, &range).unwrap();
let mut rss_delta = RssDelta::new(&new_vmar_);
for vm_mapping in inner.vm_mappings.iter() {
let base = vm_mapping.map_to_addr();
// Clone the `VmMapping` to the new VMAR.
let new_mapping = vm_mapping.new_fork()?;
new_inner.insert_without_try_merge(new_mapping);
// Protect the mapping and copy to the new page table for COW.
cur_cursor.jump(base).unwrap();
new_cursor.jump(base).unwrap();
let num_copied =
cow_copy_pt(&mut cur_cursor, &mut new_cursor, vm_mapping.map_size());
rss_delta.add(vm_mapping.rss_type(), num_copied as isize);
}
// The source page table was write-protected above, so flush the TLBs
// on all CPUs and wait for completion before returning.
cur_cursor.flusher().issue_tlb_flush(TlbFlushOp::for_all());
cur_cursor.flusher().dispatch_tlb_flush();
cur_cursor.flusher().sync_tlb_flush();
}
Ok(new_vmar_)
}
/// Returns the current RSS count for the given RSS type, summed over all
/// per-CPU counters.
pub fn get_rss_counter(&self, rss_type: RssType) -> usize {
self.rss_counters[rss_type as usize].sum_all_cpus()
}
/// Adds `val` (which may be negative) to the RSS counter of `rss_type`
/// on the current CPU.
fn add_rss_counter(&self, rss_type: RssType, val: isize) {
// There are races but updating a remote counter won't cause any problems.
let cpu_id = CpuId::current_racy();
self.rss_counters[rss_type as usize].add_on_cpu(cpu_id, val);
}
}
/// Sets mappings in the source page table as read-only to trigger COW, and
/// copies the mappings to the destination page table.
///
@ -817,24 +819,12 @@ fn cow_copy_pt(src: &mut CursorMut<'_>, dst: &mut CursorMut<'_>, size: usize) ->
num_copied
}
impl<R> Vmar<R> {
/// Returns the current RSS count for the given RSS type.
pub fn get_rss_counter(&self, rss_type: RssType) -> usize {
self.0.get_rss_counter(rss_type)
}
/// Returns the total size of the mappings in bytes.
pub fn get_mappings_total_size(&self) -> usize {
self.0.inner.read().total_vm
}
}
/// Options for creating a new mapping. The mapping is not allowed to overlap
/// with any child VMARs. And unless specified otherwise, it is not allowed
/// to overlap with any existing mapping, either.
pub struct VmarMapOptions<'a, R1, R2> {
parent: &'a Vmar<R1>,
vmo: Option<Vmo<R2>>,
pub struct VmarMapOptions<'a, R> {
parent: &'a Vmar,
vmo: Option<Vmo<R>>,
mappable: Option<Mappable>,
perms: VmPerms,
may_perms: VmPerms,
@ -849,14 +839,14 @@ pub struct VmarMapOptions<'a, R1, R2> {
handle_page_faults_around: bool,
}
impl<'a, R1, R2> VmarMapOptions<'a, R1, R2> {
impl<'a, R> VmarMapOptions<'a, R> {
/// Creates a default set of options with the VMO and the memory access
/// permissions.
///
/// The VMO must have access rights that correspond to the memory
/// access permissions. For example, if `perms` contains `VmPerms::Write`,
/// then `vmo.rights()` should contain `Rights::WRITE`.
pub fn new(parent: &'a Vmar<R1>, size: usize, perms: VmPerms) -> Self {
pub fn new(parent: &'a Vmar, size: usize, perms: VmPerms) -> Self {
Self {
parent,
vmo: None,
@ -905,7 +895,7 @@ impl<'a, R1, R2> VmarMapOptions<'a, R1, R2> {
/// # Panics
///
/// This function panics if a [`Mappable`] is already provided.
pub fn vmo(mut self, vmo: Vmo<R2>) -> Self {
pub fn vmo(mut self, vmo: Vmo<R>) -> Self {
if self.mappable.is_some() {
panic!("Cannot set `vmo` when `mappable` is already set");
}
@ -979,7 +969,7 @@ impl<'a, R1, R2> VmarMapOptions<'a, R1, R2> {
}
}
impl<R1> VmarMapOptions<'_, R1, Rights> {
impl VmarMapOptions<'_, Rights> {
/// Binds memory to map based on the [`Mappable`] enum.
///
/// This method accepts file-specific details, like a page cache (inode)
@ -1012,9 +1002,9 @@ impl<R1> VmarMapOptions<'_, R1, Rights> {
}
}
impl<R1, R2> VmarMapOptions<'_, R1, R2>
impl<R> VmarMapOptions<'_, R>
where
Vmo<R2>: VmoRightsOp,
Vmo<R>: VmoRightsOp,
{
/// Creates the mapping and adds it to the parent VMAR.
///
@ -1038,7 +1028,7 @@ where
handle_page_faults_around,
} = self;
let mut inner = parent.0.inner.write();
let mut inner = parent.inner.write();
inner.check_extra_size_fits_rlimit(map_size).or_else(|e| {
if can_overwrite {
@ -1062,7 +1052,7 @@ where
Errno::EINVAL,
"offset cannot be None since can overwrite is set",
))?;
let mut rss_delta = RssDelta::new(&parent.0);
let mut rss_delta = RssDelta::new(parent);
inner.alloc_free_region_exact_truncate(
parent.vm_space(),
offset,
@ -1216,11 +1206,11 @@ const NUM_RSS_COUNTERS: usize = 2;
pub(super) struct RssDelta<'a> {
delta: [isize; NUM_RSS_COUNTERS],
operated_vmar: &'a Vmar_,
operated_vmar: &'a Vmar,
}
impl<'a> RssDelta<'a> {
pub(self) fn new(operated_vmar: &'a Vmar_) -> Self {
pub(self) fn new(operated_vmar: &'a Vmar) -> Self {
Self {
delta: [0; NUM_RSS_COUNTERS],
operated_vmar,

View File

@ -1,188 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use core::ops::Range;
#[cfg(target_arch = "riscv64")]
use core::sync::atomic::Ordering;
use aster_rights::{Dup, Read, Rights, TRightSet, TRights, Write};
use aster_rights_proc::require;
use super::{VmPerms, Vmar, VmarMapOptions, VmarQueryGuard, VmarRightsOp, Vmar_};
use crate::{
prelude::*,
process::{Heap, InitStack},
thread::exception::PageFaultInfo,
vm::page_fault_handler::PageFaultHandler,
};
impl<R: TRights> Vmar<TRightSet<R>> {
/// Creates a new VMAR.
///
/// # Access rights
///
/// A new VMAR is initially given full access rights.
pub fn new() -> Self {
let inner = Vmar_::new();
let rights = R::new();
Self(inner, TRightSet(rights))
}
/// Creates a mapping into the VMAR through a set of VMAR mapping options.
///
/// # Example
///
/// ```
/// use aster_nix::prelude::*;
/// use aster_nix::vm::{PAGE_SIZE, Vmar, VmoOptions};
///
/// let vmar = Vmar::<RightsWrapper<Full>>::new().unwrap();
/// let vmo = VmoOptions::new(10 * PAGE_SIZE).alloc().unwrap();
/// let target_vaddr = 0x1234000;
/// let real_vaddr = vmar
/// // Create a 4 * PAGE_SIZE bytes, read-only mapping
/// .new_map(PAGE_SIZE * 4, VmPerms::READ)
/// // Provide an optional offset for the mapping inside the VMAR
/// .offset(target_vaddr)
/// // Specify an optional binding VMO.
/// .vmo(vmo)
/// // Provide an optional offset to indicate the corresponding offset
/// // in the VMO for the mapping
/// .vmo_offset(2 * PAGE_SIZE)
/// .build()
/// .unwrap();
/// assert!(real_vaddr == target_vaddr);
/// ```
///
/// For more details on the available options, see `VmarMapOptions`.
///
/// # Access rights
///
/// This method requires the following access rights:
/// 1. The VMAR contains the rights corresponding to the memory permissions of
/// the mapping. For example, if `perms` contains `VmPerms::WRITE`,
/// then the VMAR must have the Write right.
/// 2. Similarly, the VMO contains the rights corresponding to the memory
/// permissions of the mapping.
///
/// Memory permissions may be changed through the `protect` method,
/// which ensures that any updated memory permissions do not go beyond
/// the access rights of the underlying VMOs.
#[require(R > Dup)]
pub fn new_map(
&self,
size: usize,
perms: VmPerms,
) -> Result<VmarMapOptions<TRightSet<R>, Rights>> {
Ok(VmarMapOptions::new(self, size, perms))
}
/// Change the permissions of the memory mappings in the specified range.
///
/// The range's start and end addresses must be page-aligned.
/// Also, the range must be completely mapped.
///
/// # Access rights
///
/// The VMAR must have the rights corresponding to the specified memory
/// permissions.
pub fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
self.check_rights(perms.into())?;
self.0.protect(perms, range)
}
/// Finds all the mapped regions that intersect with the specified range.
pub fn query(&self, range: Range<usize>) -> VmarQueryGuard<'_> {
self.0.query(range)
}
/// Clears all mappings.
///
/// After being cleared, this vmar will become an empty vmar
pub fn clear(&self) {
self.0.clear_vmar()
}
/// Destroys all mappings that fall within the specified
/// range in bytes.
///
/// The range's start and end addresses must be page-aligned.
///
/// Mappings may fall partially within the range; only the overlapped
/// portions of the mappings are unmapped.
#[require(R > Write)]
pub fn remove_mapping(&self, range: Range<usize>) -> Result<()> {
self.0.remove_mapping(range)
}
/// Duplicates the capability.
///
/// # Access rights
///
/// The method requires the Dup right.
#[require(R > Dup)]
pub fn dup(&self) -> Result<Self> {
Ok(Vmar(self.0.clone(), self.1))
}
/// Creates a new VMAR whose content is inherited from another
/// using copy-on-write (COW) technique.
///
/// # Access rights
///
/// The method requires the Read right.
#[require(R > Read)]
pub fn fork_from(vmar: &Self) -> Result<Self> {
let vmar_ = vmar.0.new_fork()?;
Ok(Vmar(vmar_, TRightSet(R::new())))
}
/// Stricts the access rights.
#[expect(dead_code)]
#[require(R > R1)]
pub fn restrict<R1: TRights>(self) -> Vmar<R1> {
Vmar(self.0, R1::new())
}
fn check_rights(&self, rights: Rights) -> Result<()> {
if self.rights().contains(rights) {
Ok(())
} else {
return_errno_with_message!(Errno::EACCES, "check rights failed");
}
}
/// Returns the initial portion of the main stack of a process.
pub fn init_stack(&self) -> &InitStack {
&self.0.init_stack
}
/// Returns the user heap.
pub fn heap(&self) -> &Heap {
&self.0.heap
}
/// Returns the base address for vDSO segment.
#[cfg(target_arch = "riscv64")]
pub fn vdso_base(&self) -> Vaddr {
self.0.vdso_base.load(Ordering::Relaxed)
}
/// Sets the base address for vDSO segment.
#[cfg(target_arch = "riscv64")]
pub fn set_vdso_base(&self, addr: Vaddr) {
self.0.vdso_base.store(addr, Ordering::Relaxed);
}
}
impl<R: TRights> PageFaultHandler for Vmar<TRightSet<R>> {
fn handle_page_fault(&self, page_fault_info: &PageFaultInfo) -> Result<()> {
self.check_rights(page_fault_info.required_perms.into())?;
self.0.handle_page_fault(page_fault_info)
}
}
impl<R: TRights> VmarRightsOp for Vmar<TRightSet<R>> {
fn rights(&self) -> Rights {
Rights::from_bits(R::BITS).unwrap()
}
}