Panic if map on a mapped page
parent 77069654d0
commit 272f385cf6
@@ -427,6 +427,8 @@ impl VmMapping {
 } else {
 let new_frame = duplicate_frame(&frame)?;
 prop.flags |= new_flags;
+cursor.unmap(PAGE_SIZE);
+cursor.jump(va.start).unwrap();
 cursor.map(new_frame.into(), prop);
 rss_delta.add(self.rss_type(), 1);
 }
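
The hunk above also shows the migration pattern for code that genuinely wants to replace an existing mapping: since `map` no longer re-maps in place, the fault handler unmaps the old page, repositions the cursor, and only then maps the duplicated frame. A commented sketch of that sequence (all names are taken from the hunk; the surrounding cursor and fault-handling setup is assumed):

    let new_frame = duplicate_frame(&frame)?; // duplicate the faulting page's frame
    prop.flags |= new_flags;
    cursor.unmap(PAGE_SIZE);                  // clear the old mapping first
    cursor.jump(va.start).unwrap();           // re-position the cursor at the slot
    cursor.map(new_frame.into(), prop);       // the slot is now empty, so this cannot panic
    rss_delta.add(self.rss_type(), 1);        // account for the newly mapped page
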
@@ -315,7 +315,7 @@ impl ContextTable {
 let mut cursor = pt.cursor_mut(&preempt_guard, &from).unwrap();

 // SAFETY: The safety is upheld by the caller.
-unsafe { cursor.map((paddr, 1, prop)).unwrap() };
+unsafe { cursor.map((paddr, 1, prop)) };

 Ok(())
 }
@@ -120,8 +120,7 @@ impl KVirtArea {
 for frame in frames.into_iter() {
 // SAFETY: The constructor of the `KVirtArea` has already ensured
 // that this mapping does not affect kernel's memory safety.
-unsafe { cursor.map(MappedItem::Tracked(Frame::from_unsized(frame), prop)) }
-.expect("Failed to map frame in a new `KVirtArea`");
+unsafe { cursor.map(MappedItem::Tracked(Frame::from_unsized(frame), prop)) };
 }

 Self { range }
@@ -170,7 +169,7 @@ impl KVirtArea {
 for (pa, level) in largest_pages::<KernelPtConfig>(va_range.start, pa_range.start, len)
 {
 // SAFETY: The caller of `map_untracked_frames` has ensured the safety of this mapping.
-let _ = unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
+unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
 }
 }

@@ -242,8 +242,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
 let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
 for (pa, level) in largest_pages::<KernelPtConfig>(from.start, 0, max_paddr) {
 // SAFETY: we are doing the linear mapping for the kernel.
-unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
-.expect("Kernel linear address space is mapped twice");
+unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
 }
 }

@@ -265,8 +264,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
 largest_pages::<KernelPtConfig>(from.start, pa_range.start, pa_range.len())
 {
 // SAFETY: We are doing the metadata mappings for the kernel.
-unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
-.expect("Frame metadata address space is mapped twice");
+unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
 }
 }

@@ -290,8 +288,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
 let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
 for (pa, level) in largest_pages::<KernelPtConfig>(from.start, region.base(), from.len()) {
 // SAFETY: we are doing the kernel code mapping.
-unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
-.expect("Kernel code mapped twice");
+unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
 }
 }

@@ -40,7 +40,8 @@ fn kvirt_area_tracked_map_pages() {
 panic!("Expected a tracked page");
 };
 assert_eq!(page.paddr(), paddr + (i * PAGE_SIZE));
-assert_eq!(prop, default_prop());
+assert_eq!(prop.flags, default_prop().flags);
+assert_eq!(prop.cache, default_prop().cache);
 }
 }

@@ -95,19 +95,6 @@ pub(crate) enum PageTableFrag<C: PageTableConfig> {
 },
 }

-#[cfg(ktest)]
-impl<C: PageTableConfig> PageTableFrag<C> {
-pub(crate) fn va_range(&self) -> Range<Vaddr> {
-match self {
-PageTableFrag::Mapped { va, item } => {
-let (_pa, level, _prop) = C::item_raw_info(item);
-*va..*va + page_size::<C>(level)
-}
-PageTableFrag::StrayPageTable { va, len, .. } => *va..*va + *len,
-}
-}
-}
-
 impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
 /// Creates a cursor claiming exclusive access over the given range.
 ///
@@ -406,13 +393,7 @@ impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {

 /// Maps the item starting from the current address to a physical address range.
 ///
-/// If the current address has already mapped pages, it will do a re-map,
-/// taking out the old physical address and replacing it with the new one.
-/// This function will return [`Err`] with a [`PageTableFrag`], the not
-/// mapped item. The caller should drop it after TLB coherence.
-///
-/// If there is no mapped pages in the specified virtual address range,
-/// the function will return [`None`].
+/// The current virtual address should not be mapped.
 ///
 /// # Panics
 ///
@@ -420,13 +401,14 @@ impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {
 /// - the virtual address range to be mapped is out of the locked range;
 /// - the current virtual address is not aligned to the page size of the
 /// item to be mapped;
+/// - the virtual address range contains mappings that conflicts with the item.
 ///
 /// # Safety
 ///
 /// The caller should ensure that
 /// - the range being mapped does not affect kernel's memory safety;
 /// - the physical address to be mapped is valid and safe to use.
-pub unsafe fn map(&mut self, item: C::Item) -> Result<(), PageTableFrag<C>> {
+pub unsafe fn map(&mut self, item: C::Item) {
 assert!(self.0.va < self.0.barrier_va.end);
 let (_, level, _) = C::item_raw_info(&item);
 assert!(level <= C::HIGHEST_TRANSLATION_LEVEL);
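
Because `map` now returns `()` instead of `Result<(), PageTableFrag<C>>`, call sites throughout this diff drop their `.unwrap()`, `.expect(...)`, or `let _ =` handling. A minimal before/after sketch of one such call site (cursor, `paddr`, and `prop` setup assumed, mirroring the `ContextTable` hunk above):

    // Before: a conflicting mapping surfaced as Err(PageTableFrag) to be handled or unwrapped.
    // unsafe { cursor.map((paddr, 1, prop)).unwrap() };

    // After: mapping over an occupied slot is treated as a bug and panics inside map().
    // SAFETY: The safety is upheld by the caller.
    unsafe { cursor.map((paddr, 1, prop)) };
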
@@ -462,15 +444,13 @@ impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {
 }
 }

-let frag = self.replace_cur_entry(PteState::Mapped(RcuDrop::new(item)));
+if !matches!(self.0.cur_entry().to_ref(), PteStateRef::Absent) {
+panic!("Mapping over an already mapped page at {:#x}", self.0.va);
+}
+
+let _ = self.replace_cur_entry(PteState::Mapped(RcuDrop::new(item)));

 self.0.move_forward();
-
-if let Some(frag) = frag {
-Err(frag)
-} else {
-Ok(())
-}
 }

 /// Finds and removes the first page table fragment in the following range.
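
For readability, the tail of `CursorMut::map` reassembled from the hunk above (indentation approximated, comments added here):

    // Refuse to overwrite an existing mapping instead of returning the old item.
    if !matches!(self.0.cur_entry().to_ref(), PteStateRef::Absent) {
        panic!("Mapping over an already mapped page at {:#x}", self.0.va);
    }

    // The entry is known to be absent, so the value replaced here carries no
    // previously mapped item that the caller would have to drop after TLB coherence.
    let _ = self.replace_cur_entry(PteState::Mapped(RcuDrop::new(item)));

    self.0.move_forward();
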
@@ -33,8 +33,7 @@ mod test_utils {
 .cursor_mut(&preempt_guard, &virt_range)
 .unwrap()
 .map(VmItem::new_tracked(frame.into(), page_property))
-}
-.expect("First map found an unexpected item");
+};

 page_table
 }
@@ -50,7 +49,7 @@ mod test_utils {
 let preempt_guard = disable_preempt();
 let mut cursor = pt.cursor_mut(&preempt_guard, &(va..va + pa.len())).unwrap();
 for (paddr, level) in largest_pages::<TestPtConfig>(va, pa.start, pa.len()) {
-let _ = unsafe { cursor.map((paddr, level, prop)) };
+unsafe { cursor.map((paddr, level, prop)) };
 }
 }

@@ -340,7 +339,7 @@ mod page_properties {
 let preempt_guard = disable_preempt();
 let virtual_range = PAGE_SIZE..(PAGE_SIZE * 2);
 let frame = FrameAllocOptions::new().alloc_frame().unwrap();
-let _ = unsafe {
+unsafe {
 page_table
 .cursor_mut(&preempt_guard, &virtual_range)
 .unwrap()
@@ -438,6 +437,7 @@ mod overlapping_mappings {
 use super::{test_utils::*, *};

 #[ktest]
+#[should_panic]
 fn overlapping_mappings() {
 let page_table = PageTable::<TestPtConfig>::empty();
 let vrange1 = PAGE_SIZE..(PAGE_SIZE * 2);
@@ -452,28 +452,15 @@ mod overlapping_mappings {
 page_table
 .cursor_mut(&preempt_guard, &vrange1)
 .unwrap()
-.map((prange1.start, 1, page_property))
-.expect("Mapping to empty range failed");
+.map((prange1.start, 1, page_property));
 }
 // Maps the second range, overlapping with the first.
-let res2 = unsafe {
+unsafe {
 page_table
 .cursor_mut(&preempt_guard, &vrange2)
 .unwrap()
 .map((prange2.start, 1, page_property))
 };
-let Err(frag) = res2 else {
-panic!(
-"Expected an error due to overlapping mapping, got {:#x?}",
-res2
-);
-};
-assert_eq!(frag.va_range(), vrange1);
-
-// Verifies that the overlapping address maps to the latest physical address.
-assert!(page_table.page_walk(vrange2.start + 10).is_some());
-let mapped_pa = page_table.page_walk(vrange2.start + 10).unwrap().0;
-assert_eq!(mapped_pa, prange2.start + 10);
 }

 #[ktest]
@@ -487,7 +474,7 @@ mod overlapping_mappings {

 // Attempts to map an unaligned virtual address range (expected to panic).
 unsafe {
-let _ = page_table
+page_table
 .cursor_mut(&preempt_guard, &virt_range)
 .unwrap()
 .map((phys_range.start, 1, page_property));
@@ -516,8 +503,7 @@ mod navigation {
 &(FIRST_MAP_ADDR..FIRST_MAP_ADDR + PAGE_SIZE),
 )
 .unwrap()
-.map((pa1, 1, page_property))
-.unwrap();
+.map((pa1, 1, page_property));
 }

 unsafe {
@@ -527,8 +513,7 @@ mod navigation {
 &(SECOND_MAP_ADDR..SECOND_MAP_ADDR + PAGE_SIZE),
 )
 .unwrap()
-.map((pa2, 1, page_property))
-.unwrap();
+.map((pa2, 1, page_property));
 }

 (page_table, pa1, pa2)
@@ -578,7 +563,7 @@ mod navigation {
 let mut cursor = page_table.cursor_mut(&preempt_guard, &virt_range).unwrap();

 cursor.jump(map_va).unwrap();
-unsafe { cursor.map(map_item).unwrap() };
+unsafe { cursor.map(map_item) };

 // Now the cursor is at the end of the range with level 2.
 assert!(cursor.query().is_err());
@@ -606,13 +591,11 @@ mod navigation {
 let mut cursor = page_table.cursor_mut(&preempt_guard, &virt_range).unwrap();

 unsafe {
-cursor
-.map((
-0,
-1,
-PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback),
-))
-.unwrap()
+cursor.map((
+0,
+1,
+PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback),
+))
 };

 assert_eq!(cursor.virt_addr(), virt_range.end);
@@ -662,7 +645,7 @@ mod unmap {
 {
 let mut cursor = page_table.cursor_mut(&preempt_guard, &virt_range).unwrap();
 unsafe {
-cursor.map((phys_addr, 1, page_property)).unwrap();
+cursor.map((phys_addr, 1, page_property));
 }
 }

@@ -691,7 +674,7 @@ mod unmap {
 {
 let mut cursor = page_table.cursor_mut(&preempt_guard, &virt_range).unwrap();
 unsafe {
-cursor.map((PAGE_SIZE, 1, page_property)).unwrap();
+cursor.map((PAGE_SIZE, 1, page_property));
 }
 }

@@ -717,35 +700,6 @@ mod unmap {

 mod mapping {
 use super::{test_utils::*, *};
-use crate::mm::vm_space::UserPtConfig;
-
-#[ktest]
-fn remap_yields_original() {
-let pt = PageTable::<UserPtConfig>::empty();
-let preempt_guard = disable_preempt();
-
-let virt_range = PAGE_SIZE..(PAGE_SIZE * 2);
-let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
-
-let frame = FrameAllocOptions::new().alloc_frame().unwrap();
-unsafe {
-pt.cursor_mut(&preempt_guard, &virt_range)
-.unwrap()
-.map(VmItem::new_tracked(frame.into(), page_property))
-.unwrap()
-}
-
-let frame2 = FrameAllocOptions::new().alloc_frame().unwrap();
-let Err(frag) = (unsafe {
-pt.cursor_mut(&preempt_guard, &virt_range)
-.unwrap()
-.map(VmItem::new_tracked(frame2.into(), page_property))
-}) else {
-panic!("Expected to get error on remapping, got `Ok`");
-};
-
-assert_eq!(frag.va_range(), virt_range);
-}

 #[ktest]
 fn mixed_granularity_map_unmap() {
@@ -1013,8 +967,7 @@ mod protection_and_query {
 page_table
 .cursor_mut(&preempt_guard, &sub_range)
 .unwrap()
-.map((frame_range.start, 1, prop))
-.unwrap();
+.map((frame_range.start, 1, prop));
 }

 // Attempts to protect the larger range. `protect_next` should traverse.
@@ -581,6 +581,7 @@ mod vmspace {

 /// Maps a page twice and unmaps twice using `CursorMut`.
 #[ktest]
+#[should_panic = "Mapping over an already mapped page at 0x1000"]
 fn vmspace_map_twice() {
 let vmspace = VmSpace::default();
 let range = 0x1000..0x2000;
@@ -608,28 +609,6 @@ mod vmspace {
 .expect("Failed to create mutable cursor");
 cursor_mut.map(frame.clone(), prop);
 }
-
-{
-let mut cursor = vmspace
-.cursor(&preempt_guard, &range)
-.expect("Failed to create cursor");
-assert_matches_mapped!(cursor, range.clone(), frame, prop);
-}
-
-{
-let mut cursor_mut = vmspace
-.cursor_mut(&preempt_guard, &range)
-.expect("Failed to create mutable cursor");
-cursor_mut.unmap(range.start);
-}
-
-let mut cursor = vmspace
-.cursor(&preempt_guard, &range)
-.expect("Failed to create cursor");
-assert!(matches!(
-cursor.query().unwrap(),
-(r, None) if r == range
-));
 }

 /// Unmaps twice using `CursorMut`.
@@ -355,16 +355,15 @@ impl<'a> CursorMut<'a> {
 /// Maps a frame into the current slot.
 ///
 /// This method will bring the cursor to the next slot after the modification.
+///
+/// # Panics
+///
+/// Panics if the current virtual address is already mapped.
 pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
-let start_va = self.virt_addr();
 let item = VmItem::new_tracked(frame, prop);

 // SAFETY: It is safe to map untyped memory into the userspace.
-let Err(frag) = (unsafe { self.pt_cursor.map(item) }) else {
-return; // No mapping exists at the current address.
-};
-
-self.handle_remapped_frag(frag, start_va);
+unsafe { self.pt_cursor.map(item) };
 }

 /// Maps a range of [`IoMem`] into the current slot.
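
The same rule now applies to the safe `VmSpace` cursor: mapping into an occupied slot panics instead of silently replacing the frame. A sketch of the intended usage, pieced together from the tests and the `VmMapping` hunk in this diff (`vmspace`, `preempt_guard`, `range`, `frame`, and `prop` follow the tests; `new_frame` is a hypothetical replacement frame):

    let mut cursor_mut = vmspace
        .cursor_mut(&preempt_guard, &range)
        .expect("Failed to create mutable cursor");
    cursor_mut.map(frame, prop); // OK: the slot was empty; the cursor moves to the next slot.

    // Mapping 0x1000 again would now panic with
    //   "Mapping over an already mapped page at 0x1000"
    // so an intentional replacement clears the slot first, as in the `VmMapping` hunk:
    cursor_mut.jump(range.start).unwrap();
    cursor_mut.unmap(PAGE_SIZE);
    cursor_mut.jump(range.start).unwrap();
    cursor_mut.map(new_frame, prop);
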
@@ -383,7 +382,9 @@ impl<'a> CursorMut<'a> {
 ///
 /// # Panics
 ///
-/// Panics if `len` or `offset` is not aligned to the page size.
+/// Panics if
+/// - `len` or `offset` is not aligned to the page size;
+/// - the current virtual address is already mapped.
 pub fn map_iomem(&mut self, io_mem: IoMem, prop: PageProperty, len: usize, offset: usize) {
 assert_eq!(len % PAGE_SIZE, 0);
 assert_eq!(offset % PAGE_SIZE, 0);
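
`map_iomem` inherits the same requirement for every page it maps in the loop below: the destination virtual addresses must be unmapped, in addition to the existing alignment checks. A small usage sketch (the cursor, `io_mem`, and `prop` are assumed; the length and offset values are purely illustrative):

    // Panics if `len` or `offset` is not page aligned, or if any target slot is already mapped.
    cursor_mut.map_iomem(io_mem, prop, 2 * PAGE_SIZE, 0);
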
@@ -400,21 +401,11 @@ impl<'a> CursorMut<'a> {
 };

 for current_paddr in (paddr_begin..paddr_end).step_by(PAGE_SIZE) {
-// Save the current virtual address before mapping, since map() will advance the cursor
-let current_va = self.virt_addr();
-
 // SAFETY: It is safe to map I/O memory into the userspace.
-let map_result = unsafe {
+unsafe {
 self.pt_cursor
 .map(VmItem::new_untracked_io(current_paddr, prop))
 };
-
-let Err(frag) = map_result else {
-// No mapping exists at the current address.
-continue;
-};
-
-self.handle_remapped_frag(frag, current_va);
 }

 // If the `iomems` list in `VmSpace` does not contain the current I/O
@@ -443,47 +434,6 @@ impl<'a> CursorMut<'a> {
 self.vmspace.find_iomem_by_paddr(paddr)
 }

-/// Handles a page table fragment that was remapped.
-///
-/// This method handles the TLB flushing and other cleanup when a mapping
-/// operation results in a fragment being replaced.
-fn handle_remapped_frag(&mut self, frag: PageTableFrag<UserPtConfig>, start_va: Vaddr) {
-match frag {
-PageTableFrag::Mapped { va, item } => {
-debug_assert_eq!(va, start_va);
-// SAFETY: If the item is not a scalar (e.g., a frame
-// pointer), we will drop it after the RCU grace period
-// (see `issue_tlb_flush_with`).
-let (item, panic_guard) = unsafe { RcuDrop::into_inner(item) };
-
-match item.mapped_item {
-MappedItem::TrackedFrame(old_frame) => {
-let rcu_frame = RcuDrop::new(old_frame);
-panic_guard.forget();
-let rcu_frame = Frame::rcu_from_unsized(rcu_frame);
-self.flusher
-.issue_tlb_flush_with(TlbFlushOp::for_single(start_va), rcu_frame);
-}
-MappedItem::UntrackedIoMem { .. } => {
-panic_guard.forget();
-// Flush the TLB entry for the current address, but in
-// the current design, we cannot drop the corresponding
-// `IoMem`. This is because we manage the range of I/O
-// as a whole, but the frames handled here might be one
-// segment of it.
-self.flusher
-.issue_tlb_flush(TlbFlushOp::for_single(start_va));
-}
-}
-
-self.flusher.dispatch_tlb_flush();
-}
-PageTableFrag::StrayPageTable { .. } => {
-panic!("`UFrame` is base page sized but re-mapping out a child PT");
-}
-}
-}
-
 /// Clears the mapping starting from the current slot,
 /// and returns the number of unmapped pages.
 ///