Revise `PageTableEntryTrait`

Zhang Junyang 2025-11-18 11:27:20 +08:00 committed by Ruihan Li
parent f62ae40188
commit c77c2686d8
15 changed files with 805 additions and 658 deletions

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::fmt;
use core::{arch::asm, intrinsics::AtomicOrdering::Relaxed, ops::Range};
use crate::{
@ -8,8 +7,10 @@ use crate::{
mm::{
PAGE_SIZE, Paddr, PagingConstsTrait, PagingLevel, PodOnce, Vaddr,
dma::DmaDirection,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableEntryTrait,
page_prop::{
CachePolicy, PageFlags, PageProperty, PageTableFlags, PrivilegedPageFlags as PrivFlags,
},
page_table::{PteScalar, PteTrait},
},
};
@ -30,7 +31,7 @@ bitflags::bitflags! {
#[derive(Pod)]
#[repr(C)]
/// Possible flags for a page table entry.
pub(crate) struct PageTableFlags: usize {
pub(crate) struct PteFlags: usize {
/// Specifies whether the mapped frame is valid.
const VALID = 1 << 0;
/// Whether the memory area represented by this entry is modified.
@ -119,10 +120,6 @@ pub(crate) unsafe fn sync_dma_range<D: DmaDirection>(_range: Range<Vaddr>) {
unreachable!("`can_sync_dma()` never returns `true`");
}
#[derive(Clone, Copy, Pod, Default)]
#[repr(C)]
pub(crate) struct PageTableEntry(usize);
/// Activates the given root-level page table.
///
/// "pgdl" or "pgdh" register doesn't have a field that encodes the cache policy,
@ -148,64 +145,47 @@ pub(crate) fn current_page_table_paddr() -> Paddr {
pgdl
}
impl PageTableEntry {
const PHYS_ADDR_MASK: usize = 0x0000_FFFF_FFFF_F000;
#[derive(Debug, Clone, Copy, Pod, Default)]
#[repr(C)]
pub(crate) struct PageTableEntry(usize);
fn is_user(&self) -> bool {
self.0 & PageTableFlags::PLVL.bits() != 0 && self.0 & PageTableFlags::PLVH.bits() != 0
}
fn is_huge(&self) -> bool {
if self.0 & PageTableFlags::IS_BASIC.bits() != 0 {
false
} else {
self.0 & PageTableFlags::GLOBAL_OR_HUGE.bits() != 0
}
}
fn is_global(&self) -> bool {
if self.0 & PageTableFlags::IS_BASIC.bits() != 0 {
self.0 & PageTableFlags::GLOBAL_OR_HUGE.bits() != 0
} else {
self.0 & PageTableFlags::GLOBAL_IN_HUGE.bits() != 0
}
}
}
/// Parse a bit-flag bits `val` in the representation of `from` to `to` in bits.
/// Parses the bit of `val` selected by the flag `from` into the bit position of the flag `to`.
macro_rules! parse_flags {
($val:expr, $from:expr, $to:expr) => {
(($val as usize & $from.bits() as usize) >> $from.bits().ilog2() << $to.bits().ilog2())
};
}
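// A worked instance of the shift arithmetic above, using made-up single-bit
// flags (bit 8 as the source, bit 1 as the destination); this is only an
// illustration, not part of the commit:
//
//   val          = 0b1_0000_0000   (bit 8 set)
//   from.bits()  = 0b1_0000_0000   (a single-bit flag at bit 8)
//   to.bits()    = 0b0000_0010     (a single-bit flag at bit 1)
//
//   (val & from.bits()) >> from.bits().ilog2() << to.bits().ilog2()
//     = 0b1_0000_0000 >> 8 << 1
//     = 0b10                        (the flag now sits at the `to` bit position)
//
// The macro is therefore only meaningful for single-bit flags, which is how it
// is used throughout these files.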
impl PodOnce for PageTableEntry {}
impl PageTableEntry {
const PHYS_ADDR_MASK: usize = 0x0000_FFFF_FFFF_F000;
impl PageTableEntryTrait for PageTableEntry {
fn is_present(&self) -> bool {
self.0 & PageTableFlags::VALID.bits() != 0
fn is_user(&self) -> bool {
self.0 & PteFlags::PLVL.bits() != 0 && self.0 & PteFlags::PLVH.bits() != 0
}
fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
let flags = if level == 1 {
PageTableFlags::IS_BASIC.bits()
fn is_huge(&self) -> bool {
if self.0 & PteFlags::IS_BASIC.bits() != 0 {
false
} else {
PageTableFlags::GLOBAL_OR_HUGE.bits()
};
let mut pte = Self(paddr & Self::PHYS_ADDR_MASK);
pte.set_prop(prop);
let pte = pte.0 | flags;
Self(pte)
self.0 & PteFlags::GLOBAL_OR_HUGE.bits() != 0
}
}
fn new_pt(paddr: Paddr) -> Self {
Self(paddr & Self::PHYS_ADDR_MASK | PageTableFlags::VALID.bits())
fn is_global(&self) -> bool {
if self.0 & PteFlags::IS_BASIC.bits() != 0 {
self.0 & PteFlags::GLOBAL_OR_HUGE.bits() != 0
} else {
self.0 & PteFlags::GLOBAL_IN_HUGE.bits() != 0
}
}
fn is_last(&self, level: PagingLevel) -> bool {
level == 1 || self.is_huge()
}
fn paddr(&self) -> Paddr {
if self.is_huge() {
let paddr =
(self.0 & Self::PHYS_ADDR_MASK & !PageTableFlags::GLOBAL_IN_HUGE.bits()) >> 12;
let paddr = (self.0 & Self::PHYS_ADDR_MASK & !PteFlags::GLOBAL_IN_HUGE.bits()) >> 12;
paddr << 12
} else {
let ppn = (self.0 & Self::PHYS_ADDR_MASK) >> 12;
@ -214,14 +194,15 @@ impl PageTableEntryTrait for PageTableEntry {
}
fn prop(&self) -> PageProperty {
let flags = (parse_flags!(!(self.0), PageTableFlags::NOT_READABLE, PageFlags::R))
| (parse_flags!(self.0, PageTableFlags::WRITABLE, PageFlags::W))
| (parse_flags!(!(self.0), PageTableFlags::NOT_EXECUTABLE, PageFlags::X))
let flags = parse_flags!(!(self.0), PteFlags::NOT_READABLE, PageFlags::R)
| parse_flags!(self.0, PteFlags::WRITABLE, PageFlags::W)
| parse_flags!(!(self.0), PteFlags::NOT_EXECUTABLE, PageFlags::X)
// TODO: How to get the accessed bit in loongarch?
| (parse_flags!(self.0, PageTableFlags::PRESENT, PageFlags::ACCESSED))
| (parse_flags!(self.0, PageTableFlags::DIRTY, PageFlags::DIRTY))
| (parse_flags!(self.0, PageTableFlags::RSV2, PageFlags::AVAIL2));
let mut priv_flags = parse_flags!(self.0, PageTableFlags::RSV1, PrivFlags::AVAIL1);
| parse_flags!(self.0, PteFlags::PRESENT, PageFlags::ACCESSED)
| parse_flags!(self.0, PteFlags::DIRTY, PageFlags::DIRTY)
| parse_flags!(self.0, PteFlags::RSV2, PageFlags::AVAIL2);
let mut priv_flags = parse_flags!(self.0, PteFlags::RSV1, PrivFlags::AVAIL1);
if self.is_user() {
priv_flags |= PrivFlags::USER.bits() as usize;
}
@ -229,9 +210,9 @@ impl PageTableEntryTrait for PageTableEntry {
priv_flags |= PrivFlags::GLOBAL.bits() as usize;
}
let cache = if self.0 & PageTableFlags::MATL.bits() != 0 {
let cache = if self.0 & PteFlags::MATL.bits() != 0 {
CachePolicy::Writeback
} else if self.0 & PageTableFlags::MATH.bits() != 0 {
} else if self.0 & PteFlags::MATH.bits() != 0 {
CachePolicy::WriteCombining
} else {
CachePolicy::Writeback
@ -244,73 +225,96 @@ impl PageTableEntryTrait for PageTableEntry {
}
}
fn set_prop(&mut self, prop: PageProperty) {
let mut flags = PageTableFlags::VALID.bits()
fn pt_flags(&self) -> PageTableFlags {
let bits = PageTableFlags::empty().bits() as usize
| parse_flags!(self.0, PteFlags::RSV1, PageTableFlags::AVAIL1)
| parse_flags!(self.0, PteFlags::RSV2, PageTableFlags::AVAIL2);
PageTableFlags::from_bits(bits as u8).unwrap()
}
fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
let mut flags = PteFlags::VALID.bits()
// FIXME: To avoid the PageModifyFault exception,
// we set the DIRTY bit to 1 all the time.
| PageTableFlags::DIRTY.bits()
| PteFlags::DIRTY.bits()
| parse_flags!(
!prop.flags.bits(),
PageFlags::R,
PageTableFlags::NOT_READABLE
PteFlags::NOT_READABLE
)
| parse_flags!(prop.flags.bits(), PageFlags::W, PageTableFlags::WRITABLE)
| parse_flags!(prop.flags.bits(), PageFlags::W, PteFlags::WRITABLE)
| parse_flags!(
!prop.flags.bits(),
PageFlags::X,
PageTableFlags::NOT_EXECUTABLE
PteFlags::NOT_EXECUTABLE
)
| parse_flags!(prop.flags.bits(), PageFlags::DIRTY, PageTableFlags::DIRTY)
| parse_flags!(prop.flags.bits(), PageFlags::DIRTY, PteFlags::DIRTY)
// TODO: How to get the accessed bit in loongarch?
| parse_flags!(prop.flags.bits(), PageFlags::ACCESSED, PageTableFlags::PRESENT)
| parse_flags!(prop.flags.bits(), PageFlags::AVAIL2, PageTableFlags::RSV2);
flags |= parse_flags!(
prop.priv_flags.bits(),
PrivFlags::AVAIL1,
PageTableFlags::RSV1
);
| parse_flags!(prop.flags.bits(), PageFlags::ACCESSED, PteFlags::PRESENT)
| parse_flags!(prop.flags.bits(), PageFlags::AVAIL2, PteFlags::RSV2);
flags |= parse_flags!(prop.priv_flags.bits(), PrivFlags::AVAIL1, PteFlags::RSV1);
if prop.priv_flags.contains(PrivFlags::USER) {
flags |= PageTableFlags::PLVL.bits();
flags |= PageTableFlags::PLVH.bits();
flags |= PteFlags::PLVL.bits();
flags |= PteFlags::PLVH.bits();
}
if prop.priv_flags.contains(PrivFlags::GLOBAL) {
if self.is_huge() {
flags |= PageTableFlags::GLOBAL_IN_HUGE.bits();
if level != 1 {
flags |= PteFlags::GLOBAL_IN_HUGE.bits();
} else {
flags |= PageTableFlags::GLOBAL_OR_HUGE.bits();
flags |= PteFlags::GLOBAL_OR_HUGE.bits();
}
}
match prop.cache {
CachePolicy::Writeback => {
flags |= PageTableFlags::MATL.bits();
flags |= PteFlags::MATL.bits();
}
CachePolicy::Uncacheable => (),
CachePolicy::WriteCombining => {
flags |= PageTableFlags::MATH.bits();
flags |= PteFlags::MATH.bits();
}
_ => panic!("unsupported cache policy"),
}
self.0 = (self.0 & Self::PHYS_ADDR_MASK) | flags;
let level_bits = if level != 1 {
PteFlags::GLOBAL_OR_HUGE.bits()
} else {
PteFlags::IS_BASIC.bits()
};
Self((paddr & Self::PHYS_ADDR_MASK) | flags | level_bits)
}
fn is_last(&self, level: PagingLevel) -> bool {
level == 1 || self.is_huge()
fn new_pt(paddr: Paddr, flags: PageTableFlags) -> Self {
let flags = PteFlags::VALID.bits()
| parse_flags!(flags.bits(), PageTableFlags::AVAIL1, PteFlags::RSV1)
| parse_flags!(flags.bits(), PageTableFlags::AVAIL2, PteFlags::RSV2);
Self(paddr & Self::PHYS_ADDR_MASK | flags)
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("raw", &format_args!("{:#x}", self.0))
.field("paddr", &format_args!("{:#x}", self.paddr()))
.field("present", &self.is_present())
.field(
"flags",
&PageTableFlags::from_bits_truncate(self.0 & !Self::PHYS_ADDR_MASK),
)
.field("prop", &self.prop())
.finish()
impl PodOnce for PageTableEntry {}
/// SAFETY: The implementation is safe because:
/// - `as_usize` and `from_usize` are not overridden;
/// - `from_repr` and `to_repr` are correctly implemented;
/// - a zeroed PTE represents an absent entry.
unsafe impl PteTrait for PageTableEntry {
fn from_repr(repr: &PteScalar, level: PagingLevel) -> Self {
match repr {
PteScalar::Absent => PageTableEntry(0),
PteScalar::PageTable(paddr, flags) => Self::new_pt(*paddr, *flags),
PteScalar::Mapped(paddr, prop) => Self::new_page(*paddr, level, *prop),
}
}
fn to_repr(&self, level: PagingLevel) -> PteScalar {
if self.0 & PteFlags::VALID.bits() == 0 {
return PteScalar::Absent;
}
if self.is_last(level) {
PteScalar::Mapped(self.paddr(), self.prop())
} else {
PteScalar::PageTable(self.paddr(), self.pt_flags())
}
}
}

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::fmt;
use core::ops::Range;
use spin::Once;
@ -17,8 +16,10 @@ use crate::{
mm::{
PAGE_SIZE, Paddr, PagingConstsTrait, PagingLevel, PodOnce, Vaddr,
dma::DmaDirection,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableEntryTrait,
page_prop::{
CachePolicy, PageFlags, PageProperty, PageTableFlags, PrivilegedPageFlags as PrivFlags,
},
page_table::{PteScalar, PteTrait},
},
};
@ -51,7 +52,7 @@ bitflags::bitflags! {
#[derive(Pod)]
#[repr(C)]
/// Possible flags for a page table entry.
pub(crate) struct PageTableFlags: usize {
pub(crate) struct PteFlags: usize {
/// Specifies whether the mapped frame or page table is valid.
const VALID = 1 << 0;
/// Controls whether reads to the mapped frames are allowed.
@ -151,10 +152,6 @@ pub(crate) unsafe fn sync_dma_range<D: DmaDirection>(range: Range<Vaddr>) {
unsafe { core::arch::asm!("fence rw, rw", options(nostack)) };
}
#[derive(Clone, Copy, Pod, Default)]
#[repr(C)]
pub(crate) struct PageTableEntry(usize);
/// Activates the given root-level page table.
///
/// "satp" register doesn't have a field that encodes the cache policy,
@ -182,59 +179,47 @@ pub(crate) fn current_page_table_paddr() -> Paddr {
riscv::register::satp::read().ppn() << 12
}
impl PageTableEntry {
const PHYS_ADDR_MASK: usize = 0x003F_FFFF_FFFF_FC00;
#[derive(Debug, Clone, Copy, Pod, Default)]
#[repr(C)]
pub(crate) struct PageTableEntry(usize);
fn new_paddr(paddr: Paddr) -> Self {
let ppn = paddr >> 12;
Self(ppn << 10)
}
}
/// Parse a bit-flag bits `val` in the representation of `from` to `to` in bits.
/// Parses the bit of `val` selected by the flag `from` into the bit position of the flag `to`.
macro_rules! parse_flags {
($val:expr, $from:expr, $to:expr) => {
(($val as usize & $from.bits() as usize) >> $from.bits().ilog2() << $to.bits().ilog2())
};
}
impl PodOnce for PageTableEntry {}
impl PageTableEntry {
const PHYS_ADDR_MASK: usize = 0x003f_ffff_ffff_fc00;
impl PageTableEntryTrait for PageTableEntry {
fn is_present(&self) -> bool {
self.0 & PageTableFlags::VALID.bits() != 0
}
fn new_page(paddr: Paddr, _level: PagingLevel, prop: PageProperty) -> Self {
let mut pte = Self::new_paddr(paddr);
pte.set_prop(prop);
pte
}
fn new_pt(paddr: Paddr) -> Self {
// In RISC-V, non-leaf PTE should have RWX = 000,
// and D, A, and U are reserved for future standard use.
let pte = Self::new_paddr(paddr);
PageTableEntry(pte.0 | PageTableFlags::VALID.bits())
fn new_without_flags(paddr: Paddr) -> Self {
assert_eq!(paddr & !Self::PHYS_ADDR_MASK, 0);
Self(paddr >> 12 << 10)
}
fn paddr(&self) -> Paddr {
let ppn = (self.0 & Self::PHYS_ADDR_MASK) >> 10;
ppn << 12
(self.0 & Self::PHYS_ADDR_MASK) >> 10 << 12
}
fn is_last(&self, level: PagingLevel) -> bool {
let rwx = PteFlags::READABLE | PteFlags::WRITABLE | PteFlags::EXECUTABLE;
level == 1 || (self.0 & rwx.bits()) != 0
}
fn prop(&self) -> PageProperty {
let flags = (parse_flags!(self.0, PageTableFlags::READABLE, PageFlags::R))
| (parse_flags!(self.0, PageTableFlags::WRITABLE, PageFlags::W))
| (parse_flags!(self.0, PageTableFlags::EXECUTABLE, PageFlags::X))
| (parse_flags!(self.0, PageTableFlags::ACCESSED, PageFlags::ACCESSED))
| (parse_flags!(self.0, PageTableFlags::DIRTY, PageFlags::DIRTY))
| (parse_flags!(self.0, PageTableFlags::RSV2, PageFlags::AVAIL2));
let priv_flags = (parse_flags!(self.0, PageTableFlags::USER, PrivFlags::USER))
| (parse_flags!(self.0, PageTableFlags::GLOBAL, PrivFlags::GLOBAL))
| (parse_flags!(self.0, PageTableFlags::RSV1, PrivFlags::AVAIL1));
let flags = parse_flags!(self.0, PteFlags::READABLE, PageFlags::R)
| parse_flags!(self.0, PteFlags::WRITABLE, PageFlags::W)
| parse_flags!(self.0, PteFlags::EXECUTABLE, PageFlags::X)
| parse_flags!(self.0, PteFlags::ACCESSED, PageFlags::ACCESSED)
| parse_flags!(self.0, PteFlags::DIRTY, PageFlags::DIRTY)
| parse_flags!(self.0, PteFlags::RSV2, PageFlags::AVAIL2);
let cache = if self.0 & PageTableFlags::PBMT_IO.bits() != 0 {
let priv_flags = parse_flags!(self.0, PteFlags::USER, PrivFlags::USER)
| parse_flags!(self.0, PteFlags::GLOBAL, PrivFlags::GLOBAL)
| parse_flags!(self.0, PteFlags::RSV1, PrivFlags::AVAIL1);
let cache = if self.0 & PteFlags::PBMT_IO.bits() != 0 {
CachePolicy::Uncacheable
} else {
CachePolicy::Writeback
@ -247,33 +232,24 @@ impl PageTableEntryTrait for PageTableEntry {
}
}
fn set_prop(&mut self, prop: PageProperty) {
let mut flags = PageTableFlags::VALID.bits()
| parse_flags!(prop.flags.bits(), PageFlags::R, PageTableFlags::READABLE)
| parse_flags!(prop.flags.bits(), PageFlags::W, PageTableFlags::WRITABLE)
| parse_flags!(prop.flags.bits(), PageFlags::X, PageTableFlags::EXECUTABLE)
| parse_flags!(
prop.flags.bits(),
PageFlags::ACCESSED,
PageTableFlags::ACCESSED
)
| parse_flags!(prop.flags.bits(), PageFlags::DIRTY, PageTableFlags::DIRTY)
| parse_flags!(
prop.priv_flags.bits(),
PrivFlags::USER,
PageTableFlags::USER
)
| parse_flags!(
prop.priv_flags.bits(),
PrivFlags::GLOBAL,
PageTableFlags::GLOBAL
)
| parse_flags!(
prop.priv_flags.bits(),
PrivFlags::AVAIL1,
PageTableFlags::RSV1
)
| parse_flags!(prop.flags.bits(), PageFlags::AVAIL2, PageTableFlags::RSV2);
fn pt_flags(&self) -> PageTableFlags {
let bits = PageTableFlags::empty().bits() as usize
| parse_flags!(self.0, PteFlags::RSV1, PageTableFlags::AVAIL1)
| parse_flags!(self.0, PteFlags::RSV2, PageTableFlags::AVAIL2);
PageTableFlags::from_bits(bits as u8).unwrap()
}
fn new_page(paddr: Paddr, _level: PagingLevel, prop: PageProperty) -> Self {
let mut flags = PteFlags::VALID.bits()
| parse_flags!(prop.flags.bits(), PageFlags::R, PteFlags::READABLE)
| parse_flags!(prop.flags.bits(), PageFlags::W, PteFlags::WRITABLE)
| parse_flags!(prop.flags.bits(), PageFlags::X, PteFlags::EXECUTABLE)
| parse_flags!(prop.flags.bits(), PageFlags::ACCESSED, PteFlags::ACCESSED)
| parse_flags!(prop.flags.bits(), PageFlags::DIRTY, PteFlags::DIRTY)
| parse_flags!(prop.priv_flags.bits(), PrivFlags::USER, PteFlags::USER)
| parse_flags!(prop.priv_flags.bits(), PrivFlags::GLOBAL, PteFlags::GLOBAL)
| parse_flags!(prop.priv_flags.bits(), PrivFlags::AVAIL1, PteFlags::RSV1)
| parse_flags!(prop.flags.bits(), PageFlags::AVAIL2, PteFlags::RSV2);
match prop.cache {
CachePolicy::Writeback => (),
@ -282,32 +258,52 @@ impl PageTableEntryTrait for PageTableEntry {
// memory. Normal memory can also be `Noncacheable`, where the
// PBMT should be set to `PBMT_NC`.
if has_extensions(IsaExtensions::SVPBMT) {
flags |= PageTableFlags::PBMT_IO.bits()
flags |= PteFlags::PBMT_IO.bits()
}
}
_ => panic!("unsupported cache policy"),
}
self.0 = (self.0 & Self::PHYS_ADDR_MASK) | flags;
let res = Self::new_without_flags(paddr);
Self(res.0 | flags)
}
fn is_last(&self, level: PagingLevel) -> bool {
let rwx = PageTableFlags::READABLE | PageTableFlags::WRITABLE | PageTableFlags::EXECUTABLE;
level == 1 || (self.0 & rwx.bits()) != 0
fn new_pt(paddr: Paddr, flags: PageTableFlags) -> Self {
// In RISC-V, a non-leaf PTE should have RWX = 000,
// and D, A, and U are reserved for future standard use.
let flags = PteFlags::VALID.bits()
| parse_flags!(flags.bits(), PageTableFlags::AVAIL1, PteFlags::RSV1)
| parse_flags!(flags.bits(), PageTableFlags::AVAIL2, PteFlags::RSV2);
let res = Self::new_without_flags(paddr);
Self(res.0 | flags)
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("raw", &format_args!("{:#x}", self.0))
.field("paddr", &format_args!("{:#x}", self.paddr()))
.field("present", &self.is_present())
.field(
"flags",
&PageTableFlags::from_bits_truncate(self.0 & !Self::PHYS_ADDR_MASK),
)
.field("prop", &self.prop())
.finish()
impl PodOnce for PageTableEntry {}
/// SAFETY: The implementation is safe because:
/// - `as_usize` and `from_usize` are not overridden;
/// - `from_repr` and `to_repr` are correctly implemented;
/// - a zeroed PTE represents an absent entry.
unsafe impl PteTrait for PageTableEntry {
fn from_repr(repr: &PteScalar, level: PagingLevel) -> Self {
match repr {
PteScalar::Absent => PageTableEntry(0),
PteScalar::PageTable(paddr, flags) => Self::new_pt(*paddr, *flags),
PteScalar::Mapped(paddr, prop) => Self::new_page(*paddr, level, *prop),
}
}
fn to_repr(&self, level: PagingLevel) -> PteScalar {
if self.0 & PteFlags::VALID.bits() == 0 {
return PteScalar::Absent;
}
if self.is_last(level) {
PteScalar::Mapped(self.paddr(), self.prop())
} else {
PteScalar::PageTable(self.paddr(), self.pt_flags())
}
}
}

View File

@ -1,15 +1,13 @@
// SPDX-License-Identifier: MPL-2.0
#![expect(unused_variables)]
use core::ops::Range;
use crate::{
Pod,
mm::{
Paddr, PageProperty, PagingConstsTrait, PagingLevel, PodOnce,
page_prop::{CachePolicy, PageFlags, PrivilegedPageFlags as PrivFlags},
page_table::{PageTableConfig, PageTableEntryTrait},
page_prop::{CachePolicy, PageFlags, PageTableFlags, PrivilegedPageFlags as PrivFlags},
page_table::{PageTableConfig, PteScalar, PteTrait},
},
};
@ -54,19 +52,20 @@ impl PagingConstsTrait for PagingConsts {
bitflags::bitflags! {
#[derive(Pod)]
#[repr(C)]
pub struct PageTableFlags : u64{
pub struct PteFlags: usize {
/// Whether accesses to this page must snoop processor caches.
const SNOOP = 1 << 11;
const DIRTY = 1 << 9;
/// Bits ignored by hardware.
const IGN2 = 1 << 9;
const IGN1 = 1 << 8;
const ACCESSED = 1 << 8;
/// Whether this page table entry is the last entry.
const LAST_PAGE = 1 << 7;
/// Ignore PAT, 1 if the scalable-mode PASID-table entry is not
/// used for effective memory-type determination.
const IGNORE_PAT = 1 << 6;
const IGNORE_PAT = 1 << 6;
/// Extended Memory Type, ignored by hardware when the
/// Extended Memory Type Enable (EMTE) field is Clear.
@ -84,81 +83,102 @@ bitflags::bitflags! {
#[derive(Debug, Clone, Copy, Pod, Default)]
#[repr(C)]
pub struct PageTableEntry(u64);
pub struct PageTableEntry(usize);
impl PageTableEntry {
const PHYS_MASK: u64 = 0xFFFF_FFFF_F000;
const PROP_MASK: u64 = !Self::PHYS_MASK & !PageTableFlags::LAST_PAGE.bits();
/// Parses the bit of `val` selected by the flag `from` into the bit position of the flag `to`.
macro_rules! parse_flags {
($val:expr, $from:expr, $to:expr) => {
(($val as usize & $from.bits() as usize) >> $from.bits().ilog2() << $to.bits().ilog2())
};
}
impl PodOnce for PageTableEntry {}
impl PageTableEntryTrait for PageTableEntry {
fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
let mut pte = Self(paddr as u64 & Self::PHYS_MASK | PageTableFlags::LAST_PAGE.bits());
pte.set_prop(prop);
pte
}
fn new_pt(paddr: Paddr) -> Self {
Self(
paddr as u64 & Self::PHYS_MASK
| PageTableFlags::READABLE.bits()
| PageTableFlags::WRITABLE.bits(),
)
}
fn paddr(&self) -> Paddr {
(self.0 & Self::PHYS_MASK) as usize
}
impl PageTableEntry {
const PHYS_MASK: usize = 0xffff_ffff_f000;
fn is_present(&self) -> bool {
self.0 & (PageTableFlags::READABLE | PageTableFlags::WRITABLE).bits() != 0
self.0 & (PteFlags::READABLE | PteFlags::LAST_PAGE).bits() != 0
}
fn is_last(&self, level: PagingLevel) -> bool {
level == 1
}
fn prop(&self) -> PageProperty {
let mut flags = PageFlags::empty();
if self.0 & PageTableFlags::READABLE.bits() != 0 {
flags |= PageFlags::R;
}
if self.0 & PageTableFlags::WRITABLE.bits() != 0 {
flags |= PageFlags::W;
}
if self.0 & PageTableFlags::ACCESSED.bits() != 0 {
flags |= PageFlags::ACCESSED;
}
if self.0 & PageTableFlags::DIRTY.bits() != 0 {
flags |= PageFlags::DIRTY;
}
let flags = parse_flags!(self.0, PteFlags::READABLE, PageFlags::R)
| parse_flags!(self.0, PteFlags::WRITABLE, PageFlags::W)
| parse_flags!(self.0, PteFlags::IGN2, PageFlags::AVAIL2);
let priv_flags = parse_flags!(self.0, PteFlags::IGN1, PrivFlags::AVAIL1);
// TODO: The determination of the cache policy is not rigorous. We should revise it.
let cache = if self.0 & PageTableFlags::SNOOP.bits() != 0 {
let cache = if self.0 & PteFlags::SNOOP.bits() != 0 {
CachePolicy::Writeback
} else {
CachePolicy::Uncacheable
};
PageProperty {
flags,
flags: PageFlags::from_bits(flags as u8).unwrap(),
cache,
priv_flags: PrivFlags::empty(),
priv_flags: PrivFlags::from_bits(priv_flags as u8).unwrap(),
}
}
fn set_prop(&mut self, prop: PageProperty) {
let mut flags = PageTableFlags::empty();
if prop.flags.contains(PageFlags::W) {
flags |= PageTableFlags::WRITABLE;
}
if prop.flags.contains(PageFlags::R) {
flags |= PageTableFlags::READABLE;
}
fn pt_flags(&self) -> PageTableFlags {
let bits = PageTableFlags::empty().bits() as usize
| parse_flags!(self.0, PteFlags::IGN1, PageTableFlags::AVAIL1)
| parse_flags!(self.0, PteFlags::IGN2, PageTableFlags::AVAIL2);
PageTableFlags::from_bits(bits as u8).unwrap()
}
fn new_page(paddr: Paddr, _level: PagingLevel, prop: PageProperty) -> Self {
let mut flags = PteFlags::LAST_PAGE.bits()
| parse_flags!(prop.flags.bits(), PageFlags::R, PteFlags::READABLE)
| parse_flags!(prop.flags.bits(), PageFlags::W, PteFlags::WRITABLE)
| parse_flags!(prop.priv_flags.bits(), PrivFlags::AVAIL1, PteFlags::IGN1)
| parse_flags!(prop.flags.bits(), PageFlags::AVAIL2, PteFlags::IGN2);
if prop.cache != CachePolicy::Uncacheable {
flags |= PageTableFlags::SNOOP;
flags |= PteFlags::SNOOP.bits();
}
self.0 = self.0 & !Self::PROP_MASK | flags.bits();
Self(paddr & Self::PHYS_MASK | flags)
}
fn is_last(&self, level: PagingLevel) -> bool {
level == 1
fn new_pt(paddr: Paddr, flags: PageTableFlags) -> Self {
let flags = PteFlags::READABLE.bits()
| PteFlags::WRITABLE.bits()
| parse_flags!(flags.bits(), PageTableFlags::AVAIL1, PteFlags::IGN1)
| parse_flags!(flags.bits(), PageTableFlags::AVAIL2, PteFlags::IGN2);
Self(paddr & Self::PHYS_MASK | flags)
}
}
impl PodOnce for PageTableEntry {}
/// SAFETY: The implementation is safe because:
/// - `as_usize` and `from_usize` are not overridden;
/// - `from_repr` and `to_repr` are correctly implemented;
/// - a zeroed PTE represents an absent entry.
unsafe impl PteTrait for PageTableEntry {
fn from_repr(repr: &PteScalar, level: PagingLevel) -> Self {
match repr {
PteScalar::Absent => PageTableEntry(0),
PteScalar::PageTable(paddr, flags) => Self::new_pt(*paddr, *flags),
PteScalar::Mapped(paddr, prop) => Self::new_page(*paddr, level, *prop),
}
}
fn to_repr(&self, level: PagingLevel) -> PteScalar {
if !self.is_present() {
return PteScalar::Absent;
}
let paddr = self.0 & Self::PHYS_MASK;
if self.is_last(level) {
PteScalar::Mapped(paddr, self.prop())
} else {
PteScalar::PageTable(paddr, self.pt_flags())
}
}
}

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::fmt;
use core::ops::Range;
use cfg_if::cfg_if;
@ -19,8 +18,10 @@ use crate::{
mm::{
PAGE_SIZE, Paddr, PagingConstsTrait, PagingLevel, PodOnce, Vaddr,
dma::DmaDirection,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableEntryTrait,
page_prop::{
CachePolicy, PageFlags, PageProperty, PageTableFlags, PrivilegedPageFlags as PrivFlags,
},
page_table::{PteScalar, PteTrait},
},
};
@ -45,7 +46,7 @@ bitflags::bitflags! {
#[derive(Pod)]
#[repr(C)]
/// Possible flags for a page table entry.
pub(crate) struct PageTableFlags: usize {
pub(crate) struct PteFlags: usize {
/// Specifies whether the mapped frame or page table is loaded in memory.
const PRESENT = 1 << 0;
/// Controls whether writes to the mapped frames are allowed.
@ -135,10 +136,6 @@ pub(crate) unsafe fn sync_dma_range<D: DmaDirection>(_range: Range<Vaddr>) {
// Reference: <https://lwn.net/Articles/855328/>, <https://lwn.net/Articles/2265/>.
}
#[derive(Clone, Copy, Pod, Default)]
#[repr(C)]
pub(crate) struct PageTableEntry(usize);
/// Activates the given root-level page table.
///
/// The cache policy of the root page table node is controlled by `root_pt_cache`.
@ -173,71 +170,66 @@ pub(crate) fn current_page_table_paddr() -> Paddr {
.as_u64() as Paddr
}
impl PageTableEntry {
cfg_if! {
if #[cfg(feature = "cvm_guest")] {
const PHYS_ADDR_MASK: usize = 0x7_FFFF_FFFF_F000;
} else {
const PHYS_ADDR_MASK: usize = 0xF_FFFF_FFFF_F000;
}
}
const PROP_MASK: usize = !Self::PHYS_ADDR_MASK & !PageTableFlags::HUGE.bits();
}
#[derive(Debug, Clone, Copy, Pod)]
#[repr(C)]
pub(crate) struct PageTableEntry(usize);
/// Parse a bit-flag bits `val` in the representation of `from` to `to` in bits.
/// Parses the bit of `val` selected by the flag `from` into the bit position of the flag `to`.
macro_rules! parse_flags {
($val:expr, $from:expr, $to:expr) => {
($val as usize & $from.bits() as usize) >> $from.bits().ilog2() << $to.bits().ilog2()
(($val as usize & $from.bits() as usize) >> $from.bits().ilog2() << $to.bits().ilog2())
};
}
impl PodOnce for PageTableEntry {}
impl PageTableEntry {
cfg_if! {
if #[cfg(feature = "cvm_guest")] {
const PHYS_ADDR_MASK_LVL1: usize = 0x7_ffff_ffff_f000;
const PHYS_ADDR_MASK_LVL2: usize = 0x7_ffff_ffe0_0000;
const PHYS_ADDR_MASK_LVL3: usize = 0x7_ffff_c000_0000;
} else {
const PHYS_ADDR_MASK_LVL1: usize = 0xf_ffff_ffff_f000;
const PHYS_ADDR_MASK_LVL2: usize = 0xf_ffff_ffe0_0000;
const PHYS_ADDR_MASK_LVL3: usize = 0xf_ffff_c000_0000;
}
}
const CHILD_PT_ADDR_MASK: usize = Self::PHYS_ADDR_MASK_LVL1;
fn pa_mask_at_level(level: PagingLevel) -> usize {
match level {
1 => Self::PHYS_ADDR_MASK_LVL1,
2 => Self::PHYS_ADDR_MASK_LVL2,
3 => Self::PHYS_ADDR_MASK_LVL3,
_ => panic!("invalid level {} for page entry", level),
}
}
impl PageTableEntryTrait for PageTableEntry {
fn is_present(&self) -> bool {
// For a PT child, `PRESENT` should be set; for a huge page, `HUGE` should
// be set; and for a leaf child page, `PAT`, which is the same bit as
// the `HUGE` bit at upper levels, should be set.
self.0 & PageTableFlags::PRESENT.bits() != 0 || self.0 & PageTableFlags::HUGE.bits() != 0
}
fn new_page(paddr: Paddr, _level: PagingLevel, prop: PageProperty) -> Self {
let flags = PageTableFlags::HUGE.bits();
let mut pte = Self(paddr & Self::PHYS_ADDR_MASK | flags);
pte.set_prop(prop);
pte
}
fn new_pt(paddr: Paddr) -> Self {
// In x86 if it's an intermediate PTE, it's better to have the same permissions
// as the most permissive child (to reduce hardware page walk accesses). But we
// don't have a mechanism to keep it generic across architectures, thus just
// setting it to be the most permissive.
let flags = PageTableFlags::PRESENT.bits()
| PageTableFlags::WRITABLE.bits()
| PageTableFlags::USER.bits();
Self(paddr & Self::PHYS_ADDR_MASK | flags)
}
fn paddr(&self) -> Paddr {
self.0 & Self::PHYS_ADDR_MASK
self.0 & PteFlags::PRESENT.bits() != 0 || self.0 & PteFlags::HUGE.bits() != 0
}
fn prop(&self) -> PageProperty {
let flags = (parse_flags!(self.0, PageTableFlags::PRESENT, PageFlags::R))
| (parse_flags!(self.0, PageTableFlags::WRITABLE, PageFlags::W))
| (parse_flags!(!self.0, PageTableFlags::NO_EXECUTE, PageFlags::X))
| (parse_flags!(self.0, PageTableFlags::ACCESSED, PageFlags::ACCESSED))
| (parse_flags!(self.0, PageTableFlags::DIRTY, PageFlags::DIRTY))
| (parse_flags!(self.0, PageTableFlags::HIGH_IGN2, PageFlags::AVAIL2));
let priv_flags = (parse_flags!(self.0, PageTableFlags::USER, PrivFlags::USER))
| (parse_flags!(self.0, PageTableFlags::GLOBAL, PrivFlags::GLOBAL))
| (parse_flags!(self.0, PageTableFlags::HIGH_IGN1, PrivFlags::AVAIL1));
let flags = parse_flags!(self.0, PteFlags::PRESENT, PageFlags::R)
| parse_flags!(self.0, PteFlags::WRITABLE, PageFlags::W)
| parse_flags!(!self.0, PteFlags::NO_EXECUTE, PageFlags::X)
| parse_flags!(self.0, PteFlags::ACCESSED, PageFlags::ACCESSED)
| parse_flags!(self.0, PteFlags::DIRTY, PageFlags::DIRTY)
| parse_flags!(self.0, PteFlags::HIGH_IGN2, PageFlags::AVAIL2);
let priv_flags = parse_flags!(self.0, PteFlags::USER, PrivFlags::USER)
| parse_flags!(self.0, PteFlags::GLOBAL, PrivFlags::GLOBAL)
| parse_flags!(self.0, PteFlags::HIGH_IGN1, PrivFlags::AVAIL1);
#[cfg(feature = "cvm_guest")]
let priv_flags =
priv_flags | (parse_flags!(self.0, PageTableFlags::SHARED, PrivFlags::SHARED));
let priv_flags = priv_flags | parse_flags!(self.0, PteFlags::SHARED, PrivFlags::SHARED);
// Determine cache policy from PCD, PWT bits.
let cache = flags_to_cache_policy(PageTableFlags::from_bits_truncate(self.0));
let cache = flags_to_cache_policy(PteFlags::from_bits_truncate(self.0));
PageProperty {
flags: PageFlags::from_bits(flags as u8).unwrap(),
cache,
@ -245,69 +237,91 @@ impl PageTableEntryTrait for PageTableEntry {
}
}
fn set_prop(&mut self, prop: PageProperty) {
if !self.is_present() {
return;
}
let mut flags = PageTableFlags::empty().bits();
flags |= (parse_flags!(prop.flags.bits(), PageFlags::R, PageTableFlags::PRESENT))
| (parse_flags!(prop.flags.bits(), PageFlags::W, PageTableFlags::WRITABLE))
| (parse_flags!(!prop.flags.bits(), PageFlags::X, PageTableFlags::NO_EXECUTE))
| (parse_flags!(
prop.flags.bits(),
PageFlags::ACCESSED,
PageTableFlags::ACCESSED
))
| (parse_flags!(prop.flags.bits(), PageFlags::DIRTY, PageTableFlags::DIRTY))
| (parse_flags!(
prop.priv_flags.bits(),
PrivFlags::AVAIL1,
PageTableFlags::HIGH_IGN1
))
| (parse_flags!(
prop.flags.bits(),
PageFlags::AVAIL2,
PageTableFlags::HIGH_IGN2
))
| (parse_flags!(
prop.priv_flags.bits(),
PrivFlags::USER,
PageTableFlags::USER
))
| (parse_flags!(
prop.priv_flags.bits(),
PrivFlags::GLOBAL,
PageTableFlags::GLOBAL
));
#[cfg(feature = "cvm_guest")]
{
flags |= parse_flags!(
prop.priv_flags.bits(),
PrivFlags::SHARED,
PageTableFlags::SHARED
);
}
flags |= cache_policy_to_flags(prop.cache).bits();
self.0 = self.0 & !Self::PROP_MASK | flags;
fn pt_flags(&self) -> PageTableFlags {
let bits = PageTableFlags::empty().bits() as usize
| parse_flags!(self.0, PteFlags::HIGH_IGN1, PageTableFlags::AVAIL1)
| parse_flags!(self.0, PteFlags::HIGH_IGN2, PageTableFlags::AVAIL2);
PageTableFlags::from_bits(bits as u8).unwrap()
}
fn is_last(&self, _level: PagingLevel) -> bool {
self.0 & PageTableFlags::HUGE.bits() != 0
fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
let mut flags = PteFlags::HUGE.bits();
flags |= parse_flags!(prop.flags.bits(), PageFlags::R, PteFlags::PRESENT)
| parse_flags!(prop.flags.bits(), PageFlags::W, PteFlags::WRITABLE)
| parse_flags!(!prop.flags.bits(), PageFlags::X, PteFlags::NO_EXECUTE)
| parse_flags!(prop.flags.bits(), PageFlags::ACCESSED, PteFlags::ACCESSED)
| parse_flags!(prop.flags.bits(), PageFlags::DIRTY, PteFlags::DIRTY)
| parse_flags!(
prop.priv_flags.bits(),
PrivFlags::AVAIL1,
PteFlags::HIGH_IGN1
)
| parse_flags!(prop.flags.bits(), PageFlags::AVAIL2, PteFlags::HIGH_IGN2)
| parse_flags!(prop.priv_flags.bits(), PrivFlags::USER, PteFlags::USER)
| parse_flags!(prop.priv_flags.bits(), PrivFlags::GLOBAL, PteFlags::GLOBAL);
#[cfg(feature = "cvm_guest")]
{
flags |= parse_flags!(prop.priv_flags.bits(), PrivFlags::SHARED, PteFlags::SHARED);
}
flags |= cache_policy_to_flags(prop.cache).bits();
assert_eq!(
paddr & !Self::pa_mask_at_level(level),
0,
"page physical address contains invalid bits"
);
Self(paddr | flags)
}
fn new_pt(paddr: Paddr, flags: PageTableFlags) -> Self {
// In x86 if it's an intermediate PTE, it's better to have the same permissions
// as the most permissive child (to reduce hardware page walk accesses). But we
// don't have a mechanism to keep it generic across architectures, thus just
// setting it to be the most permissive.
let flags = PteFlags::PRESENT.bits()
| PteFlags::WRITABLE.bits()
| PteFlags::USER.bits()
| parse_flags!(flags.bits(), PageTableFlags::AVAIL1, PteFlags::HIGH_IGN1)
| parse_flags!(flags.bits(), PageTableFlags::AVAIL2, PteFlags::HIGH_IGN2);
assert_eq!(
paddr & !Self::CHILD_PT_ADDR_MASK,
0,
"page table physical address contains invalid bits"
);
Self(paddr | flags)
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("raw", &format_args!("{:#x}", self.0))
.field("paddr", &format_args!("{:#x}", self.paddr()))
.field("present", &self.is_present())
.field(
"flags",
&PageTableFlags::from_bits_truncate(self.0 & !Self::PHYS_ADDR_MASK),
)
.field("prop", &self.prop())
.finish()
impl PodOnce for PageTableEntry {}
/// SAFETY: The implementation is safe because:
/// - `as_usize` and `from_usize` are not overridden;
/// - `from_repr` and `to_repr` are correctly implemented;
/// - a zeroed PTE represents an absent entry.
unsafe impl PteTrait for PageTableEntry {
fn from_repr(repr: &PteScalar, level: PagingLevel) -> Self {
match repr {
PteScalar::Absent => PageTableEntry(0),
PteScalar::PageTable(paddr, flags) => Self::new_pt(*paddr, *flags),
PteScalar::Mapped(paddr, prop) => Self::new_page(*paddr, level, *prop),
}
}
fn to_repr(&self, level: PagingLevel) -> PteScalar {
if !self.is_present() {
return PteScalar::Absent;
}
if self.0 & PteFlags::HUGE.bits() != 0 {
let paddr = self.0 & Self::pa_mask_at_level(level);
PteScalar::Mapped(paddr, self.prop())
} else {
let paddr = self.0 & Self::CHILD_PT_ADDR_MASK;
PteScalar::PageTable(paddr, self.pt_flags())
}
}
}

View File

@ -2,7 +2,7 @@
use x86::msr::{IA32_PAT, wrmsr};
use super::PageTableFlags;
use super::PteFlags;
use crate::{const_assert, mm::page_prop::CachePolicy};
/// Software-defined mapping from PAT (page attribute table) bit combinations
@ -23,29 +23,27 @@ const IA32_PAT_MAPPINGS: [CachePolicy; 8] = [
CachePolicy::Uncacheable, // Index 7: PAT=1, PCD=1, PWT=1 (same as 3)
];
pub(super) const fn flags_to_cache_policy(flags: PageTableFlags) -> CachePolicy {
pub(super) const fn flags_to_cache_policy(flags: PteFlags) -> CachePolicy {
let bits = flags.bits();
let mut index = 0usize;
if bits & PageTableFlags::NO_CACHE.bits() != 0 {
if bits & PteFlags::NO_CACHE.bits() != 0 {
index |= 2;
}
if bits & PageTableFlags::WRITE_THROUGH.bits() != 0 {
if bits & PteFlags::WRITE_THROUGH.bits() != 0 {
index |= 1;
}
IA32_PAT_MAPPINGS[index]
}
pub(super) const fn cache_policy_to_flags(cache_policy: CachePolicy) -> PageTableFlags {
pub(super) const fn cache_policy_to_flags(cache_policy: CachePolicy) -> PteFlags {
let bits = match cache_policy {
CachePolicy::Writeback => 0,
CachePolicy::Writethrough => PageTableFlags::WRITE_THROUGH.bits(),
CachePolicy::Uncacheable => {
PageTableFlags::NO_CACHE.bits() | PageTableFlags::WRITE_THROUGH.bits()
}
CachePolicy::WriteCombining => PageTableFlags::NO_CACHE.bits(),
CachePolicy::Writethrough => PteFlags::WRITE_THROUGH.bits(),
CachePolicy::Uncacheable => PteFlags::NO_CACHE.bits() | PteFlags::WRITE_THROUGH.bits(),
CachePolicy::WriteCombining => PteFlags::NO_CACHE.bits(),
_ => panic!("unsupported cache policy"),
};
PageTableFlags::from_bits_truncate(bits)
PteFlags::from_bits_truncate(bits)
}
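// For example (illustration only, not part of this file): a round trip through
// the two helpers above, assuming the `PteFlags` names defined in this module.
// The function name is made up.
fn _pat_round_trip_example() {
    // `Uncacheable` sets both PCD (`NO_CACHE`) and PWT (`WRITE_THROUGH`), ...
    let flags = cache_policy_to_flags(CachePolicy::Uncacheable);
    assert!(flags.contains(PteFlags::NO_CACHE | PteFlags::WRITE_THROUGH));
    // ... which selects index 3 in `IA32_PAT_MAPPINGS` and decodes back to
    // `Uncacheable`.
    assert!(matches!(flags_to_cache_policy(flags), CachePolicy::Uncacheable));
}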
const_assert!(matches!(

View File

@ -490,7 +490,7 @@ pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
priv_flags: PrivilegedPageFlags::GLOBAL,
};
// SAFETY: we are doing the metadata mappings for the kernel.
unsafe { boot_pt.map_base_page(vaddr, frame_paddr / PAGE_SIZE, prop) };
unsafe { boot_pt.map_base_page(vaddr, frame_paddr, prop) };
}
})
.unwrap();
@ -635,7 +635,7 @@ fn add_temp_linear_mapping(max_paddr: Paddr) {
boot_pt::with_borrow(|boot_pt| {
for paddr in prange.step_by(PAGE_SIZE) {
let vaddr = LINEAR_MAPPING_BASE_VADDR + paddr;
boot_pt.map_base_page(vaddr, paddr / PAGE_SIZE, prop);
boot_pt.map_base_page(vaddr, paddr, prop);
}
})
.unwrap();

View File

@ -42,7 +42,9 @@ pub use self::{
vm_space::VmSpace,
};
pub(crate) use self::{
kspace::paddr_to_vaddr, page_prop::PrivilegedPageFlags, page_table::PageTable,
kspace::paddr_to_vaddr,
page_prop::{PageTableFlags, PrivilegedPageFlags},
page_table::PageTable,
};
use crate::arch::mm::PagingConsts;

View File

@ -132,3 +132,13 @@ bitflags! {
const SHARED = 0b10000000;
}
}
bitflags! {
/// Flags that can be stored on intermediate page table entries.
pub(crate) struct PageTableFlags: u8 {
/// The first bit available for software use.
const AVAIL1 = 0b01000000;
/// The second bit available for software use.
const AVAIL2 = 0b10000000;
}
}

View File

@ -10,25 +10,27 @@ use core::{
sync::atomic::{AtomicU32, Ordering},
};
use super::{PageTableEntryTrait, pte_index};
use ostd_pod::Pod;
use super::{PteTrait, pte_index};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
cpu::num_cpus,
cpu_local_cell,
mm::{
Frame, FrameAllocOptions, PAGE_SIZE, Paddr, PageProperty, PagingConstsTrait, PagingLevel,
PrivilegedPageFlags, Vaddr,
Vaddr,
frame::{
self,
allocator::{self, EarlyAllocatedFrameMeta},
},
nr_subpage_per_huge, paddr_to_vaddr,
page_prop::PageTableFlags,
page_table::PteScalar,
},
sync::SpinLock,
};
type FrameNumber = usize;
/// The accessor to the boot page table singleton [`BootPageTable`].
///
/// The user should provide a closure to access the boot page table. The
@ -77,14 +79,14 @@ pub(crate) unsafe fn dismiss() {
dfs_walk_on_leave::<PageTableEntry, PagingConsts>(
boot_pt.root_pt,
PagingConsts::NR_LEVELS,
&mut |pte| {
if !pte.prop().priv_flags.contains(PTE_POINTS_TO_FIRMWARE_PT) {
&mut |pte, pa, _, flags| {
if !flags.contains(PTE_POINTS_TO_FIRMWARE_PT) {
// SAFETY: The pointed frame is allocated and forgotten with `into_raw`.
drop(unsafe { Frame::<EarlyAllocatedFrameMeta>::from_raw(pte.paddr()) })
drop(unsafe { Frame::<EarlyAllocatedFrameMeta>::from_raw(pa) })
}
// Firmware provided page tables may be a DAG instead of a tree.
// Clear it to avoid double-free when we meet it the second time.
*pte = PageTableEntry::new_absent();
*pte = PageTableEntry::new_zeroed();
},
);
}
@ -107,12 +109,9 @@ cpu_local_cell! {
/// All the page table frames newly allocated here are distinguished from the
/// firmware-provided ones by the first unused bit in their parent PTEs. This
/// allows us to deallocate them when the boot page table is dropped.
pub(crate) struct BootPageTable<
E: PageTableEntryTrait = PageTableEntry,
C: PagingConstsTrait = PagingConsts,
> {
root_pt: FrameNumber,
_pretend_to_use: core::marker::PhantomData<(E, C)>,
pub(crate) struct BootPageTable<E: PteTrait = PageTableEntry, C: PagingConstsTrait = PagingConsts> {
root_pt: Paddr,
_phantom: core::marker::PhantomData<(E, C)>,
}
// We use two extra available bits in the boot PT for memory management.
@ -120,9 +119,9 @@ pub(crate) struct BootPageTable<
// The first available bit is used to differentiate firmware page tables from
// the page tables allocated here. The second is for identifying double-visits
// when walking the page tables since the PT can be a DAG.
const PTE_POINTS_TO_FIRMWARE_PT: PrivilegedPageFlags = PrivilegedPageFlags::AVAIL1;
const PTE_POINTS_TO_FIRMWARE_PT: PageTableFlags = PageTableFlags::AVAIL1;
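// A minimal sketch (illustration only) of how this bit is consumed when the
// boot page table is torn down: only page table frames allocated here, i.e.
// those whose parent PTE lacks the firmware bit, may be freed. The helper name
// is made up.
fn _owns_child_frame(repr: &PteScalar) -> bool {
    match repr {
        PteScalar::PageTable(_, flags) => !flags.contains(PTE_POINTS_TO_FIRMWARE_PT),
        // Absent or mapped entries do not point to a page table frame we own.
        _ => false,
    }
}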
impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
impl<E: PteTrait, C: PagingConstsTrait> BootPageTable<E, C> {
/// Creates a new boot page table from the current page table root
/// physical address.
///
@ -132,22 +131,25 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
/// Otherwise, it would lead to a double-drop of the page table frames set up
/// by the firmware, the loader, or the setup code.
unsafe fn from_current_pt() -> Self {
let root_pt = crate::arch::mm::current_page_table_paddr() / C::BASE_PAGE_SIZE;
let root_pt = crate::arch::mm::current_page_table_paddr();
// Make sure the 2 available bits are not set for firmware page tables.
dfs_walk_on_leave::<E, C>(root_pt, C::NR_LEVELS, &mut |pte: &mut E| {
let mut prop = pte.prop();
prop.priv_flags |= PTE_POINTS_TO_FIRMWARE_PT;
pte.set_prop(prop);
});
dfs_walk_on_leave::<E, C>(
root_pt,
C::NR_LEVELS,
&mut |pte: &mut E, pa, level, mut flags| {
flags |= PTE_POINTS_TO_FIRMWARE_PT;
*pte = E::from_repr(&PteScalar::PageTable(pa, flags), level);
},
);
Self {
root_pt,
_pretend_to_use: core::marker::PhantomData,
_phantom: core::marker::PhantomData,
}
}
/// Returns the root physical address of the boot page table.
pub(crate) fn root_address(&self) -> Paddr {
self.root_pt * C::BASE_PAGE_SIZE
self.root_pt
}
/// Maps a base page to a frame.
@ -160,33 +162,37 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
///
/// This function is unsafe because it can cause undefined behavior if the caller
/// maps a page in the kernel address space.
pub unsafe fn map_base_page(&mut self, from: Vaddr, to: FrameNumber, prop: PageProperty) {
pub unsafe fn map_base_page(&mut self, from: Vaddr, to: Paddr, prop: PageProperty) {
let mut pt = self.root_pt;
let mut level = C::NR_LEVELS;
// Walk to the last level of the page table.
while level > 1 {
let index = pte_index::<C>(from, level);
let pte_ptr = unsafe { (paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E).add(index) };
let pte_ptr = unsafe { (paddr_to_vaddr(pt) as *mut E).add(index) };
let pte = unsafe { pte_ptr.read() };
pt = if !pte.is_present() {
let pte = self.alloc_child();
unsafe { pte_ptr.write(pte) };
pte.paddr() / C::BASE_PAGE_SIZE
} else if pte.is_last(level) {
panic!("mapping an already mapped huge page in the boot page table");
} else {
pte.paddr() / C::BASE_PAGE_SIZE
match pte.to_repr(level) {
PteScalar::Absent => {
let (pte, child_pt) = self.alloc_child(level);
unsafe { pte_ptr.write(pte) };
pt = child_pt;
}
PteScalar::Mapped(_, _) => {
panic!("mapping an already mapped huge page in the boot page table");
}
PteScalar::PageTable(child_pt, _) => {
pt = child_pt;
}
};
level -= 1;
}
// Map the page in the last level page table.
let index = pte_index::<C>(from, 1);
let pte_ptr = unsafe { (paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E).add(index) };
let pte_ptr = unsafe { (paddr_to_vaddr(pt) as *mut E).add(index) };
let pte = unsafe { pte_ptr.read() };
if pte.is_present() {
if matches!(pte.to_repr(1), PteScalar::Mapped(_, _)) {
panic!("mapping an already mapped page in the boot page table");
}
unsafe { pte_ptr.write(E::new_page(to * C::BASE_PAGE_SIZE, 1, prop)) };
unsafe { pte_ptr.write(E::from_repr(&PteScalar::Mapped(to, prop), 1)) };
}
/// Sets the protections of a base page mapping.
@ -213,45 +219,45 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
// Walk to the last level of the page table.
while level > 1 {
let index = pte_index::<C>(virt_addr, level);
let pte_ptr = unsafe { (paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E).add(index) };
let pte_ptr = unsafe { (paddr_to_vaddr(pt) as *mut E).add(index) };
let pte = unsafe { pte_ptr.read() };
pt = if !pte.is_present() {
panic!("protecting an unmapped page in the boot page table");
} else if pte.is_last(level) {
// Split the huge page.
let child_pte = self.alloc_child();
let child_frame_pa = child_pte.paddr();
let huge_pa = pte.paddr();
for i in 0..nr_subpage_per_huge::<C>() {
let nxt_ptr = unsafe { (paddr_to_vaddr(child_frame_pa) as *mut E).add(i) };
unsafe {
nxt_ptr.write(E::new_page(
huge_pa + i * C::BASE_PAGE_SIZE,
level - 1,
pte.prop(),
))
};
match pte.to_repr(level) {
PteScalar::Absent => {
panic!("protecting an unmapped page in the boot page table");
}
PteScalar::PageTable(child_pt, _) => {
pt = child_pt;
}
PteScalar::Mapped(huge_pa, prop) => {
// Split the huge page.
let (child_pte, child_frame_pa) = self.alloc_child(level);
for i in 0..nr_subpage_per_huge::<C>() {
let nxt_ptr = unsafe { (paddr_to_vaddr(child_frame_pa) as *mut E).add(i) };
unsafe {
nxt_ptr.write(E::from_repr(
&PteScalar::Mapped(huge_pa + i * C::BASE_PAGE_SIZE, prop),
level - 1,
))
};
}
unsafe { pte_ptr.write(child_pte) };
pt = child_frame_pa;
}
unsafe { pte_ptr.write(E::new_pt(child_frame_pa)) };
child_frame_pa / C::BASE_PAGE_SIZE
} else {
pte.paddr() / C::BASE_PAGE_SIZE
};
level -= 1;
}
// Do protection in the last level page table.
let index = pte_index::<C>(virt_addr, 1);
let pte_ptr = unsafe { (paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E).add(index) };
let pte_ptr = unsafe { (paddr_to_vaddr(pt) as *mut E).add(index) };
let pte = unsafe { pte_ptr.read() };
if !pte.is_present() {
let PteScalar::Mapped(pa, mut prop) = pte.to_repr(1) else {
panic!("protecting an unmapped page in the boot page table");
}
let mut prop = pte.prop();
};
op(&mut prop);
unsafe { pte_ptr.write(E::new_page(pte.paddr(), 1, prop)) };
unsafe { pte_ptr.write(E::from_repr(&PteScalar::Mapped(pa, prop), 1)) };
}
fn alloc_child(&mut self) -> E {
fn alloc_child(&mut self, level: PagingLevel) -> (E, Paddr) {
let frame_paddr = if frame::meta::is_initialized() {
let frame = FrameAllocOptions::new()
.zeroed(false)
@ -269,14 +275,20 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
let vaddr = paddr_to_vaddr(frame_paddr) as *mut u8;
unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE) };
E::new_pt(frame_paddr)
(
E::from_repr(
&PteScalar::PageTable(frame_paddr, PageTableFlags::empty()),
level,
),
frame_paddr,
)
}
#[cfg(ktest)]
pub(super) fn new(root_pt: FrameNumber) -> Self {
pub(super) fn new(root_pt: Paddr) -> Self {
Self {
root_pt,
_pretend_to_use: core::marker::PhantomData,
_phantom: core::marker::PhantomData,
}
}
}
@ -285,18 +297,18 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
///
/// When leaving a page table frame, the closure is called with the PTE that
/// points to the frame.
fn dfs_walk_on_leave<E: PageTableEntryTrait, C: PagingConstsTrait>(
pt: FrameNumber,
fn dfs_walk_on_leave<E: PteTrait, C: PagingConstsTrait>(
pt: Paddr,
level: PagingLevel,
op: &mut impl FnMut(&mut E),
op: &mut impl FnMut(&mut E, Paddr, PagingLevel, PageTableFlags),
) {
if level >= 2 {
let pt_vaddr = paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E;
let pt_vaddr = paddr_to_vaddr(pt) as *mut E;
let pt = unsafe { core::slice::from_raw_parts_mut(pt_vaddr, nr_subpage_per_huge::<C>()) };
for pte in pt {
if pte.is_present() && !pte.is_last(level) {
dfs_walk_on_leave::<E, C>(pte.paddr() / C::BASE_PAGE_SIZE, level - 1, op);
op(pte)
if let PteScalar::PageTable(child_pt, flags) = pte.to_repr(level) {
dfs_walk_on_leave::<E, C>(child_pt, level - 1, op);
op(pte, child_pt, level, flags);
}
}
}

View File

@ -11,8 +11,8 @@ use crate::{
mm::{
HasPaddr, Vaddr, nr_subpage_per_huge, paddr_to_vaddr,
page_table::{
ChildRef, PageTable, PageTableConfig, PageTableEntryTrait, PageTableGuard,
PageTableNodeRef, PagingConstsTrait, PagingLevel, load_pte, page_size, pte_index,
ChildRef, PageTable, PageTableConfig, PageTableGuard, PageTableNodeRef,
PagingConstsTrait, PagingLevel, PteScalar, PteTrait, load_pte, page_size, pte_index,
},
},
task::atomic_mode::InAtomicMode,
@ -112,13 +112,16 @@ fn try_traverse_and_lock_subtree_root<'rcu, C: PageTableConfig>(
// - All page table entries are aligned and accessed with atomic operations only.
let cur_pte = unsafe { load_pte(cur_pt_ptr.add(start_idx), Ordering::Acquire) };
if cur_pte.is_present() {
if cur_pte.is_last(cur_level) {
match cur_pte.to_repr(cur_level) {
PteScalar::Mapped(_, _) => {
break;
}
cur_pt_addr = cur_pte.paddr();
cur_node_guard = None;
continue;
PteScalar::Absent => {}
PteScalar::PageTable(child_pt_addr, _) => {
cur_pt_addr = child_pt_addr;
cur_node_guard = None;
continue;
}
}
// In case the child is absent, we should lock and allocate a new page table node.
@ -133,18 +136,19 @@ fn try_traverse_and_lock_subtree_root<'rcu, C: PageTableConfig>(
}
let mut cur_entry = pt_guard.entry(start_idx);
if cur_entry.is_none() {
let allocated_guard = cur_entry.alloc_if_none(guard).unwrap();
cur_pt_addr = allocated_guard.paddr();
cur_node_guard = Some(allocated_guard);
} else if cur_entry.is_node() {
let ChildRef::PageTable(pt) = cur_entry.to_ref() else {
unreachable!();
};
cur_pt_addr = pt.paddr();
cur_node_guard = None;
} else {
break;
match cur_entry.to_ref() {
ChildRef::Frame(_, _, _) => {
break;
}
ChildRef::None => {
let allocated_guard = cur_entry.alloc_if_none(guard).unwrap();
cur_pt_addr = allocated_guard.paddr();
cur_node_guard = Some(allocated_guard);
}
ChildRef::PageTable(pt) => {
cur_pt_addr = pt.paddr();
cur_node_guard = None;
}
}
}

View File

@ -19,6 +19,7 @@ use super::{
use crate::{
Pod,
arch::mm::{PageTableEntry, PagingConsts},
mm::page_prop::PageTableFlags,
task::{atomic_mode::AsAtomicModeGuard, disable_preempt},
};
@ -79,7 +80,7 @@ pub(crate) unsafe trait PageTableConfig:
const TOP_LEVEL_CAN_UNMAP: bool = true;
/// The type of the page table entry.
type E: PageTableEntryTrait;
type E: PteTrait;
/// The paging constants.
type C: PagingConstsTrait;
@ -342,7 +343,10 @@ impl PageTable<KernelPtConfig> {
// outlive the kernel page table, which is trivially true.
// See also `<PageTablePageMeta as AnyFrameMeta>::on_drop`.
let pt_addr = pt.paddr();
let pte = PageTableEntry::new_pt(pt_addr);
let pte = PageTableEntry::from_repr(
&PteScalar::PageTable(pt_addr, PageTableFlags::empty()),
UserPtConfig::NR_LEVELS,
);
// SAFETY: The index is within the bounds and the PTE is at the
// correct paging level. However, neither it's a `UserPtConfig`
// child nor the node has the ownership of the child. It is
@ -467,71 +471,66 @@ pub(super) unsafe fn page_walk<C: PageTableConfig>(
// - All page table entries are aligned and accessed with atomic operations only.
let cur_pte = unsafe { load_pte((pt_addr as *mut C::E).add(offset), Ordering::Acquire) };
if !cur_pte.is_present() {
return None;
match cur_pte.to_repr(cur_level) {
PteScalar::Absent => return None,
PteScalar::PageTable(next_pt_addr, _) => {
pt_addr = paddr_to_vaddr(next_pt_addr);
continue;
}
PteScalar::Mapped(frame_paddr, prop) => {
debug_assert!(cur_level <= C::HIGHEST_TRANSLATION_LEVEL);
return Some((
frame_paddr + (vaddr & (page_size::<C>(cur_level) - 1)),
prop,
));
}
}
if cur_pte.is_last(cur_level) {
debug_assert!(cur_level <= C::HIGHEST_TRANSLATION_LEVEL);
return Some((
cur_pte.paddr() + (vaddr & (page_size::<C>(cur_level) - 1)),
cur_pte.prop(),
));
}
pt_addr = paddr_to_vaddr(cur_pte.paddr());
}
unreachable!("All present PTEs at the level 1 must be last-level PTEs");
}
/// The scalar representation of a page table entry (PTE).
///
/// This is an architecture-agnostic representation that can be converted
/// to/from architecture-specific PTEs via the [`PteTrait`]. This is a scalar
/// value that can be cloned or compared, and does not own the underlying page
/// table node or mapped item if present.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum PteScalar {
/// A PTE that is considered absent by the MMU.
Absent,
/// A PTE that points to the next-level page table.
PageTable(Paddr, PageTableFlags),
/// A PTE that establishes the mapping to a physical frame.
Mapped(Paddr, PageProperty),
}
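// A rough illustration (not part of the commit) of the contract between
// `from_repr` and `to_repr` stated below: converting a scalar into an
// architecture-specific PTE and back at the same level must reproduce it, and
// a zeroed PTE must decode as absent. The function name is made up.
fn _assert_pte_round_trip<E: PteTrait>(repr: PteScalar, level: PagingLevel) {
    let pte = E::from_repr(&repr, level);
    assert_eq!(pte.to_repr(level), repr);
    assert!(matches!(E::new_zeroed().to_repr(level), PteScalar::Absent));
}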
/// A trait that abstracts architecture-specific page table entries (PTEs).
///
/// Note that a default PTE should be a PTE that points to nothing.
pub trait PageTableEntryTrait:
Clone + Copy + Debug + Default + Pod + PodOnce + Sized + Send + Sync + 'static
/// A PTE refers to an entry in any level of a page table. This trait requires
/// that any architecture-specific PTE is a scalar type that essentially
/// encodes [`PteScalar`].
///
/// # Safety
///
/// An implementor must ensure that:
/// - the methods `as_usize` and `from_usize` are not overridden;
/// - calling `to_repr` on the value returned by `from_repr` yields the same
/// [`PteScalar`] that was passed to `from_repr`, provided that the level
/// passed to `to_repr` is the same as that passed to `from_repr`;
/// - a zeroed PTE represents an absent entry.
pub(crate) unsafe trait PteTrait:
Clone + Copy + Debug + Pod + PodOnce + Sized + Send + Sync + 'static
{
/// Creates a PTE that points to nothing.
/// Creates the architecture-specific PTE from its scalar representation.
fn from_repr(repr: &PteScalar, level: PagingLevel) -> Self;
/// Returns the representation of the PTE.
///
/// Note that currently the implementation requires a zeroed PTE to be an absent PTE.
fn new_absent() -> Self {
Self::default()
}
/// Returns if the PTE points to something.
///
/// For PTEs created by [`Self::new_absent`], this method should return
/// false. For PTEs created by [`Self::new_page`] or [`Self::new_pt`]
/// and modified with [`Self::set_prop`], this method should return true.
fn is_present(&self) -> bool;
/// Creates a new PTE that maps to a page.
fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self;
/// Creates a new PTE that maps to a child page table.
fn new_pt(paddr: Paddr) -> Self;
/// Returns the physical address from the PTE.
///
/// The physical address recorded in the PTE is either:
/// - the physical address of the next-level page table, or
/// - the physical address of the page that the PTE maps to.
fn paddr(&self) -> Paddr;
/// Returns the page property of the PTE.
fn prop(&self) -> PageProperty;
/// Sets the page property of the PTE.
///
/// This method has an impact only if the PTE is present. If not, this
/// method will do nothing.
fn set_prop(&mut self, prop: PageProperty);
/// Returns if the PTE maps a page rather than a child page table.
///
/// The method needs to know the level of the page table where the PTE resides,
/// since architectures like x86-64 have a huge bit only in intermediate levels.
fn is_last(&self, level: PagingLevel) -> bool;
/// The caller must ensure that the level is the correct level of the PTE,
/// otherwise the implementation can return an arbitrary value.
fn to_repr(&self, level: PagingLevel) -> PteScalar;
/// Converts the PTE into a raw `usize` value.
fn as_usize(self) -> usize {
@ -555,7 +554,7 @@ pub trait PageTableEntryTrait:
/// # Safety
///
/// The safety preconditions are same as those of [`AtomicUsize::from_ptr`].
pub unsafe fn load_pte<E: PageTableEntryTrait>(ptr: *mut E, ordering: Ordering) -> E {
pub unsafe fn load_pte<E: PteTrait>(ptr: *mut E, ordering: Ordering) -> E {
// SAFETY: The safety is upheld by the caller.
let atomic = unsafe { AtomicUsize::from_ptr(ptr.cast()) };
let pte_raw = atomic.load(ordering);
@ -567,7 +566,7 @@ pub unsafe fn load_pte<E: PageTableEntryTrait>(ptr: *mut E, ordering: Ordering)
/// # Safety
///
/// The safety preconditions are same as those of [`AtomicUsize::from_ptr`].
pub unsafe fn store_pte<E: PageTableEntryTrait>(ptr: *mut E, new_val: E, ordering: Ordering) {
pub unsafe fn store_pte<E: PteTrait>(ptr: *mut E, new_val: E, ordering: Ordering) {
let new_raw = new_val.as_usize();
// SAFETY: The safety is upheld by the caller.
let atomic = unsafe { AtomicUsize::from_ptr(ptr.cast()) };

View File

@ -4,9 +4,15 @@
use core::mem::ManuallyDrop;
use super::{PageTableEntryTrait, PageTableNode, PageTableNodeRef};
use ostd_pod::Pod;
use super::{PageTableNode, PageTableNodeRef, PteTrait};
use crate::{
mm::{HasPaddr, Paddr, PagingLevel, page_prop::PageProperty, page_table::PageTableConfig},
mm::{
HasPaddr, Paddr, PageTableFlags, PagingLevel,
page_prop::PageProperty,
page_table::{PageTableConfig, PteScalar},
},
sync::RcuDrop,
};
@ -33,11 +39,14 @@ impl<C: PageTableConfig> Child<C> {
match self {
Child::PageTable(node) => {
let paddr = node.paddr();
let level = node.level();
let _ = ManuallyDrop::new(node);
C::E::new_pt(paddr)
C::E::from_repr(&PteScalar::PageTable(paddr, PageTableFlags::empty()), level)
}
Child::Frame(paddr, level, prop) => C::E::new_page(paddr, level, prop),
Child::None => C::E::new_absent(),
Child::Frame(paddr, level, prop) => {
C::E::from_repr(&PteScalar::Mapped(paddr, prop), level)
}
Child::None => C::E::new_zeroed(),
}
}
@ -49,21 +58,19 @@ impl<C: PageTableConfig> Child<C> {
///
/// The level must match the original level of the child.
pub(super) unsafe fn from_pte(pte: C::E, level: PagingLevel) -> Self {
if !pte.is_present() {
return Child::None;
let repr = pte.to_repr(level);
match repr {
PteScalar::Absent => Child::None,
PteScalar::PageTable(paddr, _) => {
// SAFETY: The caller ensures that this node was created by
// `into_pte`, so that restoring the forgotten reference is safe.
let node = unsafe { PageTableNode::from_raw(paddr) };
debug_assert_eq!(node.level(), level - 1);
Child::PageTable(RcuDrop::new(node))
}
PteScalar::Mapped(paddr, prop) => Child::Frame(paddr, level, prop),
}
let paddr = pte.paddr();
if !pte.is_last(level) {
// SAFETY: The caller ensures that this node was created by
// `into_pte`, so that restoring the forgotten reference is safe.
let node = unsafe { PageTableNode::from_raw(paddr) };
debug_assert_eq!(node.level(), level - 1);
return Child::PageTable(RcuDrop::new(node));
}
Child::Frame(paddr, level, pte.prop())
}
}
@ -91,21 +98,19 @@ impl<C: PageTableConfig> ChildRef<'_, C> {
/// The provided level must be the same as the level of the page table
/// node that contains this PTE.
pub(super) unsafe fn from_pte(pte: &C::E, level: PagingLevel) -> Self {
if !pte.is_present() {
return ChildRef::None;
let repr = pte.to_repr(level);
match repr {
PteScalar::Absent => ChildRef::None,
PteScalar::PageTable(paddr, _) => {
// SAFETY: The caller ensures that the lifetime of the child is
// contained by the residing node, and the physical address is
// valid since the entry is present.
let node = unsafe { PageTableNodeRef::borrow_paddr(paddr) };
debug_assert_eq!(node.level(), level - 1);
ChildRef::PageTable(node)
}
PteScalar::Mapped(paddr, prop) => ChildRef::Frame(paddr, level, prop),
}
let paddr = pte.paddr();
if !pte.is_last(level) {
// SAFETY: The caller ensures that the lifetime of the child is
// contained by the residing node, and the physical address is
// valid since the entry is present.
let node = unsafe { PageTableNodeRef::borrow_paddr(paddr) };
debug_assert_eq!(node.level(), level - 1);
return ChildRef::PageTable(node);
}
ChildRef::Frame(paddr, level, pte.prop())
}
}

View File

@ -2,13 +2,13 @@
//! This module provides accessors to the page table entries in a node.
use super::{Child, ChildRef, PageTableEntryTrait, PageTableGuard, PageTableNode};
use super::{Child, ChildRef, PageTableGuard, PageTableNode, PteTrait};
use crate::{
mm::{
HasPaddr, nr_subpage_per_huge,
page_prop::PageProperty,
page_size,
page_table::{PageTableConfig, PageTableNodeRef},
page_table::{PageTableConfig, PageTableNodeRef, PteScalar},
},
sync::RcuDrop,
task::atomic_mode::InAtomicMode,
@ -37,16 +37,6 @@ pub(in crate::mm) struct Entry<'a, 'rcu, C: PageTableConfig> {
}
impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
/// Returns if the entry does not map to anything.
pub(in crate::mm) fn is_none(&self) -> bool {
!self.pte.is_present()
}
/// Returns if the entry maps to a page table node.
pub(in crate::mm) fn is_node(&self) -> bool {
self.pte.is_present() && !self.pte.is_last(self.node.level())
}
/// Gets a reference to the child.
pub(in crate::mm) fn to_ref(&self) -> ChildRef<'rcu, C> {
// SAFETY:
@ -59,11 +49,11 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
///
/// It only modifies the properties if the entry is present.
pub(in crate::mm) fn protect(&mut self, op: &mut impl FnMut(&mut PageProperty)) {
if !self.pte.is_present() {
let level = self.node.level();
let PteScalar::Mapped(pa, prop) = self.pte.to_repr(level) else {
return;
}
};
let prop = self.pte.prop();
let mut new_prop = prop;
op(&mut new_prop);
@ -71,7 +61,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
return;
}
self.pte.set_prop(new_prop);
self.pte = C::E::from_repr(&PteScalar::Mapped(pa, new_prop), level);
// SAFETY:
// 1. The index is within the bounds.
@ -130,7 +120,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
&mut self,
guard: &'rcu dyn InAtomicMode,
) -> Option<PageTableGuard<'rcu, C>> {
if !(self.is_none() && self.node.level() > 1) {
if !matches!(self.to_ref(), ChildRef::None) || self.node.level() == 1 {
return None;
}
@ -171,13 +161,9 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
guard: &'rcu dyn InAtomicMode,
) -> Option<PageTableGuard<'rcu, C>> {
let level = self.node.level();
if !(self.pte.is_last(level) && level > 1) {
let PteScalar::Mapped(pa, prop) = self.pte.to_repr(level) else {
return None;
}
let pa = self.pte.paddr();
let prop = self.pte.prop();
};
let new_page = RcuDrop::new(PageTableNode::<C>::alloc(level - 1));
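
With `set_prop` removed from the trait, updating an existing mapping follows a decode, modify, re-encode pattern, as the new `protect` above shows. Here is a standalone sketch of that pattern; the helper name `with_flags_removed` and the import paths are assumptions for illustration and are not part of this patch.

use crate::mm::{
    PagingLevel,
    page_prop::PageFlags,
    page_table::{PteScalar, PteTrait},
};

/// Removes the given flags from a mapping PTE, leaving non-mapping PTEs
/// untouched.
fn with_flags_removed<E: PteTrait>(pte: E, level: PagingLevel, flags: PageFlags) -> Option<E> {
    // Decode the PTE; only `Mapped` entries carry a `PageProperty`.
    let PteScalar::Mapped(paddr, mut prop) = pte.to_repr(level) else {
        return None;
    };
    // `PageFlags` is a bitflags type, so subtraction removes the given bits.
    prop.flags -= flags;
    // Re-encode at the same level to get the updated PTE.
    Some(E::from_repr(&PteScalar::Mapped(paddr, prop), level))
}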

View File

@ -39,13 +39,13 @@ pub(in crate::mm) use self::{
child::{Child, ChildRef},
entry::Entry,
};
use super::{PageTableConfig, PageTableEntryTrait, nr_subpage_per_huge};
use super::{PageTableConfig, PteTrait, nr_subpage_per_huge};
use crate::{
mm::{
FrameAllocOptions, HasPaddr, Infallible, PagingConstsTrait, PagingLevel, VmReader,
frame::{Frame, FrameRef, meta::AnyFrameMeta},
paddr_to_vaddr,
page_table::{load_pte, store_pte},
page_table::{PteScalar, load_pte, store_pte},
},
task::atomic_mode::InAtomicMode,
};
@ -69,14 +69,10 @@ impl<C: PageTableConfig> PageTableNode<C> {
/// Allocates a new empty page table node.
pub(super) fn alloc(level: PagingLevel) -> Self {
let meta = PageTablePageMeta::new(level);
let frame = FrameAllocOptions::new()
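// The frame is zeroed below; an all-zero PTE must decode as
// `PteScalar::Absent` on every architecture (see the
// `zeroed_pte_is_absent_pte` test).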
FrameAllocOptions::new()
.zeroed(true)
.alloc_frame_with(meta)
.expect("Failed to allocate a page table node");
// The allocated frame is zeroed. Make sure zero is absent PTE.
debug_assert_eq!(C::E::new_absent().as_usize(), 0);
frame
.expect("Failed to allocate a page table node")
}
/// Activates the page table assuming it is a root page table.
@ -318,20 +314,18 @@ unsafe impl<C: PageTableConfig> AnyFrameMeta for PageTablePageMeta<C> {
for _ in range {
// Non-atomic read is OK because we have mutable access.
let pte = reader.read_once::<C::E>().unwrap();
if pte.is_present() {
let paddr = pte.paddr();
// As a fast path, we can ensure that the type of the child frame
// is `Self` if the PTE points to a child page table. Then we don't
// need to check the vtable for the drop method.
if !pte.is_last(level) {
match pte.to_repr(level) {
PteScalar::PageTable(child_pt_addr, _) => {
// SAFETY: The PTE points to a child page table node. The ownership
// of that node is transferred here and then dropped.
drop(unsafe { Frame::<Self>::from_raw(paddr) });
} else {
drop(unsafe { PageTableNode::<C>::from_raw(child_pt_addr) });
}
PteScalar::Mapped(pa, prop) => {
// SAFETY: The PTE points to a mapped item. The ownership
// of the item is transferred here and then dropped.
drop(unsafe { C::item_from_raw(paddr, level, pte.prop()) });
drop(unsafe { C::item_from_raw(pa, level, prop) });
}
PteScalar::Absent => {}
}
}
}

View File

@ -14,6 +14,7 @@ use crate::{
mod test_utils {
use super::*;
use crate::mm::PrivilegedPageFlags;
/// Creates a new user page table that has mapped a virtual range to a physical frame.
#[track_caller]
@ -111,6 +112,78 @@ mod test_utils {
(paddr, level, prop)
}
}
/// A subset iterator for bitflags.
///
/// A bitflag is a set of boolean options represented as bits in an integer.
///
/// When given a bitflag `full`, it iterates over all subsets of `full` in
/// descending order of their integer values.
pub struct SubsetIter {
full: u8,
cur: u8,
finished: bool,
}
impl SubsetIter {
/// Creates a new subset iterator for the given full bitflag.
pub fn new(full: u8) -> Self {
SubsetIter {
full,
cur: full,
finished: false,
}
}
}
impl Iterator for SubsetIter {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
if self.finished {
return None;
}
let flag = self.cur;
if self.cur == 0 {
self.finished = true;
} else {
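// Subtracting one and masking with `full` gives the largest subset of
// `full` that is smaller than `cur`, so subsets are yielded in
// descending order of their integer values.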
self.cur = (self.cur - 1) & self.full;
}
Some(flag)
}
}
#[ktest]
fn test_subset_iter() {
use alloc::{vec, vec::Vec};
assert_eq!(
SubsetIter::new(0b1011).collect::<Vec<u8>>(),
vec![
0b1011, 0b1010, 0b1001, 0b1000, 0b0011, 0b0010, 0b0001, 0b0000
]
);
}
/// Generates all possible page properties.
pub fn all_page_properties() -> impl Iterator<Item = PageProperty> {
let flag_subsets =
SubsetIter::new(PageFlags::all().bits()).map(|f| PageFlags::from_bits(f).unwrap());
flag_subsets.flat_map(|flags| {
let priv_flag_subsets = SubsetIter::new(PrivilegedPageFlags::all().bits())
.map(|f| PrivilegedPageFlags::from_bits(f).unwrap());
priv_flag_subsets.flat_map(move |priv_flags| {
// We do not support other cache policies yet, so only test these two.
static CACHE_POLICIES: [CachePolicy; 2] =
[CachePolicy::Writeback, CachePolicy::Uncacheable];
CACHE_POLICIES.iter().map(move |&cache| PageProperty {
flags,
cache,
priv_flags,
})
})
})
}
}
mod create_page_table {
@ -162,7 +235,10 @@ mod create_page_table {
let preempt_guard = disable_preempt();
let mut root_node = kernel_pt.root.borrow().lock(&preempt_guard);
for i in shared_range {
assert!(root_node.entry(i).is_node());
assert!(matches!(
root_node.entry(i).to_ref(),
ChildRef::PageTable(_)
));
}
}
}
@ -245,7 +321,7 @@ mod range_checks {
}
mod page_properties {
use super::*;
use super::{test_utils::all_page_properties, *};
use crate::mm::PrivilegedPageFlags;
/// Helper function to map a single page with given properties and verify the properties.
@ -273,45 +349,80 @@ mod page_properties {
#[ktest]
fn map_preserves_page_property() {
struct SubsetIter {
full: u8,
cur: u8,
}
impl SubsetIter {
fn new(full: u8) -> Self {
SubsetIter { full, cur: full }
}
}
impl Iterator for SubsetIter {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
if self.cur == 0 {
return None;
}
let flag = self.cur;
self.cur = (self.cur - 1) & self.full;
Some(flag)
}
for prop in all_page_properties() {
check_map_with_property(prop);
}
}
}
let flag_subsets =
SubsetIter::new(PageFlags::all().bits()).map(|f| PageFlags::from_bits(f).unwrap());
for flags in flag_subsets {
let priv_flag_subsets = SubsetIter::new(PrivilegedPageFlags::all().bits())
.map(|f| PrivilegedPageFlags::from_bits(f).unwrap());
for priv_flags in priv_flag_subsets {
// We do not supporting other cache policies yet. So just test them.
let cache_policies = [CachePolicy::Writeback, CachePolicy::Uncacheable];
for cache in cache_policies {
check_map_with_property(PageProperty {
flags,
cache,
priv_flags,
});
mod arch_pte_impls {
use super::{
test_utils::{SubsetIter, all_page_properties},
*,
};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
mm::{
PageTableFlags,
page_table::{PteScalar, PteTrait},
},
};
#[ktest]
fn zeroed_pte_is_absent_pte() {
let pte = PageTableEntry::new_zeroed();
for level in 1..=PagingConsts::NR_LEVELS {
let repr = pte.to_repr(level);
assert_eq!(repr, PteScalar::Absent);
}
}
#[ktest]
fn cast_frame_pte_preserves_repr() {
for level in 1..=PagingConsts::HIGHEST_TRANSLATION_LEVEL {
for prop in all_page_properties() {
// TODO: Almost all architectures don't support non-readable
// pages. We could opt out of this flag at compile time.
if !prop.flags.contains(PageFlags::R) {
continue;
}
let paddr = 0xff_c000_0000;
let repr = PteScalar::Mapped(paddr, prop);
let pte = PageTableEntry::from_repr(&repr, level);
let parsed_repr = pte.to_repr(level);
assert_eq!(repr, parsed_repr);
}
}
}
#[ktest]
fn cast_pt_pte_preserves_repr() {
for level in 2..=PagingConsts::NR_LEVELS {
let paddr = 0xff_c000_0000;
let pt_flags_iter = SubsetIter::new(PageTableFlags::all().bits())
.map(|f| PageTableFlags::from_bits(f).unwrap());
for pt_flags in pt_flags_iter {
let repr = PteScalar::PageTable(paddr, pt_flags);
let pte = PageTableEntry::from_repr(&repr, level);
let parsed_repr = pte.to_repr(level);
assert_eq!(repr, parsed_repr);
}
}
}
#[ktest]
fn cast_absent_pte_preserves_repr() {
for level in 1..=PagingConsts::NR_LEVELS {
let repr = PteScalar::Absent;
let pte = PageTableEntry::from_repr(&repr, level);
let parsed_repr = pte.to_repr(level);
assert_eq!(repr, parsed_repr);
}
}
}
mod overlapping_mappings {
@ -905,12 +1016,10 @@ mod boot_pt {
fn map_base_page() {
let root_frame = FrameAllocOptions::new().alloc_frame().unwrap();
let root_paddr = root_frame.paddr();
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts>::new(
root_paddr / PagingConsts::BASE_PAGE_SIZE,
);
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts>::new(root_paddr);
let from_virt = 0x1000;
let to_phys = 0x2;
let to_phys = 0x2000;
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
unsafe {
@ -921,7 +1030,7 @@ mod boot_pt {
let root_paddr = boot_pt.root_address();
assert_eq!(
unsafe { page_walk::<KernelPtConfig>(root_paddr, from_virt + 1) },
Some((to_phys * PAGE_SIZE + 1, page_property))
Some((to_phys + 1, page_property))
);
}
@ -930,13 +1039,11 @@ mod boot_pt {
fn map_base_page_already_mapped() {
let root_frame = FrameAllocOptions::new().alloc_frame().unwrap();
let root_paddr = root_frame.paddr();
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts>::new(
root_paddr / PagingConsts::BASE_PAGE_SIZE,
);
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts>::new(root_paddr);
let from_virt = 0x1000;
let to_phys1 = 0x2;
let to_phys2 = 0x3;
let to_phys1 = 0x2000;
let to_phys2 = 0x3000;
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
unsafe {
@ -950,9 +1057,7 @@ mod boot_pt {
fn protect_base_page_unmapped() {
let root_frame = FrameAllocOptions::new().alloc_frame().unwrap();
let root_paddr = root_frame.paddr();
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts>::new(
root_paddr / PagingConsts::BASE_PAGE_SIZE,
);
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts>::new(root_paddr);
let virt_addr = 0x2000;
// Attempts to protect an unmapped page (expected to panic).
@ -965,20 +1070,18 @@ mod boot_pt {
fn map_protect() {
let root_frame = FrameAllocOptions::new().alloc_frame().unwrap();
let root_paddr = root_frame.paddr();
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts>::new(
root_paddr / PagingConsts::BASE_PAGE_SIZE,
);
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts>::new(root_paddr);
let root_paddr = boot_pt.root_address();
// Maps page 1.
let from1 = 0x2000;
let to_phys1 = 0x2;
let to_phys1 = 0x2000;
let prop1 = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
unsafe { boot_pt.map_base_page(from1, to_phys1, prop1) };
assert_eq!(
unsafe { page_walk::<KernelPtConfig>(root_paddr, from1 + 1) },
Some((to_phys1 * PAGE_SIZE + 1, prop1))
Some((to_phys1 + 1, prop1))
);
// Protects page 1.
@ -987,17 +1090,17 @@ mod boot_pt {
PageProperty::new_user(PageFlags::RX, CachePolicy::Writeback);
assert_eq!(
unsafe { page_walk::<KernelPtConfig>(root_paddr, from1 + 1) },
Some((to_phys1 * PAGE_SIZE + 1, expected_prop1_protected))
Some((to_phys1 + 1, expected_prop1_protected))
);
// Maps page 2.
let from2 = 0x3000;
let to_phys2 = 0x3;
let to_phys2 = 0x3000;
let prop2 = PageProperty::new_user(PageFlags::RX, CachePolicy::Uncacheable);
unsafe { boot_pt.map_base_page(from2, to_phys2, prop2) };
assert_eq!(
unsafe { page_walk::<KernelPtConfig>(root_paddr, from2 + 2) },
Some((to_phys2 * PAGE_SIZE + 2, prop2))
Some((to_phys2 + 2, prop2))
);
// Protects page 2.
@ -1006,7 +1109,7 @@ mod boot_pt {
PageProperty::new_user(PageFlags::RW, CachePolicy::Uncacheable);
assert_eq!(
unsafe { page_walk::<KernelPtConfig>(root_paddr, from2 + 2) },
Some((to_phys2 * PAGE_SIZE + 2, expected_prop2_protected))
Some((to_phys2 + 2, expected_prop2_protected))
);
}
}