Add the mm section of LoongArch in OSTD

This commit is contained in:
王英泰 2025-07-08 17:17:08 +08:00 committed by Tate, Hongliang Tian
parent d3538ec6df
commit e4db73e1a0
4 changed files with 337 additions and 1 deletion

View File

@@ -0,0 +1,314 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::fmt;
use core::{arch::asm, ops::Range};
use crate::{
mm::{
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableEntryTrait,
Paddr, PagingConstsTrait, PagingLevel, PodOnce, Vaddr, PAGE_SIZE,
},
Pod,
};
pub(crate) const NR_ENTRIES_PER_PAGE: usize = 512;
#[derive(Clone, Debug, Default)]
pub struct PagingConsts {}
impl PagingConstsTrait for PagingConsts {
const BASE_PAGE_SIZE: usize = 4096;
const NR_LEVELS: PagingLevel = 4;
const ADDRESS_WIDTH: usize = 48;
const VA_SIGN_EXT: bool = true;
// TODO: Support huge page
const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 1;
const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
}
bitflags::bitflags! {
#[derive(Pod)]
#[repr(C)]
/// Possible flags for a page table entry.
pub struct PageTableFlags: usize {
/// Specifies whether the mapped frame is valid.
const VALID = 1 << 0;
/// Whether the memory area represented by this entry has been modified.
const DIRTY = 1 << 1;
/// Privilege level corresponding to the page table entry.
/// When `RPLV` = 0, the page table entry can be accessed by any program
/// whose privilege level is not lower than `PLV`;
/// when `RPLV` = 1, the page table entry can only be accessed by programs
/// whose privilege level is equal to `PLV`.
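/// `PLVL` and `PLVH` are the low and high bits of the two-bit `PLV` field;
/// setting both encodes `PLV` = 3, i.e. the entry is accessible from user mode (see `is_user` below).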
const PLVL = 1 << 2;
const PLVH = 1 << 3;
/// Controls the memory access type of accesses that fall in the address
/// range mapped by this page table entry.
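/// `MATL` and `MATH` are the low and high bits of the two-bit `MAT` field:
/// 0 = strongly-ordered uncached, 1 = coherent cached, 2 = weakly-ordered uncached.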
const MATL = 1 << 4;
const MATH = 1 << 5;
/// If this entry is a basic page table entry, it is `GLOBAL`,
/// which means that the mapping is present in all address spaces,
/// so it isn't flushed from the TLB on an address space switch.
/// If this entry is a huge page table entry, it is `HUGE`,
/// which means that the memory area represented by this entry is
/// a huge page.
const GLOBAL_OR_HUGE = 1 << 6;
/// Specifies whether the mapped frame or page table is loaded in memory.
/// This flag is not written into the TLB.
const PRESENT = 1 << 7;
/// Controls whether writes to the mapped frames are allowed.
/// This flag is not written into the TLB.
const WRITABLE = 1 << 8;
// Whether this entry is a basic page table entry.
const IS_BASIC = 1 << 9;
// First bit ignored by MMU.
const RSV1 = 1 << 10;
// Second bit ignored by MMU.
const RSV2 = 1 << 11;
/// If this entry is a huge page table entry, it is `GLOBAL`.
const GLOBAL_IN_HUGE = 1 << 12;
/// Controls whether reads from the mapped frames are disallowed.
const NOT_READABLE = 1 << 61;
/// Controls whether executing code in the mapped frames is disallowed.
const NOT_EXECUTABLE = 1 << 62;
/// Whether the page table entry can only be accessed at exactly the privilege level given by the `PLV` field.
const RPLV = 1 << 63;
}
}
pub(crate) fn tlb_flush_addr(vaddr: Vaddr) {
unsafe {
asm!(
"invtlb 0, $zero, {}",
in(reg) vaddr
);
}
}
pub(crate) fn tlb_flush_addr_range(range: &Range<Vaddr>) {
for vaddr in range.clone().step_by(PAGE_SIZE) {
tlb_flush_addr(vaddr);
}
}
pub(crate) fn tlb_flush_all_excluding_global() {
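// `invtlb` op 0x3 invalidates all TLB entries whose `G` bit is 0, i.e. non-global entries.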
unsafe {
asm!("invtlb 3, $zero, $zero");
}
}
pub(crate) fn tlb_flush_all_including_global() {
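// `invtlb` op 0x0 invalidates all TLB entries, including global ones.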
unsafe {
asm!("invtlb 0, $zero, $zero");
}
}
/// Activates the given level 4 page table.
///
/// "pgdl" or "pgdh" register doesn't have a field that encodes the cache policy,
/// so `_root_pt_cache` is ignored.
///
/// # Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
pub unsafe fn activate_page_table(root_paddr: Paddr, _root_pt_cache: CachePolicy) {
assert!(root_paddr % PagingConsts::BASE_PAGE_SIZE == 0);
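// Point both the low-half (`pgdl`) and high-half (`pgdh`) page directory bases at the same root,
// so user and kernel space share a single page table (see `current_page_table_paddr`).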
loongArch64::register::pgdl::set_base(root_paddr);
loongArch64::register::pgdh::set_base(root_paddr);
}
pub fn current_page_table_paddr() -> Paddr {
let pgdl = loongArch64::register::pgdl::read().raw();
let pgdh = loongArch64::register::pgdh::read().raw();
assert_eq!(
pgdl, pgdh,
"Only support to share the same page table for both user and kernel space"
);
pgdl
}
#[derive(Clone, Copy, Pod, Default)]
#[repr(C)]
pub struct PageTableEntry(usize);
impl PageTableEntry {
const PHYS_ADDR_MASK: usize = 0x0000_FFFF_FFFF_F000;
fn is_user(&self) -> bool {
self.0 & PageTableFlags::PLVL.bits() != 0 && self.0 & PageTableFlags::PLVH.bits() != 0
}
fn is_huge(&self) -> bool {
if self.0 & PageTableFlags::IS_BASIC.bits() != 0 {
false
} else {
self.0 & PageTableFlags::GLOBAL_OR_HUGE.bits() != 0
}
}
fn is_global(&self) -> bool {
if self.0 & PageTableFlags::IS_BASIC.bits() != 0 {
self.0 & PageTableFlags::GLOBAL_OR_HUGE.bits() != 0
} else {
self.0 & PageTableFlags::GLOBAL_IN_HUGE.bits() != 0
}
}
}
/// Extracts a bit flag from `val` at the bit position of `from` and relocates it to the bit position of `to`.
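/// Both `$from` and `$to` are expected to be single-bit flags; `ilog2` yields the bit position
/// of such a flag, so the expression masks the source bit and moves it to the target position.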
macro_rules! parse_flags {
($val:expr, $from:expr, $to:expr) => {
($val as usize & $from.bits() as usize) >> $from.bits().ilog2() << $to.bits().ilog2()
};
}
impl PodOnce for PageTableEntry {}
impl PageTableEntryTrait for PageTableEntry {
fn is_present(&self) -> bool {
self.0 & PageTableFlags::VALID.bits() != 0
}
fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
let flags = if level == 1 {
PageTableFlags::IS_BASIC.bits()
} else {
PageTableFlags::GLOBAL_OR_HUGE.bits()
};
let mut pte = Self(paddr & Self::PHYS_ADDR_MASK);
pte.set_prop(prop);
let pte = pte.0 | flags;
Self(pte)
}
fn new_pt(paddr: Paddr) -> Self {
Self(paddr & Self::PHYS_ADDR_MASK | PageTableFlags::VALID.bits())
}
fn paddr(&self) -> Paddr {
if self.is_huge() {
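// In a huge-page PTE, bit 12 holds the `G` flag (`GLOBAL_IN_HUGE`) rather than part of the
// physical address, so it is masked out before extracting the PA.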
let paddr =
(self.0 & Self::PHYS_ADDR_MASK & !PageTableFlags::GLOBAL_IN_HUGE.bits()) >> 12;
paddr << 12
} else {
let ppn = (self.0 & Self::PHYS_ADDR_MASK) >> 12;
ppn << 12
}
}
fn prop(&self) -> PageProperty {
let flags = (parse_flags!(!(self.0), PageTableFlags::NOT_READABLE, PageFlags::R))
| (parse_flags!(self.0, PageTableFlags::WRITABLE, PageFlags::W))
| (parse_flags!(!(self.0), PageTableFlags::NOT_EXECUTABLE, PageFlags::X))
// TODO: How to get the accessed bit in loongarch?
| (parse_flags!(self.0, PageTableFlags::PRESENT, PageFlags::ACCESSED))
| (parse_flags!(self.0, PageTableFlags::DIRTY, PageFlags::DIRTY))
| (parse_flags!(self.0, PageTableFlags::RSV1, PageFlags::AVAIL1))
| (parse_flags!(self.0, PageTableFlags::RSV2, PageFlags::AVAIL2));
let mut priv_flags = PrivFlags::empty().bits();
if self.is_user() {
priv_flags |= PrivFlags::USER.bits();
}
if self.is_global() {
priv_flags |= PrivFlags::GLOBAL.bits();
}
let cache = if self.0 & PageTableFlags::MATL.bits() != 0 {
CachePolicy::Writeback
} else if self.0 & PageTableFlags::MATH.bits() != 0 {
CachePolicy::WriteCombining
} else {
CachePolicy::Uncacheable
};
PageProperty {
flags: PageFlags::from_bits(flags as u8).unwrap(),
cache,
priv_flags: PrivFlags::from_bits(priv_flags as u8).unwrap(),
}
}
fn set_prop(&mut self, prop: PageProperty) {
let mut flags = PageTableFlags::VALID.bits()
// FIXME: To avoid the PageModifyFault exception,
// we set the DIRTY bit to 1 all the time.
| PageTableFlags::DIRTY.bits()
| parse_flags!(
!prop.flags.bits(),
PageFlags::R,
PageTableFlags::NOT_READABLE
)
| parse_flags!(prop.flags.bits(), PageFlags::W, PageTableFlags::WRITABLE)
| parse_flags!(
!prop.flags.bits(),
PageFlags::X,
PageTableFlags::NOT_EXECUTABLE
)
| parse_flags!(prop.flags.bits(), PageFlags::DIRTY, PageTableFlags::DIRTY)
// TODO: How to get the accessed bit in loongarch?
| parse_flags!(prop.flags.bits(), PageFlags::ACCESSED, PageTableFlags::PRESENT)
| parse_flags!(prop.flags.bits(), PageFlags::AVAIL1, PageTableFlags::RSV1)
| parse_flags!(prop.flags.bits(), PageFlags::AVAIL2, PageTableFlags::RSV2);
if prop.priv_flags.contains(PrivFlags::USER) {
flags |= PageTableFlags::PLVL.bits();
flags |= PageTableFlags::PLVH.bits();
}
if prop.priv_flags.contains(PrivFlags::GLOBAL) {
if self.is_huge() {
flags |= PageTableFlags::GLOBAL_IN_HUGE.bits();
} else {
flags |= PageTableFlags::GLOBAL_OR_HUGE.bits();
}
}
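// Encode the cache policy into the two-bit `MAT` field: Writeback -> 1 (coherent cached),
// Uncacheable -> 0 (strongly-ordered uncached), WriteCombining -> 2 (weakly-ordered uncached).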
match prop.cache {
CachePolicy::Writeback => {
flags |= PageTableFlags::MATL.bits();
}
CachePolicy::Uncacheable => (),
CachePolicy::WriteCombining => {
flags |= PageTableFlags::MATH.bits();
}
_ => panic!("unsupported cache policy"),
}
self.0 = (self.0 & Self::PHYS_ADDR_MASK) | flags;
}
fn is_last(&self, level: PagingLevel) -> bool {
level == 1 || self.is_huge()
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("raw", &format_args!("{:#x}", self.0))
.field("paddr", &format_args!("{:#x}", self.paddr()))
.field("present", &self.is_present())
.field(
"flags",
&PageTableFlags::from_bits_truncate(self.0 & !Self::PHYS_ADDR_MASK),
)
.field("prop", &self.prop())
.finish()
}
}
pub(in crate::arch) fn paddr_to_daddr(pa: Paddr) -> usize {
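// Assumption: device MMIO lives in the high direct-mapped window based at this address
// (presumably a DMW configured for uncached access), so the translation is a plain offset addition.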
const DEVICE_LINEAR_MAPPING_BASE_VADDR: usize = 0x8000_0000_0000_0000;
pa + DEVICE_LINEAR_MAPPING_BASE_VADDR
}
pub(crate) unsafe fn __memcpy_fallible(dst: *mut u8, src: *const u8, size: usize) -> usize {
// TODO: implement fallible
unsafe { core::ptr::copy(src, dst, size) };
0
}
pub(crate) unsafe fn __memset_fallible(dst: *mut u8, value: u8, size: usize) -> usize {
// TODO: implement fallible
unsafe { core::ptr::write_bytes(dst, value, size) };
0
}
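The permission handling above relies on two tricks: `parse_flags!` masks a single-bit flag out of a raw value and shifts it to the bit position of another flag, and the read/execute permissions are stored inverted (`NOT_READABLE`, `NOT_EXECUTABLE`), which is why `prop` feeds the negated raw value into the macro for `R` and `X`. The following standalone sketch (not part of the commit; the `R`/`W` destination bits and the `relocate` helper are illustrative stand-ins) reproduces that arithmetic with plain integers:
fn main() {
    // Source bit positions taken from the PTE layout above.
    const NOT_READABLE: usize = 1 << 61;
    const WRITABLE: usize = 1 << 8;
    // Hypothetical destination positions standing in for `PageFlags::R` / `PageFlags::W`.
    const R: usize = 1 << 0;
    const W: usize = 1 << 1;
    // The same expression `parse_flags!` expands to: mask the source bit, move it to the target bit.
    fn relocate(val: usize, from: usize, to: usize) -> usize {
        (val & from) >> from.ilog2() << to.ilog2()
    }
    // A raw PTE that is readable (`NOT_READABLE` clear) and writable (`WRITABLE` set).
    let raw: usize = WRITABLE;
    let flags = relocate(!raw, NOT_READABLE, R) | relocate(raw, WRITABLE, W);
    assert_eq!(flags, R | W);
}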

View File

@@ -467,6 +467,11 @@ pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
max_paddr
);
// In RISC-V, the boot page table has mapped the 512GB memory,
// so we don't need to add temporary linear mapping.
// In LoongArch, DMW0 has mapped the whole memory,
// so we don't need to add temporary linear mapping.
#[cfg(target_arch = "x86_64")]
add_temp_linear_mapping(max_paddr);
let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
@@ -604,6 +609,7 @@ fn mark_unusable_ranges() {
/// We only assume the boot page table to contain a 4 GiB linear mapping. Thus, if the
/// physical memory is huge, we end up depleted of linear virtual memory for
/// initializing metadata.
#[cfg(target_arch = "x86_64")]
fn add_temp_linear_mapping(max_paddr: Paddr) {
const PADDR4G: Paddr = 0x1_0000_0000;

View File

@@ -949,7 +949,11 @@ impl<'a> From<&'a mut [u8]> for VmWriter<'a, Infallible> {
/// `read_once`/`write_once` will lead to a failed compile-time assertion.
pub trait PodOnce: Pod {}
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
#[cfg(any(
target_arch = "x86_64",
target_arch = "riscv64",
target_arch = "loongarch64"
))]
mod pod_once_impls {
use super::PodOnce;

View File

@@ -66,7 +66,10 @@ const ADDR_WIDTH_SHIFT: isize = PagingConsts::ADDRESS_WIDTH as isize - 48;
/// Start of the kernel address space.
/// This is the _lowest_ address of the x86-64's _high_ canonical addresses.
#[cfg(not(target_arch = "loongarch64"))]
pub const KERNEL_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;
#[cfg(target_arch = "loongarch64")]
pub const KERNEL_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;
/// End of the kernel address space (non inclusive).
pub const KERNEL_END_VADDR: Vaddr = 0xffff_ffff_ffff_0000 << ADDR_WIDTH_SHIFT;
@@ -83,6 +86,8 @@ pub fn kernel_loaded_offset() -> usize {
const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000 << ADDR_WIDTH_SHIFT;
#[cfg(target_arch = "riscv64")]
const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_0000_0000 << ADDR_WIDTH_SHIFT;
#[cfg(target_arch = "loongarch64")]
const KERNEL_CODE_BASE_VADDR: usize = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_e100_0000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_e000_0000_0000 << ADDR_WIDTH_SHIFT;
@@ -94,7 +99,10 @@ pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..FRAME_METADATA
/// The base address of the linear mapping of all physical
/// memory in the kernel address space.
#[cfg(not(target_arch = "loongarch64"))]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;
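// On LoongArch, the linear mapping shares the same base as `KERNEL_BASE_VADDR`: physical memory
// is already reachable at this fixed offset through the direct-mapped window (DMW0, see the note below).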
#[cfg(target_arch = "loongarch64")]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;
pub const LINEAR_MAPPING_VADDR_RANGE: Range<Vaddr> = LINEAR_MAPPING_BASE_VADDR..VMALLOC_BASE_VADDR;
/// Convert physical address to virtual address using offset, only available inside `ostd`
@@ -173,6 +181,8 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
let kpt = PageTable::<KernelPtConfig>::new_kernel_page_table();
let preempt_guard = disable_preempt();
// In LoongArch64, we don't need to do linear mappings for the kernel because of DMW0.
#[cfg(not(target_arch = "loongarch64"))]
// Do linear mappings for the kernel.
{
let max_paddr = crate::mm::frame::max_paddr();
@@ -213,6 +223,8 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
}
}
// In LoongArch64, we don't need to do linear mappings for the kernel code because of DMW0.
#[cfg(not(target_arch = "loongarch64"))]
// Map for the kernel code itself.
// TODO: set separated permissions for each segments in the kernel.
{