finish trap handling and execute user-mode program

This commit is contained in:
Yuke Peng 2022-08-25 19:44:58 -07:00
parent c8d405adeb
commit 2caa3d9df5
25 changed files with 1906 additions and 158 deletions

View File

@ -0,0 +1,39 @@
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
};
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct Cell<T>(UnsafeCell<T>);
unsafe impl<T> Sync for Cell<T> {}
impl<T> Cell<T> {
/// The user is responsible for guaranteeing that the inner value is only
/// accessed on a uniprocessor.
#[inline(always)]
pub const fn new(val: T) -> Self {
Self(UnsafeCell::new(val))
}
#[inline(always)]
pub fn get(&self) -> &mut T {
unsafe { &mut *self.0.get() }
}
}
impl<T> Deref for Cell<T> {
type Target = T;
#[inline(always)]
fn deref(&self) -> &Self::Target {
self.get()
}
}
impl<T> DerefMut for Cell<T> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
self.get()
}
}
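For context, here is a minimal usage sketch of this uniprocessor Cell (the COUNTER static and tick function below are hypothetical, not part of this commit). Since get() hands out &mut T from &self, the caller must ensure no other reference to the contents is alive at the same time:
// Hypothetical usage of the Cell defined above (illustrative only).
static COUNTER: Cell<usize> = Cell::new(0);
fn tick() -> usize {
    // Sound only on a uniprocessor, and only while no other reference
    // to COUNTER's contents exists.
    let v = COUNTER.get();
    *v += 1;
    *v
}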

View File

@ -2,7 +2,7 @@
pub const USER_STACK_SIZE: usize = 4096 * 2;
pub const KERNEL_STACK_SIZE: usize = 4096 * 2;
pub const KERNEL_HEAP_SIZE: usize = 0x20_0000;
pub const KERNEL_HEAP_SIZE: usize = 0x1_000_000;
pub const KERNEL_OFFSET: usize = 0xffffff00_00000000;
pub const PHYS_OFFSET: usize = 0xFFFF800000000000;

View File

@ -68,7 +68,7 @@ impl FpRegs {
//let buf = Aligned(unsafe { MaybeUninit::uninit().assume_init() });
//let is_valid = false;
//Self { buf, is_valid }
Self{is_valid:false}
Self { is_valid: false }
// todo!("import aligned")
}

View File

@ -2,12 +2,9 @@
pub mod framebuffer;
mod io_port;
mod irq;
pub use self::io_port::IoPort;
pub use self::irq::{InterruptInformation, IrqCallbackHandle, IrqLine};
pub fn init(framebuffer: &'static mut bootloader::boot_info::FrameBuffer) {
framebuffer::init(framebuffer);
irq::init();
}

View File

@ -9,8 +9,10 @@
#![feature(const_maybe_uninit_zeroed)]
#![feature(alloc_error_handler)]
#![feature(core_intrinsics)]
#![feature(new_uninit)]
extern crate alloc;
pub mod cell;
pub mod config;
pub mod cpu;
pub mod device;
@ -25,23 +27,25 @@ pub mod trap;
pub mod user;
mod util;
pub mod vm;
pub mod x86_64_util;
use core::mem;
pub use self::error::Error;
pub use self::sync::up::UPSafeCell;
use alloc::{boxed::Box, sync::Arc};
use bootloader::{boot_info::MemoryRegionKind, BootInfo};
use device::{InterruptInformation, IrqLine};
use alloc::sync::Arc;
use bootloader::{
boot_info::{FrameBuffer, MemoryRegionKind},
BootInfo,
};
use trap::{IrqLine, TrapFrame};
pub fn init(boot_info: &'static mut BootInfo) {
let siz = boot_info.framebuffer.as_ref().unwrap() as *const FrameBuffer as usize;
device::init(boot_info.framebuffer.as_mut().unwrap());
device::framebuffer::WRITER.lock().as_mut().unwrap().clear();
println!(
"heap_value at {:x}",
boot_info.physical_memory_offset.into_option().unwrap()
);
println!("{:x}", siz);
trap::init();
let mut memory_init = false;
// memory
for region in boot_info.memory_regions.iter() {
@ -68,12 +72,9 @@ pub fn init(boot_info: &'static mut BootInfo) {
}
let a = breakpoint_irq.on_active(breakpoint_handler);
x86_64::instructions::interrupts::int3(); // breakpoint
let heap_value = Box::new(41);
println!("test");
println!("heap_value at {:p}", heap_value);
}
fn breakpoint_handler(interrupt_information: InterruptInformation) {
fn breakpoint_handler(interrupt_information: TrapFrame) {
println!("EXCEPTION: BREAKPOINT\n{:#?}", interrupt_information);
}

View File

@ -56,6 +56,10 @@ impl PhysFrame {
PhysAddr(self.start_pa)
}
pub const fn end_pa(&self) -> PhysAddr {
PhysAddr(self.start_pa + PAGE_SIZE)
}
pub fn alloc() -> Option<Self> {
FRAME_ALLOCATOR
.exclusive_access()

View File

@ -2,18 +2,12 @@ use super::{page_table::PageTable, *};
use crate::prelude::*;
use crate::{
config::PAGE_SIZE,
mm::address::{is_aligned},
mm::address::is_aligned,
vm::{VmFrame, VmFrameVec},
*,
};
use alloc::{
collections::{btree_map::Entry, BTreeMap}};
use alloc::collections::{btree_map::Entry, BTreeMap};
use core::fmt;
use x86_64::registers::control::Cr3Flags;
// use xmas_elf::{program::{SegmentData, Type}, {header, ElfFile}};
pub const USTACK_SIZE: usize = 4096 * 4;
pub const USTACK_TOP: usize = 0x8000_0000_0000;
pub struct MapArea {
/// flags
@ -57,7 +51,7 @@ impl MapArea {
for i in 0..page_size {
let vm_frame = phy_frame_iter.next().unwrap();
map_area.map_with_physical_address(current_va, vm_frame.clone());
current_va+=PAGE_SIZE;
current_va += PAGE_SIZE;
}
map_area
@ -157,7 +151,7 @@ impl MemorySet {
pt.map_area(&area);
Self {
pt: PageTable::new(),
pt: pt,
area: Some(area),
}
}
@ -169,6 +163,11 @@ impl MemorySet {
}
}
pub fn map_area(&mut self, area: MapArea) {
self.pt.map_area(&area);
self.area = Some(area);
}
pub fn unmap(&mut self, va: VirtAddr) -> Result<()> {
if self.area.is_none() {
Err(Error::InvalidArgs)
@ -183,18 +182,6 @@ impl MemorySet {
self.area = None;
}
pub fn activate(&self) {
unsafe {
x86_64::registers::control::Cr3::write(
x86_64::structures::paging::PhysFrame::from_start_address(x86_64::PhysAddr::new(
self.pt.root_pa.0 as u64,
))
.unwrap(),
Cr3Flags::empty(),
);
}
}
pub fn write_bytes(&mut self, offset: usize, data: &[u8]) -> Result<()> {
if self.area.is_none() {
Err(Error::InvalidArgs)
@ -216,6 +203,7 @@ impl MemorySet {
impl Clone for MemorySet {
fn clone(&self) -> Self {
println!("clone memory set");
if self.area.is_none() {
Self::zero()
} else {

View File

@ -1,15 +1,21 @@
use alloc::{vec, vec::Vec};
use super::{memory_set::MapArea, *};
use crate::cell::Cell;
use crate::{
config::{ENTRY_COUNT, KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET},
vm::VmFrame,
*,
};
use alloc::{collections::BTreeMap, vec, vec::Vec};
use core::fmt;
use lazy_static::lazy_static;
static KERNEL_PTE: UPSafeCell<PageTableEntry> = zero();
static PHYS_PTE: UPSafeCell<PageTableEntry> = zero();
static KERNEL_PTE: Cell<PageTableEntry> = zero();
static PHYS_PTE: Cell<PageTableEntry> = zero();
lazy_static! {
pub static ref ALL_MAPPED_PTE: UPSafeCell<BTreeMap<usize, PageTableEntry>> =
unsafe { UPSafeCell::new(BTreeMap::new()) };
}
#[derive(Clone, Copy)]
#[repr(transparent)]
@ -55,14 +61,30 @@ impl PageTable {
pub fn new() -> Self {
let root_frame = VmFrame::alloc_zero().unwrap();
let p4 = table_of(root_frame.start_pa());
p4[p4_index(VirtAddr(KERNEL_OFFSET))] = *KERNEL_PTE.exclusive_access();
p4[p4_index(VirtAddr(PHYS_OFFSET))] = *PHYS_PTE.exclusive_access();
let map_pte = ALL_MAPPED_PTE.exclusive_access();
for (index, pte) in map_pte.iter() {
p4[*index] = *pte;
}
println!("start_pa:{:x}", root_frame.start_pa());
Self {
root_pa: root_frame.start_pa(),
tables: vec![root_frame],
}
}
pub fn print_kernel(&self) {
let p4 = table_of(self.root_pa);
for i in 0..(256) {
let phys = PhysAddr(i << (12 + 27));
let a = p4[p4_index(phys.kvaddr())];
if a.is_present() {
println!("index:{:?},PTE:{:?}", i, a);
}
}
println!("kernel_pte:{:?}", p4[p4_index(VirtAddr(KERNEL_OFFSET))]);
println!("PHYS_PTE:{:?}", p4[p4_index(VirtAddr(PHYS_OFFSET))]);
}
pub fn map(&mut self, va: VirtAddr, pa: PhysAddr, flags: PTFlags) {
let entry = self.get_entry_or_create(va).unwrap();
if !entry.is_unused() {
@ -80,7 +102,6 @@ impl PageTable {
}
pub fn map_area(&mut self, area: &MapArea) {
println!("frame test");
for (va, pa) in area.mapper.iter() {
assert!(pa.start_pa().0 < PHYS_OFFSET);
self.map(*va, pa.start_pa(), area.flags);
@ -179,13 +200,24 @@ fn next_table_or_create<'a>(
}
pub(crate) fn init() {
let (cr3, _) = x86_64::registers::control::Cr3::read();
let cr3 = x86_64_util::get_cr3();
let p4 = table_of(PhysAddr(cr3.start_address().as_u64() as usize));
*KERNEL_PTE.exclusive_access() = p4[p4_index(VirtAddr(KERNEL_OFFSET))];
*PHYS_PTE.exclusive_access() = p4[p4_index(VirtAddr(PHYS_OFFSET))];
println!("kernel_pte:{:?}", p4[p4_index(VirtAddr(KERNEL_OFFSET))]);
println!("PHYS_PTE:{:?}", p4[p4_index(VirtAddr(PHYS_OFFSET))]);
let p4 = table_of(PhysAddr(cr3));
// Cancel mapping in lowest addresses.
p4[0].0 = 0;
// There are existing mappings at indexes 1, 2 and 3, so user space must not use those address ranges.
let mut map_pte = ALL_MAPPED_PTE.exclusive_access();
for i in 0..512 {
if !p4[i].flags().is_empty() {
map_pte.insert(i, p4[i]);
// println!("i:{:x},{:?}",i,p4[i]);
}
}
// For debugging: print how p4[0] is used.
// *KERNEL_PTE.get() = p4[p4_index(VirtAddr(KERNEL_OFFSET))];
// *PHYS_PTE.get() = p4[p4_index(VirtAddr(PHYS_OFFSET))];
// println!("kernel_pte:{:?}", *KERNEL_PTE.get());
// println!("PHYS_PTE:{:?}", *PHYS_PTE.get());
// Cancel mapping in lowest addresses.
// p4[0].0 = 0;

View File

@ -1,5 +1,4 @@
use core::{
cell::{RefCell, RefMut},};
use core::cell::{RefCell, RefMut};
#[derive(Debug)]
/// Wrap a static data structure inside it so that we are

View File

@ -31,7 +31,7 @@ impl WaitQueue {
loop {
if (cond)() {
self.dequeue(&waiter);
return;
break;
}
waiter.wait();
}

View File

@ -1,9 +1,10 @@
use super::{
scheduler::{fetch_task, GLOBAL_SCHEDULER},
task::{context_switch, TaskContext},
Task, scheduler::{fetch_task, GLOBAL_SCHEDULER}, TaskStatus,
Task, TaskStatus,
};
use crate::UPSafeCell;
use alloc:: sync::Arc;
use alloc::sync::Arc;
use lazy_static::*;
pub struct Processor {
@ -45,29 +46,32 @@ pub fn current_task() -> Option<Arc<Task>> {
}
/// Call this function to switch to another task using GLOBAL_SCHEDULER.
///
/// if current task is none, then it will use the default task context
///
///
/// If the current task is none, the default task context is used and control will not return to this function again.
///
/// If the current task's status is Exited, it will not be re-enqueued to the scheduler.
///
///
/// Before the context switch, the processor's current task is set to the next task.
pub fn schedule() {
let next_task = fetch_task().expect("no more task found");
let current_task_option = current_task();
let next_task_cx_ptr = &next_task.inner_exclusive_access().ctx as *const TaskContext;
let current_task: Arc<Task>;
let current_task_cx_ptr = if current_task_option.is_none(){
let current_task_cx_ptr = if current_task_option.is_none() {
PROCESSOR.exclusive_access().get_idle_task_cx_ptr()
}else{
} else {
current_task = current_task_option.unwrap();
if current_task.status() != TaskStatus::Exited{
GLOBAL_SCHEDULER.exclusive_access().enqueue(current_task.clone());
if current_task.status() != TaskStatus::Exited {
GLOBAL_SCHEDULER
.exclusive_access()
.enqueue(current_task.clone());
}
&mut current_task.inner_exclusive_access().ctx as *mut TaskContext
};
// change the current task to the next task
PROCESSOR.exclusive_access().current = Some(next_task.clone());
unsafe {
context_switch(current_task_cx_ptr, next_task_cx_ptr);
}
}
}
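For orientation, a hedged sketch of a cooperative yield built on top of schedule() (yield_now is hypothetical, not part of this commit):
// Hypothetical helper: cooperatively give up the CPU.
// The current task is still Runnable, so schedule() re-enqueues it into
// GLOBAL_SCHEDULER before switching to the task it fetches next.
pub fn yield_now() {
    schedule();
}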

View File

@ -1,11 +1,8 @@
use crate::task::Task;
use crate::{prelude::*, UPSafeCell};
use crate::{prelude::*, println, UPSafeCell};
use lazy_static::lazy_static;
use super::processor::current_task;
use super::task::{context_switch, TaskContext};
lazy_static! {
pub static ref GLOBAL_SCHEDULER: UPSafeCell<GlobalScheduler> =
unsafe { UPSafeCell::new(GlobalScheduler { scheduler: None }) };
@ -33,12 +30,13 @@ impl GlobalScheduler {
/// Dequeue a task using the scheduler.
/// Requires that the scheduler is not none.
pub fn dequeue(&mut self) -> Option<Arc<Task>> {
self.scheduler.take().unwrap().dequeue()
self.scheduler.unwrap().dequeue()
}
/// Enqueue a task using the scheduler.
/// Requires that the scheduler is not none.
pub fn enqueue(&mut self, task: Arc<Task>) {
self.scheduler.take().unwrap().enqueue(task)
println!("{:?}", self.scheduler.is_none());
self.scheduler.unwrap().enqueue(task)
}
}
/// Set the global task scheduler.
@ -55,5 +53,3 @@ pub fn fetch_task() -> Option<Arc<Task>> {
pub fn add_task(task: Arc<Task>) {
GLOBAL_SCHEDULER.exclusive_access().enqueue(task);
}

View File

@ -1,10 +1,10 @@
use core::cell::RefMut;
use core::intrinsics::unreachable;
use core::mem::size_of;
use crate::trap::CalleeRegs;
use crate::mm::PhysFrame;
use crate::trap::{CalleeRegs, SyscallFrame};
use crate::user::UserSpace;
use crate::{prelude::*, UPSafeCell, println};
use crate::{prelude::*, UPSafeCell};
use super::processor::{current_task, schedule};
use super::scheduler::add_task;
@ -20,34 +20,28 @@ pub struct TaskContext {
extern "C" {
pub fn context_switch(cur: *mut TaskContext, nxt: *const TaskContext);
}
/// 8*PAGE_SIZE
#[cfg(debug_assertions)]
pub const TASK_SIZE: usize = 32768;
/// 2*PAGE_SIZE
#[cfg(not(debug_assertions))]
pub const TASK_SIZE: usize = 8192;
#[cfg(debug_assertions)]
#[repr(align(32768))]
struct TaskAlign;
pub struct KernelStack {
frame: PhysFrame,
}
#[cfg(not(debug_assertions))]
#[repr(C, align(8192))]
struct TaskAlign;
pub const KERNEL_STACK_SIZE: usize =
TASK_SIZE - size_of::<Box<dyn Fn()>>() - size_of::<Box<dyn Any + Send + Sync>>() - size_of::<Option<Arc<UserSpace>>>()
- size_of::<UPSafeCell<TaskInner>>() - size_of::<usize>();
impl KernelStack {
pub fn new() -> Self {
Self {
frame: PhysFrame::alloc().expect("out of memory"),
}
}
}
/// A task that executes a function to the end.
pub struct Task {
_align: TaskAlign,
func: Box<dyn Fn() + Send + Sync>,
data: Box<dyn Any + Send + Sync>,
user_space: Option<Arc<UserSpace>>,
task_inner: UPSafeCell<TaskInner>,
exit_code: usize,
kstack: [u8; KERNEL_STACK_SIZE],
/// Kernel stack; note that a SyscallFrame sits at the top of it.
kstack: KernelStack,
}
pub struct TaskInner {
@ -90,8 +84,9 @@ impl Task {
{
/// All tasks enter this function first;
/// it is meant to execute the task_fn stored in the Task.
fn kernel_task_entry(){
let current_task = current_task().expect("no current task, it should have current task in kernel task entry");
fn kernel_task_entry() {
let current_task = current_task()
.expect("no current task, it should have current task in kernel task entry");
current_task.func.call(())
}
let result = Self {
@ -104,19 +99,36 @@ impl Task {
ctx: TaskContext::default(),
})
},
_align: TaskAlign,
exit_code:0,
kstack: [0; KERNEL_STACK_SIZE],
exit_code: 0,
kstack: KernelStack::new(),
};
result.task_inner.exclusive_access().task_status = TaskStatus::Runnable;
result.task_inner.exclusive_access().ctx.rip = kernel_task_entry as usize;
let arc_self = Arc::new(result);
result.task_inner.exclusive_access().ctx.regs.rsp = result.kstack.frame.end_pa().kvaddr().0
as usize
- size_of::<usize>()
- size_of::<SyscallFrame>();
let arc_self = Arc::new(result);
add_task(arc_self.clone());
schedule();
Ok(arc_self)
}
pub fn syscall_frame(&self) -> &mut SyscallFrame {
unsafe {
&mut *(self
.kstack
.frame
.end_pa()
.kvaddr()
.get_mut::<SyscallFrame>() as *mut SyscallFrame)
.sub(1)
}
}
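As a reading aid, a hedged sketch of the kernel-stack layout that Task::new and syscall_frame() agree on (the helper below is illustrative, assuming the 16-usize, 128-byte SyscallFrame from trap/mod.rs):
// Illustrative only: the SyscallFrame occupies the top 128 bytes of the
// kernel stack, and a new task's initial rsp starts one usize below it.
use crate::trap::SyscallFrame;
use core::mem::size_of;
fn initial_rsp(stack_top: usize) -> usize {
    // stack_top corresponds to kstack.frame.end_pa().kvaddr().0
    stack_top - size_of::<usize>() - size_of::<SyscallFrame>()
}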
/// Returns the task status.
pub fn status(&self) -> TaskStatus {
self.task_inner.exclusive_access().task_status
@ -136,14 +148,14 @@ impl Task {
}
}
pub fn exit(&self)->!{
pub fn exit(&self) -> ! {
self.inner_exclusive_access().task_status = TaskStatus::Exited;
schedule();
unreachable!()
}
}
#[derive(Clone, Copy,PartialEq, Eq, PartialOrd, Ord)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
/// The status of a task.
pub enum TaskStatus {
/// The task is runnable.

View File

@ -0,0 +1,28 @@
use super::{irq::IRQ_LIST, *};
#[no_mangle]
pub extern "C" fn syscall_handler(f: &'static mut SyscallFrame) -> isize {
let r = &f.caller;
println!("{:?}", f);
// let ret = syscall::syscall(r.rax, [r.rdi, r.rsi, r.rdx]);
// current_check_signal();
// ret
-1
}
const DIVIDE_BY_ZERO: usize = 0;
const INVALID_OPCODE: usize = 6;
const SEGMENT_NOT_PRESENT: usize = 11;
const STACK_SEGMENT_FAULT: usize = 12;
const GENERAL_PROTECTION_FAULT: usize = 13;
const PAGE_FAULT: usize = 14;
const TIMER: usize = 32;
#[no_mangle]
pub extern "C" fn trap_handler(f: &'static mut TrapFrame) {
let irq_line = IRQ_LIST.get(f.id as usize).unwrap();
let callback_functions = irq_line.callback_list();
for callback_function in callback_functions.iter() {
callback_function.call(f.clone());
}
}

View File

@ -1,20 +1,11 @@
use crate::prelude::*;
use super::TrapFrame;
use lazy_static::lazy_static;
use spin::Mutex;
use x86_64::{
set_general_handler,
structures::idt::{InterruptDescriptorTable, InterruptStackFrame, InterruptStackFrameValue},
};
use spin::{Mutex, MutexGuard};
lazy_static! {
static ref IDT: InterruptDescriptorTable = {
let mut idt = InterruptDescriptorTable::new();
set_general_handler!(&mut idt, my_general_hander);
idt
};
}
lazy_static! {
static ref IRQ_LIST: Vec<IrqLine> = {
pub static ref IRQ_LIST: Vec<IrqLine> = {
let mut list: Vec<IrqLine> = Vec::new();
for i in 0..256 {
list.push(IrqLine {
@ -30,21 +21,6 @@ lazy_static! {
static ref ID_ALLOCATOR: Mutex<RecycleAllocator> = Mutex::new(RecycleAllocator::new());
}
pub fn init() {
IDT.load();
}
fn my_general_hander(stack_frame: InterruptStackFrame, index: u8, error_code: Option<u64>) {
let irq_line = IRQ_LIST.get(index as usize).unwrap();
let callback_functions = irq_line.callback_list.lock();
for callback_function in callback_functions.iter() {
callback_function.function.call((InterruptInformation {
interrupt_stack_frame: *stack_frame,
error_code,
},));
}
}
struct RecycleAllocator {
current: usize,
recycled: Vec<usize>,
@ -78,23 +54,23 @@ impl RecycleAllocator {
}
}
struct CallbackElement {
function: Box<dyn Fn(InterruptInformation) + Send + Sync + 'static>,
pub struct CallbackElement {
function: Box<dyn Fn(TrapFrame) + Send + Sync + 'static>,
id: usize,
}
impl CallbackElement {
pub fn call(&self, element: TrapFrame) {
self.function.call((element,));
}
}
/// An interrupt request (IRQ) line.
pub struct IrqLine {
irq_num: u8,
callback_list: Mutex<Vec<CallbackElement>>,
}
#[derive(Debug)]
pub struct InterruptInformation {
pub interrupt_stack_frame: InterruptStackFrameValue,
pub error_code: Option<u64>,
}
impl IrqLine {
/// Acquire an interrupt request line.
///
@ -111,6 +87,10 @@ impl IrqLine {
self.irq_num
}
pub fn callback_list(&self) -> MutexGuard<'_, alloc::vec::Vec<CallbackElement>> {
self.callback_list.lock()
}
/// Register a callback that will be invoked when the IRQ is active.
///
/// A handle to the callback is returned. Dropping the handle
@ -119,7 +99,7 @@ impl IrqLine {
/// For each IRQ line, multiple callbacks may be registered.
pub fn on_active<F>(&self, callback: F) -> IrqCallbackHandle
where
F: Fn(InterruptInformation) + Sync + Send + 'static,
F: Fn(TrapFrame) + Sync + Send + 'static,
{
let allocate_id = ID_ALLOCATOR.lock().alloc();
self.callback_list.lock().push(CallbackElement {

View File

@ -1,3 +1,14 @@
mod handler;
mod irq;
pub use self::irq::{IrqCallbackHandle, IrqLine};
use core::mem::size_of_val;
use crate::{x86_64_util::*, *};
core::arch::global_asm!(include_str!("trap.S"));
core::arch::global_asm!(include_str!("vector.S"));
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct CallerRegs {
@ -44,3 +55,74 @@ pub struct TrapFrame {
pub rsp: usize,
pub ss: usize,
}
const TSS_SIZE: usize = 104;
extern "C" {
/// TSS
static TSS: [u8; TSS_SIZE];
/// All interrupt vectors push an id and then jump to trap_entry.
static __vectors: [usize; 256];
fn syscall_entry();
pub fn syscall_return(f: &SyscallFrame) -> !;
}
pub fn init() {
static mut GDT: [usize; 7] = [
0,
0x00209800_00000000, // KCODE, EXECUTABLE | USER_SEGMENT | PRESENT | LONG_MODE
0x00009200_00000000, // KDATA, DATA_WRITABLE | USER_SEGMENT | PRESENT
0x0000F200_00000000, // UDATA, DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT
0x0020F800_00000000, // UCODE, EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT | LONG_MODE
0,
0, // TSS, filled in runtime
];
let ptr = unsafe { TSS.as_ptr() as usize };
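// Build the 64-bit TSS descriptor by hand: bit 47 marks it present, type
// 0b1001 is an available 64-bit TSS, the limit is TSS_SIZE - 1, and the base
// address is split across bits 16..40 and 56..64 of the low word, with its
// upper 32 bits in the high word.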
let low = (1 << 47)
| 0b1001 << 40
| (TSS_SIZE - 1)
| ((ptr & ((1 << 24) - 1)) << 16)
| (((ptr >> 24) & ((1 << 8) - 1)) << 56);
let high = ptr >> 32;
unsafe {
GDT[5] = low;
GDT[6] = high;
lgdt(&DescriptorTablePointer {
limit: size_of_val(&GDT) as u16 - 1,
base: GDT.as_ptr() as _,
});
}
x86_64_util::set_cs((1 << 3) | x86_64_util::RING0);
x86_64_util::set_ss((2 << 3) | x86_64_util::RING0);
load_tss((5 << 3) | RING0);
set_msr(EFER_MSR, get_msr(EFER_MSR) | 1); // enable system call extensions
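// STAR[47:32] = 0x08: `syscall` loads CS = 0x08 (KCODE) and SS = 0x10 (KDATA).
// STAR[63:48] = 0x10: `sysretq` loads CS = 0x10 + 16 = 0x20 (UCODE) and
// SS = 0x10 + 8 = 0x18 (UDATA).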
set_msr(STAR_MSR, (2 << 3 << 48) | (1 << 3 << 32));
set_msr(LSTAR_MSR, syscall_entry as _);
set_msr(SFMASK_MSR, 0x47700); // TF|DF|IF|IOPL|AC|NT
#[repr(C, align(16))]
struct IDT {
entries: [[usize; 2]; 256],
}
static mut IDT: IDT = zero();
let cs = (1 << 3) | x86_64_util::RING0 as usize;
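// Build the 64-bit interrupt gates by hand: the handler address is split
// across bits 0..16 and 48..64 of the low word plus the whole high word;
// 0b1000_1110 marks a present, DPL-0 interrupt gate, and `cs` selects the
// kernel code segment.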
for i in 0..256 {
let p = unsafe { __vectors[i] };
let low = (((p >> 16) & 0xFFFF) << 48)
| (0b1000_1110_0000_0000 << 32)
| (cs << 16)
| (p & 0xFFFF);
let high = p >> 32;
unsafe {
IDT.entries[i] = [low, high];
}
}
unsafe {
lidt(&DescriptorTablePointer {
limit: size_of_val(&IDT) as u16 - 1,
base: &IDT as *const _ as _,
})
}
}

View File

@ -0,0 +1,96 @@
.data
.align 4
TSS:
.space 104
.text
.macro save
push r11
push r10
push r9
push r8
push rdi
push rsi
push rdx
push rcx
push rax
.endm
.macro restore
pop rax
pop rcx
pop rdx
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
.endm
.global __trap_entry
__trap_entry:
# save caller-saved registers to complete the TrapFrame
save
# call trap_handler with rsp (the TrapFrame pointer) in rdi
mov rdi, rsp
call trap_handler
mov rax, [rsp + 96] # 96 = offsetof(TrapFrame, cs)
and rax, 0x3
jz __from_kernel
lea rax, [rsp + 128] # prepare new TSS.sp0, 128 = sizeof(TrapFrame)
mov [TSS + rip + 4], rax
__from_kernel:
restore
add rsp, 16 # skip TrapFrame.err and id
iretq
.global syscall_entry
syscall_entry:
# the syscall instruction does:
# - load cs, ss from STAR MSR
# - r11 <- rflags, mask rflags from RFMASK MSR
# - rcx <- rip, load rip from LSTAR MSR
# temporarily store user rsp into TSS.sp0 and load kernel rsp from it.
xchg rsp, [TSS + rip + 4]
push r15
push r14
push r13
push r12
push rbp
push rbx
push [TSS + rip + 4] # store user rsp into SyscallFrame.rsp
save
mov rdi, rsp
call syscall_handler
mov [rsp], rax # CallerRegs.rax is at offset 0
jmp __syscall_return
.global syscall_return
syscall_return: # (SyscallFrame *)
mov rsp, rdi
__syscall_return:
lea rax, [rsp + 128] # prepare new TSS.sp0, 128 = sizeof(SyscallFrame)
# record the ring-0 stack in the TSS
mov [TSS + rip + 4], rax
restore
mov rbx, [rsp + 8]
mov rbp, [rsp + 16]
mov r12, [rsp + 24]
mov r13, [rsp + 32]
mov r14, [rsp + 40]
mov r15, [rsp + 48]
mov rsp, [rsp + 0]
sysretq
.global switch_to_user_space
switch_to_user_space: # (cpu_context: *CpuContext, sf: *SyscallFrame)
# mov rflag, [rdi+136]
mov rdi, rsi
jmp syscall_return

File diff suppressed because it is too large

View File

@ -1,9 +1,16 @@
//! User space.
use crate::println;
use crate::cpu::CpuContext;
use crate::prelude::*;
use crate::task::Task;
use crate::trap::SyscallFrame;
use crate::vm::VmSpace;
use crate::{prelude::*};
extern "C" {
fn switch_to_user_space(cpu_context: &CpuContext, syscall_frame: &SyscallFrame);
}
/// A user space.
///
@ -77,6 +84,13 @@ pub struct UserMode<'a> {
impl<'a> !Send for UserMode<'a> {}
impl<'a> UserMode<'a> {
pub fn new(user_space: &'a Arc<UserSpace>) -> Self {
Self {
current: Task::current(),
user_space,
}
}
/// Starts executing in the user mode.
///
/// The method returns for one of three possible reasons indicated by `UserEvent`.
@ -87,7 +101,13 @@ impl<'a> UserMode<'a> {
/// After handling the user event and updating the user-mode CPU context,
/// this method can be invoked again to go back to the user space.
pub fn execute(&mut self) -> UserEvent {
todo!()
self.user_space.vm_space().activate();
self.current.syscall_frame().caller.rcx = self.user_space.cpu_ctx.gp_regs.rip as usize;
println!("{:?}", self.current.syscall_frame());
unsafe {
switch_to_user_space(&self.user_space.cpu_ctx, self.current.syscall_frame());
}
UserEvent::Syscall
}
/// Returns an immutable reference to the user-mode CPU context.
@ -101,6 +121,7 @@ impl<'a> UserMode<'a> {
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
/// A user event is what brings control of the CPU back from
/// the user space to the kernel space.
///

View File

@ -1,5 +1,5 @@
use crate::config::PAGE_SIZE;
use crate::{UPSafeCell, println};
use crate::{x86_64_util, UPSafeCell};
use bitflags::bitflags;
use core::ops::Range;
@ -34,7 +34,7 @@ impl VmSpace {
}
pub fn activate(&self) {
self.memory_set.exclusive_access().activate();
x86_64_util::set_cr3(self.memory_set.exclusive_access().pt.root_pa.0);
}
/// Maps some physical memory pages into the VM space according to the given
@ -54,15 +54,12 @@ impl VmSpace {
if options.addr.is_none() {
return Err(Error::InvalidArgs);
}
self.memory_set
.exclusive_access()
.pt
.map_area(&mut MapArea::new(
VirtAddr(options.addr.unwrap()),
frames.len()*PAGE_SIZE,
flags,
frames,
));
self.memory_set.exclusive_access().map_area(MapArea::new(
VirtAddr(options.addr.unwrap()),
frames.len() * PAGE_SIZE,
flags,
frames,
));
Ok(options.addr.unwrap())
}
@ -192,5 +189,11 @@ bitflags! {
const RX = Self::R.bits | Self::X.bits;
/// Readable + writable + executable.
const RWX = Self::R.bits | Self::W.bits | Self::X.bits;
/// Readable + writable + user.
const RWU = Self::R.bits | Self::W.bits | Self::U.bits;
/// Readable + executable + user.
const RXU = Self::R.bits | Self::X.bits | Self::U.bits;
/// Readable + writable + executable + user.
const RWXU = Self::R.bits | Self::W.bits | Self::X.bits | Self::U.bits;
}
}

View File

@ -0,0 +1,185 @@
//! Utilities for x86_64; this module will be renamed to x86_64 once the dependency on the x86_64 crate is no longer necessary.
use core::arch::asm;
#[inline(always)]
pub fn read_rsp() -> usize {
let val: usize;
unsafe {
asm!("mov {}, rsp", out(reg) val);
}
val
}
#[inline(always)]
pub fn in8(port: u16) -> u8 {
// ::x86_64::instructions::port::Port::read()
let val: u8;
unsafe {
asm!("in al, dx", out("al") val, in("dx") port, options(nomem, nostack, preserves_flags));
}
val
}
#[inline(always)]
pub fn in16(port: u16) -> u16 {
let val: u16;
unsafe {
asm!("in ax, dx", out("ax") val, in("dx") port, options(nomem, nostack, preserves_flags));
}
val
}
#[inline(always)]
pub fn in32(port: u16) -> u32 {
let val: u32;
unsafe {
asm!("in eax, dx", out("eax") val, in("dx") port, options(nomem, nostack, preserves_flags));
}
val
}
#[inline(always)]
pub fn out8(port: u16, val: u8) {
unsafe {
asm!("out dx, al", in("dx") port, in("al") val, options(nomem, nostack, preserves_flags));
}
}
#[inline(always)]
pub fn out16(port: u16, val: u16) {
unsafe {
asm!("out dx, ax", in("dx") port, in("ax") val, options(nomem, nostack, preserves_flags));
}
}
#[inline(always)]
pub fn out32(port: u16, val: u32) {
unsafe {
asm!("out dx, eax", in("dx") port, in("eax") val, options(nomem, nostack, preserves_flags));
}
}
#[inline(always)]
pub fn disable_interrupts() {
unsafe {
asm!("cli", options(nomem, nostack));
}
}
#[inline(always)]
pub fn enable_interrupts_and_hlt() {
unsafe {
asm!("sti; hlt", options(nomem, nostack));
}
}
pub const RING0: u16 = 0;
pub const RING3: u16 = 3;
pub const RFLAGS_IF: usize = 1 << 9;
#[inline(always)]
pub fn get_msr(id: u32) -> usize {
let (high, low): (u32, u32);
unsafe {
asm!("rdmsr", in("ecx") id, out("eax") low, out("edx") high, options(nomem, nostack, preserves_flags));
}
((high as usize) << 32) | (low as usize)
}
#[inline(always)]
pub fn set_msr(id: u32, val: usize) {
let low = val as u32;
let high = (val >> 32) as u32;
unsafe {
asm!("wrmsr", in("ecx") id, in("eax") low, in("edx") high, options(nostack, preserves_flags));
}
}
pub const EFER_MSR: u32 = 0xC000_0080;
pub const STAR_MSR: u32 = 0xC000_0081;
pub const LSTAR_MSR: u32 = 0xC000_0082;
pub const SFMASK_MSR: u32 = 0xC000_0084;
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
pub struct DescriptorTablePointer {
/// Size of the DT.
pub limit: u16,
/// Pointer to the memory region containing the DT.
pub base: usize,
}
/// Load a GDT.
#[inline(always)]
pub fn lgdt(gdt: &DescriptorTablePointer) {
unsafe {
asm!("lgdt [{}]", in(reg) gdt, options(readonly, nostack, preserves_flags));
}
}
/// Load an IDT.
#[inline(always)]
pub fn lidt(idt: &DescriptorTablePointer) {
unsafe {
asm!("lidt [{}]", in(reg) idt, options(readonly, nostack, preserves_flags));
}
}
/// Load the task state register using the `ltr` instruction.
#[inline(always)]
pub fn load_tss(sel: u16) {
unsafe {
asm!("ltr {0:x}", in(reg) sel, options(nomem, nostack, preserves_flags));
}
}
#[inline(always)]
pub fn set_cs(sel: u16) {
unsafe {
asm!(
"push {sel}",
"lea {tmp}, [1f + rip]",
"push {tmp}",
"retfq",
"1:",
sel = in(reg) sel as usize,
tmp = lateout(reg) _,
options(preserves_flags),
);
}
}
#[inline(always)]
pub fn set_ss(sel: u16) {
unsafe {
asm!("mov ss, {0:x}", in(reg) sel, options(nostack, preserves_flags));
}
}
#[inline(always)]
pub fn get_cr3() -> usize {
let val: usize;
unsafe {
asm!("mov {}, cr3", out(reg) val, options(nomem, nostack, preserves_flags));
}
// Mask top bits and flags.
val & 0x_000f_ffff_ffff_f000
}
#[inline(always)]
pub fn get_cr3_raw() -> usize {
let val: usize;
unsafe {
asm!("mov {}, cr3", out(reg) val, options(nomem, nostack, preserves_flags));
}
// Return the raw CR3 value, without masking the flag bits.
val
}
#[inline(always)]
pub fn set_cr3(pa: usize) {
unsafe {
asm!("mov cr3, {}", in(reg) pa, options(nostack, preserves_flags));
}
}
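For orientation, a hedged usage sketch of the port helpers above (the UART constants are illustrative, not part of this commit):
// Hypothetical example: write one byte to a 16550 UART via in8/out8.
const COM1_BASE: u16 = 0x3F8; // conventional COM1 base port
const LSR_OFFSET: u16 = 5; // line status register
pub fn serial_putc(byte: u8) {
    // Busy-wait until the transmit holding register is empty (LSR bit 5).
    while in8(COM1_BASE + LSR_OFFSET) & 0x20 == 0 {}
    out8(COM1_BASE, byte);
}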

View File

@ -147,7 +147,7 @@ impl<'a> ElfLoadInfo<'a> {
/// Returns the permissions of the ELF pages.
/// FIXME: Set the correct permission bit of user pages.
fn perm() -> VmPerm {
VmPerm::RX
VmPerm::RXU
}
pub fn entry_point(&self) -> u64 {

View File

@ -38,6 +38,6 @@ impl UserStack {
}
pub const fn perm() -> VmPerm {
VmPerm::RW
VmPerm::RWU
}
}

View File

@ -1,9 +1,10 @@
use alloc::sync::Arc;
use kxos_frame::{
cpu::CpuContext,
println,
task::Task,
user::{UserEvent, UserSpace},
vm::VmSpace, println,
user::{UserEvent, UserMode, UserSpace},
vm::VmSpace,
};
use crate::{memory::load_elf_to_vm_space, syscall::syscall_handler};
@ -23,9 +24,10 @@ pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc<Task> {
fn user_task_entry() {
let cur = Task::current();
let user_space = cur.user_space().expect("user task should have user space");
let mut user_mode = user_space.user_mode();
let mut user_mode = UserMode::new(user_space);
loop {
let user_event = user_mode.execute();
println!("get user event:{:?}", user_event);
let context = user_mode.context_mut();
if let HandlerResult::Exit = handle_user_event(user_event, context) {
// FIXME: How to set task status? How to set exit code of process?

View File

@ -29,5 +29,6 @@ fn kernel_main(boot_info: &'static mut BootInfo) -> ! {
#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
println!("[panic]:{:?}", _info);
loop {}
}