diff --git a/src/kxos-frame/src/cell.rs b/src/kxos-frame/src/cell.rs new file mode 100644 index 000000000..e6e0895dd --- /dev/null +++ b/src/kxos-frame/src/cell.rs @@ -0,0 +1,39 @@ +use core::{ + cell::UnsafeCell, + ops::{Deref, DerefMut}, +}; + +#[derive(Debug, Default)] +#[repr(transparent)] +pub struct Cell<T>(UnsafeCell<T>); + +unsafe impl<T> Sync for Cell<T> {} + +impl<T> Cell<T> { + /// The user is responsible for guaranteeing that the inner value is only + /// used on a uniprocessor. + #[inline(always)] + pub const fn new(val: T) -> Self { + Self(UnsafeCell::new(val)) + } + + #[inline(always)] + pub fn get(&self) -> &mut T { + unsafe { &mut *self.0.get() } + } +} + +impl<T> Deref for Cell<T> { + type Target = T; + #[inline(always)] + fn deref(&self) -> &Self::Target { + self.get() + } +} + +impl<T> DerefMut for Cell<T> { + #[inline(always)] + fn deref_mut(&mut self) -> &mut Self::Target { + self.get() + } +} diff --git a/src/kxos-frame/src/config.rs b/src/kxos-frame/src/config.rs index 508d1e954..5f1fc51d8 100644 --- a/src/kxos-frame/src/config.rs +++ b/src/kxos-frame/src/config.rs @@ -2,7 +2,7 @@ pub const USER_STACK_SIZE: usize = 4096 * 2; pub const KERNEL_STACK_SIZE: usize = 4096 * 2; -pub const KERNEL_HEAP_SIZE: usize = 0x20_0000; +pub const KERNEL_HEAP_SIZE: usize = 0x1_000_000; pub const KERNEL_OFFSET: usize = 0xffffff00_00000000; pub const PHYS_OFFSET: usize = 0xFFFF800000000000; diff --git a/src/kxos-frame/src/cpu.rs b/src/kxos-frame/src/cpu.rs index af2df4951..3bd90c970 100644 --- a/src/kxos-frame/src/cpu.rs +++ b/src/kxos-frame/src/cpu.rs @@ -68,7 +68,7 @@ impl FpRegs { //let buf = Aligned(unsafe { MaybeUninit::uninit().assume_init() }); //let is_valid = false; //Self { buf, is_valid } - Self{is_valid:false} + Self { is_valid: false } // todo!("import aligned") } diff --git a/src/kxos-frame/src/device/mod.rs b/src/kxos-frame/src/device/mod.rs index dd11517ad..be50c7c47 100644 --- a/src/kxos-frame/src/device/mod.rs +++ b/src/kxos-frame/src/device/mod.rs @@ -2,12 +2,9 @@ pub mod framebuffer; mod io_port; -mod irq; pub use self::io_port::IoPort; -pub use self::irq::{InterruptInformation, IrqCallbackHandle, IrqLine}; pub fn init(framebuffer: &'static mut bootloader::boot_info::FrameBuffer) { framebuffer::init(framebuffer); - irq::init(); } diff --git a/src/kxos-frame/src/lib.rs b/src/kxos-frame/src/lib.rs index a72527a48..0be4ed53e 100644 --- a/src/kxos-frame/src/lib.rs +++ b/src/kxos-frame/src/lib.rs @@ -9,8 +9,10 @@ #![feature(const_maybe_uninit_zeroed)] #![feature(alloc_error_handler)] #![feature(core_intrinsics)] +#![feature(new_uninit)] extern crate alloc; +pub mod cell; pub mod config; pub mod cpu; pub mod device; @@ -25,23 +27,25 @@ pub mod trap; pub mod user; mod util; pub mod vm; +pub mod x86_64_util; use core::mem; pub use self::error::Error; pub use self::sync::up::UPSafeCell; -use alloc::{boxed::Box, sync::Arc}; -use bootloader::{boot_info::MemoryRegionKind, BootInfo}; -use device::{InterruptInformation, IrqLine}; +use alloc::sync::Arc; +use bootloader::{ + boot_info::{FrameBuffer, MemoryRegionKind}, + BootInfo, +}; +use trap::{IrqLine, TrapFrame}; pub fn init(boot_info: &'static mut BootInfo) { + let siz = boot_info.framebuffer.as_ref().unwrap() as *const FrameBuffer as usize; device::init(boot_info.framebuffer.as_mut().unwrap()); device::framebuffer::WRITER.lock().as_mut().unwrap().clear(); - println!( - "heap_value at {:x}", - boot_info.physical_memory_offset.into_option().unwrap() - ); - + println!("{:x}", siz); + trap::init(); let mut memory_init = false; // memory for region in
boot_info.memory_regions.iter() { @@ -68,12 +72,9 @@ pub fn init(boot_info: &'static mut BootInfo) { } let a = breakpoint_irq.on_active(breakpoint_handler); x86_64::instructions::interrupts::int3(); // breakpoint - let heap_value = Box::new(41); - println!("test"); - println!("heap_value at {:p}", heap_value); } -fn breakpoint_handler(interrupt_information: InterruptInformation) { +fn breakpoint_handler(interrupt_information: TrapFrame) { println!("EXCEPTION: BREAKPOINT\n{:#?}", interrupt_information); } diff --git a/src/kxos-frame/src/mm/frame_allocator.rs b/src/kxos-frame/src/mm/frame_allocator.rs index 458eda460..166308100 100644 --- a/src/kxos-frame/src/mm/frame_allocator.rs +++ b/src/kxos-frame/src/mm/frame_allocator.rs @@ -56,6 +56,10 @@ impl PhysFrame { PhysAddr(self.start_pa) } + pub const fn end_pa(&self) -> PhysAddr { + PhysAddr(self.start_pa + PAGE_SIZE) + } + pub fn alloc() -> Option<Self> { FRAME_ALLOCATOR .exclusive_access() diff --git a/src/kxos-frame/src/mm/memory_set.rs b/src/kxos-frame/src/mm/memory_set.rs index 52e5bacf1..47be97e35 100644 --- a/src/kxos-frame/src/mm/memory_set.rs +++ b/src/kxos-frame/src/mm/memory_set.rs @@ -2,18 +2,12 @@ use super::{page_table::PageTable, *}; use crate::prelude::*; use crate::{ config::PAGE_SIZE, - mm::address::{is_aligned}, + mm::address::is_aligned, vm::{VmFrame, VmFrameVec}, *, }; -use alloc::{ - collections::{btree_map::Entry, BTreeMap}}; +use alloc::collections::{btree_map::Entry, BTreeMap}; use core::fmt; -use x86_64::registers::control::Cr3Flags; -// use xmas_elf::{program::{SegmentData, Type}, {header, ElfFile}}; - -pub const USTACK_SIZE: usize = 4096 * 4; -pub const USTACK_TOP: usize = 0x8000_0000_0000; pub struct MapArea { /// flags @@ -57,7 +51,7 @@ impl MapArea { for i in 0..page_size { let vm_frame = phy_frame_iter.next().unwrap(); map_area.map_with_physical_address(current_va, vm_frame.clone()); - current_va+=PAGE_SIZE; + current_va += PAGE_SIZE; } map_area @@ -157,7 +151,7 @@ impl MemorySet { pt.map_area(&area); Self { - pt: PageTable::new(), + pt: pt, area: Some(area), } } @@ -169,6 +163,11 @@ } } + pub fn map_area(&mut self, area: MapArea) { + self.pt.map_area(&area); + self.area = Some(area); + } + pub fn unmap(&mut self, va: VirtAddr) -> Result<()> { if self.area.is_none() { Err(Error::InvalidArgs) @@ -183,18 +182,6 @@ self.area = None; } - pub fn activate(&self) { - unsafe { - x86_64::registers::control::Cr3::write( - x86_64::structures::paging::PhysFrame::from_start_address(x86_64::PhysAddr::new( - self.pt.root_pa.0 as u64, - )) - .unwrap(), - Cr3Flags::empty(), - ); - } - } - pub fn write_bytes(&mut self, offset: usize, data: &[u8]) -> Result<()> { if self.area.is_none() { Err(Error::InvalidArgs) @@ -216,6 +203,7 @@ impl Clone for MemorySet { fn clone(&self) -> Self { + println!("clone memory set"); if self.area.is_none() { Self::zero() } else { diff --git a/src/kxos-frame/src/mm/page_table.rs b/src/kxos-frame/src/mm/page_table.rs index 185e3ecec..9336776f8 100644 --- a/src/kxos-frame/src/mm/page_table.rs +++ b/src/kxos-frame/src/mm/page_table.rs @@ -1,15 +1,21 @@ -use alloc::{vec, vec::Vec}; - use super::{memory_set::MapArea, *}; +use crate::cell::Cell; use crate::{ config::{ENTRY_COUNT, KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET}, vm::VmFrame, *, }; +use alloc::{collections::BTreeMap, vec, vec::Vec}; use core::fmt; +use lazy_static::lazy_static; -static KERNEL_PTE: UPSafeCell<PageTableEntry> = zero(); -static PHYS_PTE: UPSafeCell<PageTableEntry> = zero(); +static KERNEL_PTE: Cell<PageTableEntry> = zero(); +static 
PHYS_PTE: Cell<PageTableEntry> = zero(); + +lazy_static! { + pub static ref ALL_MAPPED_PTE: UPSafeCell<BTreeMap<usize, PageTableEntry>> = + unsafe { UPSafeCell::new(BTreeMap::new()) }; +} #[derive(Clone, Copy)] #[repr(transparent)] @@ -55,14 +61,30 @@ impl PageTable { pub fn new() -> Self { let root_frame = VmFrame::alloc_zero().unwrap(); let p4 = table_of(root_frame.start_pa()); - p4[p4_index(VirtAddr(KERNEL_OFFSET))] = *KERNEL_PTE.exclusive_access(); - p4[p4_index(VirtAddr(PHYS_OFFSET))] = *PHYS_PTE.exclusive_access(); + let map_pte = ALL_MAPPED_PTE.exclusive_access(); + for (index, pte) in map_pte.iter() { + p4[*index] = *pte; + } + println!("start_pa:{:x}", root_frame.start_pa()); Self { root_pa: root_frame.start_pa(), tables: vec![root_frame], } } + pub fn print_kernel(&self) { + let p4 = table_of(self.root_pa); + for i in 0..(256) { + let phys = PhysAddr(i << (12 + 27)); + let a = p4[p4_index(phys.kvaddr())]; + if a.is_present() { + println!("index:{:?},PTE:{:?}", i, a); + } + } + println!("kernel_pte:{:?}", p4[p4_index(VirtAddr(KERNEL_OFFSET))]); + println!("PHYS_PTE:{:?}", p4[p4_index(VirtAddr(PHYS_OFFSET))]); + } + pub fn map(&mut self, va: VirtAddr, pa: PhysAddr, flags: PTFlags) { let entry = self.get_entry_or_create(va).unwrap(); if !entry.is_unused() { @@ -80,7 +102,6 @@ } pub fn map_area(&mut self, area: &MapArea) { - println!("frame test"); for (va, pa) in area.mapper.iter() { assert!(pa.start_pa().0 < PHYS_OFFSET); self.map(*va, pa.start_pa(), area.flags); @@ -179,13 +200,24 @@ fn next_table_or_create<'a>( } pub(crate) fn init() { - let (cr3, _) = x86_64::registers::control::Cr3::read(); + let cr3 = x86_64_util::get_cr3(); - let p4 = table_of(PhysAddr(cr3.start_address().as_u64() as usize)); - *KERNEL_PTE.exclusive_access() = p4[p4_index(VirtAddr(KERNEL_OFFSET))]; - *PHYS_PTE.exclusive_access() = p4[p4_index(VirtAddr(PHYS_OFFSET))]; - println!("kernel_pte:{:?}", p4[p4_index(VirtAddr(KERNEL_OFFSET))]); - println!("PHYS_PTE:{:?}", p4[p4_index(VirtAddr(PHYS_OFFSET))]); + let p4 = table_of(PhysAddr(cr3)); + // Cancel mapping in lowest addresses. + p4[0].0 = 0; + // there are already mappings at indexes 1, 2 and 3, so user space must not use those values + let mut map_pte = ALL_MAPPED_PTE.exclusive_access(); + for i in 0..512 { + if !p4[i].flags().is_empty() { + map_pte.insert(i, p4[i]); + // println!("i:{:x},{:?}",i,p4[i]); + } + } + // print how p4[0] is used + // *KERNEL_PTE.get() = p4[p4_index(VirtAddr(KERNEL_OFFSET))]; + // *PHYS_PTE.get() = p4[p4_index(VirtAddr(PHYS_OFFSET))]; + // println!("kernel_pte:{:?}", *KERNEL_PTE.get()); + // println!("PHYS_PTE:{:?}", *PHYS_PTE.get()); // Cancel mapping in lowest addresses. 
// p4[0].0 = 0; diff --git a/src/kxos-frame/src/sync/up.rs b/src/kxos-frame/src/sync/up.rs index 76ede8bc0..8491938b1 100644 --- a/src/kxos-frame/src/sync/up.rs +++ b/src/kxos-frame/src/sync/up.rs @@ -1,5 +1,4 @@ -use core::{ - cell::{RefCell, RefMut},}; +use core::cell::{RefCell, RefMut}; #[derive(Debug)] /// Wrap a static data structure inside it so that we are diff --git a/src/kxos-frame/src/sync/wait.rs b/src/kxos-frame/src/sync/wait.rs index 37597c19f..1cb074189 100644 --- a/src/kxos-frame/src/sync/wait.rs +++ b/src/kxos-frame/src/sync/wait.rs @@ -31,7 +31,7 @@ impl WaitQueue { loop { if (cond)() { self.dequeue(&waiter); - return; + break; } waiter.wait(); } diff --git a/src/kxos-frame/src/task/processor.rs b/src/kxos-frame/src/task/processor.rs index 1487df4ad..5b679ad05 100644 --- a/src/kxos-frame/src/task/processor.rs +++ b/src/kxos-frame/src/task/processor.rs @@ -1,9 +1,10 @@ use super::{ + scheduler::{fetch_task, GLOBAL_SCHEDULER}, task::{context_switch, TaskContext}, - Task, scheduler::{fetch_task, GLOBAL_SCHEDULER}, TaskStatus, + Task, TaskStatus, }; use crate::UPSafeCell; -use alloc:: sync::Arc; +use alloc::sync::Arc; use lazy_static::*; pub struct Processor { @@ -45,29 +46,32 @@ pub fn current_task() -> Option<Arc<Task>> { } /// Call this function to switch to another task by using GLOBAL_SCHEDULER -/// -/// if current task is none, then it will use the default task context -/// +/// +/// if the current task is none, the default task context is used and control will not return to this function again +/// /// if the current task's status is exited, it will not be added back to the scheduler -/// +/// /// before the context switch, the processor's current task is updated to the next task pub fn schedule() { let next_task = fetch_task().expect("no more task found"); let current_task_option = current_task(); let next_task_cx_ptr = &next_task.inner_exclusive_access().ctx as *const TaskContext; let current_task: Arc<Task>; - let current_task_cx_ptr = if current_task_option.is_none(){ + let current_task_cx_ptr = if current_task_option.is_none() { PROCESSOR.exclusive_access().get_idle_task_cx_ptr() - }else{ + } else { current_task = current_task_option.unwrap(); - if current_task.status() != TaskStatus::Exited{ - GLOBAL_SCHEDULER.exclusive_access().enqueue(current_task.clone()); + if current_task.status() != TaskStatus::Exited { + GLOBAL_SCHEDULER + .exclusive_access() + .enqueue(current_task.clone()); } &mut current_task.inner_exclusive_access().ctx as *mut TaskContext }; // change the current task to the next task + PROCESSOR.exclusive_access().current = Some(next_task.clone()); unsafe { context_switch(current_task_cx_ptr, next_task_cx_ptr); } -} \ No newline at end of file +} diff --git a/src/kxos-frame/src/task/scheduler.rs b/src/kxos-frame/src/task/scheduler.rs index a5d4e21dd..bef5afba7 100644 --- a/src/kxos-frame/src/task/scheduler.rs +++ b/src/kxos-frame/src/task/scheduler.rs @@ -1,11 +1,8 @@ use crate::task::Task; -use crate::{prelude::*, UPSafeCell}; +use crate::{prelude::*, println, UPSafeCell}; use lazy_static::lazy_static; -use super::processor::current_task; -use super::task::{context_switch, TaskContext}; - lazy_static! 
{ pub static ref GLOBAL_SCHEDULER: UPSafeCell<GlobalScheduler> = unsafe { UPSafeCell::new(GlobalScheduler { scheduler: None }) }; @@ -33,12 +30,13 @@ impl GlobalScheduler { /// Dequeue a task using the scheduler. /// Requires that the scheduler is not none. pub fn dequeue(&mut self) -> Option<Arc<Task>> { - self.scheduler.take().unwrap().dequeue() + self.scheduler.unwrap().dequeue() } /// Enqueue a task using the scheduler. /// Requires that the scheduler is not none. pub fn enqueue(&mut self, task: Arc<Task>) { - self.scheduler.take().unwrap().enqueue(task) + println!("{:?}", self.scheduler.is_none()); + self.scheduler.unwrap().enqueue(task) } } /// Set the global task scheduler. @@ -55,5 +53,3 @@ pub fn fetch_task() -> Option<Arc<Task>> { pub fn add_task(task: Arc<Task>) { GLOBAL_SCHEDULER.exclusive_access().enqueue(task); } - - diff --git a/src/kxos-frame/src/task/task.rs b/src/kxos-frame/src/task/task.rs index 5896b917d..0df563f98 100644 --- a/src/kxos-frame/src/task/task.rs +++ b/src/kxos-frame/src/task/task.rs @@ -1,10 +1,10 @@ use core::cell::RefMut; -use core::intrinsics::unreachable; use core::mem::size_of; -use crate::trap::CalleeRegs; +use crate::mm::PhysFrame; +use crate::trap::{CalleeRegs, SyscallFrame}; use crate::user::UserSpace; -use crate::{prelude::*, UPSafeCell, println}; +use crate::{prelude::*, UPSafeCell}; use super::processor::{current_task, schedule}; use super::scheduler::add_task; @@ -20,34 +20,28 @@ pub struct TaskContext { extern "C" { pub fn context_switch(cur: *mut TaskContext, nxt: *const TaskContext); } -/// 8*PAGE_SIZE -#[cfg(debug_assertions)] -pub const TASK_SIZE: usize = 32768; -/// 2*PAGE_SIZE -#[cfg(not(debug_assertions))] -pub const TASK_SIZE: usize = 8192; -#[cfg(debug_assertions)] -#[repr(align(32768))] -struct TaskAlign; +pub struct KernelStack { + frame: PhysFrame, +} -#[cfg(not(debug_assertions))] -#[repr(C, align(8192))] -struct TaskAlign; - -pub const KERNEL_STACK_SIZE: usize = - TASK_SIZE - size_of::<Box<dyn Fn() + Send + Sync>>() - size_of::<Box<dyn Any + Send + Sync>>() - size_of::<Option<Arc<UserSpace>>>() - - size_of::<UPSafeCell<TaskInner>>() - size_of::<usize>(); +impl KernelStack { + pub fn new() -> Self { + Self { + frame: PhysFrame::alloc().expect("out of memory"), + } + } +} /// A task that executes a function to the end. 
pub struct Task { - _align: TaskAlign, func: Box<dyn Fn() + Send + Sync>, data: Box<dyn Any + Send + Sync>, user_space: Option<Arc<UserSpace>>, task_inner: UPSafeCell<TaskInner>, exit_code: usize, - kstack: [u8; KERNEL_STACK_SIZE], + /// kernel stack; note that a SyscallFrame sits at its top + kstack: KernelStack, } pub struct TaskInner { @@ -90,8 +84,9 @@ { /// All tasks enter this function first. /// It is meant to execute the task function stored in the Task. - fn kernel_task_entry(){ - let current_task = current_task().expect("no current task, it should have current task in kernel task entry"); + fn kernel_task_entry() { + let current_task = current_task() + .expect("no current task, it should have current task in kernel task entry"); current_task.func.call(()) } let result = Self { @@ -104,19 +99,36 @@ ctx: TaskContext::default(), }) }, - _align: TaskAlign, - exit_code:0, - kstack: [0; KERNEL_STACK_SIZE], + exit_code: 0, + kstack: KernelStack::new(), }; + result.task_inner.exclusive_access().task_status = TaskStatus::Runnable; result.task_inner.exclusive_access().ctx.rip = kernel_task_entry as usize; - let arc_self = Arc::new(result); + result.task_inner.exclusive_access().ctx.regs.rsp = result.kstack.frame.end_pa().kvaddr().0 + as usize + - size_of::<usize>() + - size_of::<SyscallFrame>(); + let arc_self = Arc::new(result); add_task(arc_self.clone()); + schedule(); Ok(arc_self) } + pub fn syscall_frame(&self) -> &mut SyscallFrame { + unsafe { + &mut *(self + .kstack + .frame + .end_pa() + .kvaddr() + .get_mut::<SyscallFrame>() as *mut SyscallFrame) + .sub(1) + } + } + /// Returns the task status. pub fn status(&self) -> TaskStatus { self.task_inner.exclusive_access().task_status @@ -136,14 +148,14 @@ } } - pub fn exit(&self)->!{ + pub fn exit(&self) -> ! { self.inner_exclusive_access().task_status = TaskStatus::Exited; schedule(); unreachable!() } } -#[derive(Clone, Copy,PartialEq, Eq, PartialOrd, Ord)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] /// The status of a task. pub enum TaskStatus { /// The task is runnable. 
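[Reviewer note, not part of the patch] The KernelStack change above reserves a SyscallFrame at the very top of the task's kernel-stack page: Task::new seeds ctx.regs.rsp just below it, and Task::syscall_frame() recovers it by stepping one whole frame back from the page's end address. A minimal standalone sketch of that layout, assuming PAGE_SIZE = 4096 and stand-in register structs whose field order is inferred from the push sequence in trap.S (their combined 128-byte size matches the "128 = sizeof(SyscallFrame)" comments there):

    use core::mem::size_of;

    const PAGE_SIZE: usize = 4096; // assumed size of one physical frame

    // Stand-ins; layout inferred from the push order in trap.S.
    #[repr(C)]
    struct CallerRegs { rax: usize, rcx: usize, rdx: usize, rsi: usize, rdi: usize, r8: usize, r9: usize, r10: usize, r11: usize }
    #[repr(C)]
    struct CalleeRegs { rsp: usize, rbx: usize, rbp: usize, r12: usize, r13: usize, r14: usize, r15: usize }
    #[repr(C)]
    struct SyscallFrame { caller: CallerRegs, callee: CalleeRegs }

    fn main() {
        // 128 = sizeof(SyscallFrame), as trap.S asserts in its comments.
        assert_eq!(size_of::<SyscallFrame>(), 128);
        // Pretend this heap page is the frame returned by PhysFrame::alloc().
        let kstack = vec![0u8; PAGE_SIZE];
        let end = kstack.as_ptr() as usize + PAGE_SIZE; // end_pa().kvaddr()
        // Task::syscall_frame(): the frame sits right below the stack top.
        let frame_addr = end - size_of::<SyscallFrame>();
        // Task::new(): the initial rsp leaves room for the frame plus one word.
        let rsp = end - size_of::<usize>() - size_of::<SyscallFrame>();
        assert!(rsp < frame_addr);
        println!("top={end:#x} syscall_frame={frame_addr:#x} rsp={rsp:#x}");
    }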
diff --git a/src/kxos-frame/src/trap/handler.rs b/src/kxos-frame/src/trap/handler.rs new file mode 100644 index 000000000..2fff060cb --- /dev/null +++ b/src/kxos-frame/src/trap/handler.rs @@ -0,0 +1,28 @@ +use super::{irq::IRQ_LIST, *}; + +#[no_mangle] +pub extern "C" fn syscall_handler(f: &'static mut SyscallFrame) -> isize { + let r = &f.caller; + println!("{:?}", f); + // let ret = syscall::syscall(r.rax, [r.rdi, r.rsi, r.rdx]); + // current_check_signal(); + // ret + -1 +} + +const DIVIDE_BY_ZERO: usize = 0; +const INVALID_OPCODE: usize = 6; +const SEGMENT_NOT_PRESENT: usize = 11; +const STACK_SEGMENT_FAULT: usize = 12; +const GENERAL_PROTECTION_FAULT: usize = 13; +const PAGE_FAULT: usize = 14; +const TIMER: usize = 32; + +#[no_mangle] +pub extern "C" fn trap_handler(f: &'static mut TrapFrame) { + let irq_line = IRQ_LIST.get(f.id as usize).unwrap(); + let callback_functions = irq_line.callback_list(); + for callback_function in callback_functions.iter() { + callback_function.call(f.clone()); + } +} diff --git a/src/kxos-frame/src/device/irq.rs b/src/kxos-frame/src/trap/irq.rs similarity index 72% rename from src/kxos-frame/src/device/irq.rs rename to src/kxos-frame/src/trap/irq.rs index 877e66061..10a46d509 100644 --- a/src/kxos-frame/src/device/irq.rs +++ b/src/kxos-frame/src/trap/irq.rs @@ -1,20 +1,11 @@ use crate::prelude::*; +use super::TrapFrame; use lazy_static::lazy_static; -use spin::Mutex; -use x86_64::{ - set_general_handler, - structures::idt::{InterruptDescriptorTable, InterruptStackFrame, InterruptStackFrameValue}, -}; +use spin::{Mutex, MutexGuard}; + lazy_static! { - static ref IDT: InterruptDescriptorTable = { - let mut idt = InterruptDescriptorTable::new(); - set_general_handler!(&mut idt, my_general_hander); - idt - }; -} lazy_static! { - static ref IRQ_LIST: Vec<IrqLine> = { + pub static ref IRQ_LIST: Vec<IrqLine> = { let mut list: Vec<IrqLine> = Vec::new(); for i in 0..256 { list.push(IrqLine { @@ -30,21 +21,6 @@ lazy_static! { static ref ID_ALLOCATOR: Mutex<RecycleAllocator> = Mutex::new(RecycleAllocator::new()); } -pub fn init() { - IDT.load(); -} - -fn my_general_hander(stack_frame: InterruptStackFrame, index: u8, error_code: Option<u64>) { - let irq_line = IRQ_LIST.get(index as usize).unwrap(); - let callback_functions = irq_line.callback_list.lock(); - for callback_function in callback_functions.iter() { - callback_function.function.call((InterruptInformation { - interrupt_stack_frame: *stack_frame, - error_code, - },)); - } -} - struct RecycleAllocator { current: usize, recycled: Vec<usize>, } @@ -78,23 +54,23 @@ impl RecycleAllocator { } } -struct CallbackElement { - function: Box<dyn Fn(InterruptInformation) + Sync + Send + 'static>, +pub struct CallbackElement { + function: Box<dyn Fn(TrapFrame) + Sync + Send + 'static>, id: usize, } +impl CallbackElement { + pub fn call(&self, element: TrapFrame) { + self.function.call((element,)); + } +} + /// An interrupt request (IRQ) line. pub struct IrqLine { irq_num: u8, callback_list: Mutex<Vec<CallbackElement>>, } -#[derive(Debug)] -pub struct InterruptInformation { - pub interrupt_stack_frame: InterruptStackFrameValue, - pub error_code: Option<u64>, -} - impl IrqLine { /// Acquire an interrupt request line. /// @@ -111,6 +87,10 @@ self.irq_num } + pub fn callback_list(&self) -> MutexGuard<'_, alloc::vec::Vec<CallbackElement>> { + self.callback_list.lock() + } + /// Register a callback that will be invoked when the IRQ is active. /// /// A handle to the callback is returned. Dropping the handle /// @@ -119,7 +99,7 @@ /// For each IRQ line, multiple callbacks may be registered. 
pub fn on_active<F>(&self, callback: F) -> IrqCallbackHandle where - F: Fn(InterruptInformation) + Sync + Send + 'static, + F: Fn(TrapFrame) + Sync + Send + 'static, { let allocate_id = ID_ALLOCATOR.lock().alloc(); self.callback_list.lock().push(CallbackElement { diff --git a/src/kxos-frame/src/trap/mod.rs b/src/kxos-frame/src/trap/mod.rs index 9c2958111..92b873ab3 100644 --- a/src/kxos-frame/src/trap/mod.rs +++ b/src/kxos-frame/src/trap/mod.rs @@ -1,3 +1,14 @@ +mod handler; +mod irq; + +pub use self::irq::{IrqCallbackHandle, IrqLine}; +use core::mem::size_of_val; + +use crate::{x86_64_util::*, *}; + +core::arch::global_asm!(include_str!("trap.S")); +core::arch::global_asm!(include_str!("vector.S")); + #[derive(Debug, Default, Clone, Copy)] #[repr(C)] pub struct CallerRegs { @@ -44,3 +55,74 @@ pub struct TrapFrame { pub rsp: usize, pub ss: usize, } + +const TSS_SIZE: usize = 104; + +extern "C" { + /// TSS + static TSS: [u8; TSS_SIZE]; + /// Every interrupt vector pushes an id and then jumps to __trap_entry + static __vectors: [usize; 256]; + fn syscall_entry(); + pub fn syscall_return(f: &SyscallFrame) -> !; +} + +pub fn init() { + static mut GDT: [usize; 7] = [ + 0, + 0x00209800_00000000, // KCODE, EXECUTABLE | USER_SEGMENT | PRESENT | LONG_MODE + 0x00009200_00000000, // KDATA, DATA_WRITABLE | USER_SEGMENT | PRESENT + 0x0000F200_00000000, // UDATA, DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT + 0x0020F800_00000000, // UCODE, EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT | LONG_MODE + 0, + 0, // TSS, filled in at runtime + ]; + let ptr = unsafe { TSS.as_ptr() as usize }; + let low = (1 << 47) + | 0b1001 << 40 + | (TSS_SIZE - 1) + | ((ptr & ((1 << 24) - 1)) << 16) + | (((ptr >> 24) & ((1 << 8) - 1)) << 56); + let high = ptr >> 32; + unsafe { + GDT[5] = low; + GDT[6] = high; + lgdt(&DescriptorTablePointer { + limit: size_of_val(&GDT) as u16 - 1, + base: GDT.as_ptr() as _, + }); + } + + x86_64_util::set_cs((1 << 3) | x86_64_util::RING0); + x86_64_util::set_ss((2 << 3) | x86_64_util::RING0); + + load_tss((5 << 3) | RING0); + set_msr(EFER_MSR, get_msr(EFER_MSR) | 1); // enable system call extensions + set_msr(STAR_MSR, (2 << 3 << 48) | (1 << 3 << 32)); + set_msr(LSTAR_MSR, syscall_entry as _); + set_msr(SFMASK_MSR, 0x47700); // TF|DF|IF|IOPL|AC|NT + + #[repr(C, align(16))] + struct IDT { + entries: [[usize; 2]; 256], + } + static mut IDT: IDT = zero(); + let cs = (1 << 3) | x86_64_util::RING0 as usize; + for i in 0..256 { + let p = unsafe { __vectors[i] }; + let low = (((p >> 16) & 0xFFFF) << 48) + | (0b1000_1110_0000_0000 << 32) + | (cs << 16) + | (p & 0xFFFF); + let high = p >> 32; + unsafe { + IDT.entries[i] = [low, high]; + } + } + unsafe { + lidt(&DescriptorTablePointer { + limit: size_of_val(&IDT) as u16 - 1, + base: &IDT as *const _ as _, + }) + } +} diff --git a/src/kxos-frame/src/trap/trap.S b/src/kxos-frame/src/trap/trap.S new file mode 100644 index 000000000..6b55dd8d9 --- /dev/null +++ b/src/kxos-frame/src/trap/trap.S @@ -0,0 +1,96 @@ +.data +.align 4 +TSS: + .space 104 + +.text + +.macro save + push r11 + push r10 + push r9 + push r8 + push rdi + push rsi + push rdx + push rcx + push rax +.endm + +.macro restore + pop rax + pop rcx + pop rdx + pop rsi + pop rdi + pop r8 + pop r9 + pop r10 + pop r11 +.endm + +.global __trap_entry +__trap_entry: + # save all caller-saved registers + save + # pass the stack pointer to trap_handler as its argument + mov rdi, rsp + call trap_handler + mov rax, [rsp + 96] # 96 = offsetof(TrapFrame, cs) + and rax, 0x3 + jz __from_kernel + lea rax, [rsp + 128] # prepare new TSS.sp0, 128 = sizeof(TrapFrame) + mov [TSS + rip + 4], rax 
+__from_kernel: + restore + add rsp, 16 # skip TrapFrame.err and id + iretq + +.global syscall_entry +syscall_entry: + # the syscall instruction does: + # - load cs, ss from STAR MSR + # - r11 <- rflags, mask rflags from SFMASK MSR + # - rcx <- rip, load rip from LSTAR MSR + + # temporarily store user rsp into TSS.sp0 and load kernel rsp from it. + xchg rsp, [TSS + rip + 4] + push r15 + push r14 + push r13 + push r12 + push rbp + push rbx + push [TSS + rip + 4] # store user rsp into SyscallFrame.rsp + save + mov rdi, rsp + call syscall_handler + mov [rsp], rax # CallerRegs.rax is at offset 0 + jmp __syscall_return + +.global syscall_return +syscall_return: # (SyscallFrame *) + mov rsp, rdi +__syscall_return: + lea rax, [rsp + 128] # prepare new TSS.sp0, 128 = sizeof(SyscallFrame) + # store the stack pointer to be used on the next entry to ring 0 into TSS + 4 + mov [TSS + rip + 4], rax + restore + mov rbx, [rsp + 8] + mov rbp, [rsp + 16] + mov r12, [rsp + 24] + mov r13, [rsp + 32] + mov r14, [rsp + 40] + mov r15, [rsp + 48] + mov rsp, [rsp + 0] + sysretq + +.global switch_to_user_space +switch_to_user_space: # (cpu_context: *CpuContext, reg: *CallerRegs) + # mov rflag, [rdi+136] + mov rdi, rsi + jmp syscall_return + + + + diff --git a/src/kxos-frame/src/trap/vector.S b/src/kxos-frame/src/trap/vector.S new file mode 100644 index 000000000..d4fba75ef --- /dev/null +++ b/src/kxos-frame/src/trap/vector.S @@ -0,0 +1,1278 @@ +.section .text +vector0: + push 0 + push 0 + jmp __trap_entry +vector1: + push 0 + push 1 + jmp __trap_entry +vector2: + push 0 + push 2 + jmp __trap_entry +vector3: + push 0 + push 3 + jmp __trap_entry +vector4: + push 0 + push 4 + jmp __trap_entry +vector5: + push 0 + push 5 + jmp __trap_entry +vector6: + push 0 + push 6 + jmp __trap_entry +vector7: + push 0 + push 7 + jmp __trap_entry +vector8: + push 8 + jmp __trap_entry +vector9: + push 0 + push 9 + jmp __trap_entry +vector10: + push 10 + jmp __trap_entry +vector11: + push 11 + jmp __trap_entry +vector12: + push 12 + jmp __trap_entry +vector13: + push 13 + jmp __trap_entry +vector14: + push 14 + jmp __trap_entry +vector15: + push 0 + push 15 + jmp __trap_entry +vector16: + push 0 + push 16 + jmp __trap_entry +vector17: + push 17 + jmp __trap_entry +vector18: + push 0 + push 18 + jmp __trap_entry +vector19: + push 0 + push 19 + jmp __trap_entry +vector20: + push 0 + push 20 + jmp __trap_entry +vector21: + push 0 + push 21 + jmp __trap_entry +vector22: + push 0 + push 22 + jmp __trap_entry +vector23: + push 0 + push 23 + jmp __trap_entry +vector24: + push 0 + push 24 + jmp __trap_entry +vector25: + push 0 + push 25 + jmp __trap_entry +vector26: + push 0 + push 26 + jmp __trap_entry +vector27: + push 0 + push 27 + jmp __trap_entry +vector28: + push 0 + push 28 + jmp __trap_entry +vector29: + push 0 + push 29 + jmp __trap_entry +vector30: + push 0 + push 30 + jmp __trap_entry +vector31: + push 0 + push 31 + jmp __trap_entry +vector32: + push 0 + push 32 + jmp __trap_entry +vector33: + push 0 + push 33 + jmp __trap_entry +vector34: + push 0 + push 34 + jmp __trap_entry +vector35: + push 0 + push 35 + jmp __trap_entry +vector36: + push 0 + push 36 + jmp __trap_entry +vector37: + push 0 + push 37 + jmp __trap_entry +vector38: + push 0 + push 38 + jmp __trap_entry +vector39: + push 0 + push 39 + jmp __trap_entry +vector40: + push 0 + push 40 + jmp __trap_entry +vector41: + push 0 + push 41 + jmp __trap_entry +vector42: + push 0 + push 42 + jmp __trap_entry +vector43: + push 0 + push 43 + jmp __trap_entry +vector44: + push 0 + push 44 + jmp __trap_entry +vector45: + push 0 + push 
45 + jmp __trap_entry +vector46: + push 0 + push 46 + jmp __trap_entry +vector47: + push 0 + push 47 + jmp __trap_entry +vector48: + push 0 + push 48 + jmp __trap_entry +vector49: + push 0 + push 49 + jmp __trap_entry +vector50: + push 0 + push 50 + jmp __trap_entry +vector51: + push 0 + push 51 + jmp __trap_entry +vector52: + push 0 + push 52 + jmp __trap_entry +vector53: + push 0 + push 53 + jmp __trap_entry +vector54: + push 0 + push 54 + jmp __trap_entry +vector55: + push 0 + push 55 + jmp __trap_entry +vector56: + push 0 + push 56 + jmp __trap_entry +vector57: + push 0 + push 57 + jmp __trap_entry +vector58: + push 0 + push 58 + jmp __trap_entry +vector59: + push 0 + push 59 + jmp __trap_entry +vector60: + push 0 + push 60 + jmp __trap_entry +vector61: + push 0 + push 61 + jmp __trap_entry +vector62: + push 0 + push 62 + jmp __trap_entry +vector63: + push 0 + push 63 + jmp __trap_entry +vector64: + push 0 + push 64 + jmp __trap_entry +vector65: + push 0 + push 65 + jmp __trap_entry +vector66: + push 0 + push 66 + jmp __trap_entry +vector67: + push 0 + push 67 + jmp __trap_entry +vector68: + push 0 + push 68 + jmp __trap_entry +vector69: + push 0 + push 69 + jmp __trap_entry +vector70: + push 0 + push 70 + jmp __trap_entry +vector71: + push 0 + push 71 + jmp __trap_entry +vector72: + push 0 + push 72 + jmp __trap_entry +vector73: + push 0 + push 73 + jmp __trap_entry +vector74: + push 0 + push 74 + jmp __trap_entry +vector75: + push 0 + push 75 + jmp __trap_entry +vector76: + push 0 + push 76 + jmp __trap_entry +vector77: + push 0 + push 77 + jmp __trap_entry +vector78: + push 0 + push 78 + jmp __trap_entry +vector79: + push 0 + push 79 + jmp __trap_entry +vector80: + push 0 + push 80 + jmp __trap_entry +vector81: + push 0 + push 81 + jmp __trap_entry +vector82: + push 0 + push 82 + jmp __trap_entry +vector83: + push 0 + push 83 + jmp __trap_entry +vector84: + push 0 + push 84 + jmp __trap_entry +vector85: + push 0 + push 85 + jmp __trap_entry +vector86: + push 0 + push 86 + jmp __trap_entry +vector87: + push 0 + push 87 + jmp __trap_entry +vector88: + push 0 + push 88 + jmp __trap_entry +vector89: + push 0 + push 89 + jmp __trap_entry +vector90: + push 0 + push 90 + jmp __trap_entry +vector91: + push 0 + push 91 + jmp __trap_entry +vector92: + push 0 + push 92 + jmp __trap_entry +vector93: + push 0 + push 93 + jmp __trap_entry +vector94: + push 0 + push 94 + jmp __trap_entry +vector95: + push 0 + push 95 + jmp __trap_entry +vector96: + push 0 + push 96 + jmp __trap_entry +vector97: + push 0 + push 97 + jmp __trap_entry +vector98: + push 0 + push 98 + jmp __trap_entry +vector99: + push 0 + push 99 + jmp __trap_entry +vector100: + push 0 + push 100 + jmp __trap_entry +vector101: + push 0 + push 101 + jmp __trap_entry +vector102: + push 0 + push 102 + jmp __trap_entry +vector103: + push 0 + push 103 + jmp __trap_entry +vector104: + push 0 + push 104 + jmp __trap_entry +vector105: + push 0 + push 105 + jmp __trap_entry +vector106: + push 0 + push 106 + jmp __trap_entry +vector107: + push 0 + push 107 + jmp __trap_entry +vector108: + push 0 + push 108 + jmp __trap_entry +vector109: + push 0 + push 109 + jmp __trap_entry +vector110: + push 0 + push 110 + jmp __trap_entry +vector111: + push 0 + push 111 + jmp __trap_entry +vector112: + push 0 + push 112 + jmp __trap_entry +vector113: + push 0 + push 113 + jmp __trap_entry +vector114: + push 0 + push 114 + jmp __trap_entry +vector115: + push 0 + push 115 + jmp __trap_entry +vector116: + push 0 + push 116 + jmp __trap_entry +vector117: + push 
0 + push 117 + jmp __trap_entry +vector118: + push 0 + push 118 + jmp __trap_entry +vector119: + push 0 + push 119 + jmp __trap_entry +vector120: + push 0 + push 120 + jmp __trap_entry +vector121: + push 0 + push 121 + jmp __trap_entry +vector122: + push 0 + push 122 + jmp __trap_entry +vector123: + push 0 + push 123 + jmp __trap_entry +vector124: + push 0 + push 124 + jmp __trap_entry +vector125: + push 0 + push 125 + jmp __trap_entry +vector126: + push 0 + push 126 + jmp __trap_entry +vector127: + push 0 + push 127 + jmp __trap_entry +vector128: + push 0 + push 128 + jmp __trap_entry +vector129: + push 0 + push 129 + jmp __trap_entry +vector130: + push 0 + push 130 + jmp __trap_entry +vector131: + push 0 + push 131 + jmp __trap_entry +vector132: + push 0 + push 132 + jmp __trap_entry +vector133: + push 0 + push 133 + jmp __trap_entry +vector134: + push 0 + push 134 + jmp __trap_entry +vector135: + push 0 + push 135 + jmp __trap_entry +vector136: + push 0 + push 136 + jmp __trap_entry +vector137: + push 0 + push 137 + jmp __trap_entry +vector138: + push 0 + push 138 + jmp __trap_entry +vector139: + push 0 + push 139 + jmp __trap_entry +vector140: + push 0 + push 140 + jmp __trap_entry +vector141: + push 0 + push 141 + jmp __trap_entry +vector142: + push 0 + push 142 + jmp __trap_entry +vector143: + push 0 + push 143 + jmp __trap_entry +vector144: + push 0 + push 144 + jmp __trap_entry +vector145: + push 0 + push 145 + jmp __trap_entry +vector146: + push 0 + push 146 + jmp __trap_entry +vector147: + push 0 + push 147 + jmp __trap_entry +vector148: + push 0 + push 148 + jmp __trap_entry +vector149: + push 0 + push 149 + jmp __trap_entry +vector150: + push 0 + push 150 + jmp __trap_entry +vector151: + push 0 + push 151 + jmp __trap_entry +vector152: + push 0 + push 152 + jmp __trap_entry +vector153: + push 0 + push 153 + jmp __trap_entry +vector154: + push 0 + push 154 + jmp __trap_entry +vector155: + push 0 + push 155 + jmp __trap_entry +vector156: + push 0 + push 156 + jmp __trap_entry +vector157: + push 0 + push 157 + jmp __trap_entry +vector158: + push 0 + push 158 + jmp __trap_entry +vector159: + push 0 + push 159 + jmp __trap_entry +vector160: + push 0 + push 160 + jmp __trap_entry +vector161: + push 0 + push 161 + jmp __trap_entry +vector162: + push 0 + push 162 + jmp __trap_entry +vector163: + push 0 + push 163 + jmp __trap_entry +vector164: + push 0 + push 164 + jmp __trap_entry +vector165: + push 0 + push 165 + jmp __trap_entry +vector166: + push 0 + push 166 + jmp __trap_entry +vector167: + push 0 + push 167 + jmp __trap_entry +vector168: + push 0 + push 168 + jmp __trap_entry +vector169: + push 0 + push 169 + jmp __trap_entry +vector170: + push 0 + push 170 + jmp __trap_entry +vector171: + push 0 + push 171 + jmp __trap_entry +vector172: + push 0 + push 172 + jmp __trap_entry +vector173: + push 0 + push 173 + jmp __trap_entry +vector174: + push 0 + push 174 + jmp __trap_entry +vector175: + push 0 + push 175 + jmp __trap_entry +vector176: + push 0 + push 176 + jmp __trap_entry +vector177: + push 0 + push 177 + jmp __trap_entry +vector178: + push 0 + push 178 + jmp __trap_entry +vector179: + push 0 + push 179 + jmp __trap_entry +vector180: + push 0 + push 180 + jmp __trap_entry +vector181: + push 0 + push 181 + jmp __trap_entry +vector182: + push 0 + push 182 + jmp __trap_entry +vector183: + push 0 + push 183 + jmp __trap_entry +vector184: + push 0 + push 184 + jmp __trap_entry +vector185: + push 0 + push 185 + jmp __trap_entry +vector186: + push 0 + push 186 + jmp __trap_entry 
+vector187: + push 0 + push 187 + jmp __trap_entry +vector188: + push 0 + push 188 + jmp __trap_entry +vector189: + push 0 + push 189 + jmp __trap_entry +vector190: + push 0 + push 190 + jmp __trap_entry +vector191: + push 0 + push 191 + jmp __trap_entry +vector192: + push 0 + push 192 + jmp __trap_entry +vector193: + push 0 + push 193 + jmp __trap_entry +vector194: + push 0 + push 194 + jmp __trap_entry +vector195: + push 0 + push 195 + jmp __trap_entry +vector196: + push 0 + push 196 + jmp __trap_entry +vector197: + push 0 + push 197 + jmp __trap_entry +vector198: + push 0 + push 198 + jmp __trap_entry +vector199: + push 0 + push 199 + jmp __trap_entry +vector200: + push 0 + push 200 + jmp __trap_entry +vector201: + push 0 + push 201 + jmp __trap_entry +vector202: + push 0 + push 202 + jmp __trap_entry +vector203: + push 0 + push 203 + jmp __trap_entry +vector204: + push 0 + push 204 + jmp __trap_entry +vector205: + push 0 + push 205 + jmp __trap_entry +vector206: + push 0 + push 206 + jmp __trap_entry +vector207: + push 0 + push 207 + jmp __trap_entry +vector208: + push 0 + push 208 + jmp __trap_entry +vector209: + push 0 + push 209 + jmp __trap_entry +vector210: + push 0 + push 210 + jmp __trap_entry +vector211: + push 0 + push 211 + jmp __trap_entry +vector212: + push 0 + push 212 + jmp __trap_entry +vector213: + push 0 + push 213 + jmp __trap_entry +vector214: + push 0 + push 214 + jmp __trap_entry +vector215: + push 0 + push 215 + jmp __trap_entry +vector216: + push 0 + push 216 + jmp __trap_entry +vector217: + push 0 + push 217 + jmp __trap_entry +vector218: + push 0 + push 218 + jmp __trap_entry +vector219: + push 0 + push 219 + jmp __trap_entry +vector220: + push 0 + push 220 + jmp __trap_entry +vector221: + push 0 + push 221 + jmp __trap_entry +vector222: + push 0 + push 222 + jmp __trap_entry +vector223: + push 0 + push 223 + jmp __trap_entry +vector224: + push 0 + push 224 + jmp __trap_entry +vector225: + push 0 + push 225 + jmp __trap_entry +vector226: + push 0 + push 226 + jmp __trap_entry +vector227: + push 0 + push 227 + jmp __trap_entry +vector228: + push 0 + push 228 + jmp __trap_entry +vector229: + push 0 + push 229 + jmp __trap_entry +vector230: + push 0 + push 230 + jmp __trap_entry +vector231: + push 0 + push 231 + jmp __trap_entry +vector232: + push 0 + push 232 + jmp __trap_entry +vector233: + push 0 + push 233 + jmp __trap_entry +vector234: + push 0 + push 234 + jmp __trap_entry +vector235: + push 0 + push 235 + jmp __trap_entry +vector236: + push 0 + push 236 + jmp __trap_entry +vector237: + push 0 + push 237 + jmp __trap_entry +vector238: + push 0 + push 238 + jmp __trap_entry +vector239: + push 0 + push 239 + jmp __trap_entry +vector240: + push 0 + push 240 + jmp __trap_entry +vector241: + push 0 + push 241 + jmp __trap_entry +vector242: + push 0 + push 242 + jmp __trap_entry +vector243: + push 0 + push 243 + jmp __trap_entry +vector244: + push 0 + push 244 + jmp __trap_entry +vector245: + push 0 + push 245 + jmp __trap_entry +vector246: + push 0 + push 246 + jmp __trap_entry +vector247: + push 0 + push 247 + jmp __trap_entry +vector248: + push 0 + push 248 + jmp __trap_entry +vector249: + push 0 + push 249 + jmp __trap_entry +vector250: + push 0 + push 250 + jmp __trap_entry +vector251: + push 0 + push 251 + jmp __trap_entry +vector252: + push 0 + push 252 + jmp __trap_entry +vector253: + push 0 + push 253 + jmp __trap_entry +vector254: + push 0 + push 254 + jmp __trap_entry +vector255: + push 0 + push 255 + jmp __trap_entry + +.section .rodata +.global 
__vectors +__vectors: + .quad vector0 + .quad vector1 + .quad vector2 + .quad vector3 + .quad vector4 + .quad vector5 + .quad vector6 + .quad vector7 + .quad vector8 + .quad vector9 + .quad vector10 + .quad vector11 + .quad vector12 + .quad vector13 + .quad vector14 + .quad vector15 + .quad vector16 + .quad vector17 + .quad vector18 + .quad vector19 + .quad vector20 + .quad vector21 + .quad vector22 + .quad vector23 + .quad vector24 + .quad vector25 + .quad vector26 + .quad vector27 + .quad vector28 + .quad vector29 + .quad vector30 + .quad vector31 + .quad vector32 + .quad vector33 + .quad vector34 + .quad vector35 + .quad vector36 + .quad vector37 + .quad vector38 + .quad vector39 + .quad vector40 + .quad vector41 + .quad vector42 + .quad vector43 + .quad vector44 + .quad vector45 + .quad vector46 + .quad vector47 + .quad vector48 + .quad vector49 + .quad vector50 + .quad vector51 + .quad vector52 + .quad vector53 + .quad vector54 + .quad vector55 + .quad vector56 + .quad vector57 + .quad vector58 + .quad vector59 + .quad vector60 + .quad vector61 + .quad vector62 + .quad vector63 + .quad vector64 + .quad vector65 + .quad vector66 + .quad vector67 + .quad vector68 + .quad vector69 + .quad vector70 + .quad vector71 + .quad vector72 + .quad vector73 + .quad vector74 + .quad vector75 + .quad vector76 + .quad vector77 + .quad vector78 + .quad vector79 + .quad vector80 + .quad vector81 + .quad vector82 + .quad vector83 + .quad vector84 + .quad vector85 + .quad vector86 + .quad vector87 + .quad vector88 + .quad vector89 + .quad vector90 + .quad vector91 + .quad vector92 + .quad vector93 + .quad vector94 + .quad vector95 + .quad vector96 + .quad vector97 + .quad vector98 + .quad vector99 + .quad vector100 + .quad vector101 + .quad vector102 + .quad vector103 + .quad vector104 + .quad vector105 + .quad vector106 + .quad vector107 + .quad vector108 + .quad vector109 + .quad vector110 + .quad vector111 + .quad vector112 + .quad vector113 + .quad vector114 + .quad vector115 + .quad vector116 + .quad vector117 + .quad vector118 + .quad vector119 + .quad vector120 + .quad vector121 + .quad vector122 + .quad vector123 + .quad vector124 + .quad vector125 + .quad vector126 + .quad vector127 + .quad vector128 + .quad vector129 + .quad vector130 + .quad vector131 + .quad vector132 + .quad vector133 + .quad vector134 + .quad vector135 + .quad vector136 + .quad vector137 + .quad vector138 + .quad vector139 + .quad vector140 + .quad vector141 + .quad vector142 + .quad vector143 + .quad vector144 + .quad vector145 + .quad vector146 + .quad vector147 + .quad vector148 + .quad vector149 + .quad vector150 + .quad vector151 + .quad vector152 + .quad vector153 + .quad vector154 + .quad vector155 + .quad vector156 + .quad vector157 + .quad vector158 + .quad vector159 + .quad vector160 + .quad vector161 + .quad vector162 + .quad vector163 + .quad vector164 + .quad vector165 + .quad vector166 + .quad vector167 + .quad vector168 + .quad vector169 + .quad vector170 + .quad vector171 + .quad vector172 + .quad vector173 + .quad vector174 + .quad vector175 + .quad vector176 + .quad vector177 + .quad vector178 + .quad vector179 + .quad vector180 + .quad vector181 + .quad vector182 + .quad vector183 + .quad vector184 + .quad vector185 + .quad vector186 + .quad vector187 + .quad vector188 + .quad vector189 + .quad vector190 + .quad vector191 + .quad vector192 + .quad vector193 + .quad vector194 + .quad vector195 + .quad vector196 + .quad vector197 + .quad vector198 + .quad vector199 + .quad vector200 + .quad vector201 + 
.quad vector202 + .quad vector203 + .quad vector204 + .quad vector205 + .quad vector206 + .quad vector207 + .quad vector208 + .quad vector209 + .quad vector210 + .quad vector211 + .quad vector212 + .quad vector213 + .quad vector214 + .quad vector215 + .quad vector216 + .quad vector217 + .quad vector218 + .quad vector219 + .quad vector220 + .quad vector221 + .quad vector222 + .quad vector223 + .quad vector224 + .quad vector225 + .quad vector226 + .quad vector227 + .quad vector228 + .quad vector229 + .quad vector230 + .quad vector231 + .quad vector232 + .quad vector233 + .quad vector234 + .quad vector235 + .quad vector236 + .quad vector237 + .quad vector238 + .quad vector239 + .quad vector240 + .quad vector241 + .quad vector242 + .quad vector243 + .quad vector244 + .quad vector245 + .quad vector246 + .quad vector247 + .quad vector248 + .quad vector249 + .quad vector250 + .quad vector251 + .quad vector252 + .quad vector253 + .quad vector254 + .quad vector255 diff --git a/src/kxos-frame/src/user.rs b/src/kxos-frame/src/user.rs index b6af2493a..39eb1b509 100644 --- a/src/kxos-frame/src/user.rs +++ b/src/kxos-frame/src/user.rs @@ -1,9 +1,16 @@ //! User space. +use crate::println; + use crate::cpu::CpuContext; +use crate::prelude::*; use crate::task::Task; +use crate::trap::SyscallFrame; use crate::vm::VmSpace; -use crate::{prelude::*}; + +extern "C" { + fn switch_to_user_space(cpu_context: &CpuContext, syscall_frame: &SyscallFrame); +} /// A user space. /// @@ -77,6 +84,13 @@ pub struct UserMode<'a> { impl<'a> !Send for UserMode<'a> {} impl<'a> UserMode<'a> { + pub fn new(user_space: &'a Arc<UserSpace>) -> Self { + Self { + current: Task::current(), + user_space, + } + } + /// Starts executing in the user mode. /// /// The method returns for one of three possible reasons indicated by `UserEvent`. @@ -87,7 +101,13 @@ /// After handling the user event and updating the user-mode CPU context, /// this method can be invoked again to go back to the user space. pub fn execute(&mut self) -> UserEvent { - todo!() + self.user_space.vm_space().activate(); + self.current.syscall_frame().caller.rcx = self.user_space.cpu_ctx.gp_regs.rip as usize; + println!("{:?}", self.current.syscall_frame()); + unsafe { + switch_to_user_space(&self.user_space.cpu_ctx, self.current.syscall_frame()); + } + UserEvent::Syscall } /// Returns an immutable reference to the user-mode CPU context. @@ -101,6 +121,7 @@ } } +#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)] /// A user event is what brings the control of the CPU back from /// the user space to the kernel space. 
/// diff --git a/src/kxos-frame/src/vm/space.rs b/src/kxos-frame/src/vm/space.rs index 43237d3ad..076453e6d 100644 --- a/src/kxos-frame/src/vm/space.rs +++ b/src/kxos-frame/src/vm/space.rs @@ -1,5 +1,5 @@ use crate::config::PAGE_SIZE; -use crate::{UPSafeCell, println}; +use crate::{x86_64_util, UPSafeCell}; use bitflags::bitflags; use core::ops::Range; @@ -34,7 +34,7 @@ impl VmSpace { } pub fn activate(&self) { - self.memory_set.exclusive_access().activate(); + x86_64_util::set_cr3(self.memory_set.exclusive_access().pt.root_pa.0); } /// Maps some physical memory pages into the VM space according to the given @@ -54,15 +54,12 @@ if options.addr.is_none() { return Err(Error::InvalidArgs); } - self.memory_set - .exclusive_access() - .pt - .map_area(&mut MapArea::new( - VirtAddr(options.addr.unwrap()), - frames.len()*PAGE_SIZE, - flags, - frames, - )); + self.memory_set.exclusive_access().map_area(MapArea::new( + VirtAddr(options.addr.unwrap()), + frames.len() * PAGE_SIZE, + flags, + frames, + )); Ok(options.addr.unwrap()) } @@ -192,5 +189,11 @@ bitflags! { const RX = Self::R.bits | Self::X.bits; /// Readable + writable + executable. const RWX = Self::R.bits | Self::W.bits | Self::X.bits; + /// Readable + writable + user. + const RWU = Self::R.bits | Self::W.bits | Self::U.bits; + /// Readable + executable + user. + const RXU = Self::R.bits | Self::X.bits | Self::U.bits; + /// Readable + writable + executable + user. + const RWXU = Self::R.bits | Self::W.bits | Self::X.bits | Self::U.bits; } } diff --git a/src/kxos-frame/src/x86_64_util.rs b/src/kxos-frame/src/x86_64_util.rs new file mode 100644 index 000000000..61928fed0 --- /dev/null +++ b/src/kxos-frame/src/x86_64_util.rs @@ -0,0 +1,185 @@ +//! Utilities for x86_64; this module will be renamed to x86_64 once the dependency on the x86_64 crate is no longer necessary. +use core::arch::asm; + +#[inline(always)] +pub fn read_rsp() -> usize { + let val: usize; + unsafe { + asm!("mov {}, rsp", out(reg) val); + } + val +} + +#[inline(always)] +pub fn in8(port: u16) -> u8 { + // ::x86_64::instructions::port::Port::read() + let val: u8; + unsafe { + asm!("in al, dx", out("al") val, in("dx") port, options(nomem, nostack, preserves_flags)); + } + val +} + +#[inline(always)] +pub fn in16(port: u16) -> u16 { + let val: u16; + unsafe { + asm!("in ax, dx", out("ax") val, in("dx") port, options(nomem, nostack, preserves_flags)); + } + val +} + +#[inline(always)] +pub fn in32(port: u16) -> u32 { + let val: u32; + unsafe { + asm!("in eax, dx", out("eax") val, in("dx") port, options(nomem, nostack, preserves_flags)); + } + val +} + +#[inline(always)] +pub fn out8(port: u16, val: u8) { + unsafe { + asm!("out dx, al", in("dx") port, in("al") val, options(nomem, nostack, preserves_flags)); + } +} + +#[inline(always)] +pub fn out16(port: u16, val: u16) { + unsafe { + asm!("out dx, ax", in("dx") port, in("ax") val, options(nomem, nostack, preserves_flags)); + } +} + +#[inline(always)] +pub fn out32(port: u16, val: u32) { + unsafe { + asm!("out dx, eax", in("dx") port, in("eax") val, options(nomem, nostack, preserves_flags)); + } +} + +#[inline(always)] +pub fn disable_interrupts() { + unsafe { + asm!("cli", options(nomem, nostack)); + } +} + +#[inline(always)] +pub fn enable_interrupts_and_hlt() { + unsafe { + asm!("sti; hlt", options(nomem, nostack)); + } +} + +pub const RING0: u16 = 0; +pub const RING3: u16 = 3; + +pub const RFLAGS_IF: usize = 1 << 9; + +#[inline(always)] +pub fn get_msr(id: u32) -> usize { + let (high, low): (u32, u32); + unsafe { + asm!("rdmsr", in("ecx") id, out("eax") low, 
out("edx") high, options(nomem, nostack, preserves_flags)); + } + ((high as usize) << 32) | (low as usize) +} + +#[inline(always)] +pub fn set_msr(id: u32, val: usize) { + let low = val as u32; + let high = (val >> 32) as u32; + unsafe { + asm!("wrmsr", in("ecx") id, in("eax") low, in("edx") high, options(nostack, preserves_flags)); + } +} + +pub const EFER_MSR: u32 = 0xC000_0080; +pub const STAR_MSR: u32 = 0xC000_0081; +pub const LSTAR_MSR: u32 = 0xC000_0082; +pub const SFMASK_MSR: u32 = 0xC000_0084; + +#[derive(Debug, Clone, Copy)] +#[repr(C, packed)] +pub struct DescriptorTablePointer { + /// Size of the DT. + pub limit: u16, + /// Pointer to the memory region containing the DT. + pub base: usize, +} + +/// Load a GDT. +#[inline(always)] +pub fn lgdt(gdt: &DescriptorTablePointer) { + unsafe { + asm!("lgdt [{}]", in(reg) gdt, options(readonly, nostack, preserves_flags)); + } +} + +/// Load an IDT. +#[inline(always)] +pub fn lidt(idt: &DescriptorTablePointer) { + unsafe { + asm!("lidt [{}]", in(reg) idt, options(readonly, nostack, preserves_flags)); + } +} + +/// Load the task state register using the `ltr` instruction. +#[inline(always)] +pub fn load_tss(sel: u16) { + unsafe { + asm!("ltr {0:x}", in(reg) sel, options(nomem, nostack, preserves_flags)); + } +} + +#[inline(always)] +pub fn set_cs(sel: u16) { + unsafe { + asm!( + "push {sel}", + "lea {tmp}, [1f + rip]", + "push {tmp}", + "retfq", + "1:", + sel = in(reg) sel as usize, + tmp = lateout(reg) _, + options(preserves_flags), + ); + } +} + +#[inline(always)] +pub fn set_ss(sel: u16) { + unsafe { + asm!("mov ss, {0:x}", in(reg) sel, options(nostack, preserves_flags)); + } +} + +#[inline(always)] +pub fn get_cr3() -> usize { + let val: usize; + unsafe { + asm!("mov {}, cr3", out(reg) val, options(nomem, nostack, preserves_flags)); + } + // Mask top bits and flags. + val & 0x_000f_ffff_ffff_f000 +} + +#[inline(always)] +pub fn get_cr3_raw() -> usize { + let val: usize; + unsafe { + asm!("mov {}, cr3", out(reg) val, options(nomem, nostack, preserves_flags)); + } + // Mask top bits and flags. + val +} + +#[inline(always)] +pub fn set_cr3(pa: usize) { + unsafe { + asm!("mov cr3, {}", in(reg) pa, options(nostack, preserves_flags)); + } +} diff --git a/src/kxos-std/src/memory/elf.rs b/src/kxos-std/src/memory/elf.rs index 5398e6be6..50470e197 100644 --- a/src/kxos-std/src/memory/elf.rs +++ b/src/kxos-std/src/memory/elf.rs @@ -147,7 +147,7 @@ impl<'a> ElfLoadInfo<'a> { /// return the perm of elf pages /// FIXME: Set the correct permission bit of user pages. 
fn perm() -> VmPerm { - VmPerm::RX + VmPerm::RXU } pub fn entry_point(&self) -> u64 { diff --git a/src/kxos-std/src/memory/user_stack.rs b/src/kxos-std/src/memory/user_stack.rs index 2ffa25903..4b209f130 100644 --- a/src/kxos-std/src/memory/user_stack.rs +++ b/src/kxos-std/src/memory/user_stack.rs @@ -38,6 +38,6 @@ impl UserStack { } pub const fn perm() -> VmPerm { - VmPerm::RW + VmPerm::RWU } } diff --git a/src/kxos-std/src/process/task.rs b/src/kxos-std/src/process/task.rs index f2fd03579..13997cf0f 100644 --- a/src/kxos-std/src/process/task.rs +++ b/src/kxos-std/src/process/task.rs @@ -1,9 +1,10 @@ use alloc::sync::Arc; use kxos_frame::{ cpu::CpuContext, + println, task::Task, - user::{UserEvent, UserSpace}, - vm::VmSpace, println, + user::{UserEvent, UserMode, UserSpace}, + vm::VmSpace, }; use crate::{memory::load_elf_to_vm_space, syscall::syscall_handler}; @@ -23,9 +24,10 @@ pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc { fn user_task_entry() { let cur = Task::current(); let user_space = cur.user_space().expect("user task should have user space"); - let mut user_mode = user_space.user_mode(); + let mut user_mode = UserMode::new(user_space); loop { let user_event = user_mode.execute(); + println!("get user event:{:?}", user_event); let context = user_mode.context_mut(); if let HandlerResult::Exit = handle_user_event(user_event, context) { // FIXME: How to set task status? How to set exit code of process? diff --git a/src/src/main.rs b/src/src/main.rs index db1822d41..6fffb4e33 100644 --- a/src/src/main.rs +++ b/src/src/main.rs @@ -29,5 +29,6 @@ fn kernel_main(boot_info: &'static mut BootInfo) -> ! { #[panic_handler] fn panic(_info: &PanicInfo) -> ! { + println!("[panic]:{:?}", _info); loop {} }
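[Reviewer note, not part of the patch] With irq.rs moved under trap/, IRQ callbacks now receive the frame's own TrapFrame instead of the removed InterruptInformation, mirroring the breakpoint handler in lib.rs. A minimal registration sketch; the `log_on_irq` helper is hypothetical, and how the `IrqLine` itself is acquired is left out because that API is not shown in this diff:

    use kxos_frame::println;
    use kxos_frame::trap::{IrqCallbackHandle, IrqLine, TrapFrame};

    // Hypothetical helper: register a logging callback on an already-acquired line.
    fn log_on_irq(line: &IrqLine) -> IrqCallbackHandle {
        let irq_num = line.num();
        // The returned handle must be kept alive; dropping it unregisters the callback.
        line.on_active(move |frame: TrapFrame| {
            println!("IRQ {} fired:\n{:#?}", irq_num, frame);
        })
    }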