diff --git a/ostd/src/arch/loongarch/mm/mod.rs b/ostd/src/arch/loongarch/mm/mod.rs
index 8b2c41a0f..47ad5b079 100644
--- a/ostd/src/arch/loongarch/mm/mod.rs
+++ b/ostd/src/arch/loongarch/mm/mod.rs
@@ -102,10 +102,18 @@ pub(crate) fn tlb_flush_all_including_global() {
     }
 }
 
-pub(crate) fn sync_dma_range(_range: Range<Vaddr>, _direction: DmaDirection) {
-    todo!("Implement DMA synchronization for LoongArch64 architecture");
+/// # Safety
+///
+/// The caller must ensure that the virtual address range and DMA direction correspond correctly to
+/// a DMA region.
+pub(crate) unsafe fn sync_dma_range(_range: Range<Vaddr>, _direction: DmaDirection) {
+    unimplemented!("DMA synchronization is unimplemented in LoongArch64")
 }
 
+#[derive(Clone, Copy, Pod, Default)]
+#[repr(C)]
+pub(crate) struct PageTableEntry(usize);
+
 /// Activates the given level 4 page table.
 ///
 /// "pgdl" or "pgdh" register doesn't have a field that encodes the cache policy,
@@ -131,10 +139,6 @@ pub(crate) fn current_page_table_paddr() -> Paddr {
     pgdl
 }
 
-#[derive(Clone, Copy, Pod, Default)]
-#[repr(C)]
-pub(crate) struct PageTableEntry(usize);
-
 impl PageTableEntry {
     const PHYS_ADDR_MASK: usize = 0x0000_FFFF_FFFF_F000;
 
diff --git a/ostd/src/arch/riscv/mm/mod.rs b/ostd/src/arch/riscv/mm/mod.rs
index 2e807baa1..aa993ecf4 100644
--- a/ostd/src/arch/riscv/mm/mod.rs
+++ b/ostd/src/arch/riscv/mm/mod.rs
@@ -104,48 +104,51 @@ pub(crate) fn tlb_flush_all_including_global() {
     riscv::asm::sfence_vma_all()
 }
 
-pub(crate) fn sync_dma_range(range: Range<Vaddr>, direction: DmaDirection) {
-    if has_extensions(IsaExtensions::ZICBOM) {
-        static CMO_MANAGEMENT_BLOCK_SIZE: Once<usize> = Once::new();
-        CMO_MANAGEMENT_BLOCK_SIZE.call_once(|| {
-            DEVICE_TREE
-                .get()
-                .unwrap()
-                .cpus()
-                .find(|cpu| cpu.property("mmu-type").is_some())
-                .expect("Failed to find an application CPU node in device tree")
-                .property("riscv,cbom-block-size")
-                .expect("Failed to find `riscv,cbom-block-size` property of the CPU node")
-                .as_usize()
-                .unwrap_or(64)
-        });
+/// # Safety
+///
+/// The caller must ensure that the virtual address range and DMA direction correspond correctly to
+/// a DMA region.
+pub(crate) unsafe fn sync_dma_range(range: Range<Vaddr>, direction: DmaDirection) {
+    if !has_extensions(IsaExtensions::ZICBOM) {
+        unimplemented!("DMA synchronization is unimplemented without ZICBOM extension")
+    }
 
-        for addr in range.step_by(*CMO_MANAGEMENT_BLOCK_SIZE.get().unwrap()) {
-            // SAFETY: These are cache maintenance operations on a valid, owned
-            // memory range. They are required for correctness on systems with
-            // non-coherent DMA.
-            unsafe {
-                match direction {
-                    DmaDirection::ToDevice => {
-                        core::arch::asm!("cbo.clean ({})", in(reg) addr, options(nostack))
-                    }
-                    DmaDirection::FromDevice => {
-                        core::arch::asm!("cbo.inval ({})", in(reg) addr, options(nostack));
-                    }
-                    DmaDirection::Bidirectional => {
-                        core::arch::asm!("cbo.flush ({})", in(reg) addr, options(nostack));
-                    }
-                }
+    static CMO_MANAGEMENT_BLOCK_SIZE: Once<usize> = Once::new();
+    let cmo_management_block_size = *CMO_MANAGEMENT_BLOCK_SIZE.call_once(|| {
+        DEVICE_TREE
+            .get()
+            .unwrap()
+            .cpus()
+            .find(|cpu| cpu.property("mmu-type").is_some())
+            .expect("Failed to find an application CPU node in device tree")
+            .property("riscv,cbom-block-size")
+            .expect("Failed to find `riscv,cbom-block-size` property of the CPU node")
+            .as_usize()
+            .expect("Failed to parse `riscv,cbom-block-size` property of the CPU node")
+    });
+
+    for addr in range.step_by(cmo_management_block_size) {
+        // Performing cache maintenance operations is required for correctness on systems with
+        // non-coherent DMA.
+        // SAFETY: The caller ensures that the virtual address range corresponds to a DMA region.
+        // So the underlying memory is untyped and the operations are safe to perform.
+        unsafe {
+            match direction {
+                DmaDirection::ToDevice => {
+                    core::arch::asm!("cbo.clean ({})", in(reg) addr, options(nostack));
+                }
+                DmaDirection::FromDevice => {
+                    core::arch::asm!("cbo.inval ({})", in(reg) addr, options(nostack));
+                }
+                DmaDirection::Bidirectional => {
+                    core::arch::asm!("cbo.flush ({})", in(reg) addr, options(nostack));
+                }
             }
         }
-        // Ensure that all cache operations have completed before proceeding.
-        // SAFETY: Safe because it is only a memory fence.
-        unsafe {
-            core::arch::asm!("fence rw, rw", options(nostack));
-        }
-    } else {
-        // TODO: Implement DMA synchronization without ZICBOM support.
     }
+    // Ensure that all cache operations have completed before proceeding.
+    // SAFETY: Performing a memory fence is always safe.
+    unsafe { core::arch::asm!("fence rw, rw", options(nostack)) };
 }
 
 #[derive(Clone, Copy, Pod, Default)]
diff --git a/ostd/src/arch/x86/mm/mod.rs b/ostd/src/arch/x86/mm/mod.rs
index ada363e09..78586136d 100644
--- a/ostd/src/arch/x86/mm/mod.rs
+++ b/ostd/src/arch/x86/mm/mod.rs
@@ -110,7 +110,11 @@ pub(crate) fn tlb_flush_all_including_global() {
     }
 }
 
-pub(crate) fn sync_dma_range(_range: Range<Vaddr>, _direction: DmaDirection) {
+/// # Safety
+///
+/// The caller must ensure that the virtual address range and DMA direction correspond correctly to
+/// a DMA region.
+pub(crate) unsafe fn sync_dma_range(_range: Range<Vaddr>, _direction: DmaDirection) {
     // The streaming DMA mapping in x86_64 is cache coherent, and does not
     // require synchronization.
     // Reference: , .
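The RISC-V path above caches the `riscv,cbom-block-size` lookup in a `Once` and now binds the value returned by `call_once` directly, rather than calling `call_once` and then `get().unwrap()` as before. A minimal sketch of that pattern (assuming the `spin` crate's `Once`, with the device-tree lookup replaced by a hypothetical constant):

```rust
use spin::Once;

// The closure runs at most once; every call, first or later, yields a
// reference to the same cached value, so no separate `get()` is needed.
static BLOCK_SIZE: Once<usize> = Once::new();

fn cbom_block_size() -> usize {
    // Hypothetical stand-in for the `riscv,cbom-block-size` device-tree lookup.
    *BLOCK_SIZE.call_once(|| 64)
}
```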
diff --git a/ostd/src/mm/dma/dma_stream.rs b/ostd/src/mm/dma/dma_stream.rs
index 031f52d0b..ed9cb424e 100644
--- a/ostd/src/mm/dma/dma_stream.rs
+++ b/ostd/src/mm/dma/dma_stream.rs
@@ -122,16 +122,20 @@ impl DmaStream {
     /// [`read_bytes`]: crate::mm::VmIo::read_bytes
     /// [`write_bytes`]: crate::mm::VmIo::write_bytes
     pub fn sync(&self, byte_range: Range<usize>) -> Result<(), Error> {
-        if byte_range.end > self.size() {
+        let size = self.size();
+        if byte_range.end > size || byte_range.start > size {
             return Err(Error::InvalidArgs);
         }
+
         if self.is_cache_coherent {
             return Ok(());
         }
         let start_vaddr = crate::mm::paddr_to_vaddr(self.segment.paddr());
         let range = (start_vaddr + byte_range.start)..(start_vaddr + byte_range.end);
-        crate::arch::mm::sync_dma_range(range, self.direction);
+        // SAFETY: We've checked that the range is in bounds, so the virtual address range and the
+        // DMA direction correspond to a DMA region (they're part of `self`).
+        unsafe { crate::arch::mm::sync_dma_range(range, self.direction) };
         Ok(())
     }
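For context, a hedged sketch of how a driver might pair `DmaStream::sync` with the `VmIo` byte accessors referenced in the doc comment; the `ostd::mm::dma` import paths are assumed from the file layout, and the device trigger/wait steps are placeholders, not OSTD APIs:

```rust
use ostd::{
    mm::{dma::DmaStream, VmIo},
    Error,
};

// Streaming DMA to the device: write through the CPU, then sync so dirty
// cache lines are cleaned to memory before the device reads them
// (`cbo.clean` on RISC-V with ZICBOM; a no-op on cache-coherent x86_64).
fn send_to_device(stream: &DmaStream, data: &[u8]) -> Result<(), Error> {
    stream.write_bytes(0, data)?;
    stream.sync(0..data.len())?;
    // ... start the memory-to-device transfer here ...
    Ok(())
}

// Streaming DMA from the device: sync first so stale cache lines are
// invalidated (`cbo.inval`), then read the freshly DMA'd bytes.
fn recv_from_device(stream: &DmaStream, buf: &mut [u8]) -> Result<(), Error> {
    // ... wait for the device-to-memory transfer to complete here ...
    stream.sync(0..buf.len())?;
    stream.read_bytes(0, buf)?;
    Ok(())
}
```

Note that `sync` now also rejects `byte_range.start > size`, a case the old `end`-only check let through for an empty range starting past the end of the stream.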