Mark `sync_dma_range` as `unsafe`

Ruihan Li 2025-10-30 13:12:36 +08:00 committed by Tate, Hongliang Tian
parent d487e42b7c
commit 9c70ac0f0a
4 changed files with 61 additions and 46 deletions


@@ -102,10 +102,18 @@ pub(crate) fn tlb_flush_all_including_global() {
}
}
pub(crate) fn sync_dma_range(_range: Range<Vaddr>, _direction: DmaDirection) {
todo!("Implement DMA synchronization for LoongArch64 architecture");
/// # Safety
///
/// The caller must ensure that the virtual address range and DMA direction correspond correctly to
/// a DMA region.
pub(crate) unsafe fn sync_dma_range(_range: Range<Vaddr>, _direction: DmaDirection) {
unimplemented!("DMA synchronization is unimplemented in LoongArch64")
}
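Since `sync_dma_range` is now `unsafe`, every call site has to justify that the range and direction describe a DMA region the caller owns. Below is a minimal call-site sketch, not part of the commit: `clean_before_device_read` and `dma_vaddr_range` are illustrative placeholders for a caller and a range derived from its own streaming DMA mapping, while `sync_dma_range`, `Vaddr`, and `DmaDirection` are the items shown in the diff.

// Illustrative call-site sketch (not part of the commit); `dma_vaddr_range` stands for a
// range the caller derives from a streaming DMA mapping it owns.
fn clean_before_device_read(dma_vaddr_range: Range<Vaddr>) {
    // SAFETY: `dma_vaddr_range` covers exactly the virtual addresses of a streaming DMA
    // mapping owned by the caller, and the CPU has just written to it, so `ToDevice` is
    // the matching direction.
    unsafe { crate::arch::mm::sync_dma_range(dma_vaddr_range, DmaDirection::ToDevice) };
}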
#[derive(Clone, Copy, Pod, Default)]
#[repr(C)]
pub(crate) struct PageTableEntry(usize);
/// Activates the given level 4 page table.
///
/// "pgdl" or "pgdh" register doesn't have a field that encodes the cache policy,
@@ -131,10 +139,6 @@ pub(crate) fn current_page_table_paddr() -> Paddr {
pgdl
}
#[derive(Clone, Copy, Pod, Default)]
#[repr(C)]
pub(crate) struct PageTableEntry(usize);
impl PageTableEntry {
const PHYS_ADDR_MASK: usize = 0x0000_FFFF_FFFF_F000;


@@ -104,48 +104,51 @@ pub(crate) fn tlb_flush_all_including_global() {
riscv::asm::sfence_vma_all()
}
pub(crate) fn sync_dma_range(range: Range<Vaddr>, direction: DmaDirection) {
if has_extensions(IsaExtensions::ZICBOM) {
static CMO_MANAGEMENT_BLOCK_SIZE: Once<usize> = Once::new();
CMO_MANAGEMENT_BLOCK_SIZE.call_once(|| {
DEVICE_TREE
.get()
.unwrap()
.cpus()
.find(|cpu| cpu.property("mmu-type").is_some())
.expect("Failed to find an application CPU node in device tree")
.property("riscv,cbom-block-size")
.expect("Failed to find `riscv,cbom-block-size` property of the CPU node")
.as_usize()
.unwrap_or(64)
});
/// # Safety
///
/// The caller must ensure that the virtual address range and DMA direction correspond correctly to
/// a DMA region.
pub(crate) unsafe fn sync_dma_range(range: Range<Vaddr>, direction: DmaDirection) {
if !has_extensions(IsaExtensions::ZICBOM) {
unimplemented!("DMA synchronization is unimplemented without ZICBOM extension")
}
for addr in range.step_by(*CMO_MANAGEMENT_BLOCK_SIZE.get().unwrap()) {
// SAFETY: These are cache maintenance operations on a valid, owned
// memory range. They are required for correctness on systems with
// non-coherent DMA.
unsafe {
match direction {
DmaDirection::ToDevice => {
core::arch::asm!("cbo.clean ({})", in(reg) addr, options(nostack))
}
DmaDirection::FromDevice => {
core::arch::asm!("cbo.inval ({})", in(reg) addr, options(nostack));
}
DmaDirection::Bidirectional => {
core::arch::asm!("cbo.flush ({})", in(reg) addr, options(nostack));
}
static CMO_MANAGEMENT_BLOCK_SIZE: Once<usize> = Once::new();
let cmo_management_block_size = *CMO_MANAGEMENT_BLOCK_SIZE.call_once(|| {
DEVICE_TREE
.get()
.unwrap()
.cpus()
.find(|cpu| cpu.property("mmu-type").is_some())
.expect("Failed to find an application CPU node in device tree")
.property("riscv,cbom-block-size")
.expect("Failed to find `riscv,cbom-block-size` property of the CPU node")
.as_usize()
.expect("Failed to parse `riscv,cbom-block-size` property of the CPU node")
});
for addr in range.step_by(cmo_management_block_size) {
// Performing cache maintenance operations is required for correctness on systems with
// non-coherent DMA.
// SAFETY: The caller ensures that the virtual address range corresponds to a DMA region.
// So the underlying memory is untyped and the operations are safe to perform.
unsafe {
match direction {
DmaDirection::ToDevice => {
core::arch::asm!("cbo.clean ({})", in(reg) addr, options(nostack));
}
DmaDirection::FromDevice => {
core::arch::asm!("cbo.inval ({})", in(reg) addr, options(nostack));
}
DmaDirection::Bidirectional => {
core::arch::asm!("cbo.flush ({})", in(reg) addr, options(nostack));
}
}
}
// Ensure that all cache operations have completed before proceeding.
// SAFETY: Safe because it is only a memory fence.
unsafe {
core::arch::asm!("fence rw, rw", options(nostack));
}
} else {
// TODO: Implement DMA synchronization without ZICBOM support.
}
// Ensure that all cache operations have completed before proceeding.
// SAFETY: Performing a memory fence is always safe.
unsafe { core::arch::asm!("fence rw, rw", options(nostack)) };
}
#[derive(Clone, Copy, Pod, Default)]
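For reference, the mapping from DMA direction to Zicbom operation above follows the usual cache-maintenance rules. The sketch below is not part of the commit; it only restates that mapping with the reasoning spelled out, reusing the `DmaDirection` variants from the diff.

// Illustrative restatement of the direction-to-operation mapping (not part of the commit).
fn zicbom_op(direction: DmaDirection) -> &'static str {
    match direction {
        // CPU -> device: write dirty cache lines back so the device reads what the CPU wrote.
        DmaDirection::ToDevice => "cbo.clean",
        // Device -> CPU: invalidate stale cache lines so the CPU re-reads what the device wrote.
        DmaDirection::FromDevice => "cbo.inval",
        // Either side may have written: write back and invalidate.
        DmaDirection::Bidirectional => "cbo.flush",
    }
}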


@@ -110,7 +110,11 @@ pub(crate) fn tlb_flush_all_including_global() {
}
}
pub(crate) fn sync_dma_range(_range: Range<Vaddr>, _direction: DmaDirection) {
/// # Safety
///
/// The caller must ensure that the virtual address range and DMA direction correspond correctly to
/// a DMA region.
pub(crate) unsafe fn sync_dma_range(_range: Range<Vaddr>, _direction: DmaDirection) {
// The streaming DMA mapping in x86_64 is cache coherent, and does not
// require synchronization.
// Reference: <https://lwn.net/Articles/855328/>, <https://lwn.net/Articles/2265/>.


@@ -122,16 +122,20 @@ impl DmaStream {
/// [`read_bytes`]: crate::mm::VmIo::read_bytes
/// [`write_bytes`]: crate::mm::VmIo::write_bytes
pub fn sync(&self, byte_range: Range<usize>) -> Result<(), Error> {
if byte_range.end > self.size() {
let size = self.size();
if byte_range.end > size || byte_range.start > size {
return Err(Error::InvalidArgs);
}
if self.is_cache_coherent {
return Ok(());
}
let start_vaddr = crate::mm::paddr_to_vaddr(self.segment.paddr());
let range = (start_vaddr + byte_range.start)..(start_vaddr + byte_range.end);
crate::arch::mm::sync_dma_range(range, self.direction);
// SAFETY: We've checked that the range is in bounds, so the virtual address range and the
// DMA direction correspond to a DMA region (they're part of `self`).
unsafe { crate::arch::mm::sync_dma_range(range, self.direction) };
Ok(())
}
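On the caller side, `DmaStream::sync` keeps its safe signature, so drivers are unaffected apart from the stricter bounds check. A hypothetical receive path, for a stream mapped for device-to-memory transfers, might look like the sketch below; the function and buffer handling are illustrative, and `read_bytes` is assumed to have the `VmIo` signature referenced in the doc comment above.

// Hypothetical driver-side use of `DmaStream::sync` (not part of the commit).
fn on_rx_complete(stream: &DmaStream, len: usize, buf: &mut [u8]) -> Result<(), Error> {
    // The device has finished a DMA write of `len` bytes into the stream's backing memory.
    // On systems with non-coherent DMA this discards stale cache lines; on cache-coherent
    // mappings it returns immediately.
    stream.sync(0..len)?;
    // Now read the freshly transferred bytes through `VmIo::read_bytes`.
    stream.read_bytes(0, &mut buf[..len])?;
    Ok(())
}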