From fc2e8d95c93340172c8f34896a67a74394e970a2 Mon Sep 17 00:00:00 2001 From: Tao Su Date: Wed, 7 Jan 2026 10:11:11 +0000 Subject: [PATCH] Add `DmaCoherent::alloc_uninit()` and defer zeroing to `DmaCoherent::alloc()` --- ostd/src/arch/x86/tdx_guest.rs | 4 ++-- ostd/src/mm/dma/dma_coherent.rs | 17 ++++++++++++++++- ostd/src/mm/dma/util.rs | 4 +++- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/ostd/src/arch/x86/tdx_guest.rs b/ostd/src/arch/x86/tdx_guest.rs index 0eaef3399..4ad13773a 100644 --- a/ostd/src/arch/x86/tdx_guest.rs +++ b/ostd/src/arch/x86/tdx_guest.rs @@ -18,7 +18,7 @@ pub enum PageConvertError { /// /// It invokes the [`map_gpa`] TDVMCALL to convert those pages into Intel TDX /// shared pages. Due to the conversion, any existing data on the pages will -/// be erased. +/// be lost. /// /// # Safety /// @@ -38,7 +38,7 @@ pub unsafe fn unprotect_gpa_tdvm_call(gpa: Paddr, size: usize) -> Result<(), Pag /// /// It invokes the [`map_gpa`] TDVMCALL and the [`accept_page`] TDCALL to /// convert those pages into Intel TDX private pages. Due to the conversion, -/// any existing data on the pages will be erased. +/// any existing data on the pages will be zeroed. /// /// # Safety /// diff --git a/ostd/src/mm/dma/dma_coherent.rs b/ostd/src/mm/dma/dma_coherent.rs index e4362d748..665902ead 100644 --- a/ostd/src/mm/dma/dma_coherent.rs +++ b/ostd/src/mm/dma/dma_coherent.rs @@ -37,14 +37,29 @@ enum Inner { impl DmaCoherent { /// Allocates a region of physical memory for coherent DMA access. /// + /// The memory of the newly-allocated DMA buffer is initialized to zeros. + /// /// The `is_cache_coherent` argument specifies whether the target device /// that the DMA mapping is prepared for can access the main memory in a /// CPU cache coherent way or not. 
pub fn alloc(nframes: usize, is_cache_coherent: bool) -> Result { + Self::alloc_uninit(nframes, is_cache_coherent).inspect(|dma| { + dma.writer().fill_zeros(dma.size()); + }) + } + + /// Allocates a region of physical memory for coherent DMA access + /// without initialization. + /// + /// This method is the same as [`DmaCoherent::alloc`] + /// except that it skips zeroing the memory of the newly-allocated DMA region. + pub fn alloc_uninit(nframes: usize, is_cache_coherent: bool) -> Result { let cvm = cvm_need_private_protection(); let (inner, paddr_range) = if is_cache_coherent && !cvm { - let segment = FrameAllocOptions::new().alloc_segment(nframes)?; + let segment = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment(nframes)?; let paddr_range = segment.paddr_range(); (Inner::Segment(segment), paddr_range) diff --git a/ostd/src/mm/dma/util.rs b/ostd/src/mm/dma/util.rs index f9c52a346..1a39d841f 100644 --- a/ostd/src/mm/dma/util.rs +++ b/ostd/src/mm/dma/util.rs @@ -76,7 +76,9 @@ pub(super) fn alloc_kva( is_cache_coherent: bool, ) -> Result<(KVirtArea, Paddr), Error> { let segment = Segment::from_unsized( - FrameAllocOptions::new().alloc_segment_with(nframes, |_| DmaBufferMeta)?, + FrameAllocOptions::new() + .zeroed(false) + .alloc_segment_with(nframes, |_| DmaBufferMeta)?, ); #[cfg_attr(not(target_arch = "x86_64"), expect(unused_labels))]