From 610504e2e94a641a591b1a50f336014b249261d5 Mon Sep 17 00:00:00 2001
From: Chen Chengjun
Date: Wed, 11 Feb 2026 09:24:40 +0000
Subject: [PATCH] Support specifying a success_fn for BIO read/write operations

---
 kernel/comps/block/src/bio.rs               | 15 +++--
 kernel/comps/block/src/impl_block_device.rs | 51 +++++++---------
 kernel/src/fs/exfat/fs.rs                   | 26 ++++++---
 kernel/src/fs/exfat/inode.rs                | 13 ++++-
 kernel/src/fs/ext2/block_group.rs           | 16 ++++--
 kernel/src/fs/ext2/fs.rs                    | 16 ++++--
 kernel/src/fs/ext2/indirect_block_cache.rs  |  2 +-
 kernel/src/fs/ext2/inode.rs                 | 64 +++++++++++----------
 8 files changed, 114 insertions(+), 89 deletions(-)

diff --git a/kernel/comps/block/src/bio.rs b/kernel/comps/block/src/bio.rs
index 66ba34cd3..7d5e7fa7c 100644
--- a/kernel/comps/block/src/bio.rs
+++ b/kernel/comps/block/src/bio.rs
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

+use alloc::boxed::Box;
 use core::sync::atomic::AtomicU64;

 use align_ext::AlignExt;
@@ -18,7 +19,7 @@ use ostd::{
 use spin::Once;

 use super::{BlockDevice, id::Sid};
-use crate::{BLOCK_SIZE, SECTOR_SIZE, prelude::*};
+use crate::{BLOCK_SIZE, SECTOR_SIZE, impl_block_device::general_complete_fn, prelude::*};

 /// The unit for block I/O.
 ///
@@ -41,7 +42,7 @@ impl Bio {
         type_: BioType,
         start_sid: Sid,
         segments: Vec<BioSegment>,
-        complete_fn: Option<fn(&SubmittedBio)>,
+        success_fn: Option<Box<dyn FnOnce() + Send + Sync>>,
     ) -> Self {
         let nsectors = segments
             .iter()
@@ -53,7 +54,7 @@
             sid_range: start_sid..start_sid + nsectors,
             sid_offset: AtomicU64::new(0),
             segments,
-            complete_fn,
+            success_fn: SpinLock::new(success_fn),
             status: AtomicU32::new(BioStatus::Init as u32),
             wait_queue: WaitQueue::new(),
         });
@@ -297,9 +298,8 @@ impl SubmittedBio {
         assert!(result.is_ok());

         self.0.wait_queue.wake_all();
-        if let Some(complete_fn) = self.0.complete_fn {
-            complete_fn(self);
-        }
+
+        general_complete_fn(self, self.0.success_fn.disable_irq().lock().take());
     }
 }

@@ -314,7 +314,7 @@ struct BioInner {
     /// The memory segments in this `Bio`
     segments: Vec<BioSegment>,
     /// The I/O completion method
-    complete_fn: Option<fn(&SubmittedBio)>,
+    success_fn: SpinLock<Option<Box<dyn FnOnce() + Send + Sync>>>,
     /// The I/O status
     status: AtomicU32,
     /// The wait queue for I/O completion
@@ -346,7 +346,6 @@ impl Debug for BioInner {
             .field("sid_range", &self.sid_range())
             .field("status", &self.status())
             .field("segments", &self.segments())
-            .field("complete_fn", &self.complete_fn)
             .finish()
     }
 }
diff --git a/kernel/comps/block/src/impl_block_device.rs b/kernel/comps/block/src/impl_block_device.rs
index 75a713b0e..c0eebd9f3 100644
--- a/kernel/comps/block/src/impl_block_device.rs
+++ b/kernel/comps/block/src/impl_block_device.rs
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: MPL-2.0

+use alloc::boxed::Box;
+
 use ostd::mm::{VmIo, VmReader, VmWriter};

 use super::{
@@ -22,12 +24,7 @@ impl dyn BlockDevice {
         bid: Bid,
         bio_segment: BioSegment,
     ) -> Result<BioStatus> {
-        let bio = Bio::new(
-            BioType::Read,
-            Sid::from(bid),
-            vec![bio_segment],
-            Some(general_complete_fn),
-        );
+        let bio = Bio::new(BioType::Read, Sid::from(bid), vec![bio_segment], None);
         let status = bio.submit_and_wait(self)?;
         Ok(status)
     }
@@ -37,13 +34,9 @@
         &self,
         bid: Bid,
         bio_segment: BioSegment,
+        success_fn: Option<Box<dyn FnOnce() + Send + Sync>>,
     ) -> Result<BioWaiter> {
-        let bio = Bio::new(
-            BioType::Read,
-            Sid::from(bid),
-            vec![bio_segment],
-            Some(general_complete_fn),
-        );
+        let bio = Bio::new(BioType::Read, Sid::from(bid), vec![bio_segment], success_fn);
         bio.submit(self)
     }

@@ -53,12 +46,7 @@
         bid: Bid,
         bio_segment: BioSegment,
     ) -> Result<BioStatus> {
-        let bio = Bio::new(
-            BioType::Write,
-            Sid::from(bid),
-            vec![bio_segment],
-            Some(general_complete_fn),
-        );
+        let bio = Bio::new(BioType::Write, Sid::from(bid), vec![bio_segment], None);
         let status = bio.submit_and_wait(self)?;
         Ok(status)
     }
@@ -68,24 +56,20 @@
         &self,
         bid: Bid,
         bio_segment: BioSegment,
+        success_fn: Option<Box<dyn FnOnce() + Send + Sync>>,
     ) -> Result<BioWaiter> {
         let bio = Bio::new(
             BioType::Write,
             Sid::from(bid),
             vec![bio_segment],
-            Some(general_complete_fn),
+            success_fn,
         );
         bio.submit(self)
     }

     /// Issues a sync request
     pub fn sync(&self) -> Result<BioStatus> {
-        let bio = Bio::new(
-            BioType::Flush,
-            Sid::from(Bid::from_offset(0)),
-            vec![],
-            Some(general_complete_fn),
-        );
+        let bio = Bio::new(BioType::Flush, Sid::from(Bid::from_offset(0)), vec![], None);
         let status = bio.submit_and_wait(self)?;
         Ok(status)
     }
@@ -120,7 +104,7 @@
                 BioType::Read,
                 Sid::from_offset(offset),
                 vec![bio_segment.clone()],
-                Some(general_complete_fn),
+                None,
             ),
             bio_segment,
         )
@@ -161,7 +145,7 @@
                 BioType::Write,
                 Sid::from_offset(offset),
                 vec![bio_segment],
-                Some(general_complete_fn),
+                None,
             )
         };

@@ -201,7 +185,7 @@
                 BioType::Write,
                 Sid::from_offset(offset),
                 vec![bio_segment],
-                Some(general_complete_fn),
+                None,
             )
         };

@@ -210,9 +194,16 @@
     }
 }

-fn general_complete_fn(bio: &SubmittedBio) {
+pub(super) fn general_complete_fn(
+    bio: &SubmittedBio,
+    success_fn: Option<Box<dyn FnOnce() + Send + Sync>>,
+) {
     match bio.status() {
-        BioStatus::Complete => (),
+        BioStatus::Complete => {
+            if let Some(success_fn) = success_fn {
+                success_fn();
+            }
+        }
         err_status => log::error!(
             "failed to do {:?} on the device with error status: {:?}",
             bio.type_(),
diff --git a/kernel/src/fs/exfat/fs.rs b/kernel/src/fs/exfat/fs.rs
index 1c488672d..54ecbaa81 100644
--- a/kernel/src/fs/exfat/fs.rs
+++ b/kernel/src/fs/exfat/fs.rs
@@ -370,7 +370,7 @@ impl ExfatFs {
 }

 impl PageCacheBackend for ExfatFs {
-    fn read_page_async(&self, idx: usize, frame: &CachePage) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: LockedCachePage) -> Result<BioWaiter> {
         if self.fs_size() < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "invalid read size")
         }
@@ -378,13 +378,19 @@
             Segment::from(frame.clone()).into(),
             BioDirection::FromDevice,
         );
-        let waiter = self
-            .block_device
-            .read_blocks_async(BlockId::new(idx as u64), bio_segment)?;
+
+        let success_fn = Box::new(move || {
+            frame.set_up_to_date();
+        });
+        let waiter = self.block_device.read_blocks_async(
+            BlockId::new(idx as u64),
+            bio_segment,
+            Some(success_fn),
+        )?;
         Ok(waiter)
     }

-    fn write_page_async(&self, idx: usize, frame: &CachePage) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: LockedCachePage) -> Result<BioWaiter> {
         if self.fs_size() < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "invalid write size")
         }
@@ -392,9 +398,13 @@
             Segment::from(frame.clone()).into(),
             BioDirection::ToDevice,
         );
-        let waiter = self
-            .block_device
-            .write_blocks_async(BlockId::new(idx as u64), bio_segment)?;
+
+        frame.set_up_to_date();
+        frame.unlock();
+
+        let waiter = self
+            .block_device
+            .write_blocks_async(BlockId::new(idx as u64), bio_segment, None)?;
         Ok(waiter)
     }

diff --git a/kernel/src/fs/exfat/inode.rs b/kernel/src/fs/exfat/inode.rs
index b7d6e2874..33f9ca703 100644
--- a/kernel/src/fs/exfat/inode.rs
+++ b/kernel/src/fs/exfat/inode.rs
@@ -134,7 +134,7 @@ struct ExfatInodeInner {
 }

 impl PageCacheBackend for ExfatInode {
-    fn read_page_async(&self, idx: usize, frame: &CachePage) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: LockedCachePage) -> Result<BioWaiter> {
         let inner = self.inner.read();
         if inner.size < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "Invalid read size")
         }
@@ -144,14 +144,18 @@
             Segment::from(frame.clone()).into(),
             BioDirection::FromDevice,
         );
+        let success_fn = Box::new(move || {
+            frame.set_up_to_date();
+        });
         let waiter = inner.fs().block_device().read_blocks_async(
             BlockId::from_offset(sector_id * inner.fs().sector_size()),
             bio_segment,
+            Some(success_fn),
         )?;
         Ok(waiter)
     }

-    fn write_page_async(&self, idx: usize, frame: &CachePage) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: LockedCachePage) -> Result<BioWaiter> {
         let inner = self.inner.read();
         let sector_size = inner.fs().sector_size();

@@ -163,9 +167,14 @@
             Segment::from(frame.clone()).into(),
             BioDirection::ToDevice,
         );
+
+        frame.set_up_to_date();
+        frame.unlock();
+
         let waiter = inner.fs().block_device().write_blocks_async(
             BlockId::from_offset(sector_id * inner.fs().sector_size()),
             bio_segment,
+            None,
         )?;
         Ok(waiter)
     }
diff --git a/kernel/src/fs/ext2/block_group.rs b/kernel/src/fs/ext2/block_group.rs
index b82f11253..a9e3f1e96 100644
--- a/kernel/src/fs/ext2/block_group.rs
+++ b/kernel/src/fs/ext2/block_group.rs
@@ -324,7 +324,7 @@ impl Debug for BlockGroup {
 }

 impl PageCacheBackend for BlockGroupImpl {
-    fn read_page_async(&self, idx: usize, frame: &CachePage) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: LockedCachePage) -> Result<BioWaiter> {
         let bid = self.inode_table_bid + idx as Ext2Bid;
         // TODO: Should we allocate the bio segment from the pool on reads?
         // This may require an additional copy to the requested frame in the completion callback.
@@ -332,13 +332,17 @@
             Segment::from(frame.clone()).into(),
             BioDirection::FromDevice,
         );
+
+        let success_fn = Box::new(move || {
+            frame.set_up_to_date();
+        });
         self.fs
             .upgrade()
             .unwrap()
-            .read_blocks_async(bid, bio_segment)
+            .read_blocks_async(bid, bio_segment, Some(success_fn))
     }

-    fn write_page_async(&self, idx: usize, frame: &CachePage) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: LockedCachePage) -> Result<BioWaiter> {
         let bid = self.inode_table_bid + idx as Ext2Bid;
         let bio_segment = BioSegment::alloc(1, BioDirection::ToDevice);
         // This requires an additional copy to the pooled bio segment.
@@ -346,10 +350,14 @@ impl PageCacheBackend for BlockGroupImpl {
             .writer()
             .unwrap()
             .write_fallible(&mut frame.reader().to_fallible())?;
+
+        frame.set_up_to_date();
+        frame.unlock();
+
         self.fs
             .upgrade()
             .unwrap()
-            .write_blocks_async(bid, bio_segment)
+            .write_blocks_async(bid, bio_segment, None)
     }

     fn npages(&self) -> usize {
diff --git a/kernel/src/fs/ext2/fs.rs b/kernel/src/fs/ext2/fs.rs
index 76ad34818..3bc6b7594 100644
--- a/kernel/src/fs/ext2/fs.rs
+++ b/kernel/src/fs/ext2/fs.rs
@@ -320,10 +320,11 @@ impl Ext2 {
         &self,
         bid: Ext2Bid,
         bio_segment: BioSegment,
+        success_fn: Option<Box<dyn FnOnce() + Send + Sync>>,
     ) -> Result<BioWaiter> {
-        let waiter = self
-            .block_device
-            .read_blocks_async(Bid::new(bid as u64), bio_segment)?;
+        let waiter = self
+            .block_device
+            .read_blocks_async(Bid::new(bid as u64), bio_segment, success_fn)?;
         Ok(waiter)
     }

@@ -343,10 +344,11 @@ impl Ext2 {
         &self,
         bid: Ext2Bid,
         bio_segment: BioSegment,
+        success_fn: Option<Box<dyn FnOnce() + Send + Sync>>,
     ) -> Result<BioWaiter> {
-        let waiter = self
-            .block_device
-            .write_blocks_async(Bid::new(bid as u64), bio_segment)?;
+        let waiter = self
+            .block_device
+            .write_blocks_async(Bid::new(bid as u64), bio_segment, success_fn)?;
         Ok(waiter)
     }

@@ -377,6 +379,7 @@ impl Ext2 {
         bio_waiter.concat(self.block_device.write_blocks_async(
             super_block.group_descriptors_bid(0),
             group_descriptors_bio_segment.clone(),
+            None,
         )?);
         bio_waiter
             .wait()
@@ -396,6 +399,7 @@ impl Ext2 {
             bio_waiter.concat(self.block_device.write_blocks_async(
                 super_block.group_descriptors_bid(idx as usize),
                 group_descriptors_bio_segment.clone(),
+                None,
             )?);
             bio_waiter.wait().ok_or_else(|| {
                 Error::with_message(Errno::EIO, "failed to sync backup metadata")
diff --git a/kernel/src/fs/ext2/indirect_block_cache.rs b/kernel/src/fs/ext2/indirect_block_cache.rs
index ae6beca15..390db2d5d 100644
--- a/kernel/src/fs/ext2/indirect_block_cache.rs
+++ b/kernel/src/fs/ext2/indirect_block_cache.rs
@@ -117,7 +117,7 @@
                 Segment::<()>::from(block.frame.clone()).into(),
                 BioDirection::ToDevice,
             );
-            bio_waiter.concat(self.fs().write_blocks_async(bid, bio_segment)?);
+            bio_waiter.concat(self.fs().write_blocks_async(bid, bio_segment, None)?);
         }
     }

diff --git a/kernel/src/fs/ext2/inode.rs b/kernel/src/fs/ext2/inode.rs
index 686bde9c8..3fdfb5c89 100644
--- a/kernel/src/fs/ext2/inode.rs
+++ b/kernel/src/fs/ext2/inode.rs
@@ -1870,7 +1870,7 @@ impl InodeBlockManager {
             let bio_segment = BioSegment::alloc(range_nblocks, BioDirection::FromDevice);
             bio_segment.reader().unwrap().read_fallible(writer)?;

-            let waiter = self.fs().read_blocks_async(start_bid, bio_segment)?;
+            let waiter = self.fs().read_blocks_async(start_bid, bio_segment, None)?;
             bio_waiter.concat(waiter);
         }
@@ -1884,22 +1884,25 @@
         }
     }

-    pub fn read_block_async(&self, bid: Ext2Bid, frame: &CachePage) -> Result<BioWaiter> {
-        let mut bio_waiter = BioWaiter::new();
+    pub fn read_block_async(&self, bid: Ext2Bid, frame: LockedCachePage) -> Result<BioWaiter> {
+        let dev_range = DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)?.read()?;
+        let start_bid = dev_range.start as Ext2Bid;
+        // TODO: Should we allocate the bio segment from the pool on reads?
+        // This may require an additional copy to the requested frame in the completion callback.
+        let bio_segment = BioSegment::new_from_segment(
+            Segment::from(frame.clone()).into(),
+            BioDirection::FromDevice,
+        );

-        for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
-            let start_bid = dev_range.start as Ext2Bid;
-            // TODO: Should we allocate the bio segment from the pool on reads?
-            // This may require an additional copy to the requested frame in the completion callback.
-            let bio_segment = BioSegment::new_from_segment(
-                Segment::from(frame.clone()).into(),
-                BioDirection::FromDevice,
-            );
-            let waiter = self.fs().read_blocks_async(start_bid, bio_segment)?;
-            bio_waiter.concat(waiter);
-        }
+        let success_fn = Box::new(move || {
+            frame.set_up_to_date();
+        });

-        Ok(bio_waiter)
+        let waiter = self
+            .fs()
+            .read_blocks_async(start_bid, bio_segment, Some(success_fn))?;
+
+        Ok(waiter)
     }

     /// Writes one or multiple blocks from the segment start from `bid` asynchronously.
@@ -1919,7 +1922,7 @@
             let bio_segment = BioSegment::alloc(range_nblocks, BioDirection::ToDevice);
             bio_segment.writer().unwrap().write_fallible(reader)?;

-            let waiter = self.fs().write_blocks_async(start_bid, bio_segment)?;
+            let waiter = self.fs().write_blocks_async(start_bid, bio_segment, None)?;
             bio_waiter.concat(waiter);
         }
@@ -1933,22 +1936,23 @@
         }
     }

-    pub fn write_block_async(&self, bid: Ext2Bid, frame: &CachePage) -> Result<BioWaiter> {
-        let mut bio_waiter = BioWaiter::new();
+    pub fn write_block_async(&self, bid: Ext2Bid, frame: LockedCachePage) -> Result<BioWaiter> {
+        let dev_range = DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)?.read()?;
+        let start_bid = dev_range.start as Ext2Bid;
+        let bio_segment = BioSegment::alloc(1, BioDirection::ToDevice);

-        for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
-            let start_bid = dev_range.start as Ext2Bid;
-            let bio_segment = BioSegment::alloc(1, BioDirection::ToDevice);
-            // This requires an additional copy to the pooled bio segment.
-            bio_segment
-                .writer()
-                .unwrap()
-                .write_fallible(&mut frame.reader().to_fallible())?;
-            let waiter = self.fs().write_blocks_async(start_bid, bio_segment)?;
-            bio_waiter.concat(waiter);
-        }
+        // This requires an additional copy to the pooled bio segment.
+        bio_segment
+            .writer()
+            .unwrap()
+            .write_fallible(&mut frame.reader().to_fallible())?;

-        Ok(bio_waiter)
+        frame.set_up_to_date();
+        frame.unlock();
+
+        let waiter = self.fs().write_blocks_async(start_bid, bio_segment, None)?;
+
+        Ok(waiter)
     }

     pub fn nblocks(&self) -> usize {
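Usage note (illustrative, not part of the patch): with this change, a caller that only wants work done when the I/O actually succeeds passes it as a boxed closure; general_complete_fn invokes the closure when the BIO status is Complete and logs the error status otherwise. The sketch below mirrors the ExfatFs::read_page_async hunk above; the Send + Sync bounds on the boxed FnOnce closure are an assumption, not verified against the tree.

    // Sketch of a read path using the new success_fn hook, following the
    // exfat/ext2 call sites in this patch.
    fn read_page_async(&self, idx: usize, frame: LockedCachePage) -> Result<BioWaiter> {
        // Wrap the cache page for a device-to-memory transfer.
        let bio_segment = BioSegment::new_from_segment(
            Segment::from(frame.clone()).into(),
            BioDirection::FromDevice,
        );

        // Runs in the completion path only when the BIO status is Complete;
        // on an error status, general_complete_fn just logs and the page is
        // left not up to date.
        let success_fn = Box::new(move || {
            frame.set_up_to_date();
        });

        let waiter = self.block_device.read_blocks_async(
            BlockId::new(idx as u64),
            bio_segment,
            Some(success_fn),
        )?;
        Ok(waiter)
    }

The write paths above instead mark the page up to date and unlock it before submitting and pass None for success_fn, so no completion hook is needed there.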