Fix manual implementations of `.is_multiple_of()`

Author: Zhang Junyang
Date: 2025-12-08 21:49:15 +08:00
Committed by: Tate, Hongliang Tian
parent 559ce94aba
commit fc9f47a019

44 changed files with 132 additions and 114 deletions
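
The change is mechanical throughout the diff: each open-coded divisibility check `x % n == 0` (or its negation) becomes `x.is_multiple_of(n)`, a standard-library method that is stable on the unsigned integer types since Rust 1.87. The two forms agree whenever the divisor is non-zero; the method additionally avoids the division-by-zero panic of `%`. A minimal standalone sketch (not part of this commit) contrasting the two forms:

```rust
fn main() {
    let offset: usize = 4096;
    const PAGE_SIZE: usize = 4096;

    // Manual form: panics if the divisor is zero.
    assert!(offset % PAGE_SIZE == 0);

    // Method form: identical for non-zero divisors, but it never panics.
    // `x.is_multiple_of(0)` is `true` only when `x == 0`.
    assert!(offset.is_multiple_of(PAGE_SIZE));
    assert!(!4096usize.is_multiple_of(0));
    assert!(0usize.is_multiple_of(0));
}
```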

View File

@@ -717,7 +717,7 @@ impl From<BioDirection> for DmaDirection {
 /// Checks if the given offset is aligned to sector.
 pub fn is_sector_aligned(offset: usize) -> bool {
-    offset % SECTOR_SIZE == 0
+    offset.is_multiple_of(SECTOR_SIZE)
 }
 /// An aligned unsigned integer number.
@@ -737,7 +737,7 @@ pub fn is_sector_aligned(offset: usize) -> bool {
 /// let sector_offset = sector_num * (SECTOR_SIZE as usize);
 /// AlignedUsize::<SECTOR_SIZE>::new(sector_offset).unwrap()
 /// };
-/// assert!(sector_offset.value() % sector_offset.align() == 0);
+/// assert!(sector_offset.value().is_multiple_of(sector_offset.align()));
 /// ```
 ///
 /// # Limitation
@@ -754,7 +754,7 @@ pub struct AlignedUsize<const N: u16>(usize);
 impl<const N: u16> AlignedUsize<N> {
     /// Constructs a new instance of aligned integer if the given value is aligned.
     pub fn new(val: usize) -> Option<Self> {
-        if val % (N as usize) == 0 {
+        if val.is_multiple_of(N as usize) {
             Some(Self(val))
         } else {
             None

View File

@@ -99,7 +99,7 @@ impl<'a> TryFrom<&'a [u8]> for BufRef<'a> {
         if buf.is_empty() {
             return_errno_with_msg!(InvalidArgs, "empty buf in `BufRef::try_from`");
         }
-        if buf.len() % BLOCK_SIZE != 0 {
+        if !buf.len().is_multiple_of(BLOCK_SIZE) {
             return_errno_with_msg!(
                 NotBlockSizeAligned,
                 "buf not block size aligned `BufRef::try_from`"
@@ -154,7 +154,7 @@ impl<'a> TryFrom<&'a mut [u8]> for BufMut<'a> {
         if buf.is_empty() {
             return_errno_with_msg!(InvalidArgs, "empty buf in `BufMut::try_from`");
         }
-        if buf.len() % BLOCK_SIZE != 0 {
+        if !buf.len().is_multiple_of(BLOCK_SIZE) {
             return_errno_with_msg!(
                 NotBlockSizeAligned,
                 "buf not block size aligned `BufMut::try_from`"

View File

@@ -62,7 +62,7 @@ pub trait BlockSet: Sync + Send {
         blocks.as_mut_slice()[start_offset..end_offset].copy_from_slice(buf);
         // Maybe we should read the last block partially.
-        if end_offset % BLOCK_SIZE != 0 {
+        if !end_offset.is_multiple_of(BLOCK_SIZE) {
             let mut end_block = Buf::alloc(1)?;
             self.read(end_pos, end_block.as_mut())?;
             blocks.as_mut_slice()[end_offset..]

View File

@@ -607,7 +607,9 @@ impl MhtNode {
     }
     pub fn num_complete_children(&self) -> usize {
-        if self.num_data_nodes() % MHT_NBRANCHES == 0 || Self::is_lowest_level(self.height()) {
+        if self.num_data_nodes().is_multiple_of(MHT_NBRANCHES)
+            || Self::is_lowest_level(self.height())
+        {
             self.num_valid_entries()
         } else {
             self.num_valid_entries() - 1
@@ -1176,7 +1178,7 @@ mod tests {
         for i in 0..append_cnt {
             buf.as_mut_slice().fill(i as _);
             log.append(buf.as_ref())?;
-            if i % flush_freq == 0 {
+            if i.is_multiple_of(flush_freq) {
                 log.flush()?;
             }
         }

View File

@@ -145,7 +145,7 @@ impl<D: BlockSet + 'static> WalAppendTx<D> {
         wal_tx: &CurrentTx<'_>,
         log: &Arc<TxLog<D>>,
     ) -> Result<()> {
-        debug_assert!(!record_buf.is_empty() && record_buf.len() % BLOCK_SIZE == 0);
+        debug_assert!(!record_buf.is_empty() && record_buf.len().is_multiple_of(BLOCK_SIZE));
         let res = wal_tx.context(|| {
             let buf = BufRef::try_from(record_buf).unwrap();
             log.append(buf)

View File

@@ -127,7 +127,7 @@ impl<D: BlockSet + 'static> aster_block::BlockDevice for MlsDisk<D> {
         }
         // Read the last unaligned block.
-        if end_offset % BLOCK_SIZE != 0 {
+        if !end_offset.is_multiple_of(BLOCK_SIZE) {
             let offset = buf.as_slice().len() - BLOCK_SIZE;
             let buf_mut = BufMut::try_from(&mut buf.as_mut_slice()[offset..]).unwrap();
             if self.read(end_lba - 1, buf_mut).is_err() {

View File

@@ -32,7 +32,7 @@ impl BitMap {
         }
         // Set the unused bits in the last u64 with zero.
-        if nbits % 64 != 0 {
+        if !nbits.is_multiple_of(64) {
             bits[vec_len - 1]
                 .iter_ones()
                 .filter(|index| (*index as usize) >= nbits % 64)

View File

@@ -365,7 +365,7 @@ impl IoBar {
     /// Reads from port
     pub fn read<T: PortRead>(&self, offset: u32) -> Result<T> {
         // Check alignment
-        if (self.base + offset) % size_of::<T>() as u32 != 0 {
+        if !(self.base + offset).is_multiple_of(size_of::<T>() as u32) {
             return Err(Error::InvalidArgs);
         }
         // Check overflow
@@ -380,7 +380,7 @@ impl IoBar {
     /// Writes to port
     pub fn write<T: PortWrite>(&self, offset: u32, _value: T) -> Result<()> {
         // Check alignment
-        if (self.base + offset) % size_of::<T>() as u32 != 0 {
+        if !(self.base + offset).is_multiple_of(size_of::<T>() as u32) {
             return Err(Error::InvalidArgs);
         }
         // Check overflow

View File

@@ -78,7 +78,7 @@ fn init_timer() {
     let update = move || {
         let counter = TSC_UPDATE_COUNTER.fetch_add(1, Ordering::Relaxed);
-        if counter % delay_counts == 0 {
+        if counter.is_multiple_of(delay_counts) {
             update_clocksource();
         }
     };

View File

@@ -279,7 +279,7 @@ impl<T: PodOnce, M: VmIoOnce, R: TRights> SafePtr<T, M, TRightSet<R>> {
 // =============== Address-related methods ==============
 impl<T, M, R> SafePtr<T, M, R> {
     pub const fn is_aligned(&self) -> bool {
-        self.offset % align_of::<T>() == 0
+        self.offset.is_multiple_of(align_of::<T>())
     }
     /// Increase the address in units of bytes occupied by the generic T.

View File

@@ -16,16 +16,16 @@ macro_rules! n {
     };
 }
-fn init_continuous_with_arc<M>(xarray: &XArray<Arc<i32>, M>, item_num: i32) {
+fn init_continuous_with_arc<M>(xarray: &XArray<Arc<u32>, M>, item_num: u32) {
     for i in 0..item_num {
         let value = Arc::new(i);
         xarray.lock().store(i as u64, value);
     }
 }
-fn init_sparse_with_arc<M>(xarray: &XArray<Arc<i32>, M>, item_num: i32) {
-    for i in 0..2 * item_num {
-        if i % 2 == 0 {
+fn init_sparse_with_arc<M>(xarray: &XArray<Arc<u32>, M>, item_num: u32) {
+    for i in 0u32..2 * item_num {
+        if i.is_multiple_of(2) {
             let value = Arc::new(i);
             xarray.lock().store(i as u64, value);
         }
@@ -34,7 +34,7 @@ fn init_sparse_with_arc<M>(xarray: &XArray<Arc<i32>, M>, item_num: i32) {
 #[ktest]
 fn store_continuous() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     init_continuous_with_arc(&xarray_arc, n!(100));
     let guard = disable_preempt();
     for i in 0..n!(100) {
@@ -45,12 +45,12 @@ fn store_continuous() {
 #[ktest]
 fn store_sparse() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     init_sparse_with_arc(&xarray_arc, n!(100));
     let guard = disable_preempt();
-    for i in 0..n!(100) {
-        if i % 2 == 0 {
+    for i in 0u32..n!(100) {
+        if i.is_multiple_of(2) {
             let value = xarray_arc.load(&guard, i as u64).unwrap();
             assert_eq!(*value.as_ref(), i);
         }
@@ -59,7 +59,7 @@ fn store_sparse() {
 #[ktest]
 fn store_overwrite() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     init_continuous_with_arc(&xarray_arc, n!(100));
     let mut locked_xarray = xarray_arc.lock();
@@ -77,7 +77,7 @@ fn store_overwrite() {
 #[ktest]
 fn remove() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     assert!(xarray_arc.lock().remove(n!(1)).is_none());
     init_continuous_with_arc(&xarray_arc, n!(100));
@@ -92,7 +92,7 @@ fn remove() {
 #[ktest]
 fn cursor_load() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     init_continuous_with_arc(&xarray_arc, n!(100));
     let guard = disable_preempt();
@@ -110,7 +110,7 @@ fn cursor_load() {
 #[ktest]
 fn cursor_load_very_sparse() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     let mut locked_xarray = xarray_arc.lock();
     locked_xarray.store(0, Arc::new(1));
     locked_xarray.store(n!(100), Arc::new(2));
@@ -125,7 +125,7 @@ fn cursor_load_very_sparse() {
 #[ktest]
 fn cursor_store_continuous() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     let mut locked_xarray = xarray_arc.lock();
     let mut cursor = locked_xarray.cursor_mut(0);
@@ -143,20 +143,20 @@ fn cursor_store_continuous() {
 #[ktest]
 fn cursor_store_sparse() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     let mut locked_xarray = xarray_arc.lock();
     let mut cursor = locked_xarray.cursor_mut(0);
-    for i in 0..n!(100) {
-        if i % 2 == 0 {
+    for i in 0u32..n!(100) {
+        if i.is_multiple_of(2) {
             let value = Arc::new(i);
             cursor.store(value);
         }
         cursor.next();
     }
-    for i in 0..n!(100) {
-        if i % 2 == 0 {
+    for i in 0u32..n!(100) {
+        if i.is_multiple_of(2) {
             let value = locked_xarray.load(i as u64).unwrap();
             assert_eq!(*value.as_ref(), i);
         }
@@ -165,7 +165,7 @@ fn cursor_store_sparse() {
 #[ktest]
 fn set_mark() {
-    let xarray_arc: XArray<Arc<i32>, XMark> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>, XMark> = XArray::new();
     init_continuous_with_arc(&xarray_arc, n!(100));
     let mut locked_xarray = xarray_arc.lock();
@@ -195,7 +195,7 @@ fn set_mark() {
 #[ktest]
 fn unset_mark() {
-    let xarray_arc: XArray<Arc<i32>, XMark> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>, XMark> = XArray::new();
     init_continuous_with_arc(&xarray_arc, n!(100));
     let mut locked_xarray = xarray_arc.lock();
@@ -214,7 +214,7 @@ fn unset_mark() {
 #[ktest]
 fn mark_overflow() {
-    let xarray_arc: XArray<Arc<i32>, XMark> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>, XMark> = XArray::new();
     init_continuous_with_arc(&xarray_arc, n!(100));
     let mut locked_xarray = xarray_arc.lock();
     let mut cursor = locked_xarray.cursor_mut(n!(200));
@@ -225,19 +225,19 @@ fn mark_overflow() {
 #[ktest]
 fn box_operate() {
-    let xarray_box: XArray<Box<i32>> = XArray::new();
+    let xarray_box: XArray<Box<u32>> = XArray::new();
     let mut locked_xarray = xarray_box.lock();
     let mut cursor_mut = locked_xarray.cursor_mut(0);
-    for i in 0..n!(100) {
-        if i % 2 == 0 {
+    for i in 0u32..n!(100) {
+        if i.is_multiple_of(2) {
             cursor_mut.store(Box::new(i * 2));
         }
         cursor_mut.next();
     }
     cursor_mut.reset_to(0);
-    for i in 0..n!(100) {
-        if i % 2 == 0 {
+    for i in 0u32..n!(100) {
+        if i.is_multiple_of(2) {
             assert_eq!(*cursor_mut.load().unwrap().as_ref(), i * 2);
         } else {
             assert!(cursor_mut.load().is_none());
@@ -246,8 +246,8 @@ fn box_operate() {
     }
     let mut cursor = locked_xarray.cursor(0);
-    for i in 0..n!(100) {
-        if i % 2 == 0 {
+    for i in 0u32..n!(100) {
+        if i.is_multiple_of(2) {
             assert_eq!(*cursor.load().unwrap().as_ref(), i * 2);
         } else {
             assert!(cursor.load().is_none());
@@ -258,7 +258,7 @@ fn box_operate() {
 #[ktest]
 fn range() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     for i in 0..n!(100) {
         let value = Arc::new(i * 2);
         xarray_arc.lock().store((i * 2) as u64, value);
@@ -275,7 +275,7 @@ fn range() {
 #[ktest]
 fn load_after_clear() {
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
     init_continuous_with_arc(&xarray_arc, n!(100));
     let guard = disable_preempt();
@@ -318,8 +318,8 @@ fn no_leakage() {
     finish_grace_period();
     TEST_LEAKAGE.store(true, Ordering::Relaxed);
-    let xarray_arc: XArray<Arc<i32>> = XArray::new();
-    init_sparse_with_arc(&xarray_arc, (SLOT_SIZE * SLOT_SIZE / 2 + 1) as i32);
+    let xarray_arc: XArray<Arc<u32>> = XArray::new();
+    init_sparse_with_arc(&xarray_arc, (SLOT_SIZE * SLOT_SIZE / 2 + 1) as u32);
     drop(xarray_arc);
     // Drop the nodes created in the test.
View File

@@ -449,11 +449,11 @@ pub(super) struct ExfatDentryIterator<'a> {
 impl<'a> ExfatDentryIterator<'a> {
     pub fn new(page_cache: &'a Vmo, offset: usize, size: Option<usize>) -> Result<Self> {
-        if size.is_some() && size.unwrap() % DENTRY_SIZE != 0 {
+        if size.is_some() && !size.unwrap().is_multiple_of(DENTRY_SIZE) {
             return_errno_with_message!(Errno::EINVAL, "remaining size unaligned to dentry size")
         }
-        if offset % DENTRY_SIZE != 0 {
+        if !offset.is_multiple_of(DENTRY_SIZE) {
             return_errno_with_message!(Errno::EINVAL, "dentry offset unaligned to dentry size")
         }

View File

@@ -1263,7 +1263,7 @@ impl DirentVisitor for EmptyVisitor {
     }
 }
 fn is_block_aligned(off: usize) -> bool {
-    off % PAGE_SIZE == 0
+    off.is_multiple_of(PAGE_SIZE)
 }
 fn check_corner_cases_for_rename(

View File

@@ -2407,5 +2407,5 @@ pub(super) struct Osd2 {
 }
 fn is_block_aligned(offset: usize) -> bool {
-    offset % BLOCK_SIZE == 0
+    offset.is_multiple_of(BLOCK_SIZE)
 }

View File

@@ -77,7 +77,7 @@ impl PageCache {
         // first zero the gap between the new size and the
         // next page boundary (or the old size), if such a gap exists.
         let old_size = self.pages.size();
-        if old_size > new_size && new_size % PAGE_SIZE != 0 {
+        if old_size > new_size && !new_size.is_multiple_of(PAGE_SIZE) {
             let gap_size = old_size.min(new_size.align_up(PAGE_SIZE)) - new_size;
             if gap_size > 0 {
                 self.fill_zeros(new_size..new_size + gap_size)?;

View File

@@ -81,7 +81,7 @@ const MAX_NR_FILES: usize = 253;
 impl FileMessage {
     fn read_from(header: &CControlHeader, reader: &mut VmReader) -> Result<Self> {
         let payload_len = header.payload_len();
-        if payload_len % size_of::<i32>() != 0 {
+        if !payload_len.is_multiple_of(size_of::<i32>()) {
             return_errno_with_message!(Errno::EINVAL, "the SCM_RIGHTS message is invalid");
         }
         let nfiles = payload_len / size_of::<i32>();

View File

@@ -540,7 +540,7 @@ impl FutexKey {
         // "On all platforms, futexes are four-byte integers that must be aligned on a four-byte
         // boundary."
        // Reference: <https://man7.org/linux/man-pages/man2/futex.2.html>.
-        if addr % align_of::<u32>() != 0 {
+        if !addr.is_multiple_of(align_of::<u32>()) {
             return_errno_with_message!(
                 Errno::EINVAL,
                 "the futex word is not aligned on a four-byte boundary"

View File

@@ -178,7 +178,7 @@ impl InitStack {
         let vmar_map_options = {
             let perms = VmPerms::READ | VmPerms::WRITE;
             let map_addr = self.initial_top - self.max_size;
-            debug_assert!(map_addr % PAGE_SIZE == 0);
+            debug_assert!(map_addr.is_multiple_of(PAGE_SIZE));
             vmar.new_map(self.max_size, perms)?
                 .offset(map_addr)
                 .vmo(vmo.clone())
@@ -310,7 +310,7 @@ impl InitStackWriter<'_> {
         let argv_pointers_size = (argv_pointers.len() + 1) * size_of::<u64>();
         let argc_size = size_of::<u64>();
         let to_write_size = auxvec_size + envp_pointers_size + argv_pointers_size + argc_size;
-        if (self.pos() - to_write_size) % 16 != 0 {
+        if !(self.pos() - to_write_size).is_multiple_of(16) {
             self.write_u64(0)?;
         }
         Ok(())

View File

@@ -18,7 +18,7 @@ pub fn sys_madvise(
         start, len, behavior
     );
-    if start % PAGE_SIZE != 0 {
+    if !start.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "the start address should be page aligned");
     }
     if len > isize::MAX as usize {

View File

@@ -63,7 +63,7 @@ fn do_sys_mmap(
     let len = len.align_up(PAGE_SIZE);
-    if offset % PAGE_SIZE != 0 {
+    if !offset.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "mmap only support page-aligned offset");
     }
     offset.checked_add(len).ok_or(Error::with_message(

View File

@@ -17,7 +17,7 @@ pub fn sys_mprotect(addr: Vaddr, len: usize, perms: u64, ctx: &Context) -> Resul
     // According to linux behavior,
     // <https://elixir.bootlin.com/linux/v6.0.9/source/mm/mprotect.c#L681>,
     // the addr is checked even if len is 0.
-    if addr % PAGE_SIZE != 0 {
+    if !addr.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "the start address should be page aligned");
     }
     if len == 0 {

View File

@@ -31,7 +31,7 @@ fn do_sys_mremap(
         old_addr, old_size, new_size, flags, new_addr,
     );
-    if old_addr % PAGE_SIZE != 0 {
+    if !old_addr.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "mremap: `old_addr` must be page-aligned");
     }
     if new_size == 0 {

View File

@@ -33,7 +33,9 @@ pub fn sys_msync(start: Vaddr, size: usize, flag: i32, ctx: &Context) -> Result<
     debug!("msync: start = {start:#x}, size = {size}, flags = {flags:?}");
-    if start % PAGE_SIZE != 0 || flags.contains(MsyncFlags::MS_ASYNC | MsyncFlags::MS_SYNC) {
+    if !start.is_multiple_of(PAGE_SIZE)
+        || flags.contains(MsyncFlags::MS_ASYNC | MsyncFlags::MS_SYNC)
+    {
         return_errno!(Errno::EINVAL);
     }

View File

@@ -8,7 +8,7 @@ use crate::prelude::*;
 pub fn sys_munmap(addr: Vaddr, len: usize, ctx: &Context) -> Result<SyscallReturn> {
     debug!("addr = 0x{:x}, len = {}", addr, len);
-    if addr % PAGE_SIZE != 0 {
+    if !addr.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "munmap addr must be page-aligned");
     }
     if len == 0 {

View File

@@ -420,11 +420,19 @@ pub struct VdsoVmoLayout {
     pub size: usize,
 }
-const_assert!(VDSO_VMO_LAYOUT.data_segment_offset % PAGE_SIZE == 0);
-const_assert!(VDSO_VMO_LAYOUT.data_segment_size % PAGE_SIZE == 0);
-const_assert!(VDSO_VMO_LAYOUT.text_segment_offset % PAGE_SIZE == 0);
-const_assert!(VDSO_VMO_LAYOUT.text_segment_size % PAGE_SIZE == 0);
-const_assert!(VDSO_VMO_LAYOUT.size % PAGE_SIZE == 0);
+const_assert!(
+    VDSO_VMO_LAYOUT
+        .data_segment_offset
+        .is_multiple_of(PAGE_SIZE)
+);
+const_assert!(VDSO_VMO_LAYOUT.data_segment_size.is_multiple_of(PAGE_SIZE));
+const_assert!(
+    VDSO_VMO_LAYOUT
+        .text_segment_offset
+        .is_multiple_of(PAGE_SIZE)
+);
+const_assert!(VDSO_VMO_LAYOUT.text_segment_size.is_multiple_of(PAGE_SIZE));
+const_assert!(VDSO_VMO_LAYOUT.size.is_multiple_of(PAGE_SIZE));
 // Ensure that the vDSO data at `VDSO_VMO_LAYOUT.data_offset` is in the data segment.
 //
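
These assertions are evaluated at compile time, which works because `is_multiple_of` is a `const fn`. A sketch assuming a `static_assertions`-style `const_assert!` (hypothetical here; the kernel's actual macro may be defined differently):

```rust
// Hypothetical minimal `const_assert!`, for illustration only.
macro_rules! const_assert {
    ($cond:expr) => {
        const _: () = assert!($cond);
    };
}

const PAGE_SIZE: usize = 4096;
const SEGMENT_SIZE: usize = 8 * PAGE_SIZE;

// Evaluated during compilation; a violation is a compile error.
const_assert!(SEGMENT_SIZE.is_multiple_of(PAGE_SIZE));

fn main() {}
```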

View File

@@ -98,8 +98,8 @@ impl Vmar {
     /// The range's start and end addresses must be page-aligned.
     /// Also, the range must be completely mapped.
     pub fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
-        assert!(range.start % PAGE_SIZE == 0);
-        assert!(range.end % PAGE_SIZE == 0);
+        assert!(range.start.is_multiple_of(PAGE_SIZE));
+        assert!(range.end.is_multiple_of(PAGE_SIZE));
         let mut inner = self.inner.write();
         let vm_space = self.vm_space();
@@ -374,7 +374,7 @@ impl Vmar {
         // Allocate a new free region that does not overlap with the old range.
         let new_range = if let Some(new_addr) = new_addr {
             let new_range = new_addr..new_addr.checked_add(new_size).ok_or(Errno::EINVAL)?;
-            if new_addr % PAGE_SIZE != 0
+            if !new_addr.is_multiple_of(PAGE_SIZE)
                 || !is_userspace_vaddr(new_addr)
                 || !is_userspace_vaddr(new_range.end - 1)
             {
@@ -617,7 +617,7 @@ impl Vmar {
     }
     fn query_page(&self, vaddr: Vaddr) -> Result<Option<VmQueriedItem>> {
-        debug_assert!(is_userspace_vaddr(vaddr) && vaddr % PAGE_SIZE == 0);
+        debug_assert!(is_userspace_vaddr(vaddr) && vaddr.is_multiple_of(PAGE_SIZE));
         let preempt_guard = disable_preempt();
         let vmspace = self.vm_space();
@@ -1273,22 +1273,22 @@ impl<'a> VmarMapOptions<'a> {
     /// Checks whether all options are valid.
     fn check_options(&self) -> Result<()> {
         // Check align.
-        debug_assert!(self.align % PAGE_SIZE == 0);
+        debug_assert!(self.align.is_multiple_of(PAGE_SIZE));
         debug_assert!(self.align.is_power_of_two());
-        if self.align % PAGE_SIZE != 0 || !self.align.is_power_of_two() {
+        if !self.align.is_multiple_of(PAGE_SIZE) || !self.align.is_power_of_two() {
             return_errno_with_message!(Errno::EINVAL, "invalid align");
         }
-        debug_assert!(self.size % self.align == 0);
-        if self.size % self.align != 0 {
+        debug_assert!(self.size.is_multiple_of(self.align));
+        if !self.size.is_multiple_of(self.align) {
             return_errno_with_message!(Errno::EINVAL, "invalid mapping size");
         }
-        debug_assert!(self.vmo_offset % self.align == 0);
-        if self.vmo_offset % self.align != 0 {
+        debug_assert!(self.vmo_offset.is_multiple_of(self.align));
+        if !self.vmo_offset.is_multiple_of(self.align) {
             return_errno_with_message!(Errno::EINVAL, "invalid vmo offset");
         }
         if let Some(offset) = self.offset {
-            debug_assert!(offset % self.align == 0);
-            if offset % self.align != 0 {
+            debug_assert!(offset.is_multiple_of(self.align));
+            if !offset.is_multiple_of(self.align) {
                 return_errno_with_message!(Errno::EINVAL, "invalid offset");
             }
         }

View File

@@ -503,7 +503,7 @@ impl VmMapping {
     /// must not be either the start or the end of the mapping.
     pub fn split(self, at: Vaddr) -> Result<(Self, Self)> {
         debug_assert!(self.map_to_addr < at && at < self.map_end());
-        debug_assert!(at % PAGE_SIZE == 0);
+        debug_assert!(at.is_multiple_of(PAGE_SIZE));
         let (l_mapped_mem, r_mapped_mem) = match self.mapped_mem {
             MappedMemory::Vmo(vmo) => {
@@ -732,7 +732,7 @@ impl MappedVmo {
         &self,
         page_offset: usize,
     ) -> core::result::Result<UFrame, VmoCommitError> {
-        debug_assert!(page_offset % PAGE_SIZE == 0);
+        debug_assert!(page_offset.is_multiple_of(PAGE_SIZE));
         self.vmo.try_commit_page(self.offset + page_offset)
     }

View File

@@ -46,8 +46,8 @@ pub(crate) fn split_to_chunks(
     addr: Paddr,
     size: usize,
 ) -> impl Iterator<Item = (Paddr, BuddyOrder)> {
-    assert!(addr % PAGE_SIZE == 0);
-    assert!(size % PAGE_SIZE == 0);
+    assert!(addr.is_multiple_of(PAGE_SIZE));
+    assert!(size.is_multiple_of(PAGE_SIZE));
     struct SplitChunks {
         addr: Paddr,

View File

@@ -113,7 +113,9 @@ fn load_cmdline() -> Option<&'static CStr> {
         return None;
     };
-    if load_options.len() % 2 != 0 || load_options.iter().skip(1).step_by(2).any(|c| *c != 0) {
+    if !load_options.len().is_multiple_of(2)
+        || load_options.iter().skip(1).step_by(2).any(|c| *c != 0)
+    {
         uefi::println!("[EFI stub] Warning: The cmdline contains non-ASCII characters!");
         return None;
     }

View File

@@ -124,7 +124,7 @@ pub(crate) struct PageTableEntry(usize);
 /// Changing the root-level page table is unsafe, because it's possible to violate memory safety by
 /// changing the page mapping.
 pub(crate) unsafe fn activate_page_table(root_paddr: Paddr, _root_pt_cache: CachePolicy) {
-    assert!(root_paddr % PagingConsts::BASE_PAGE_SIZE == 0);
+    assert!(root_paddr.is_multiple_of(PagingConsts::BASE_PAGE_SIZE));
     loongArch64::register::pgdl::set_base(root_paddr);
     loongArch64::register::pgdh::set_base(root_paddr);
 }

View File

@@ -163,7 +163,7 @@ pub(crate) struct PageTableEntry(usize);
 /// Changing the root-level page table is unsafe, because it's possible to violate memory safety by
 /// changing the page mapping.
 pub(crate) unsafe fn activate_page_table(root_paddr: Paddr, _root_pt_cache: CachePolicy) {
-    assert!(root_paddr % PagingConsts::BASE_PAGE_SIZE == 0);
+    assert!(root_paddr.is_multiple_of(PagingConsts::BASE_PAGE_SIZE));
     let ppn = root_paddr >> 12;
     #[cfg(not(feature = "riscv_sv39_mode"))]

View File

@@ -136,7 +136,7 @@ impl<const ITEM_SIZE: usize> DynCpuLocalChunk<ITEM_SIZE> {
             .alloc_segment_with(total_chunk_size.div_ceil(PAGE_SIZE), |_| DynCpuLocalMeta)?;
         let num_items = CHUNK_SIZE / ITEM_SIZE;
-        const { assert!(CHUNK_SIZE % ITEM_SIZE == 0) };
+        const { assert!(CHUNK_SIZE.is_multiple_of(ITEM_SIZE)) };
         Ok(Self {
             segment: ManuallyDrop::new(segment),

View File

@@ -86,7 +86,7 @@ pub(in crate::io) unsafe fn init() {
     }
     let start = __sensitive_io_ports_start as *const () as usize;
     let end = __sensitive_io_ports_end as *const () as usize;
-    assert!((end - start) % size_of::<RawIoPortRange>() == 0);
+    assert!((end - start).is_multiple_of(size_of::<RawIoPortRange>()));
     // Iterate through the sensitive I/O port ranges and remove them from the allocator.
     let io_port_range_count = (end - start) / size_of::<RawIoPortRange>();

View File

@@ -205,8 +205,8 @@ pub(crate) unsafe fn init() {
     for region in regions.iter() {
         if region.typ() == MemoryRegionType::Usable {
-            debug_assert!(region.base() % PAGE_SIZE == 0);
-            debug_assert!(region.len() % PAGE_SIZE == 0);
+            debug_assert!(region.base().is_multiple_of(PAGE_SIZE));
+            debug_assert!(region.len().is_multiple_of(PAGE_SIZE));
             // Add global free pages to the frame allocator.
             // Truncate the early allocated frames if there is an overlap.

View File

@@ -127,7 +127,7 @@ pub(super) const REF_COUNT_MAX: u64 = i64::MAX as u64;
 type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn AnyFrameMeta>;
-const_assert!(PAGE_SIZE % META_SLOT_SIZE == 0);
+const_assert!(PAGE_SIZE.is_multiple_of(META_SLOT_SIZE));
 const_assert!(size_of::<MetaSlot>() == META_SLOT_SIZE);
 /// All frame metadata types must implement this trait.
@@ -207,7 +207,7 @@ pub enum GetFrameError {
 /// Gets the reference to a metadata slot.
 pub(super) fn get_slot(paddr: Paddr) -> Result<&'static MetaSlot, GetFrameError> {
-    if paddr % PAGE_SIZE != 0 {
+    if !paddr.is_multiple_of(PAGE_SIZE) {
         return Err(GetFrameError::NotAligned);
     }
     if paddr >= super::max_paddr() {
@@ -576,8 +576,8 @@ impl_frame_meta_for!(KernelMeta);
 macro_rules! mark_ranges {
     ($region: expr, $typ: expr) => {{
-        debug_assert!($region.base() % PAGE_SIZE == 0);
-        debug_assert!($region.len() % PAGE_SIZE == 0);
+        debug_assert!($region.base().is_multiple_of(PAGE_SIZE));
+        debug_assert!($region.len().is_multiple_of(PAGE_SIZE));
         let seg = Segment::from_unused($region.base()..$region.end(), |_| $typ).unwrap();
         let _ = ManuallyDrop::new(seg);

View File

@@ -324,7 +324,7 @@ impl TryFrom<Frame<dyn AnyFrameMeta>> for UFrame {
 /// 1. The physical address must represent a valid frame;
 /// 2. The caller must have already held a reference to the frame.
 pub(in crate::mm) unsafe fn inc_frame_ref_count(paddr: Paddr) {
-    debug_assert!(paddr % PAGE_SIZE == 0);
+    debug_assert!(paddr.is_multiple_of(PAGE_SIZE));
     debug_assert!(paddr < max_paddr());
     let vaddr: Vaddr = mapping::frame_to_meta::<PagingConsts>(paddr);

View File

@@ -86,7 +86,7 @@ impl<M: AnyFrameMeta> Segment<M> {
     where
         F: FnMut(Paddr) -> M,
     {
-        if range.start % PAGE_SIZE != 0 || range.end % PAGE_SIZE != 0 {
+        if !range.start.is_multiple_of(PAGE_SIZE) || !range.end.is_multiple_of(PAGE_SIZE) {
             return Err(GetFrameError::NotAligned);
         }
         if range.end > super::max_paddr() {
@@ -135,7 +135,7 @@ impl<M: AnyFrameMeta + ?Sized> Segment<M> {
     /// The function panics if the offset is out of bounds, at either ends, or
     /// not base-page-aligned.
     pub fn split(self, offset: usize) -> (Self, Self) {
-        assert!(offset % PAGE_SIZE == 0);
+        assert!(offset.is_multiple_of(PAGE_SIZE));
         assert!(0 < offset && offset < self.size());
         let old = ManuallyDrop::new(self);
@@ -163,7 +163,7 @@ impl<M: AnyFrameMeta + ?Sized> Segment<M> {
     /// The function panics if the byte offset range is out of bounds, or if
     /// any of the ends of the byte offset range is not base-page aligned.
     pub fn slice(&self, range: &Range<usize>) -> Self {
-        assert!(range.start % PAGE_SIZE == 0 && range.end % PAGE_SIZE == 0);
+        assert!(range.start.is_multiple_of(PAGE_SIZE) && range.end.is_multiple_of(PAGE_SIZE));
         let start = self.range.start + range.start;
         let end = self.range.start + range.end;
         assert!(start <= end && end <= self.range.end);

View File

@@ -110,7 +110,7 @@ unsafe impl GlobalAlloc for AllocDispatch {
         if required_slot.size() != slot.size()
             || slot.size() < layout.size()
-            || slot.as_ptr() as Vaddr % layout.align() != 0
+            || !(slot.as_ptr() as Vaddr).is_multiple_of(layout.align())
         {
             abort_with_message!(
                 "Heap allocation mismatch: slot ptr = {:p}, size = {:x}; layout = {:#x?}; required_slot = {:#x?}",

View File

@@ -85,8 +85,8 @@ impl KVirtArea {
         frames: impl Iterator<Item = Frame<T>>,
         prop: PageProperty,
     ) -> Self {
-        assert!(area_size % PAGE_SIZE == 0);
-        assert!(map_offset % PAGE_SIZE == 0);
+        assert!(area_size.is_multiple_of(PAGE_SIZE));
+        assert!(map_offset.is_multiple_of(PAGE_SIZE));
         let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();
         let cursor_range = range.start + map_offset..range.end;
@@ -131,10 +131,10 @@ impl KVirtArea {
         pa_range: Range<Paddr>,
         prop: PageProperty,
     ) -> Self {
-        assert!(pa_range.start % PAGE_SIZE == 0);
-        assert!(pa_range.end % PAGE_SIZE == 0);
-        assert!(area_size % PAGE_SIZE == 0);
-        assert!(map_offset % PAGE_SIZE == 0);
+        assert!(pa_range.start.is_multiple_of(PAGE_SIZE));
+        assert!(pa_range.end.is_multiple_of(PAGE_SIZE));
+        assert!(area_size.is_multiple_of(PAGE_SIZE));
+        assert!(map_offset.is_multiple_of(PAGE_SIZE));
         assert!(map_offset + pa_range.len() <= area_size);
         let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();

View File

@@ -125,7 +125,8 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
         if !is_valid_range::<C>(va) || va.is_empty() {
             return Err(PageTableError::InvalidVaddrRange(va.start, va.end));
         }
-        if va.start % C::BASE_PAGE_SIZE != 0 || va.end % C::BASE_PAGE_SIZE != 0 {
+        if !va.start.is_multiple_of(C::BASE_PAGE_SIZE) || !va.end.is_multiple_of(C::BASE_PAGE_SIZE)
+        {
             return Err(PageTableError::UnalignedVaddr);
         }
@@ -280,7 +281,7 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
     ///
     /// This method panics if the address has bad alignment.
     pub fn jump(&mut self, va: Vaddr) -> Result<(), PageTableError> {
-        assert!(va % C::BASE_PAGE_SIZE == 0);
+        assert!(va.is_multiple_of(C::BASE_PAGE_SIZE));
         if !self.barrier_va.contains(&va) {
             return Err(PageTableError::InvalidVaddr(va));
         }

View File

@@ -182,8 +182,8 @@ pub(crate) fn largest_pages<C: PageTableConfig>(
     let mut level = C::HIGHEST_TRANSLATION_LEVEL;
     while page_size::<C>(level) > len
-        || va % page_size::<C>(level) != 0
-        || pa % page_size::<C>(level) != 0
+        || !va.is_multiple_of(page_size::<C>(level))
+        || !pa.is_multiple_of(page_size::<C>(level))
     {
         level -= 1;
     }

View File

@@ -220,10 +220,13 @@ impl TlbFlushOp {
     /// Panics if the range is not page-aligned or if the range is empty.
     pub const fn for_range(range: Range<Vaddr>) -> Self {
         assert!(
-            range.start % PAGE_SIZE == 0,
+            range.start.is_multiple_of(PAGE_SIZE),
             "Range start must be page-aligned"
         );
-        assert!(range.end % PAGE_SIZE == 0, "Range end must be page-aligned");
+        assert!(
+            range.end.is_multiple_of(PAGE_SIZE),
+            "Range end must be page-aligned"
+        );
         assert!(range.start < range.end, "Range must not be empty");
         let num_pages = (range.end - range.start) / PAGE_SIZE;
         if num_pages >= FLUSH_ALL_PAGES_THRESHOLD {

View File

@@ -100,7 +100,7 @@ pub fn print_stack_trace() {
                 let reg_name = "unknown";
             }
         }
-        if i % 4 == 0 {
+        if i.is_multiple_of(4) {
             early_print!("\n ");
         }
         early_print!(" {} {:#18x};", reg_name, reg_i);

View File

@@ -180,7 +180,7 @@ impl<I: Id> IdSet<I> {
     fn clear_invalid_id_bits(bits: &mut SmallVec<[InnerPart; NR_PARTS_NO_ALLOC]>) {
         let num_ids = I::cardinality() as usize;
-        if num_ids % BITS_PER_PART != 0 {
+        if !num_ids.is_multiple_of(BITS_PER_PART) {
             let num_parts = parts_for_ids::<I>();
             bits[num_parts - 1] &= (1 << (num_ids % BITS_PER_PART)) - 1;
         }
@@ -229,7 +229,7 @@ impl<I: Id> IdSet<I> {
     pub fn is_full(&self) -> bool {
         let num_ids = I::cardinality() as usize;
         self.bits.iter().enumerate().all(|(idx, part)| {
-            if idx == self.bits.len() - 1 && num_ids % BITS_PER_PART != 0 {
+            if idx == self.bits.len() - 1 && !num_ids.is_multiple_of(BITS_PER_PART) {
                 *part == (1 << (num_ids % BITS_PER_PART)) - 1
             } else {
                 *part == !0