commit 0fd39af24e

Merge tag 'mm-hotfixes-stable-2025-06-27-16-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "16 hotfixes. 6 are cc:stable and the remainder address post-6.15 issues
  or aren't considered necessary for -stable kernels. 5 are for MM"

* tag 'mm-hotfixes-stable-2025-06-27-16-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  MAINTAINERS: add Lorenzo as THP co-maintainer
  mailmap: update Duje Mihanović's email address
  selftests/mm: fix validate_addr() helper
  crashdump: add CONFIG_KEYS dependency
  mailmap: correct name for a historical account of Zijun Hu
  mailmap: add entries for Zijun Hu
  fuse: fix runtime warning on truncate_folio_batch_exceptionals()
  scripts/gdb: fix dentry_name() lookup
  mm/damon/sysfs-schemes: free old damon_sysfs_scheme_filter->memcg_path on write
  mm/alloc_tag: fix the kmemleak false positive issue in the allocation of the percpu variable tag->counters
  lib/group_cpus: fix NULL pointer dereference from group_cpus_evenly()
  mm/hugetlb: remove unnecessary holding of hugetlb_lock
  MAINTAINERS: add missing files to mm page alloc section
  MAINTAINERS: add tree entry to mm init block
  mm: add OOM killer maintainer structure
  fs/proc/task_mmu: fix PAGE_IS_PFNZERO detection for the huge zero folio

.mailmap | 4
@@ -224,6 +224,7 @@ Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Drew Fustini <fustini@kernel.org> <drew@pdp7.com>
+<duje@dujemihanovic.xyz> <duje.mihanovic@skole.hr>
 Ed L. Cashin <ecashin@coraid.com>
 Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
 Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
@@ -831,3 +832,6 @@ Yosry Ahmed <yosry.ahmed@linux.dev> <yosryahmed@google.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
 Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>
+Zijun Hu <zijun.hu@oss.qualcomm.com> <quic_zijuhu@quicinc.com>
+Zijun Hu <zijun.hu@oss.qualcomm.com> <zijuhu@codeaurora.org>
+Zijun Hu <zijun_hu@htc.com>
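For reference, these entries use the standard gitmailmap(5) forms — the name-only line corrects how a historical address is displayed, and the two-address forms map old commit addresses to a canonical one:

    # Proper Name <commit@email>                -- fix the display name only
    # <proper@email> <commit@email>             -- map an old address to a new one
    # Proper Name <proper@email> <commit@email> -- fix name and address together
    Zijun Hu <zijun_hu@htc.com>
    Zijun Hu <zijun.hu@oss.qualcomm.com> <zijuhu@codeaurora.org>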

MAINTAINERS | 24
@@ -15676,6 +15676,8 @@ MEMBLOCK AND MEMORY MANAGEMENT INITIALIZATION
 M:	Mike Rapoport <rppt@kernel.org>
 L:	linux-mm@kvack.org
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock.git for-next
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock.git fixes
 F:	Documentation/core-api/boot-time-mm.rst
 F:	Documentation/core-api/kho/bindings/memblock/*
 F:	include/linux/memblock.h
@@ -15848,6 +15850,17 @@ F:	mm/numa.c
 F:	mm/numa_emulation.c
 F:	mm/numa_memblks.c
 
+MEMORY MANAGEMENT - OOM KILLER
+M:	Michal Hocko <mhocko@suse.com>
+R:	David Rientjes <rientjes@google.com>
+R:	Shakeel Butt <shakeel.butt@linux.dev>
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	include/linux/oom.h
+F:	include/trace/events/oom.h
+F:	include/uapi/linux/oom.h
+F:	mm/oom_kill.c
+
 MEMORY MANAGEMENT - PAGE ALLOCATOR
 M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Vlastimil Babka <vbabka@suse.cz>
@@ -15862,8 +15875,17 @@ F:	include/linux/compaction.h
 F:	include/linux/gfp.h
+F:	include/linux/page-isolation.h
 F:	mm/compaction.c
+F:	mm/debug_page_alloc.c
+F:	mm/fail_page_alloc.c
 F:	mm/page_alloc.c
+F:	mm/page_ext.c
 F:	mm/page_frag_cache.c
+F:	mm/page_isolation.c
+F:	mm/page_owner.c
+F:	mm/page_poison.c
+F:	mm/page_reporting.c
 F:	mm/show_mem.c
+F:	mm/shuffle.c
 
 MEMORY MANAGEMENT - RECLAIM
 M:	Andrew Morton <akpm@linux-foundation.org>
@@ -15923,9 +15945,9 @@ F:	mm/swapfile.c
 MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
 M:	Andrew Morton <akpm@linux-foundation.org>
 M:	David Hildenbrand <david@redhat.com>
+M:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R:	Zi Yan <ziy@nvidia.com>
 R:	Baolin Wang <baolin.wang@linux.alibaba.com>
-R:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R:	Liam R. Howlett <Liam.Howlett@oracle.com>
 R:	Nico Pache <npache@redhat.com>
 R:	Ryan Roberts <ryan.roberts@arm.com>
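For readers who don't work with MAINTAINERS daily, the field letters in the hunks above follow the legend at the top of the file:

    M:	maintainer; patches and questions go here
    R:	designated reviewer, CC'd on patches for the area
    L:	relevant mailing list
    S:	status (Maintained, Supported, Orphan, ...)
    T:	SCM tree location and branch
    F:	files and directories the entry applies to

So the THP hunk promotes Lorenzo Stoakes from reviewer (R:) to maintainer (M:), the memblock hunk attaches a git tree (T:), and the OOM killer block is an entirely new entry.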

fs/fuse/inode.c | 4
@@ -9,6 +9,7 @@
 #include "fuse_i.h"
 #include "dev_uring_i.h"
 
+#include <linux/dax.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/file.h>
@@ -162,6 +163,9 @@ static void fuse_evict_inode(struct inode *inode)
 	/* Will write inode on close/munmap and in all other dirtiers */
 	WARN_ON(inode->i_state & I_DIRTY_INODE);
 
+	if (FUSE_IS_DAX(inode))
+		dax_break_layout_final(inode);
+
 	truncate_inode_pages_final(&inode->i_data);
 	clear_inode(inode);
 	if (inode->i_sb->s_flags & SB_ACTIVE) {

fs/proc/task_mmu.c | 2
@@ -2182,7 +2182,7 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
 			categories |= PAGE_IS_FILE;
 	}
 
-	if (is_zero_pfn(pmd_pfn(pmd)))
+	if (is_huge_zero_pmd(pmd))
 		categories |= PAGE_IS_PFNZERO;
 	if (pmd_soft_dirty(pmd))
 		categories |= PAGE_IS_SOFT_DIRTY;
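This fix matters to userspace that classifies mappings with the PAGEMAP_SCAN ioctl: a PMD-level mapping of the huge zero folio was previously not reported as PAGE_IS_PFNZERO, because is_zero_pfn() only recognizes the order-0 zero page. A minimal sketch of such a scan follows (field names are from the pagemap_scan uapi in <linux/fs.h>, available since kernel 6.7; error handling is omitted, and whether the huge zero folio is actually used depends on THP configuration):

    #include <fcntl.h>
    #include <linux/fs.h>	/* pm_scan_arg, page_region, PAGEMAP_SCAN, PAGE_IS_* */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    int main(void)
    {
    	size_t len = 2UL << 20;	/* one PMD-sized region */
    	char *mem = mmap(NULL, len, PROT_READ,
    			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    	/* A read fault on untouched anonymous memory maps a zero page;
    	 * with THP enabled it may be the huge zero folio this fix covers. */
    	madvise(mem, len, MADV_HUGEPAGE);
    	volatile char c = mem[0];
    	(void)c;

    	struct page_region regions[8];
    	struct pm_scan_arg arg = {
    		.size = sizeof(arg),
    		.start = (uintptr_t)mem,
    		.end = (uintptr_t)mem + len,
    		.vec = (uintptr_t)regions,
    		.vec_len = 8,
    		.return_mask = PAGE_IS_PRESENT | PAGE_IS_PFNZERO,
    	};
    	int fd = open("/proc/self/pagemap", O_RDONLY);
    	long n = ioctl(fd, PAGEMAP_SCAN, &arg);	/* returns regions filled */

    	for (long i = 0; i < n; i++)
    		printf("%llx-%llx categories %llx\n",
    		       (unsigned long long)regions[i].start,
    		       (unsigned long long)regions[i].end,
    		       (unsigned long long)regions[i].categories);
    	return 0;
    }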

include/linux/kmemleak.h | 4
@@ -28,6 +28,7 @@ extern void kmemleak_update_trace(const void *ptr) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_transient_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_ignore_percpu(const void __percpu *ptr) __ref;
 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
 extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
@@ -97,6 +98,9 @@ static inline void kmemleak_not_leak(const void *ptr)
 static inline void kmemleak_transient_leak(const void *ptr)
 {
 }
+static inline void kmemleak_ignore_percpu(const void __percpu *ptr)
+{
+}
 static inline void kmemleak_ignore(const void *ptr)
 {
 }

kernel/Kconfig.kexec | 1
@@ -134,6 +134,7 @@ config CRASH_DM_CRYPT
 	depends on KEXEC_FILE
 	depends on CRASH_DUMP
 	depends on DM_CRYPT
+	depends on KEYS
 	help
 	  With this option enabled, user space can intereact with
 	  /sys/kernel/config/crash_dm_crypt_keys to make the dm crypt keys
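The one-line fix is standard Kconfig discipline: if code reachable under an option calls an API that only exists under another option, the dependency must be spelled out, or randconfig builds fail at link time. An illustrative snippet (hypothetical EXAMPLE_FEATURE, not from the tree):

    config EXAMPLE_FEATURE
    	bool "Feature that stores key material in the kernel keyring"
    	depends on KEYS
    	help
    	  The code behind this option calls keyring APIs such as
    	  request_key(). Without "depends on KEYS" it could be enabled
    	  while CONFIG_KEYS=n, and the build would fail with undefined
    	  references to key management symbols.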

lib/alloc_tag.c | 6
@@ -10,6 +10,7 @@
 #include <linux/seq_buf.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/kmemleak.h>
 
 #define ALLOCINFO_FILE_NAME	"allocinfo"
 #define MODULE_ALLOC_TAG_VMAP_SIZE	(100000UL * sizeof(struct alloc_tag))
@@ -632,8 +633,13 @@ static int load_module(struct module *mod, struct codetag *start, struct codetag
 				mod->name);
 			return -ENOMEM;
 		}
 	}
 
+	/*
+	 * Avoid a kmemleak false positive. The pointer to the counters is stored
+	 * in the alloc_tag section of the module and cannot be directly accessed.
+	 */
+	kmemleak_ignore_percpu(tag->counters);
 	}
 	return 0;
 }
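kmemleak considers an object leaked when no scanned memory holds a pointer to it; the counters pointer here lives in the module's alloc_tag section, which kmemleak does not scan, so every module load produced a bogus report. kmemleak_ignore_percpu() — introduced by this same commit, see the mm/kmemleak.c hunk below — paints the percpu object black: never scanned, never reported. A hypothetical module using it for its own percpu data might look like the sketch below (made-up my_stats type; note that in a plain module like this kmemleak would normally see the static pointer anyway — the call only matters when the sole reference sits somewhere the scanner cannot look):

    #include <linux/init.h>
    #include <linux/kmemleak.h>
    #include <linux/module.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct my_stats { u64 events; };

    static struct my_stats __percpu *stats;

    static int __init demo_init(void)
    {
    	stats = alloc_percpu(struct my_stats);
    	if (!stats)
    		return -ENOMEM;
    	/* Tell kmemleak never to scan or report this percpu object,
    	 * e.g. because its only reference is kept outside scanned memory. */
    	kmemleak_ignore_percpu(stats);
    	return 0;
    }

    static void __exit demo_exit(void)
    {
    	free_percpu(stats);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");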

lib/group_cpus.c | 9
@@ -352,6 +352,9 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
 	int ret = -ENOMEM;
 	struct cpumask *masks = NULL;
 
+	if (numgrps == 0)
+		return NULL;
+
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
 
@@ -426,8 +429,12 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
 #else /* CONFIG_SMP */
 struct cpumask *group_cpus_evenly(unsigned int numgrps)
 {
-	struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+	struct cpumask *masks;
 
+	if (numgrps == 0)
+		return NULL;
+
+	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		return NULL;
 
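The SMP path crashed because numgrps == 0 flowed into the group-sizing math; the !SMP path gets the same guard because kcalloc(0, ...) does not fail — it returns the non-NULL ZERO_SIZE_PTR sentinel, so a NULL check passes and the caller later dereferences a pointer backed by zero bytes. The same trap exists in userspace, sketched here with a hypothetical make_groups() helper:

    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors the fixed group_cpus_evenly() contract: a zero-sized
     * request yields NULL instead of an unusable "success" pointer. */
    static int *make_groups(size_t ngroups)
    {
    	if (ngroups == 0)
    		return NULL;	/* explicit guard, as in the fix */
    	return calloc(ngroups, sizeof(int));
    }

    int main(void)
    {
    	int *g = make_groups(0);

    	/* Without the guard, calloc(0, ...) may legally return a
    	 * non-NULL pointer to zero bytes -- crashing on first use. */
    	printf("groups: %p\n", (void *)g);
    	free(g);
    	return 0;
    }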

mm/damon/sysfs-schemes.c | 1
@@ -472,6 +472,7 @@ static ssize_t memcg_path_store(struct kobject *kobj,
 		return -ENOMEM;
 
 	strscpy(path, buf, count + 1);
+	kfree(filter->memcg_path);
 	filter->memcg_path = path;
 	return count;
 }
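The DAMON fix is the classic setter-leak pattern: a sysfs store handler allocates a fresh string and overwrites the pointer, so every rewrite of memcg_path orphaned the previous buffer. A minimal userspace rendition of the fixed setter (sketch, hypothetical set_path() helper):

    #include <stdlib.h>
    #include <string.h>

    struct filter { char *memcg_path; };

    /* Fixed version: release the previous buffer before installing the
     * new one, exactly what the added kfree(filter->memcg_path) does. */
    static int set_path(struct filter *f, const char *buf, size_t count)
    {
    	char *path = malloc(count + 1);

    	if (!path)
    		return -1;
    	memcpy(path, buf, count);
    	path[count] = '\0';
    	free(f->memcg_path);	/* was missing: every store leaked the old path */
    	f->memcg_path = path;
    	return 0;
    }

    int main(void)
    {
    	struct filter f = { 0 };

    	set_path(&f, "/workload/a", 11);
    	set_path(&f, "/workload/b", 11);	/* no longer leaks "/workload/a" */
    	free(f.memcg_path);
    	return 0;
    }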

mm/hugetlb.c | 54
@@ -2787,20 +2787,24 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 /*
  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
  * the old one
  * @h: struct hstate old page belongs to
  * @old_folio: Old folio to dissolve
  * @list: List to isolate the page in case we need to
  * Returns 0 on success, otherwise negated error.
  */
-static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
-		struct folio *old_folio, struct list_head *list)
+static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
+		struct list_head *list)
 {
-	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+	gfp_t gfp_mask;
+	struct hstate *h;
 	int nid = folio_nid(old_folio);
 	struct folio *new_folio = NULL;
 	int ret = 0;
 
 retry:
 	/*
 	 * The old_folio might have been dissolved from under our feet, so make sure
 	 * to carefully check the state under the lock.
 	 */
 	spin_lock_irq(&hugetlb_lock);
 	if (!folio_test_hugetlb(old_folio)) {
 		/*
@@ -2829,8 +2833,10 @@ retry:
 		cond_resched();
 		goto retry;
 	} else {
+		h = folio_hstate(old_folio);
 		if (!new_folio) {
 			spin_unlock_irq(&hugetlb_lock);
+			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 			new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
 							      NULL, NULL);
 			if (!new_folio)
@@ -2874,35 +2880,24 @@ free_new:
 
 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
 {
-	struct hstate *h;
 	int ret = -EBUSY;
 
-	/*
-	 * The page might have been dissolved from under our feet, so make sure
-	 * to carefully check the state under the lock.
-	 * Return success when racing as if we dissolved the page ourselves.
-	 */
-	spin_lock_irq(&hugetlb_lock);
-	if (folio_test_hugetlb(folio)) {
-		h = folio_hstate(folio);
-	} else {
-		spin_unlock_irq(&hugetlb_lock);
+	/* Not to disrupt normal path by vainly holding hugetlb_lock */
+	if (!folio_test_hugetlb(folio))
 		return 0;
-	}
-	spin_unlock_irq(&hugetlb_lock);
 
 	/*
 	 * Fence off gigantic pages as there is a cyclic dependency between
 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
 	 * of bailing out right away without further retrying.
 	 */
-	if (hstate_is_gigantic(h))
+	if (folio_order(folio) > MAX_PAGE_ORDER)
 		return -ENOMEM;
 
 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
 		ret = 0;
 	else if (!folio_ref_count(folio))
-		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
+		ret = alloc_and_dissolve_hugetlb_folio(folio, list);
 
 	return ret;
 }
@@ -2916,7 +2911,6 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
  */
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct hstate *h;
 	struct folio *folio;
 	int ret = 0;
 
@@ -2925,23 +2919,9 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 	while (start_pfn < end_pfn) {
 		folio = pfn_folio(start_pfn);
 
-		/*
-		 * The folio might have been dissolved from under our feet, so make sure
-		 * to carefully check the state under the lock.
-		 */
-		spin_lock_irq(&hugetlb_lock);
-		if (folio_test_hugetlb(folio)) {
-			h = folio_hstate(folio);
-		} else {
-			spin_unlock_irq(&hugetlb_lock);
-			start_pfn++;
-			continue;
-		}
-		spin_unlock_irq(&hugetlb_lock);
-
-		if (!folio_ref_count(folio)) {
-			ret = alloc_and_dissolve_hugetlb_folio(h, folio,
-							       &isolate_list);
+		/* Not to disrupt normal path by vainly holding hugetlb_lock */
+		if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
+			ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
 			if (ret)
 				break;
 
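The hugetlb change drops the "take hugetlb_lock just to read folio state" round trip: the unlocked folio_test_hugetlb()/folio_ref_count() checks are only a fast-path filter, and alloc_and_dissolve_hugetlb_folio() still revalidates everything under hugetlb_lock (the retry: block), so a racing dissolve is caught there. The shape of that pattern, in a self-contained pthread sketch with hypothetical names:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int state = 1;	/* 1: eligible, 0: already dissolved */

    static int slow_path(void)
    {
    	int done = 0;

    	pthread_mutex_lock(&lock);
    	/* Revalidate under the lock: the unlocked peek may be stale. */
    	if (atomic_load(&state) == 1) {
    		atomic_store(&state, 0);
    		done = 1;
    	}
    	pthread_mutex_unlock(&lock);
    	return done;
    }

    int main(void)
    {
    	/* Fast path: a racy peek skips the lock when the object is
    	 * clearly not eligible; correctness comes from slow_path(). */
    	if (atomic_load(&state) != 1)
    		return 0;
    	printf("dissolved: %d\n", slow_path());
    	return 0;
    }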

mm/kmemleak.c | 14
@@ -1246,6 +1246,20 @@ void __ref kmemleak_transient_leak(const void *ptr)
 }
 EXPORT_SYMBOL(kmemleak_transient_leak);
 
+/**
+ * kmemleak_ignore_percpu - similar to kmemleak_ignore but taking a percpu
+ *			    address argument
+ * @ptr:	percpu address of the object
+ */
+void __ref kmemleak_ignore_percpu(const void __percpu *ptr)
+{
+	pr_debug("%s(0x%px)\n", __func__, ptr);
+
+	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
+		make_black_object((unsigned long)ptr, OBJECT_PERCPU);
+}
+EXPORT_SYMBOL_GPL(kmemleak_ignore_percpu);
+
 /**
  * kmemleak_ignore - ignore an allocated object
  * @ptr:	pointer to beginning of the object

scripts/gdb/linux/vfs.py | 2
@@ -22,7 +22,7 @@ def dentry_name(d):
     if parent == d or parent == 0:
         return ""
     p = dentry_name(d['d_parent']) + "/"
-    return p + d['d_iname'].string()
+    return p + d['d_shortname']['string'].string()
 
 class DentryName(gdb.Function):
     """Return string of the full path of a dentry.

tools/testing/selftests/mm/virtual_address_range.c | 7
@@ -77,8 +77,11 @@ static void validate_addr(char *ptr, int high_addr)
 {
 	unsigned long addr = (unsigned long) ptr;
 
-	if (high_addr && addr < HIGH_ADDR_MARK)
-		ksft_exit_fail_msg("Bad address %lx\n", addr);
+	if (high_addr) {
+		if (addr < HIGH_ADDR_MARK)
+			ksft_exit_fail_msg("Bad address %lx\n", addr);
+		return;
+	}
 
 	if (addr > HIGH_ADDR_MARK)
 		ksft_exit_fail_msg("Bad address %lx\n", addr);
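The selftest bug: for high_addr mappings the helper should only reject addresses below HIGH_ADDR_MARK, but the old code fell through to the second check as well, so every valid high address (which is by definition above the mark) tripped "Bad address". The corrected logic, exercised standalone (sketch; the 1UL << 47 boundary is illustrative, not the selftest's actual HIGH_ADDR_MARK value):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative boundary between "low" and "high" mappings. */
    #define HIGH_ADDR_MARK	(1UL << 47)

    static void validate_addr(unsigned long addr, int high_addr)
    {
    	if (high_addr) {
    		if (addr < HIGH_ADDR_MARK) {	/* high mapping below the mark: bad */
    			fprintf(stderr, "Bad address %lx\n", addr);
    			exit(1);
    		}
    		return;	/* the missing early return: high addresses stop here */
    	}
    	if (addr > HIGH_ADDR_MARK) {		/* low mapping above the mark: bad */
    		fprintf(stderr, "Bad address %lx\n", addr);
    		exit(1);
    	}
    }

    int main(void)
    {
    	validate_addr(1UL << 30, 0);	/* low address, low mapping: ok */
    	validate_addr(1UL << 48, 1);	/* high address: ok only with the fix */
    	puts("ok");
    	return 0;
    }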