From 9ebc772559ec15a6e8c385d9515bf5360a3a0ccf Mon Sep 17 00:00:00 2001
From: Radostin Stoyanov
Date: Thu, 27 Mar 2025 12:21:34 +0000
Subject: [PATCH] mm: migrate: use folio_xchg_last_cpupid() in
 folio_migrate_flags()

JIRA: https://issues.redhat.com/browse/RHEL-80382

commit 4e694fe4d2fa3031392bdbeaa88066f67c886a0c
Author: Kefeng Wang <wangkefeng.wang@huawei.com>
Date:   Wed Oct 18 22:08:01 2023 +0800

    mm: migrate: use folio_xchg_last_cpupid() in folio_migrate_flags()

    Convert to use folio_xchg_last_cpupid() in folio_migrate_flags(), also
    directly use folio_nid() instead of page_to_nid(&folio->page).

    Link: https://lkml.kernel.org/r/20231018140806.2783514-15-wangkefeng.wang@huawei.com
    Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
    Cc: David Hildenbrand
    Cc: Huang Ying
    Cc: Ingo Molnar
    Cc: Juri Lelli
    Cc: Matthew Wilcox (Oracle)
    Cc: Peter Zijlstra
    Cc: Vincent Guittot
    Cc: Zi Yan
    Signed-off-by: Andrew Morton

Signed-off-by: Radostin Stoyanov
---
 mm/migrate.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index fa925c5b858d..8aecfe78c6a7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -606,20 +606,20 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 	 * Copy NUMA information to the new page, to prevent over-eager
 	 * future migrations of this same page.
 	 */
-	cpupid = page_cpupid_xchg_last(&folio->page, -1);
+	cpupid = folio_xchg_last_cpupid(folio, -1);
 	/*
 	 * For memory tiering mode, when migrate between slow and fast
 	 * memory node, reset cpupid, because that is used to record
 	 * page access time in slow memory node.
 	 */
 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
-		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
-		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
+		bool f_toptier = node_is_toptier(folio_nid(folio));
+		bool t_toptier = node_is_toptier(folio_nid(newfolio));
 
 		if (f_toptier != t_toptier)
 			cpupid = -1;
 	}
-	page_cpupid_xchg_last(&newfolio->page, cpupid);
+	folio_xchg_last_cpupid(newfolio, cpupid);
 
 	folio_migrate_ksm(newfolio, folio);
 	/*