mm: migrate: convert numamigrate_isolate_page() to numamigrate_isolate_folio()
JIRA: https://issues.redhat.com/browse/RHEL-27745

Conflicts:
	* Minor context conflict on the 2nd hunk due to the out-of-order
	  backport of commit 774f256e7c0 ("mm/vmscan: fix a bug calling
	  wakeup_kswapd() with a wrong zone index")

This patch is a backport of the following upstream commit:
commit 2ac9e99f3b21b2864305fbfba4bae5913274c409
Author: Kefeng Wang <wangkefeng.wang@huawei.com>
Date:   Wed Sep 13 17:51:26 2023 +0800

    mm: migrate: convert numamigrate_isolate_page() to numamigrate_isolate_folio()

    Rename numamigrate_isolate_page() to numamigrate_isolate_folio(), then
    make it take a folio and use the folio API to save compound_head() calls.

    Link: https://lkml.kernel.org/r/20230913095131.2426871-4-wangkefeng.wang@huawei.com
    Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
    Reviewed-by: Zi Yan <ziy@nvidia.com>
    Cc: David Hildenbrand <david@redhat.com>
    Cc: "Huang, Ying" <ying.huang@intel.com>
    Cc: Hugh Dickins <hughd@google.com>
    Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
    Cc: Mike Kravetz <mike.kravetz@oracle.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Rafael Aquini <raquini@redhat.com>
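For readers unfamiliar with the conversion pattern this patch applies: page-based helpers such as thp_nr_pages() and compound_order() must each resolve the head page internally, whereas a struct folio is by definition never a tail page, so folio_nr_pages() and folio_order() can read fields directly. Below is a minimal userspace model of that difference; the helper names match real kernel APIs, but the struct layouts and helper bodies are simplified illustrative stand-ins, not the kernel's actual definitions.

	/*
	 * Simplified userspace model of the page -> folio accessor pattern.
	 * Struct layouts and helper bodies are illustrative stand-ins only.
	 */
	#include <stdio.h>

	struct page {
		struct page *head;	/* tail pages point at their head page */
		unsigned int order;	/* meaningful on the head page only */
	};

	/* Every page-based helper must resolve the head page on each call. */
	static struct page *compound_head(struct page *page)
	{
		return page->head ? page->head : page;
	}

	static unsigned long thp_nr_pages(struct page *page)
	{
		return 1UL << compound_head(page)->order;	/* hidden head lookup */
	}

	static unsigned int compound_order(struct page *page)
	{
		return compound_head(page)->order;		/* another head lookup */
	}

	/* A folio is never a tail page, so its fields can be read directly. */
	struct folio {
		struct page page;
	};

	static struct folio *page_folio(struct page *page)
	{
		return (struct folio *)compound_head(page);	/* one lookup, up front */
	}

	static unsigned long folio_nr_pages(struct folio *folio)
	{
		return 1UL << folio->page.order;
	}

	static unsigned int folio_order(struct folio *folio)
	{
		return folio->page.order;
	}

	int main(void)
	{
		struct page head = { .head = NULL, .order = 2 };	/* 4-page compound page */
		struct page tail = { .head = &head };

		/* Before: each helper repeats the compound_head() walk. */
		printf("nr_pages=%lu order=%u\n", thp_nr_pages(&tail), compound_order(&tail));

		/* After: resolve the folio once, then read fields directly. */
		struct folio *folio = page_folio(&tail);
		printf("nr_pages=%lu order=%u\n", folio_nr_pages(folio), folio_order(folio));
		return 0;
	}

This is why the diff below leaves a single page_folio() call in the caller, migrate_misplaced_page(), and lets numamigrate_isolate_folio() operate on the folio throughout.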
parent 0385689f3a
commit 829524ec07

 mm/migrate.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2499,10 +2499,9 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
 	return __folio_alloc_node(gfp, order, nid);
 }
 
-static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 {
-	int nr_pages = thp_nr_pages(page);
-	int order = compound_order(page);
+	int nr_pages = folio_nr_pages(folio);
 
 	/* Avoid migrating to a node that is nearly full */
 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
@@ -2522,22 +2521,23 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 		if (z < 0)
 			return 0;
 
-		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
+		wakeup_kswapd(pgdat->node_zones + z, 0,
+			      folio_order(folio), ZONE_MOVABLE);
 		return 0;
 	}
 
-	if (!isolate_lru_page(page))
+	if (!folio_isolate_lru(folio))
 		return 0;
 
-	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
+	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
 			    nr_pages);
 
 	/*
-	 * Isolating the page has taken another reference, so the
-	 * caller's reference can be safely dropped without the page
+	 * Isolating the folio has taken another reference, so the
+	 * caller's reference can be safely dropped without the folio
 	 * disappearing underneath us during migration.
 	 */
-	put_page(page);
+	folio_put(folio);
 	return 1;
 }
 
@@ -2571,7 +2571,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	if (page_is_file_lru(page) && PageDirty(page))
 		goto out;
 
-	isolated = numamigrate_isolate_page(pgdat, page);
+	isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
 	if (!isolated)
 		goto out;
 