* [PATCH] mm: Convert migrate_pages() to work on folios
@ 2023-05-13 0:11 Matthew Wilcox (Oracle)
From: Matthew Wilcox (Oracle) @ 2023-05-13 0:11 UTC (permalink / raw)
To: linux-mm; +Cc: Matthew Wilcox (Oracle)
Almost all of the callers & implementors of migrate_pages() were already
converted to use folios. compaction_alloc() & compaction_free() are
trivial to convert as part of this patch and not worth splitting out.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
Documentation/mm/page_migration.rst | 7 +-
.../translations/zh_CN/mm/page_migration.rst | 2 +-
include/linux/migrate.h | 16 +-
mm/compaction.c | 15 +-
mm/mempolicy.c | 15 +-
mm/migrate.c | 161 ++++++++----------
mm/vmscan.c | 15 +-
7 files changed, 108 insertions(+), 123 deletions(-)
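
As an illustration only (not part of the diff below, and using hypothetical
names), here is a minimal sketch of a caller after this conversion: the
new_folio_t/free_folio_t callbacks now take and return struct folio directly,
so none of the page_folio()/&folio->page round-trips remain. It assumes
<linux/migrate.h> and <linux/gfp.h>, mirrors alloc_misplaced_dst_folio(),
and ignores hugetlb for brevity; folio_list, nid and nr_failed are
placeholders:

	/* Hypothetical new_folio_t: allocate a same-order folio on a target node. */
	static struct folio *example_alloc_dst(struct folio *src, unsigned long private)
	{
		int nid = (int)private;	/* caller-chosen target node id */

		return __folio_alloc_node(GFP_HIGHUSER_MOVABLE | __GFP_NOWARN,
					  folio_order(src), nid);
	}

	/* Hypothetical free_folio_t: drop a destination folio that was not used. */
	static void example_free_dst(struct folio *dst, unsigned long private)
	{
		folio_put(dst);
	}

	/* The callbacks are handed straight to migrate_pages(): */
	nr_failed = migrate_pages(&folio_list, example_alloc_dst, example_free_dst,
				  nid, MIGRATE_SYNC, MR_NUMA_MISPLACED, NULL);
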
diff --git a/Documentation/mm/page_migration.rst b/Documentation/mm/page_migration.rst
index 313dce18893e..e35af7805be5 100644
--- a/Documentation/mm/page_migration.rst
+++ b/Documentation/mm/page_migration.rst
@@ -73,14 +73,13 @@ In kernel use of migrate_pages()
It also prevents the swapper or other scans from encountering
the page.
-2. We need to have a function of type new_page_t that can be
+2. We need to have a function of type new_folio_t that can be
passed to migrate_pages(). This function should figure out
- how to allocate the correct new page given the old page.
+ how to allocate the correct new folio given the old folio.
3. The migrate_pages() function is called which attempts
to do the migration. It will call the function to allocate
- the new page for each page that is considered for
- moving.
+ the new folio for each folio that is considered for moving.
How migrate_pages() works
=========================
diff --git a/Documentation/translations/zh_CN/mm/page_migration.rst b/Documentation/translations/zh_CN/mm/page_migration.rst
index 076081dc1635..f95063826a15 100644
--- a/Documentation/translations/zh_CN/mm/page_migration.rst
+++ b/Documentation/translations/zh_CN/mm/page_migration.rst
@@ -55,7 +55,7 @@ mbind()设置一个新的内存策略。一个进程的页面也可以通过sys_
消失。它还可以防止交换器或其他扫描器遇到该页。
-2. 我们需要有一个new_page_t类型的函数,可以传递给migrate_pages()。这个函数应该计算
+2. 我们需要有一个new_folio_t类型的函数,可以传递给migrate_pages()。这个函数应该计算
出如何在给定的旧页面中分配正确的新页面。
3. migrate_pages()函数被调用,它试图进行迁移。它将调用该函数为每个被考虑迁移的页面分
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 6241a1596a75..6de5756d8533 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,8 +7,8 @@
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
-typedef struct page *new_page_t(struct page *page, unsigned long private);
-typedef void free_page_t(struct page *page, unsigned long private);
+typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
+typedef void free_folio_t(struct folio *folio, unsigned long private);
struct migration_target_control;
@@ -67,10 +67,10 @@ int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
-int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
-struct page *alloc_migration_target(struct page *page, unsigned long private);
+struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -85,11 +85,11 @@ int folio_migrate_mapping(struct address_space *mapping,
#else
static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t new,
- free_page_t free, unsigned long private, enum migrate_mode mode,
- int reason, unsigned int *ret_succeeded)
+static inline int migrate_pages(struct list_head *l, new_folio_t new,
+ free_folio_t free, unsigned long private,
+ enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{ return -ENOSYS; }
-static inline struct page *alloc_migration_target(struct page *page,
+static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
diff --git a/mm/compaction.c b/mm/compaction.c
index c8bcdea15f5f..3a8ac58c8af4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1684,11 +1684,10 @@ static void isolate_freepages(struct compact_control *cc)
* This is a migrate-callback that "allocates" freepages by taking pages
* from the isolated freelists in the block we are migrating to.
*/
-static struct page *compaction_alloc(struct page *migratepage,
- unsigned long data)
+static struct folio *compaction_alloc(struct folio *src, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
- struct page *freepage;
+ struct folio *dst;
if (list_empty(&cc->freepages)) {
isolate_freepages(cc);
@@ -1697,11 +1696,11 @@ static struct page *compaction_alloc(struct page *migratepage,
return NULL;
}
- freepage = list_entry(cc->freepages.next, struct page, lru);
- list_del(&freepage->lru);
+ dst = list_entry(cc->freepages.next, struct folio, lru);
+ list_del(&dst->lru);
cc->nr_freepages--;
- return freepage;
+ return dst;
}
/*
@@ -1709,11 +1708,11 @@ static struct page *compaction_alloc(struct page *migratepage,
* freelist. All pages on the freelist are from the same zone, so there is no
* special handling needed for NUMA.
*/
-static void compaction_free(struct page *page, unsigned long data)
+static void compaction_free(struct folio *dst, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
- list_add(&page->lru, &cc->freepages);
+ list_add(&dst->lru, &cc->freepages);
cc->nr_freepages++;
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1756389a0609..f06ca8c18e62 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1195,24 +1195,22 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
* list of pages handed to migrate_pages()--which is how we get here--
* is in virtual address order.
*/
-static struct page *new_page(struct page *page, unsigned long start)
+static struct folio *new_folio(struct folio *src, unsigned long start)
{
- struct folio *dst, *src = page_folio(page);
struct vm_area_struct *vma;
unsigned long address;
VMA_ITERATOR(vmi, current->mm, start);
gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
for_each_vma(vmi, vma) {
- address = page_address_in_vma(page, vma);
+ address = page_address_in_vma(&src->page, vma);
if (address != -EFAULT)
break;
}
if (folio_test_hugetlb(src)) {
- dst = alloc_hugetlb_folio_vma(folio_hstate(src),
+ return alloc_hugetlb_folio_vma(folio_hstate(src),
vma, address);
- return &dst->page;
}
if (folio_test_large(src))
@@ -1221,9 +1219,8 @@ static struct page *new_page(struct page *page, unsigned long start)
/*
* if !vma, vma_alloc_folio() will use task or system default policy
*/
- dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+ return vma_alloc_folio(gfp, folio_order(src), vma, address,
folio_test_large(src));
- return &dst->page;
}
#else
@@ -1239,7 +1236,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
return -ENOSYS;
}
-static struct page *new_page(struct page *page, unsigned long start)
+static struct folio *new_folio(struct folio *src, unsigned long start)
{
return NULL;
}
@@ -1334,7 +1331,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_empty(&pagelist)) {
WARN_ON_ONCE(flags & MPOL_MF_LAZY);
- nr_failed = migrate_pages(&pagelist, new_page, NULL,
+ nr_failed = migrate_pages(&pagelist, new_folio, NULL,
start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
if (nr_failed)
putback_movable_pages(&pagelist);
diff --git a/mm/migrate.c b/mm/migrate.c
index 01cac26a3127..fdf4e00f7fe4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1072,15 +1072,13 @@ static void migrate_folio_undo_src(struct folio *src,
}
/* Restore the destination folio to the original state upon failure */
-static void migrate_folio_undo_dst(struct folio *dst,
- bool locked,
- free_page_t put_new_page,
- unsigned long private)
+static void migrate_folio_undo_dst(struct folio *dst, bool locked,
+ free_folio_t put_new_folio, unsigned long private)
{
if (locked)
folio_unlock(dst);
- if (put_new_page)
- put_new_page(&dst->page, private);
+ if (put_new_folio)
+ put_new_folio(dst, private);
else
folio_put(dst);
}
@@ -1104,14 +1102,13 @@ static void migrate_folio_done(struct folio *src,
}
/* Obtain the lock on page, remove all ptes. */
-static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
- unsigned long private, struct folio *src,
- struct folio **dstp, enum migrate_mode mode,
- enum migrate_reason reason, struct list_head *ret)
+static int migrate_folio_unmap(new_folio_t get_new_folio,
+ free_folio_t put_new_folio, unsigned long private,
+ struct folio *src, struct folio **dstp, enum migrate_mode mode,
+ enum migrate_reason reason, struct list_head *ret)
{
struct folio *dst;
int rc = -EAGAIN;
- struct page *newpage = NULL;
int page_was_mapped = 0;
struct anon_vma *anon_vma = NULL;
bool is_lru = !__PageMovable(&src->page);
@@ -1128,10 +1125,9 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
return MIGRATEPAGE_SUCCESS;
}
- newpage = get_new_page(&src->page, private);
- if (!newpage)
+ dst = get_new_folio(src, private);
+ if (!dst)
return -ENOMEM;
- dst = page_folio(newpage);
*dstp = dst;
dst->private = NULL;
@@ -1251,13 +1247,13 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
ret = NULL;
migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
- migrate_folio_undo_dst(dst, dst_locked, put_new_page, private);
+ migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
return rc;
}
/* Migrate the folio to the newly allocated folio in dst. */
-static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
+static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
struct folio *src, struct folio *dst,
enum migrate_mode mode, enum migrate_reason reason,
struct list_head *ret)
@@ -1329,7 +1325,7 @@ static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
}
migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
- migrate_folio_undo_dst(dst, true, put_new_page, private);
+ migrate_folio_undo_dst(dst, true, put_new_folio, private);
return rc;
}
@@ -1352,16 +1348,14 @@ static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
* because then pte is replaced with migration swap entry and direct I/O code
* will wait in the page fault for migration to complete.
*/
-static int unmap_and_move_huge_page(new_page_t get_new_page,
- free_page_t put_new_page, unsigned long private,
- struct page *hpage, int force,
- enum migrate_mode mode, int reason,
- struct list_head *ret)
+static int unmap_and_move_huge_page(new_folio_t get_new_folio,
+ free_folio_t put_new_folio, unsigned long private,
+ struct folio *src, int force, enum migrate_mode mode,
+ int reason, struct list_head *ret)
{
- struct folio *dst, *src = page_folio(hpage);
+ struct folio *dst;
int rc = -EAGAIN;
int page_was_mapped = 0;
- struct page *new_hpage;
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
@@ -1371,10 +1365,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
return MIGRATEPAGE_SUCCESS;
}
- new_hpage = get_new_page(hpage, private);
- if (!new_hpage)
+ dst = get_new_folio(src, private);
+ if (!dst)
return -ENOMEM;
- dst = page_folio(new_hpage);
if (!folio_trylock(src)) {
if (!force)
@@ -1415,7 +1408,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
* semaphore in write mode here and set TTU_RMAP_LOCKED
* to let lower levels know we have taken the lock.
*/
- mapping = hugetlb_page_mapping_lock_write(hpage);
+ mapping = hugetlb_page_mapping_lock_write(&src->page);
if (unlikely(!mapping))
goto unlock_put_anon;
@@ -1445,7 +1438,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (rc == MIGRATEPAGE_SUCCESS) {
move_hugetlb_state(src, dst, reason);
- put_new_page = NULL;
+ put_new_folio = NULL;
}
out_unlock:
@@ -1461,8 +1454,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
* it. Otherwise, put_page() will drop the reference grabbed during
* isolation.
*/
- if (put_new_page)
- put_new_page(new_hpage, private);
+ if (put_new_folio)
+ put_new_folio(dst, private);
else
folio_putback_active_hugetlb(dst);
@@ -1509,8 +1502,8 @@ struct migrate_pages_stats {
* exist any more. It is caller's responsibility to call putback_movable_pages()
* only if ret != 0.
*/
-static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
- free_page_t put_new_page, unsigned long private,
+static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
+ free_folio_t put_new_folio, unsigned long private,
enum migrate_mode mode, int reason,
struct migrate_pages_stats *stats,
struct list_head *ret_folios)
@@ -1548,9 +1541,9 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
continue;
}
- rc = unmap_and_move_huge_page(get_new_page,
- put_new_page, private,
- &folio->page, pass > 2, mode,
+ rc = unmap_and_move_huge_page(get_new_folio,
+ put_new_folio, private,
+ folio, pass > 2, mode,
reason, ret_folios);
/*
* The rules are:
@@ -1607,11 +1600,11 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
* deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the
* length of the from list must be <= 1.
*/
-static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
- free_page_t put_new_page, unsigned long private,
- enum migrate_mode mode, int reason, struct list_head *ret_folios,
- struct list_head *split_folios, struct migrate_pages_stats *stats,
- int nr_pass)
+static int migrate_pages_batch(struct list_head *from,
+ new_folio_t get_new_folio, free_folio_t put_new_folio,
+ unsigned long private, enum migrate_mode mode, int reason,
+ struct list_head *ret_folios, struct list_head *split_folios,
+ struct migrate_pages_stats *stats, int nr_pass)
{
int retry = 1;
int large_retry = 1;
@@ -1671,8 +1664,9 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
continue;
}
- rc = migrate_folio_unmap(get_new_page, put_new_page, private,
- folio, &dst, mode, reason, ret_folios);
+ rc = migrate_folio_unmap(get_new_folio, put_new_folio,
+ private, folio, &dst, mode, reason,
+ ret_folios);
/*
* The rules are:
* Success: folio will be freed
@@ -1786,7 +1780,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
cond_resched();
- rc = migrate_folio_move(put_new_page, private,
+ rc = migrate_folio_move(put_new_folio, private,
folio, dst, mode,
reason, ret_folios);
/*
@@ -1845,7 +1839,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
true, ret_folios);
list_del(&dst->lru);
- migrate_folio_undo_dst(dst, true, put_new_page, private);
+ migrate_folio_undo_dst(dst, true, put_new_folio, private);
dst = dst2;
dst2 = list_next_entry(dst, lru);
}
@@ -1853,10 +1847,11 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
return rc;
}
-static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
- free_page_t put_new_page, unsigned long private,
- enum migrate_mode mode, int reason, struct list_head *ret_folios,
- struct list_head *split_folios, struct migrate_pages_stats *stats)
+static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
+ free_folio_t put_new_folio, unsigned long private,
+ enum migrate_mode mode, int reason,
+ struct list_head *ret_folios, struct list_head *split_folios,
+ struct migrate_pages_stats *stats)
{
int rc, nr_failed = 0;
LIST_HEAD(folios);
@@ -1864,7 +1859,7 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
memset(&astats, 0, sizeof(astats));
/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
- rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
+ rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
reason, &folios, split_folios, &astats,
NR_MAX_MIGRATE_ASYNC_RETRY);
stats->nr_succeeded += astats.nr_succeeded;
@@ -1886,7 +1881,7 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
list_splice_tail_init(&folios, from);
while (!list_empty(from)) {
list_move(from->next, &folios);
- rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
+ rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
private, mode, reason, ret_folios,
split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
list_splice_tail_init(&folios, ret_folios);
@@ -1903,11 +1898,11 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
* supplied as the target for the page migration
*
* @from: The list of folios to be migrated.
- * @get_new_page: The function used to allocate free folios to be used
+ * @get_new_folio: The function used to allocate free folios to be used
* as the target of the folio migration.
- * @put_new_page: The function used to free target folios if migration
+ * @put_new_folio: The function used to free target folios if migration
* fails, or NULL if no special handling is necessary.
- * @private: Private data to be passed on to get_new_page()
+ * @private: Private data to be passed on to get_new_folio()
* @mode: The migration mode that specifies the constraints for
* folio migration, if any.
* @reason: The reason for folio migration.
@@ -1924,8 +1919,8 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
* considered as the number of non-migrated large folio, no matter how many
* split folios of the large folio are migrated successfully.
*/
-int migrate_pages(struct list_head *from, new_page_t get_new_page,
- free_page_t put_new_page, unsigned long private,
+int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
+ free_folio_t put_new_folio, unsigned long private,
enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
int rc, rc_gather;
@@ -1940,7 +1935,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
memset(&stats, 0, sizeof(stats));
- rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
+ rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
mode, reason, &stats, &ret_folios);
if (rc_gather < 0)
goto out;
@@ -1963,12 +1958,14 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
else
list_splice_init(from, &folios);
if (mode == MIGRATE_ASYNC)
- rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
- mode, reason, &ret_folios, &split_folios, &stats,
- NR_MAX_MIGRATE_PAGES_RETRY);
+ rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
+ private, mode, reason, &ret_folios,
+ &split_folios, &stats,
+ NR_MAX_MIGRATE_PAGES_RETRY);
else
- rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
- mode, reason, &ret_folios, &split_folios, &stats);
+ rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
+ private, mode, reason, &ret_folios,
+ &split_folios, &stats);
list_splice_tail_init(&folios, &ret_folios);
if (rc < 0) {
rc_gather = rc;
@@ -1981,8 +1978,9 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
* is counted as 1 failure already. And, we only try to migrate
* with minimal effort, force MIGRATE_ASYNC mode and retry once.
*/
- migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
- MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
+ migrate_pages_batch(&split_folios, get_new_folio,
+ put_new_folio, private, MIGRATE_ASYNC, reason,
+ &ret_folios, NULL, &stats, 1);
list_splice_tail_init(&split_folios, &ret_folios);
}
rc_gather += rc;
@@ -2017,14 +2015,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
return rc_gather;
}
-struct page *alloc_migration_target(struct page *page, unsigned long private)
+struct folio *alloc_migration_target(struct folio *src, unsigned long private)
{
- struct folio *folio = page_folio(page);
struct migration_target_control *mtc;
gfp_t gfp_mask;
unsigned int order = 0;
- struct folio *hugetlb_folio = NULL;
- struct folio *new_folio = NULL;
int nid;
int zidx;
@@ -2032,33 +2027,30 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
gfp_mask = mtc->gfp_mask;
nid = mtc->nid;
if (nid == NUMA_NO_NODE)
- nid = folio_nid(folio);
+ nid = folio_nid(src);
- if (folio_test_hugetlb(folio)) {
- struct hstate *h = folio_hstate(folio);
+ if (folio_test_hugetlb(src)) {
+ struct hstate *h = folio_hstate(src);
gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
- hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
+ return alloc_hugetlb_folio_nodemask(h, nid,
mtc->nmask, gfp_mask);
- return &hugetlb_folio->page;
}
- if (folio_test_large(folio)) {
+ if (folio_test_large(src)) {
/*
* clear __GFP_RECLAIM to make the migration callback
* consistent with regular THP allocations.
*/
gfp_mask &= ~__GFP_RECLAIM;
gfp_mask |= GFP_TRANSHUGE;
- order = folio_order(folio);
+ order = folio_order(src);
}
- zidx = zone_idx(folio_zone(folio));
+ zidx = zone_idx(folio_zone(src));
if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
gfp_mask |= __GFP_HIGHMEM;
- new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
-
- return &new_folio->page;
+ return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
}
#ifdef CONFIG_NUMA
@@ -2509,13 +2501,12 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
return false;
}
-static struct page *alloc_misplaced_dst_page(struct page *page,
+static struct folio *alloc_misplaced_dst_folio(struct folio *src,
unsigned long data)
{
int nid = (int) data;
- int order = compound_order(page);
+ int order = folio_order(src);
gfp_t gfp = __GFP_THISNODE;
- struct folio *new;
if (order > 0)
gfp |= GFP_TRANSHUGE_LIGHT;
@@ -2524,9 +2515,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
__GFP_NOWARN;
gfp &= ~__GFP_RECLAIM;
}
- new = __folio_alloc_node(gfp, order, nid);
-
- return &new->page;
+ return __folio_alloc_node(gfp, order, nid);
}
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
@@ -2604,7 +2593,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
goto out;
list_add(&page->lru, &migratepages);
- nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
+ nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
NULL, node, MIGRATE_ASYNC,
MR_NUMA_MISPLACED, &nr_succeeded);
if (nr_remaining) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d257916f39e5..a41fd3333773 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1620,9 +1620,10 @@ static void folio_check_dirty_writeback(struct folio *folio,
mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
}
-static struct page *alloc_demote_page(struct page *page, unsigned long private)
+static struct folio *alloc_demote_folio(struct folio *src,
+ unsigned long private)
{
- struct page *target_page;
+ struct folio *dst;
nodemask_t *allowed_mask;
struct migration_target_control *mtc;
@@ -1640,14 +1641,14 @@ static struct page *alloc_demote_page(struct page *page, unsigned long private)
*/
mtc->nmask = NULL;
mtc->gfp_mask |= __GFP_THISNODE;
- target_page = alloc_migration_target(page, (unsigned long)mtc);
- if (target_page)
- return target_page;
+ dst = alloc_migration_target(src, (unsigned long)mtc);
+ if (dst)
+ return dst;
mtc->gfp_mask &= ~__GFP_THISNODE;
mtc->nmask = allowed_mask;
- return alloc_migration_target(page, (unsigned long)mtc);
+ return alloc_migration_target(src, (unsigned long)mtc);
}
/*
@@ -1682,7 +1683,7 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
node_get_allowed_targets(pgdat, &allowed_mask);
/* Demotion ignores all cpuset and mempolicy settings */
- migrate_pages(demote_folios, alloc_demote_page, NULL,
+ migrate_pages(demote_folios, alloc_demote_folio, NULL,
(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
&nr_succeeded);
--
2.39.2
* Re: [PATCH] mm: Convert migrate_pages() to work on folios
From: Huang, Ying @ 2023-05-15 7:12 UTC (permalink / raw)
To: Matthew Wilcox (Oracle); +Cc: linux-mm
"Matthew Wilcox (Oracle)" <willy@infradead.org> writes:
> Almost all of the callers & implementors of migrate_pages() were already
> converted to use folios. compaction_alloc() & compaction_free() are
> trivial to convert as part of this patch and not worth splitting out.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Especially the Chinese document :-)
Is it necessary to rename migrate_pages() to migrate_folios()?
Best Regards,
Huang, Ying