* [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-07 21:10 [PATCH v3 0/4] Make MIGRATE_ISOLATE a standalone bit Zi Yan
@ 2025-05-07 21:10 ` Zi Yan
2025-05-08 5:24 ` Johannes Weiner
2025-05-08 20:22 ` Zi Yan
2025-05-07 21:10 ` [PATCH v3 2/4] mm/page_isolation: remove migratetype from move_freepages_block_isolate() Zi Yan
` (3 subsequent siblings)
4 siblings, 2 replies; 23+ messages in thread
From: Zi Yan @ 2025-05-07 21:10 UTC (permalink / raw)
To: David Hildenbrand, Johannes Weiner, linux-mm
Cc: Andrew Morton, Oscar Salvador, Vlastimil Babka, Baolin Wang,
Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel,
Zi Yan
During page isolation, the original migratetype is overwritten, since
MIGRATE_* are enums and stored in pageblock bitmaps. Change
MIGRATE_ISOLATE to be stored as a standalone bit, PB_migrate_isolate, like
PB_migrate_skip, so that the migratetype is not lost during pageblock
isolation. Pageblock bits need to be word aligned, so expand the number
of pageblock bits from 4 to 8 and make PB_migrate_isolate bit 7.
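
With this change, the per-pageblock bits are laid out roughly as follows
(bits 4-6 are unused padding):

	bit 0-2: PB_migrate (migratetype, 3 bits)
	bit 3:   PB_migrate_skip
	bit 7:   PB_migrate_isolate (only with CONFIG_MEMORY_ISOLATION)
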
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/mmzone.h | 17 ++++++++++----
include/linux/page-isolation.h | 2 +-
include/linux/pageblock-flags.h | 33 +++++++++++++++++++++++++-
mm/page_alloc.c | 41 ++++++++++++++++++++++++++++++++-
4 files changed, 86 insertions(+), 7 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b19a98c20de8..9ec022a0b826 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -106,14 +106,23 @@ static inline bool migratetype_is_mergeable(int mt)
extern int page_group_by_mobility_disabled;
-#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
+#ifdef CONFIG_MEMORY_ISOLATION
+#define MIGRATETYPE_MASK ((BIT(PB_migratetype_bits) - 1) | PB_migrate_isolate_bit)
+#else
+#define MIGRATETYPE_MASK (BIT(PB_migratetype_bits) - 1)
+#endif
+#ifdef CONFIG_MEMORY_ISOLATION
+unsigned long get_pageblock_migratetype(const struct page *page);
+#else
#define get_pageblock_migratetype(page) \
get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
-#define folio_migratetype(folio) \
- get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
- MIGRATETYPE_MASK)
+#endif
+
+#define folio_migratetype(folio) \
+ get_pageblock_migratetype(&folio->page)
+
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 898bb788243b..51797dc39cbc 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -5,7 +5,7 @@
#ifdef CONFIG_MEMORY_ISOLATION
static inline bool is_migrate_isolate_page(struct page *page)
{
- return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
+ return get_pageblock_isolate(page);
}
static inline bool is_migrate_isolate(int migratetype)
{
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 0c4963339f0b..9fadae5892b2 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -20,7 +20,10 @@ enum pageblock_bits {
PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
/* 3 bits required for migrate types */
PB_migrate_skip,/* If set the block is skipped by compaction */
-
+#ifdef CONFIG_MEMORY_ISOLATION
+ PB_migrate_isolate = 7, /* If set the block is isolated */
+ /* set it to 7 to make pageblock bit word aligned */
+#endif
/*
* Assume the bits will always align on a word. If this assumption
* changes then get/set pageblock needs updating.
@@ -28,6 +31,10 @@ enum pageblock_bits {
NR_PAGEBLOCK_BITS
};
+#ifdef CONFIG_MEMORY_ISOLATION
+#define PB_migrate_isolate_bit BIT(PB_migrate_isolate)
+#endif
+
#if defined(CONFIG_PAGE_BLOCK_ORDER)
#define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER
#else
@@ -105,4 +112,28 @@ static inline void set_pageblock_skip(struct page *page)
}
#endif /* CONFIG_COMPACTION */
+#ifdef CONFIG_MEMORY_ISOLATION
+#define get_pageblock_isolate(page) \
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
+ PB_migrate_isolate_bit)
+#define clear_pageblock_isolate(page) \
+ set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
+ PB_migrate_isolate_bit)
+#define set_pageblock_isolate(page) \
+ set_pfnblock_flags_mask(page, PB_migrate_isolate_bit, \
+ page_to_pfn(page), \
+ PB_migrate_isolate_bit)
+#else
+static inline bool get_pageblock_isolate(struct page *page)
+{
+ return false;
+}
+static inline void clear_pageblock_isolate(struct page *page)
+{
+}
+static inline void set_pageblock_isolate(struct page *page)
+{
+}
+#endif /* CONFIG_MEMORY_ISOLATION */
+
#endif /* PAGEBLOCK_FLAGS_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c77592b22256..acf68ef041d8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -381,12 +381,40 @@ unsigned long get_pfnblock_flags_mask(const struct page *page,
return (word >> bitidx) & mask;
}
+#ifdef CONFIG_MEMORY_ISOLATION
+unsigned long get_pageblock_migratetype(const struct page *page)
+{
+ unsigned long flags;
+
+ flags = get_pfnblock_flags_mask(page, page_to_pfn(page),
+ MIGRATETYPE_MASK);
+ if (flags & PB_migrate_isolate_bit)
+ return MIGRATE_ISOLATE;
+
+ return flags;
+}
+
+static __always_inline int get_pfnblock_migratetype(const struct page *page,
+ unsigned long pfn)
+{
+ unsigned long flags;
+
+ flags = get_pfnblock_flags_mask(page, pfn,
+ MIGRATETYPE_MASK);
+ if (flags & PB_migrate_isolate_bit)
+ return MIGRATE_ISOLATE;
+
+ return flags;
+}
+#else
static __always_inline int get_pfnblock_migratetype(const struct page *page,
unsigned long pfn)
{
return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}
+#endif
+
/**
* set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
* @page: The page within the block of interest
@@ -402,8 +430,14 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
unsigned long bitidx, word_bitidx;
unsigned long word;
+#ifdef CONFIG_MEMORY_ISOLATION
+ BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
+ /* extra one for MIGRATE_ISOLATE */
+ BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits) + 1);
+#else
BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
+#endif
bitmap = get_pageblock_bitmap(page, pfn);
bitidx = pfn_to_bitidx(page, pfn);
@@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
migratetype < MIGRATE_PCPTYPES))
migratetype = MIGRATE_UNMOVABLE;
- set_pfnblock_flags_mask(page, (unsigned long)migratetype,
+#ifdef CONFIG_MEMORY_ISOLATION
+ if (migratetype == MIGRATE_ISOLATE)
+ set_pageblock_isolate(page);
+ else
+#endif
+ set_pfnblock_flags_mask(page, (unsigned long)migratetype,
page_to_pfn(page), MIGRATETYPE_MASK);
}
--
2.47.2
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-07 21:10 ` [PATCH v3 1/4] mm/page_isolation: make page isolation " Zi Yan
@ 2025-05-08 5:24 ` Johannes Weiner
2025-05-08 15:27 ` Zi Yan
2025-05-08 20:22 ` Zi Yan
1 sibling, 1 reply; 23+ messages in thread
From: Johannes Weiner @ 2025-05-08 5:24 UTC (permalink / raw)
To: Zi Yan
Cc: David Hildenbrand, linux-mm, Andrew Morton, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On Wed, May 07, 2025 at 05:10:56PM -0400, Zi Yan wrote:
> During page isolation, the original migratetype is overwritten, since
> MIGRATE_* are enums and stored in pageblock bitmaps. Change
> MIGRATE_ISOLATE to be stored a standalone bit, PB_migrate_isolate, like
> PB_migrate_skip, so that migratetype is not lost during pageblock
> isolation. pageblock bits needs to be word aligned, so expand
> the number of pageblock bits from 4 to 8 and make PB_migrate_isolate bit 7.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> include/linux/mmzone.h | 17 ++++++++++----
> include/linux/page-isolation.h | 2 +-
> include/linux/pageblock-flags.h | 33 +++++++++++++++++++++++++-
> mm/page_alloc.c | 41 ++++++++++++++++++++++++++++++++-
> 4 files changed, 86 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index b19a98c20de8..9ec022a0b826 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -106,14 +106,23 @@ static inline bool migratetype_is_mergeable(int mt)
>
> extern int page_group_by_mobility_disabled;
>
> -#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
> +#ifdef CONFIG_MEMORY_ISOLATION
> +#define MIGRATETYPE_MASK ((BIT(PB_migratetype_bits) - 1) | PB_migrate_isolate_bit)
> +#else
> +#define MIGRATETYPE_MASK (BIT(PB_migratetype_bits) - 1)
> +#endif
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +unsigned long get_pageblock_migratetype(const struct page *page);
> +#else
> #define get_pageblock_migratetype(page) \
> get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
>
> -#define folio_migratetype(folio) \
> - get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
> - MIGRATETYPE_MASK)
> +#endif
> +
> +#define folio_migratetype(folio) \
> + get_pageblock_migratetype(&folio->page)
> +
> struct free_area {
> struct list_head free_list[MIGRATE_TYPES];
> unsigned long nr_free;
> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
> index 898bb788243b..51797dc39cbc 100644
> --- a/include/linux/page-isolation.h
> +++ b/include/linux/page-isolation.h
> @@ -5,7 +5,7 @@
> #ifdef CONFIG_MEMORY_ISOLATION
> static inline bool is_migrate_isolate_page(struct page *page)
> {
> - return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
> + return get_pageblock_isolate(page);
The old version still works, right?
It would match is_migrate_isolate() a bit better, but no strong
feelings either way...
> static inline bool is_migrate_isolate(int migratetype)
> {
> diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
> index 0c4963339f0b..9fadae5892b2 100644
> --- a/include/linux/pageblock-flags.h
> +++ b/include/linux/pageblock-flags.h
> @@ -20,7 +20,10 @@ enum pageblock_bits {
> PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
> /* 3 bits required for migrate types */
> PB_migrate_skip,/* If set the block is skipped by compaction */
> -
> +#ifdef CONFIG_MEMORY_ISOLATION
> + PB_migrate_isolate = 7, /* If set the block is isolated */
> + /* set it to 7 to make pageblock bit word aligned */
> +#endif
> /*
> * Assume the bits will always align on a word. If this assumption
> * changes then get/set pageblock needs updating.
> @@ -28,6 +31,10 @@ enum pageblock_bits {
> NR_PAGEBLOCK_BITS
> };
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +#define PB_migrate_isolate_bit BIT(PB_migrate_isolate)
> +#endif
> +
> #if defined(CONFIG_PAGE_BLOCK_ORDER)
> #define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER
> #else
> @@ -105,4 +112,28 @@ static inline void set_pageblock_skip(struct page *page)
> }
> #endif /* CONFIG_COMPACTION */
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +#define get_pageblock_isolate(page) \
> + get_pfnblock_flags_mask(page, page_to_pfn(page), \
> + PB_migrate_isolate_bit)
> +#define clear_pageblock_isolate(page) \
> + set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
> + PB_migrate_isolate_bit)
> +#define set_pageblock_isolate(page) \
> + set_pfnblock_flags_mask(page, PB_migrate_isolate_bit, \
> + page_to_pfn(page), \
> + PB_migrate_isolate_bit)
Would it make sense to move these to page_isolation.c? Then they
wouldn't have to be macros.
> +#else
> +static inline bool get_pageblock_isolate(struct page *page)
> +{
> + return false;
> +}
> +static inline void clear_pageblock_isolate(struct page *page)
> +{
> +}
> +static inline void set_pageblock_isolate(struct page *page)
> +{
> +}
> +#endif /* CONFIG_MEMORY_ISOLATION */
> +
> #endif /* PAGEBLOCK_FLAGS_H */
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index c77592b22256..acf68ef041d8 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -381,12 +381,40 @@ unsigned long get_pfnblock_flags_mask(const struct page *page,
> return (word >> bitidx) & mask;
> }
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +unsigned long get_pageblock_migratetype(const struct page *page)
> +{
> + unsigned long flags;
> +
> + flags = get_pfnblock_flags_mask(page, page_to_pfn(page),
> + MIGRATETYPE_MASK);
> + if (flags & PB_migrate_isolate_bit)
> + return MIGRATE_ISOLATE;
> +
> + return flags;
> +}
Since MIGRATETYPE_MASK includes the isolate bit if it exists, I think
you can share the get_pfnblock_flags_mask() part:
static inline unsigned long get_pageblock_migratetype(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;

	flags = get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
#ifdef CONFIG_MEMORY_ISOLATION
	if (flags & PB_migrate_isolate_bit)
		return MIGRATE_ISOLATE;
#endif
	return flags;
}
> +static __always_inline int get_pfnblock_migratetype(const struct page *page,
> + unsigned long pfn)
> +{
> + unsigned long flags;
> +
> + flags = get_pfnblock_flags_mask(page, pfn,
> + MIGRATETYPE_MASK);
> + if (flags & PB_migrate_isolate_bit)
> + return MIGRATE_ISOLATE;
> +
> + return flags;
> +}
> +#else
> static __always_inline int get_pfnblock_migratetype(const struct page *page,
> unsigned long pfn)
> {
> return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
> }
Same with this one.
>
> +#endif
> +
> /**
> * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
> * @page: The page within the block of interest
> @@ -402,8 +430,14 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
> unsigned long bitidx, word_bitidx;
> unsigned long word;
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
> + /* extra one for MIGRATE_ISOLATE */
> + BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits) + 1);
> +#else
> BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
> BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
> +#endif
>
> bitmap = get_pageblock_bitmap(page, pfn);
> bitidx = pfn_to_bitidx(page, pfn);
> @@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
> migratetype < MIGRATE_PCPTYPES))
> migratetype = MIGRATE_UNMOVABLE;
>
> - set_pfnblock_flags_mask(page, (unsigned long)migratetype,
> +#ifdef CONFIG_MEMORY_ISOLATION
> + if (migratetype == MIGRATE_ISOLATE)
> + set_pageblock_isolate(page);
Are there paths actually doing this after the second patch?
There are many instances that want to *read* the migratetype or
MIGRATE_ISOLATE, but only isolation code should be manipulating that
bit through the dedicated set/toggle_pageblock_isolate API.
If there isn't one, it might be good to enforce this with a VM_WARN
instead.
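For illustration only, a rough sketch of that enforcement (reusing the
tail of the function as-is; the VM_WARN_ONCE form and message are just
an example, not part of this patch):

#ifdef CONFIG_MEMORY_ISOLATION
	/* only the isolation code should set/clear the isolate bit */
	VM_WARN_ONCE(migratetype == MIGRATE_ISOLATE,
		     "use the dedicated pageblock isolation API instead\n");
#endif
	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);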
> + else
> +#endif
> + set_pfnblock_flags_mask(page, (unsigned long)migratetype,
> page_to_pfn(page), MIGRATETYPE_MASK);
If the branch stays, you could add a `return' to the MIGRATE_ISOLATE
leg, drop the else and indent this line normally.
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-08 5:24 ` Johannes Weiner
@ 2025-05-08 15:27 ` Zi Yan
2025-05-08 19:17 ` Zi Yan
0 siblings, 1 reply; 23+ messages in thread
From: Zi Yan @ 2025-05-08 15:27 UTC (permalink / raw)
To: Johannes Weiner
Cc: David Hildenbrand, linux-mm, Andrew Morton, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On 8 May 2025, at 1:24, Johannes Weiner wrote:
> On Wed, May 07, 2025 at 05:10:56PM -0400, Zi Yan wrote:
>> During page isolation, the original migratetype is overwritten, since
>> MIGRATE_* are enums and stored in pageblock bitmaps. Change
>> MIGRATE_ISOLATE to be stored a standalone bit, PB_migrate_isolate, like
>> PB_migrate_skip, so that migratetype is not lost during pageblock
>> isolation. pageblock bits needs to be word aligned, so expand
>> the number of pageblock bits from 4 to 8 and make PB_migrate_isolate bit 7.
>>
>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>> ---
>> include/linux/mmzone.h | 17 ++++++++++----
>> include/linux/page-isolation.h | 2 +-
>> include/linux/pageblock-flags.h | 33 +++++++++++++++++++++++++-
>> mm/page_alloc.c | 41 ++++++++++++++++++++++++++++++++-
>> 4 files changed, 86 insertions(+), 7 deletions(-)
>>
>> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
>> index b19a98c20de8..9ec022a0b826 100644
>> --- a/include/linux/mmzone.h
>> +++ b/include/linux/mmzone.h
>> @@ -106,14 +106,23 @@ static inline bool migratetype_is_mergeable(int mt)
>>
>> extern int page_group_by_mobility_disabled;
>>
>> -#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> +#define MIGRATETYPE_MASK ((BIT(PB_migratetype_bits) - 1) | PB_migrate_isolate_bit)
>> +#else
>> +#define MIGRATETYPE_MASK (BIT(PB_migratetype_bits) - 1)
>> +#endif
>>
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> +unsigned long get_pageblock_migratetype(const struct page *page);
>> +#else
>> #define get_pageblock_migratetype(page) \
>> get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
>>
>> -#define folio_migratetype(folio) \
>> - get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
>> - MIGRATETYPE_MASK)
>> +#endif
>> +
>> +#define folio_migratetype(folio) \
>> + get_pageblock_migratetype(&folio->page)
>> +
>> struct free_area {
>> struct list_head free_list[MIGRATE_TYPES];
>> unsigned long nr_free;
>> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
>> index 898bb788243b..51797dc39cbc 100644
>> --- a/include/linux/page-isolation.h
>> +++ b/include/linux/page-isolation.h
>> @@ -5,7 +5,7 @@
>> #ifdef CONFIG_MEMORY_ISOLATION
>> static inline bool is_migrate_isolate_page(struct page *page)
>> {
>> - return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
>> + return get_pageblock_isolate(page);
>
> The old version still works, right?
>
> It would match is_migrate_isolate() a bit better, but no strong
> feelings either way...
Yes, and the compiler should generate the same code. OK, I will drop this.
>
>> static inline bool is_migrate_isolate(int migratetype)
>> {
>> diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
>> index 0c4963339f0b..9fadae5892b2 100644
>> --- a/include/linux/pageblock-flags.h
>> +++ b/include/linux/pageblock-flags.h
>> @@ -20,7 +20,10 @@ enum pageblock_bits {
>> PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
>> /* 3 bits required for migrate types */
>> PB_migrate_skip,/* If set the block is skipped by compaction */
>> -
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> + PB_migrate_isolate = 7, /* If set the block is isolated */
>> + /* set it to 7 to make pageblock bit word aligned */
>> +#endif
>> /*
>> * Assume the bits will always align on a word. If this assumption
>> * changes then get/set pageblock needs updating.
>> @@ -28,6 +31,10 @@ enum pageblock_bits {
>> NR_PAGEBLOCK_BITS
>> };
>>
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> +#define PB_migrate_isolate_bit BIT(PB_migrate_isolate)
>> +#endif
>> +
>> #if defined(CONFIG_PAGE_BLOCK_ORDER)
>> #define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER
>> #else
>> @@ -105,4 +112,28 @@ static inline void set_pageblock_skip(struct page *page)
>> }
>> #endif /* CONFIG_COMPACTION */
>>
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> +#define get_pageblock_isolate(page) \
>> + get_pfnblock_flags_mask(page, page_to_pfn(page), \
>> + PB_migrate_isolate_bit)
>> +#define clear_pageblock_isolate(page) \
>> + set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
>> + PB_migrate_isolate_bit)
>> +#define set_pageblock_isolate(page) \
>> + set_pfnblock_flags_mask(page, PB_migrate_isolate_bit, \
>> + page_to_pfn(page), \
>> + PB_migrate_isolate_bit)
>
> Would it make sense to move these to page_isolation.c? Then they
> wouldn't have to be macros.
Sure. Although I am using get_pageblock_isolate() in mm/page_alloc.c,
I can change it to use get_pageblock_migratetype() == MIGRATE_ISOLATE.
>
>> +#else
>> +static inline bool get_pageblock_isolate(struct page *page)
>> +{
>> + return false;
>> +}
>> +static inline void clear_pageblock_isolate(struct page *page)
>> +{
>> +}
>> +static inline void set_pageblock_isolate(struct page *page)
>> +{
>> +}
>> +#endif /* CONFIG_MEMORY_ISOLATION */
>> +
>> #endif /* PAGEBLOCK_FLAGS_H */
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index c77592b22256..acf68ef041d8 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -381,12 +381,40 @@ unsigned long get_pfnblock_flags_mask(const struct page *page,
>> return (word >> bitidx) & mask;
>> }
>>
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> +unsigned long get_pageblock_migratetype(const struct page *page)
>> +{
>> + unsigned long flags;
>> +
>> + flags = get_pfnblock_flags_mask(page, page_to_pfn(page),
>> + MIGRATETYPE_MASK);
>> + if (flags & PB_migrate_isolate_bit)
>> + return MIGRATE_ISOLATE;
>> +
>> + return flags;
>> +}
>
> Since MIGRATETYPE_MASK includes the isolate bit if it exists, I think
> you can share the get_pfnblock_flags_mask() part:
>
> static inline get_pageblock_migratetype(const struct page *page)
> {
> unsigned long pfn = page_to_pfn(page);
>
> flags = get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
> #ifdef CONFIG_MEMORY_ISOLATEION
> if (flags & PB_migrate_isolate_bit)
> return MIGRATE_ISOLATE;
> #endif
> return flags;
> }
>
>> +static __always_inline int get_pfnblock_migratetype(const struct page *page,
>> + unsigned long pfn)
>> +{
>> + unsigned long flags;
>> +
>> + flags = get_pfnblock_flags_mask(page, pfn,
>> + MIGRATETYPE_MASK);
>> + if (flags & PB_migrate_isolate_bit)
>> + return MIGRATE_ISOLATE;
>> +
>> + return flags;
>> +}
>> +#else
>> static __always_inline int get_pfnblock_migratetype(const struct page *page,
>> unsigned long pfn)
>> {
>> return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
>> }
>
> Same with this one.
>
OK, will change these two.
>>
>> +#endif
>> +
>> /**
>> * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
>> * @page: The page within the block of interest
>> @@ -402,8 +430,14 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
>> unsigned long bitidx, word_bitidx;
>> unsigned long word;
>>
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
>> + /* extra one for MIGRATE_ISOLATE */
>> + BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits) + 1);
>> +#else
>> BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
>> BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
>> +#endif
>>
>> bitmap = get_pageblock_bitmap(page, pfn);
>> bitidx = pfn_to_bitidx(page, pfn);
>> @@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
>> migratetype < MIGRATE_PCPTYPES))
>> migratetype = MIGRATE_UNMOVABLE;
>>
>> - set_pfnblock_flags_mask(page, (unsigned long)migratetype,
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> + if (migratetype == MIGRATE_ISOLATE)
>> + set_pageblock_isolate(page);
>
> Are there paths actually doing this after the second patch?
>
> There are many instances that want to *read* the migratetype or
> MIGRATE_ISOLATE, but only isolation code should be manipulating that
> bit through the dedicated set/toggle_pageblock_isolate API.
>
> If there isn't one, it might be good to enforce this with a VM_WARN
> instead.
I checked all set_pageblock_migratetype() callers and do not see
one using it for pageblock isolation. Let me replace the code
with a VM_WARN and add a comment to tell users to use dedicated
pageblock isolation APIs.
Thanks for all the reviews. Let me send the fixup patches.
>
>> + else
>> +#endif
>> + set_pfnblock_flags_mask(page, (unsigned long)migratetype,
>> page_to_pfn(page), MIGRATETYPE_MASK);
>
> If the branch stays, you could add a `return' to the MIGRATE_ISOLATE
> leg, drop the else and indent this line normally.
--
Best Regards,
Yan, Zi
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-08 15:27 ` Zi Yan
@ 2025-05-08 19:17 ` Zi Yan
2025-05-08 20:46 ` Johannes Weiner
0 siblings, 1 reply; 23+ messages in thread
From: Zi Yan @ 2025-05-08 19:17 UTC (permalink / raw)
To: Johannes Weiner
Cc: David Hildenbrand, linux-mm, Andrew Morton, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
>>> @@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
>>> migratetype < MIGRATE_PCPTYPES))
>>> migratetype = MIGRATE_UNMOVABLE;
>>>
>>> - set_pfnblock_flags_mask(page, (unsigned long)migratetype,
>>> +#ifdef CONFIG_MEMORY_ISOLATION
>>> + if (migratetype == MIGRATE_ISOLATE)
>>> + set_pageblock_isolate(page);
>>
>> Are there paths actually doing this after the second patch?
>>
>> There are many instances that want to *read* the migratetype or
>> MIGRATE_ISOLATE, but only isolation code should be manipulating that
>> bit through the dedicated set/toggle_pageblock_isolate API.
>>
>> If there isn't one, it might be good to enforce this with a VM_WARN
>> instead.
>
> I checked all set_pageblock_migratetype() callers and do not see
> one using it for pageblock isolation. Let me replace the code
> with a VM_WARN and add a comment to tell users to use dedicated
> pageblock isolation APIs.
>
Actually, move_freepages_block_isolate() calls __move_freepages_block()
to move free pages onto the MIGRATE_ISOLATE free list, and
set_pageblock_migratetype() is used inside __move_freepages_block().
So the branch has to stay. Will use the suggestion below.
>>
>>> + else
>>> +#endif
>>> + set_pfnblock_flags_mask(page, (unsigned long)migratetype,
>>> page_to_pfn(page), MIGRATETYPE_MASK);
>>
>> If the branch stays, you could add a `return' to the MIGRATE_ISOLATE
>> leg, drop the else and indent this line normally.
Best Regards,
Yan, Zi
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-08 19:17 ` Zi Yan
@ 2025-05-08 20:46 ` Johannes Weiner
2025-05-08 20:53 ` Zi Yan
0 siblings, 1 reply; 23+ messages in thread
From: Johannes Weiner @ 2025-05-08 20:46 UTC (permalink / raw)
To: Zi Yan
Cc: David Hildenbrand, linux-mm, Andrew Morton, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On Thu, May 08, 2025 at 03:17:05PM -0400, Zi Yan wrote:
>
> >>> @@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
> >>> migratetype < MIGRATE_PCPTYPES))
> >>> migratetype = MIGRATE_UNMOVABLE;
> >>>
> >>> - set_pfnblock_flags_mask(page, (unsigned long)migratetype,
> >>> +#ifdef CONFIG_MEMORY_ISOLATION
> >>> + if (migratetype == MIGRATE_ISOLATE)
> >>> + set_pageblock_isolate(page);
> >>
> >> Are there paths actually doing this after the second patch?
> >>
> >> There are many instances that want to *read* the migratetype or
> >> MIGRATE_ISOLATE, but only isolation code should be manipulating that
> >> bit through the dedicated set/toggle_pageblock_isolate API.
> >>
> >> If there isn't one, it might be good to enforce this with a VM_WARN
> >> instead.
> >
> > I checked all set_pageblock_migratetype() callers and do not see
> > one using it for pageblock isolation. Let me replace the code
> > with a VM_WARN and add a comment to tell users to use dedicated
> > pageblock isolation APIs.
> >
>
> Actually, move_freepages_block_isolate() calls __move_freepages_block()
> to move free pages to MIGRATE_ISOLATE pageblock and
> set_pageblock_migratetype() is used inside __move_freepages_block().
> So the branch has to stay. Will use the suggestion below.
Ah, good catch. But looking at the callers, it's:
move_freepages_block()
move_freepages_block_isolate()
try_to_claim_block()
The last one would benefit from having the set_pageblock_migratetype()
there explicitly, as this is what this function is supposed to do. It
also should never set the isolation bit.
move_freepages_block_isolate() has two set_pageblock_migratetype()
calls already. And after the series, it should only manipulate the
isolate bit, not change the actual migratetype anymore, right?
Maybe it makes the most sense to move it into the three callers?
And then fortify set_pageblock_migratetype() after all.
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-08 20:46 ` Johannes Weiner
@ 2025-05-08 20:53 ` Zi Yan
2025-05-09 1:33 ` Zi Yan
0 siblings, 1 reply; 23+ messages in thread
From: Zi Yan @ 2025-05-08 20:53 UTC (permalink / raw)
To: Johannes Weiner, Andrew Morton
Cc: David Hildenbrand, linux-mm, Oscar Salvador, Vlastimil Babka,
Baolin Wang, Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel
On 8 May 2025, at 16:46, Johannes Weiner wrote:
> On Thu, May 08, 2025 at 03:17:05PM -0400, Zi Yan wrote:
>>
>>>>> @@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
>>>>> migratetype < MIGRATE_PCPTYPES))
>>>>> migratetype = MIGRATE_UNMOVABLE;
>>>>>
>>>>> - set_pfnblock_flags_mask(page, (unsigned long)migratetype,
>>>>> +#ifdef CONFIG_MEMORY_ISOLATION
>>>>> + if (migratetype == MIGRATE_ISOLATE)
>>>>> + set_pageblock_isolate(page);
>>>>
>>>> Are there paths actually doing this after the second patch?
>>>>
>>>> There are many instances that want to *read* the migratetype or
>>>> MIGRATE_ISOLATE, but only isolation code should be manipulating that
>>>> bit through the dedicated set/toggle_pageblock_isolate API.
>>>>
>>>> If there isn't one, it might be good to enforce this with a VM_WARN
>>>> instead.
>>>
>>> I checked all set_pageblock_migratetype() callers and do not see
>>> one using it for pageblock isolation. Let me replace the code
>>> with a VM_WARN and add a comment to tell users to use dedicated
>>> pageblock isolation APIs.
>>>
>>
>> Actually, move_freepages_block_isolate() calls __move_freepages_block()
>> to move free pages to MIGRATE_ISOLATE pageblock and
>> set_pageblock_migratetype() is used inside __move_freepages_block().
>> So the branch has to stay. Will use the suggestion below.
>
> Ah, good catch. But looking at the callers, it's:
>
> move_freepages_block()
> move_freepages_block_isolate()
> try_to_claim_block()
>
> The last one would benefit from having the set_pageblock_migratetype()
> there explicitly, as this is what this function is supposed to do. It
> also should never set the isolation bit.
>
> move_freepages_block_isolate() has two set_pageblock_migratetype()
> calls already. And after the series, it should only manipulate the
> isolate bit, not change the actual migratetype anymore, right?
>
> Maybe it makes the most sense to move it into the three callers?
>
> And then fortify set_pageblock_migratetype() after all.
Sounds good to me. Let me update my fixups.
Hi Andrew,
I will resend new fixups based on Johannes' comment.
--
Best Regards,
Yan, Zi
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-08 20:53 ` Zi Yan
@ 2025-05-09 1:33 ` Zi Yan
2025-05-09 12:48 ` Zi Yan
0 siblings, 1 reply; 23+ messages in thread
From: Zi Yan @ 2025-05-09 1:33 UTC (permalink / raw)
To: Johannes Weiner, David Hildenbrand
Cc: Andrew Morton, linux-mm, Oscar Salvador, Vlastimil Babka,
Baolin Wang, Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel
On 8 May 2025, at 16:53, Zi Yan wrote:
> On 8 May 2025, at 16:46, Johannes Weiner wrote:
>
>> On Thu, May 08, 2025 at 03:17:05PM -0400, Zi Yan wrote:
>>>
>>>>>> @@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
>>>>>> migratetype < MIGRATE_PCPTYPES))
>>>>>> migratetype = MIGRATE_UNMOVABLE;
>>>>>>
>>>>>> - set_pfnblock_flags_mask(page, (unsigned long)migratetype,
>>>>>> +#ifdef CONFIG_MEMORY_ISOLATION
>>>>>> + if (migratetype == MIGRATE_ISOLATE)
>>>>>> + set_pageblock_isolate(page);
>>>>>
>>>>> Are there paths actually doing this after the second patch?
>>>>>
>>>>> There are many instances that want to *read* the migratetype or
>>>>> MIGRATE_ISOLATE, but only isolation code should be manipulating that
>>>>> bit through the dedicated set/toggle_pageblock_isolate API.
>>>>>
>>>>> If there isn't one, it might be good to enforce this with a VM_WARN
>>>>> instead.
>>>>
>>>> I checked all set_pageblock_migratetype() callers and do not see
>>>> one using it for pageblock isolation. Let me replace the code
>>>> with a VM_WARN and add a comment to tell users to use dedicated
>>>> pageblock isolation APIs.
>>>>
>>>
>>> Actually, move_freepages_block_isolate() calls __move_freepages_block()
>>> to move free pages to MIGRATE_ISOLATE pageblock and
>>> set_pageblock_migratetype() is used inside __move_freepages_block().
>>> So the branch has to stay. Will use the suggestion below.
>>
>> Ah, good catch. But looking at the callers, it's:
>>
>> move_freepages_block()
>> move_freepages_block_isolate()
>> try_to_claim_block()
>>
>> The last one would benefit from having the set_pageblock_migratetype()
>> there explicitly, as this is what this function is supposed to do. It
>> also should never set the isolation bit.
>>
>> move_freepages_block_isolate() has two set_pageblock_migratetype()
>> calls already. And after the series, it should only manipulate the
>> isolate bit, not change the actual migratetype anymore, right?
>>
>> Maybe it makes the most sense to move it into the three callers?
>>
>> And then fortify set_pageblock_migratetype() after all.
>
> Sounds good to me. Let me update my fixups.
Hmm, hit another roadblock. In online_pages() from mm/memory_hotplug.c,
move_pfn_range_to_zone(MIGRATE_ISOLATE) calls memmap_init_range(),
which uses set_pageblock_migratetype(MIGRATE_ISOLATE).
I could use set_pageblock_isolate() in memmap_init_range(), but
that requires moving set_pageblock_isolate() out of mm/page_isolation.c.
The change might be too substantial for a fixup.
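Just as a sketch of that option (assuming set_pageblock_isolate() were
made visible outside mm/page_isolation.c), the spot in memmap_init_range()
that sets the pageblock type would become something like:

	if (migratetype == MIGRATE_ISOLATE)
		set_pageblock_isolate(page);
	else
		set_pageblock_migratetype(page, migratetype);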
I would also like to get David's opinion on this. So I am
holding this fixup and will send it out as a separate patch when
I get more information.
--
Best Regards,
Yan, Zi
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-09 1:33 ` Zi Yan
@ 2025-05-09 12:48 ` Zi Yan
0 siblings, 0 replies; 23+ messages in thread
From: Zi Yan @ 2025-05-09 12:48 UTC (permalink / raw)
To: David Hildenbrand
Cc: Johannes Weiner, Andrew Morton, linux-mm, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On 8 May 2025, at 21:33, Zi Yan wrote:
> On 8 May 2025, at 16:53, Zi Yan wrote:
>
>> On 8 May 2025, at 16:46, Johannes Weiner wrote:
>>
>>> On Thu, May 08, 2025 at 03:17:05PM -0400, Zi Yan wrote:
>>>>
>>>>>>> @@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
>>>>>>> migratetype < MIGRATE_PCPTYPES))
>>>>>>> migratetype = MIGRATE_UNMOVABLE;
>>>>>>>
>>>>>>> - set_pfnblock_flags_mask(page, (unsigned long)migratetype,
>>>>>>> +#ifdef CONFIG_MEMORY_ISOLATION
>>>>>>> + if (migratetype == MIGRATE_ISOLATE)
>>>>>>> + set_pageblock_isolate(page);
>>>>>>
>>>>>> Are there paths actually doing this after the second patch?
>>>>>>
>>>>>> There are many instances that want to *read* the migratetype or
>>>>>> MIGRATE_ISOLATE, but only isolation code should be manipulating that
>>>>>> bit through the dedicated set/toggle_pageblock_isolate API.
>>>>>>
>>>>>> If there isn't one, it might be good to enforce this with a VM_WARN
>>>>>> instead.
>>>>>
>>>>> I checked all set_pageblock_migratetype() callers and do not see
>>>>> one using it for pageblock isolation. Let me replace the code
>>>>> with a VM_WARN and add a comment to tell users to use dedicated
>>>>> pageblock isolation APIs.
>>>>>
>>>>
>>>> Actually, move_freepages_block_isolate() calls __move_freepages_block()
>>>> to move free pages to MIGRATE_ISOLATE pageblock and
>>>> set_pageblock_migratetype() is used inside __move_freepages_block().
>>>> So the branch has to stay. Will use the suggestion below.
>>>
>>> Ah, good catch. But looking at the callers, it's:
>>>
>>> move_freepages_block()
>>> move_freepages_block_isolate()
>>> try_to_claim_block()
>>>
>>> The last one would benefit from having the set_pageblock_migratetype()
>>> there explicitly, as this is what this function is supposed to do. It
>>> also should never set the isolation bit.
>>>
>>> move_freepages_block_isolate() has two set_pageblock_migratetype()
>>> calls already. And after the series, it should only manipulate the
>>> isolate bit, not change the actual migratetype anymore, right?
>>>
>>> Maybe it makes the most sense to move it into the three callers?
>>>
>>> And then fortify set_pageblock_migratetype() after all.
>>
>> Sounds good to me. Let me update my fixups.
>
> Hmm, hit another roadblock. In online_pages() from mm/memory_hotplug.c,
> move_pfn_range_to_zone(MIGRATE_ISOLATE) calls memmap_init_range(),
> which uses set_pageblock_migratetype(MIGRATE_ISOLATE).
>
> I could use set_pageblock_isolate() in memmap_init_range(), but
> that requires move set_pageblock_isolate() out of mm/page_isolation.c.
> The change might be too substantial for a fixup.
>
> I also would like to get some opinion from David on this. So I am
> holding this fixup and will send it out as a separate patch when
> I get more information.
Hi David,
I have a concern regarding the online_pages() code path that calls
move_pfn_range_to_zone(MIGRATE_ISOLATE) after my patchset.
During the process, all pageblocks are initialized to MIGRATE_ISOLATE,
then undo_isolate_page_range() is called to unisolate all of them.
That means these pageblocks do not end up with a proper migratetype, since
they are never set to any type other than MIGRATE_ISOLATE.
Maybe the code should be changed to
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE);
start_isolate_page_range(pfn, pfn + nr_pages, ISOLATE_MODE_NONE, 0);
...
undo_isolate_page_range(pfn, pfn + nr_pages);
so that these pageblocks have MIGRATE_MOVABLE type.
--
Best Regards,
Yan, Zi
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-07 21:10 ` [PATCH v3 1/4] mm/page_isolation: make page isolation " Zi Yan
2025-05-08 5:24 ` Johannes Weiner
@ 2025-05-08 20:22 ` Zi Yan
2025-05-08 21:13 ` Johannes Weiner
2025-05-09 13:57 ` Zi Yan
1 sibling, 2 replies; 23+ messages in thread
From: Zi Yan @ 2025-05-08 20:22 UTC (permalink / raw)
To: Johannes Weiner, Andrew Morton
Cc: linux-mm, David Hildenbrand, Oscar Salvador, Vlastimil Babka,
Baolin Wang, Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel,
Zi Yan
On 7 May 2025, at 17:10, Zi Yan wrote:
> During page isolation, the original migratetype is overwritten, since
> MIGRATE_* are enums and stored in pageblock bitmaps. Change
> MIGRATE_ISOLATE to be stored a standalone bit, PB_migrate_isolate, like
> PB_migrate_skip, so that migratetype is not lost during pageblock
> isolation. pageblock bits needs to be word aligned, so expand
> the number of pageblock bits from 4 to 8 and make PB_migrate_isolate bit 7.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> include/linux/mmzone.h | 17 ++++++++++----
> include/linux/page-isolation.h | 2 +-
> include/linux/pageblock-flags.h | 33 +++++++++++++++++++++++++-
> mm/page_alloc.c | 41 ++++++++++++++++++++++++++++++++-
> 4 files changed, 86 insertions(+), 7 deletions(-)
>
Here is the fixup 1/3 to address Johannes’ comments.
From 7eb1d9fa58fdab216862436181e5d2baf2958c54 Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@nvidia.com>
Date: Thu, 8 May 2025 12:05:59 -0400
Subject: [PATCH] fixup for mm/page_isolation: make page isolation a standalone
bit
1. keep the original is_migrate_isolate_page()
2. move {get,set,clear}_pageblock_isolate() to mm/page_isolation.c
3. use a single version for get_pageblock_migratetype() and
get_pfnblock_migratetype().
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/mmzone.h | 6 ------
include/linux/page-isolation.h | 2 +-
include/linux/pageblock-flags.h | 24 ------------------------
mm/page_alloc.c | 25 ++++++++++---------------
mm/page_isolation.c | 17 +++++++++++++++++
5 files changed, 28 insertions(+), 46 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9ec022a0b826..7ef01fe148ce 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -112,13 +112,7 @@ extern int page_group_by_mobility_disabled;
#define MIGRATETYPE_MASK (BIT(PB_migratetype_bits) - 1)
#endif
-#ifdef CONFIG_MEMORY_ISOLATION
unsigned long get_pageblock_migratetype(const struct page *page);
-#else
-#define get_pageblock_migratetype(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
-
-#endif
#define folio_migratetype(folio) \
get_pageblock_migratetype(&folio->page)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 51797dc39cbc..898bb788243b 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -5,7 +5,7 @@
#ifdef CONFIG_MEMORY_ISOLATION
static inline bool is_migrate_isolate_page(struct page *page)
{
- return get_pageblock_isolate(page);
+ return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 9fadae5892b2..00040e7df8c8 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -112,28 +112,4 @@ static inline void set_pageblock_skip(struct page *page)
}
#endif /* CONFIG_COMPACTION */
-#ifdef CONFIG_MEMORY_ISOLATION
-#define get_pageblock_isolate(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- PB_migrate_isolate_bit)
-#define clear_pageblock_isolate(page) \
- set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
- PB_migrate_isolate_bit)
-#define set_pageblock_isolate(page) \
- set_pfnblock_flags_mask(page, PB_migrate_isolate_bit, \
- page_to_pfn(page), \
- PB_migrate_isolate_bit)
-#else
-static inline bool get_pageblock_isolate(struct page *page)
-{
- return false;
-}
-static inline void clear_pageblock_isolate(struct page *page)
-{
-}
-static inline void set_pageblock_isolate(struct page *page)
-{
-}
-#endif /* CONFIG_MEMORY_ISOLATION */
-
#endif /* PAGEBLOCK_FLAGS_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index acf68ef041d8..04e301fb4879 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -381,16 +381,16 @@ unsigned long get_pfnblock_flags_mask(const struct page *page,
return (word >> bitidx) & mask;
}
-#ifdef CONFIG_MEMORY_ISOLATION
unsigned long get_pageblock_migratetype(const struct page *page)
{
unsigned long flags;
flags = get_pfnblock_flags_mask(page, page_to_pfn(page),
MIGRATETYPE_MASK);
+#ifdef CONFIG_MEMORY_ISOLATION
if (flags & PB_migrate_isolate_bit)
return MIGRATE_ISOLATE;
-
+#endif
return flags;
}
@@ -401,19 +401,12 @@ static __always_inline int get_pfnblock_migratetype(const struct page *page,
flags = get_pfnblock_flags_mask(page, pfn,
MIGRATETYPE_MASK);
+#ifdef CONFIG_MEMORY_ISOLATION
if (flags & PB_migrate_isolate_bit)
return MIGRATE_ISOLATE;
-
+#endif
return flags;
}
-#else
-static __always_inline int get_pfnblock_migratetype(const struct page *page,
- unsigned long pfn)
-{
- return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
-}
-
-#endif
/**
* set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
@@ -461,11 +454,13 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
migratetype = MIGRATE_UNMOVABLE;
#ifdef CONFIG_MEMORY_ISOLATION
- if (migratetype == MIGRATE_ISOLATE)
- set_pageblock_isolate(page);
- else
+ if (migratetype == MIGRATE_ISOLATE) {
+ set_pfnblock_flags_mask(page, PB_migrate_isolate_bit,
+ page_to_pfn(page), PB_migrate_isolate_bit);
+ return;
+ }
#endif
- set_pfnblock_flags_mask(page, (unsigned long)migratetype,
+ set_pfnblock_flags_mask(page, (unsigned long)migratetype,
page_to_pfn(page), MIGRATETYPE_MASK);
}
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index b2fc5266e3d2..b6a62132e20e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -15,6 +15,23 @@
#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
+static inline bool get_pageblock_isolate(struct page *page)
+{
+ return get_pfnblock_flags_mask(page, page_to_pfn(page),
+ PB_migrate_isolate_bit);
+}
+static inline void clear_pageblock_isolate(struct page *page)
+{
+ set_pfnblock_flags_mask(page, 0, page_to_pfn(page),
+ PB_migrate_isolate_bit);
+}
+static inline void set_pageblock_isolate(struct page *page)
+{
+ set_pfnblock_flags_mask(page, PB_migrate_isolate_bit,
+ page_to_pfn(page),
+ PB_migrate_isolate_bit);
+}
+
/*
* This function checks whether the range [start_pfn, end_pfn) includes
* unmovable pages or not. The range must fall into a single pageblock and
--
2.47.2
Best Regards,
Yan, Zi
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-08 20:22 ` Zi Yan
@ 2025-05-08 21:13 ` Johannes Weiner
2025-05-09 13:57 ` Zi Yan
1 sibling, 0 replies; 23+ messages in thread
From: Johannes Weiner @ 2025-05-08 21:13 UTC (permalink / raw)
To: Zi Yan
Cc: Andrew Morton, linux-mm, David Hildenbrand, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On Thu, May 08, 2025 at 04:22:43PM -0400, Zi Yan wrote:
> On 7 May 2025, at 17:10, Zi Yan wrote:
>
> > During page isolation, the original migratetype is overwritten, since
> > MIGRATE_* are enums and stored in pageblock bitmaps. Change
> > MIGRATE_ISOLATE to be stored a standalone bit, PB_migrate_isolate, like
> > PB_migrate_skip, so that migratetype is not lost during pageblock
> > isolation. pageblock bits needs to be word aligned, so expand
> > the number of pageblock bits from 4 to 8 and make PB_migrate_isolate bit 7.
> >
> > Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
> > ---
> > include/linux/mmzone.h | 17 ++++++++++----
> > include/linux/page-isolation.h | 2 +-
> > include/linux/pageblock-flags.h | 33 +++++++++++++++++++++++++-
> > mm/page_alloc.c | 41 ++++++++++++++++++++++++++++++++-
> > 4 files changed, 86 insertions(+), 7 deletions(-)
> >
>
> Here is the fixup 1/3 to address Johannes’ comments.
Thanks!
* Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a standalone bit.
2025-05-08 20:22 ` Zi Yan
2025-05-08 21:13 ` Johannes Weiner
@ 2025-05-09 13:57 ` Zi Yan
1 sibling, 0 replies; 23+ messages in thread
From: Zi Yan @ 2025-05-09 13:57 UTC (permalink / raw)
To: Andrew Morton
Cc: Johannes Weiner, linux-mm, David Hildenbrand, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel, Zi Yan
On 8 May 2025, at 16:22, Zi Yan wrote:
> On 7 May 2025, at 17:10, Zi Yan wrote:
>
>> During page isolation, the original migratetype is overwritten, since
>> MIGRATE_* are enums and stored in pageblock bitmaps. Change
>> MIGRATE_ISOLATE to be stored a standalone bit, PB_migrate_isolate, like
>> PB_migrate_skip, so that migratetype is not lost during pageblock
>> isolation. pageblock bits needs to be word aligned, so expand
>> the number of pageblock bits from 4 to 8 and make PB_migrate_isolate bit 7.
>>
>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>> ---
>> include/linux/mmzone.h | 17 ++++++++++----
>> include/linux/page-isolation.h | 2 +-
>> include/linux/pageblock-flags.h | 33 +++++++++++++++++++++++++-
>> mm/page_alloc.c | 41 ++++++++++++++++++++++++++++++++-
>> 4 files changed, 86 insertions(+), 7 deletions(-)
>>
>
> Here is the fixup 1/3 to address Johannes’ comments.
>
> From 7eb1d9fa58fdab216862436181e5d2baf2958c54 Mon Sep 17 00:00:00 2001
> From: Zi Yan <ziy@nvidia.com>
> Date: Thu, 8 May 2025 12:05:59 -0400
> Subject: [PATCH] fixup for mm/page_isolation: make page isolation a standalone
> bit
>
> 1. keep the original is_migrate_isolate_page()
> 2. move {get,set,clear}_pageblock_isolate() to mm/page_isolation.c
> 3. use a single version for get_pageblock_migratetype() and
> get_pfnblock_migratetype().
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> include/linux/mmzone.h | 6 ------
> include/linux/page-isolation.h | 2 +-
> include/linux/pageblock-flags.h | 24 ------------------------
> mm/page_alloc.c | 25 ++++++++++---------------
> mm/page_isolation.c | 17 +++++++++++++++++
> 5 files changed, 28 insertions(+), 46 deletions(-)
>
Hi Andrew,
This fixup gets rid of the unused function warning[1].
BTW, this series has accumulated several fixups. Let me know if you think a V4
is necessary. Thanks.
[1] https://lore.kernel.org/linux-mm/202505092126.yRGhLhg1-lkp@intel.com/
From 126d116cb737bf3d8c475460b9b48edc6696e6dc Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@nvidia.com>
Date: Fri, 9 May 2025 09:29:09 -0400
Subject: [PATCH] fix unused function warnings
1. add __maybe_unused to get_pageblock_isolate(), which is used in
VM_BUG_ON only.
2. remove unused set_pageblock_isolate().
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
mm/page_isolation.c | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index b0f83a7c508d..ce7e38c9f839 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -15,7 +15,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
-static inline bool get_pageblock_isolate(struct page *page)
+static inline bool __maybe_unused get_pageblock_isolate(struct page *page)
{
return get_pfnblock_flags_mask(page, page_to_pfn(page),
PB_migrate_isolate_bit);
@@ -25,12 +25,6 @@ static inline void clear_pageblock_isolate(struct page *page)
set_pfnblock_flags_mask(page, 0, page_to_pfn(page),
PB_migrate_isolate_bit);
}
-static inline void set_pageblock_isolate(struct page *page)
-{
- set_pfnblock_flags_mask(page, PB_migrate_isolate_bit,
- page_to_pfn(page),
- PB_migrate_isolate_bit);
-}
/*
* This function checks whether the range [start_pfn, end_pfn) includes
--
2.47.2
--
Best Regards,
Yan, Zi
* [PATCH v3 2/4] mm/page_isolation: remove migratetype from move_freepages_block_isolate()
2025-05-07 21:10 [PATCH v3 0/4] Make MIGRATE_ISOLATE a standalone bit Zi Yan
2025-05-07 21:10 ` [PATCH v3 1/4] mm/page_isolation: make page isolation " Zi Yan
@ 2025-05-07 21:10 ` Zi Yan
2025-05-08 20:23 ` Zi Yan
2025-05-07 21:10 ` [PATCH v3 3/4] mm/page_isolation: remove migratetype from undo_isolate_page_range() Zi Yan
` (2 subsequent siblings)
4 siblings, 1 reply; 23+ messages in thread
From: Zi Yan @ 2025-05-07 21:10 UTC (permalink / raw)
To: David Hildenbrand, Johannes Weiner, linux-mm
Cc: Andrew Morton, Oscar Salvador, Vlastimil Babka, Baolin Wang,
Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel,
Zi Yan
Since the migratetype is no longer overwritten during pageblock isolation,
moving pageblocks to and from MIGRATE_ISOLATE no longer needs a migratetype.
Add MIGRATETYPE_NO_ISO_MASK to allow reading the before-isolation migratetype
when a pageblock is isolated. It is used by move_freepages_block_isolate().
Add pageblock_isolate_and_move_free_pages() and
pageblock_unisolate_and_move_free_pages() to be explicit about the page
isolation operations. Both share the common code in
__move_freepages_block_isolate(), which is renamed from
move_freepages_block_isolate().
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/mmzone.h | 4 ++-
include/linux/page-isolation.h | 4 +--
mm/page_alloc.c | 49 +++++++++++++++++++++++++++-------
mm/page_isolation.c | 21 +++++++--------
4 files changed, 55 insertions(+), 23 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9ec022a0b826..b88b98fb8a54 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -107,8 +107,10 @@ static inline bool migratetype_is_mergeable(int mt)
extern int page_group_by_mobility_disabled;
#ifdef CONFIG_MEMORY_ISOLATION
-#define MIGRATETYPE_MASK ((BIT(PB_migratetype_bits) - 1) | PB_migrate_isolate_bit)
+#define MIGRATETYPE_NO_ISO_MASK (BIT(PB_migratetype_bits) - 1)
+#define MIGRATETYPE_MASK (MIGRATETYPE_NO_ISO_MASK | PB_migrate_isolate_bit)
#else
+#define MIGRATETYPE_NO_ISO_MASK MIGRATETYPE_MASK
#define MIGRATETYPE_MASK (BIT(PB_migratetype_bits) - 1)
#endif
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 51797dc39cbc..28c56f423e34 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -27,8 +27,8 @@ static inline bool is_migrate_isolate(int migratetype)
void set_pageblock_migratetype(struct page *page, int migratetype);
-bool move_freepages_block_isolate(struct zone *zone, struct page *page,
- int migratetype);
+bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
+bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int migratetype, int flags);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index acf68ef041d8..186d69c9f049 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1948,11 +1948,17 @@ static unsigned long find_large_buddy(unsigned long start_pfn)
return start_pfn;
}
+static inline void toggle_pageblock_isolate(struct page *page, bool isolate)
+{
+ set_pfnblock_flags_mask(page, (isolate << PB_migrate_isolate),
+ page_to_pfn(page), PB_migrate_isolate_bit);
+}
+
/**
- * move_freepages_block_isolate - move free pages in block for page isolation
+ * __move_freepages_block_isolate - move free pages in block for page isolation
* @zone: the zone
* @page: the pageblock page
- * @migratetype: migratetype to set on the pageblock
+ * @isolate: to isolate the given pageblock or unisolate it
*
* This is similar to move_freepages_block(), but handles the special
* case encountered in page isolation, where the block of interest
@@ -1967,10 +1973,15 @@ static unsigned long find_large_buddy(unsigned long start_pfn)
*
* Returns %true if pages could be moved, %false otherwise.
*/
-bool move_freepages_block_isolate(struct zone *zone, struct page *page,
- int migratetype)
+static bool __move_freepages_block_isolate(struct zone *zone,
+ struct page *page, bool isolate)
{
unsigned long start_pfn, pfn;
+ int from_mt;
+ int to_mt;
+
+ if (isolate == get_pageblock_isolate(page))
+ return false;
if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return false;
@@ -1987,7 +1998,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(buddy, zone, order,
get_pfnblock_migratetype(buddy, pfn));
- set_pageblock_migratetype(page, migratetype);
+ toggle_pageblock_isolate(page, isolate);
split_large_buddy(zone, buddy, pfn, order, FPI_NONE);
return true;
}
@@ -1998,16 +2009,36 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(page, zone, order,
get_pfnblock_migratetype(page, pfn));
- set_pageblock_migratetype(page, migratetype);
+ toggle_pageblock_isolate(page, isolate);
split_large_buddy(zone, page, pfn, order, FPI_NONE);
return true;
}
move:
- __move_freepages_block(zone, start_pfn,
- get_pfnblock_migratetype(page, start_pfn),
- migratetype);
+ /* use MIGRATETYPE_NO_ISO_MASK to get the non-isolate migratetype */
+ if (isolate) {
+ from_mt = get_pfnblock_flags_mask(page, page_to_pfn(page),
+ MIGRATETYPE_NO_ISO_MASK);
+ to_mt = MIGRATE_ISOLATE;
+ } else {
+ from_mt = MIGRATE_ISOLATE;
+ to_mt = get_pfnblock_flags_mask(page, page_to_pfn(page),
+ MIGRATETYPE_NO_ISO_MASK);
+ }
+
+ __move_freepages_block(zone, start_pfn, from_mt, to_mt);
return true;
}
+
+bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
+{
+ return __move_freepages_block_isolate(zone, page, true);
+}
+
+bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page)
+{
+ return __move_freepages_block_isolate(zone, page, false);
+}
+
#endif /* CONFIG_MEMORY_ISOLATION */
static void change_pageblock_range(struct page *pageblock_page,
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index b2fc5266e3d2..08f627a5032f 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -188,7 +188,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
migratetype, isol_flags);
if (!unmovable) {
- if (!move_freepages_block_isolate(zone, page, MIGRATE_ISOLATE)) {
+ if (!pageblock_isolate_and_move_free_pages(zone, page)) {
spin_unlock_irqrestore(&zone->lock, flags);
return -EBUSY;
}
@@ -209,7 +209,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
return -EBUSY;
}
-static void unset_migratetype_isolate(struct page *page, int migratetype)
+static void unset_migratetype_isolate(struct page *page)
{
struct zone *zone;
unsigned long flags;
@@ -262,10 +262,10 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
* Isolating this block already succeeded, so this
* should not fail on zone boundaries.
*/
- WARN_ON_ONCE(!move_freepages_block_isolate(zone, page, migratetype));
+ WARN_ON_ONCE(!pageblock_unisolate_and_move_free_pages(zone, page));
} else {
- set_pageblock_migratetype(page, migratetype);
- __putback_isolated_page(page, order, migratetype);
+ clear_pageblock_isolate(page);
+ __putback_isolated_page(page, order, get_pageblock_migratetype(page));
}
zone->nr_isolate_pageblock--;
out:
@@ -383,7 +383,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
if (PageBuddy(page)) {
int order = buddy_order(page);
- /* move_freepages_block_isolate() handled this */
+ /* pageblock_isolate_and_move_free_pages() handled this */
VM_WARN_ON_ONCE(pfn + (1 << order) > boundary_pfn);
pfn += 1UL << order;
@@ -433,7 +433,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
failed:
/* restore the original migratetype */
if (!skip_isolation)
- unset_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype);
+ unset_migratetype_isolate(pfn_to_page(isolate_pageblock));
return -EBUSY;
}
@@ -504,7 +504,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
ret = isolate_single_pageblock(isolate_end, flags, true,
skip_isolation, migratetype);
if (ret) {
- unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
+ unset_migratetype_isolate(pfn_to_page(isolate_start));
return ret;
}
@@ -517,8 +517,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
start_pfn, end_pfn)) {
undo_isolate_page_range(isolate_start, pfn, migratetype);
unset_migratetype_isolate(
- pfn_to_page(isolate_end - pageblock_nr_pages),
- migratetype);
+ pfn_to_page(isolate_end - pageblock_nr_pages));
return -EBUSY;
}
}
@@ -548,7 +547,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
page = __first_valid_page(pfn, pageblock_nr_pages);
if (!page || !is_migrate_isolate_page(page))
continue;
- unset_migratetype_isolate(page, migratetype);
+ unset_migratetype_isolate(page);
}
}
/*
--
2.47.2
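To see the effect of the standalone bit without the surrounding page allocator machinery, here is a minimal user-space C model of the idea (bit positions and names are illustrative only, not the kernel implementation): toggling isolation never clobbers the stored migratetype, so the restore path needs no extra bookkeeping.

#include <assert.h>
#include <stdio.h>

#define MT_BITS		3			/* 3 bits for the migratetype */
#define MT_MASK		((1u << MT_BITS) - 1)
#define ISOLATE_BIT	(1u << 7)		/* standalone isolate bit */

/* set or clear the isolate bit; the migratetype bits are never touched */
static unsigned int set_isolate(unsigned int pb, int isolate)
{
	return isolate ? (pb | ISOLATE_BIT) : (pb & ~ISOLATE_BIT);
}

int main(void)
{
	unsigned int pb = 2;			/* some non-isolate migratetype */

	pb = set_isolate(pb, 1);		/* isolate the pageblock */
	assert((pb & MT_MASK) == 2);		/* migratetype is still there */
	pb = set_isolate(pb, 0);		/* unisolate */
	assert((pb & MT_MASK) == 2);		/* and restored without extra state */
	printf("migratetype preserved across isolation: %u\n", pb & MT_MASK);
	return 0;
}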
* Re: [PATCH v3 2/4] mm/page_isolation: remove migratetype from move_freepages_block_isolate()
2025-05-07 21:10 ` [PATCH v3 2/4] mm/page_isolation: remove migratetype from move_freepages_block_isolate() Zi Yan
@ 2025-05-08 20:23 ` Zi Yan
0 siblings, 0 replies; 23+ messages in thread
From: Zi Yan @ 2025-05-08 20:23 UTC (permalink / raw)
To: Johannes Weiner, Andrew Morton
Cc: linux-mm, David Hildenbrand, Oscar Salvador, Vlastimil Babka,
Baolin Wang, Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel,
Zi Yan
On 7 May 2025, at 17:10, Zi Yan wrote:
> Since migratetype is no longer overwritten during pageblock isolation,
> moving pageblocks to and from MIGRATE_ISOLATE no longer needs migratetype.
>
> Add MIGRATETYPE_NO_ISO_MASK to allow read before-isolation migratetype
> when a pageblock is isolated. It is used by move_freepages_block_isolate().
>
> Add pageblock_isolate_and_move_free_pages() and
> pageblock_unisolate_and_move_free_pages() to be explicit about the page
> isolation operations. Both share the common code in
> __move_freepages_block_isolate(), which is renamed from
> move_freepages_block_isolate().
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> include/linux/mmzone.h | 4 ++-
> include/linux/page-isolation.h | 4 +--
> mm/page_alloc.c | 49 +++++++++++++++++++++++++++-------
> mm/page_isolation.c | 21 +++++++--------
> 4 files changed, 55 insertions(+), 23 deletions(-)
>
Here is the fixup 2/3 to address Johannes’ comments.
From 4e6fddf64c07da8796ff6ca90703a5aa64302a90 Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@nvidia.com>
Date: Thu, 8 May 2025 15:23:42 -0400
Subject: [PATCH] fixup for mm/page_isolation: remove migratetype from
move_freepages_block_isolate()
1. replace get_pageblock_isolate() with
get_pageblock_migratetype() == MIGRATE_ISOLATE, as
get_pageblock_isolate() becomes private in mm/page_isolation.c.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
mm/page_alloc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b502e4a3afbe..c99b77fdf383 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1975,7 +1975,7 @@ static bool __move_freepages_block_isolate(struct zone *zone,
int from_mt;
int to_mt;
- if (isolate == get_pageblock_isolate(page))
+ if (isolate == (get_pageblock_migratetype(page) == MIGRATE_ISOLATE))
return false;
if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
--
2.47.2
Best Regards,
Yan, Zi
* [PATCH v3 3/4] mm/page_isolation: remove migratetype from undo_isolate_page_range()
2025-05-07 21:10 [PATCH v3 0/4] Make MIGRATE_ISOLATE a standalone bit Zi Yan
2025-05-07 21:10 ` [PATCH v3 1/4] mm/page_isolation: make page isolation " Zi Yan
2025-05-07 21:10 ` [PATCH v3 2/4] mm/page_isolation: remove migratetype from move_freepages_block_isolate() Zi Yan
@ 2025-05-07 21:10 ` Zi Yan
2025-05-08 21:12 ` Johannes Weiner
2025-05-07 21:10 ` [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions Zi Yan
2025-05-08 4:38 ` [PATCH v3 0/4] Make MIGRATE_ISOLATE a standalone bit Johannes Weiner
4 siblings, 1 reply; 23+ messages in thread
From: Zi Yan @ 2025-05-07 21:10 UTC (permalink / raw)
To: David Hildenbrand, Johannes Weiner, linux-mm
Cc: Andrew Morton, Oscar Salvador, Vlastimil Babka, Baolin Wang,
Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel,
Zi Yan
Since migratetype is no longer overwritten during pageblock isolation,
undoing pageblock isolation no longer needs to know which migratetype to restore.
Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
---
include/linux/page-isolation.h | 3 +--
mm/memory_hotplug.c | 4 ++--
mm/page_alloc.c | 2 +-
mm/page_isolation.c | 9 +++------
4 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 28c56f423e34..b8b44d3aacd4 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,8 +33,7 @@ bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *pag
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int migratetype, int flags);
-void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype);
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
int isol_flags);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b1caedbade5b..673d41a06da8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1229,7 +1229,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
build_all_zonelists(NULL);
/* Basic onlining is complete, allow allocation of onlined pages. */
- undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
+ undo_isolate_page_range(pfn, pfn + nr_pages);
/*
* Freshly onlined pages aren't shuffled (e.g., all pages are placed to
@@ -2115,7 +2115,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
failed_removal_isolated:
/* pushback to free area */
- undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+ undo_isolate_page_range(start_pfn, end_pfn);
memory_notify(MEM_CANCEL_OFFLINE, &arg);
failed_removal_pcplists_disabled:
lru_cache_enable();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 186d69c9f049..b905ed13c908 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6888,7 +6888,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
start, end, outer_start, outer_end);
}
done:
- undo_isolate_page_range(start, end, migratetype);
+ undo_isolate_page_range(start, end);
return ret;
}
EXPORT_SYMBOL(alloc_contig_range_noprof);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 08f627a5032f..1edfef408faf 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -515,7 +515,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
page = __first_valid_page(pfn, pageblock_nr_pages);
if (page && set_migratetype_isolate(page, migratetype, flags,
start_pfn, end_pfn)) {
- undo_isolate_page_range(isolate_start, pfn, migratetype);
+ undo_isolate_page_range(isolate_start, pfn);
unset_migratetype_isolate(
pfn_to_page(isolate_end - pageblock_nr_pages));
return -EBUSY;
@@ -528,13 +528,10 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* undo_isolate_page_range - undo effects of start_isolate_page_range()
* @start_pfn: The first PFN of the isolated range
* @end_pfn: The last PFN of the isolated range
- * @migratetype: New migrate type to set on the range
*
- * This finds every MIGRATE_ISOLATE page block in the given range
- * and switches it to @migratetype.
+ * This finds and unsets every MIGRATE_ISOLATE page block in the given range
*/
-void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype)
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
struct page *page;
--
2.47.2
* Re: [PATCH v3 3/4] mm/page_isolation: remove migratetype from undo_isolate_page_range()
2025-05-07 21:10 ` [PATCH v3 3/4] mm/page_isolation: remove migratetype from undo_isolate_page_range() Zi Yan
@ 2025-05-08 21:12 ` Johannes Weiner
0 siblings, 0 replies; 23+ messages in thread
From: Johannes Weiner @ 2025-05-08 21:12 UTC (permalink / raw)
To: Zi Yan
Cc: David Hildenbrand, linux-mm, Andrew Morton, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On Wed, May 07, 2025 at 05:10:58PM -0400, Zi Yan wrote:
> Since migratetype is no longer overwritten during pageblock isolation,
> undoing pageblock isolation no longer needs to know which migratetype to restore.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
The whole series is a great improvement.
* [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions.
2025-05-07 21:10 [PATCH v3 0/4] Make MIGRATE_ISOLATE a standalone bit Zi Yan
` (2 preceding siblings ...)
2025-05-07 21:10 ` [PATCH v3 3/4] mm/page_isolation: remove migratetype from undo_isolate_page_range() Zi Yan
@ 2025-05-07 21:10 ` Zi Yan
2025-05-08 20:25 ` Zi Yan
2025-05-08 21:11 ` Johannes Weiner
2025-05-08 4:38 ` [PATCH v3 0/4] Make MIGRATE_ISOLATE a standalone bit Johannes Weiner
4 siblings, 2 replies; 23+ messages in thread
From: Zi Yan @ 2025-05-07 21:10 UTC (permalink / raw)
To: David Hildenbrand, Johannes Weiner, linux-mm
Cc: Andrew Morton, Oscar Salvador, Vlastimil Babka, Baolin Wang,
Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel,
Zi Yan
Since migratetype is no longer overwritten during pageblock isolation,
start_isolate_page_range(), has_unmovable_pages(), and
set_migratetype_isolate() no longer need to know which migratetype to
restore on isolation failure.
has_unmovable_pages() still needs to know whether the isolation is for a
CMA allocation, so add CMA_ALLOCATION to the isolation flags to provide
that information.
alloc_contig_range() no longer needs migratetype. Replace it with a newly
defined acr_flags_t that tells whether an allocation is for CMA, and do
the same for __alloc_contig_migrate_range().
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
drivers/virtio/virtio_mem.c | 3 +--
include/linux/gfp.h | 6 +++++-
include/linux/page-isolation.h | 15 +++++++++++---
include/trace/events/kmem.h | 14 +++++++------
mm/cma.c | 2 +-
mm/memory_hotplug.c | 1 -
mm/page_alloc.c | 22 ++++++++++-----------
mm/page_isolation.c | 36 ++++++++++++----------------------
8 files changed, 50 insertions(+), 49 deletions(-)
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 56d0dbe62163..8accc0f255a8 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1243,8 +1243,7 @@ static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn,
if (atomic_read(&vm->config_changed))
return -EAGAIN;
- rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
- GFP_KERNEL);
+ rc = alloc_contig_range(pfn, pfn + nr_pages, 0, GFP_KERNEL);
if (rc == -ENOMEM)
/* whoops, out of memory */
return rc;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c9fa6309c903..db4be1861736 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -423,9 +423,13 @@ static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
+
+typedef unsigned int __bitwise acr_flags_t;
+#define ACR_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
+
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask);
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index b8b44d3aacd4..709a807202e9 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -22,8 +22,17 @@ static inline bool is_migrate_isolate(int migratetype)
}
#endif
-#define MEMORY_OFFLINE 0x1
-#define REPORT_FAILURE 0x2
+/*
+ * Isolation flags:
+ * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
+ * PageHWPoison() pages and PageOffline() pages.
+ * REPORT_FAILURE - report details about the failure to isolate the range
+ * CMA_ALLOCATION - isolate for CMA allocations
+ */
+typedef unsigned int __bitwise isol_flags_t;
+#define MEMORY_OFFLINE ((__force isol_flags_t)BIT(0))
+#define REPORT_FAILURE ((__force isol_flags_t)BIT(1))
+#define CMA_ALLOCATION ((__force isol_flags_t)BIT(2))
void set_pageblock_migratetype(struct page *page, int migratetype);
@@ -31,7 +40,7 @@ bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags);
+ isol_flags_t flags);
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f74925a6cf69..efffcf578217 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -304,6 +304,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
+#ifdef CONFIG_CONTIG_ALLOC
TRACE_EVENT(mm_alloc_contig_migrate_range_info,
TP_PROTO(unsigned long start,
@@ -311,9 +312,9 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info,
unsigned long nr_migrated,
unsigned long nr_reclaimed,
unsigned long nr_mapped,
- int migratetype),
+ acr_flags_t alloc_flags),
- TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype),
+ TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, alloc_flags),
TP_STRUCT__entry(
__field(unsigned long, start)
@@ -321,7 +322,7 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info,
__field(unsigned long, nr_migrated)
__field(unsigned long, nr_reclaimed)
__field(unsigned long, nr_mapped)
- __field(int, migratetype)
+ __field(acr_flags_t, alloc_flags)
),
TP_fast_assign(
@@ -330,17 +331,18 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info,
__entry->nr_migrated = nr_migrated;
__entry->nr_reclaimed = nr_reclaimed;
__entry->nr_mapped = nr_mapped;
- __entry->migratetype = migratetype;
+ __entry->alloc_flags = alloc_flags;
),
- TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
+ TP_printk("start=0x%lx end=0x%lx alloc_flags=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
__entry->start,
__entry->end,
- __entry->migratetype,
+ __entry->alloc_flags,
__entry->nr_migrated,
__entry->nr_reclaimed,
__entry->nr_mapped)
);
+#endif
TRACE_EVENT(mm_setup_per_zone_wmarks,
diff --git a/mm/cma.c b/mm/cma.c
index 15632939f20a..8606bfe19e5d 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -818,7 +818,7 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
mutex_lock(&cma->alloc_mutex);
- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
+ ret = alloc_contig_range(pfn, pfn + count, ACR_CMA, gfp);
mutex_unlock(&cma->alloc_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 673d41a06da8..155f0b4ff299 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -2005,7 +2005,6 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
- MIGRATE_MOVABLE,
MEMORY_OFFLINE | REPORT_FAILURE);
if (ret) {
reason = "failure to isolate range";
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b905ed13c908..f2c148a3675a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6581,11 +6581,12 @@ static void alloc_contig_dump_pages(struct list_head *page_list)
/*
* [start, end) must belong to a single zone.
- * @migratetype: using migratetype to filter the type of migration in
+ * @alloc_flags: using acr_flags_t to filter the type of migration in
* trace_mm_alloc_contig_migrate_range_info.
*/
static int __alloc_contig_migrate_range(struct compact_control *cc,
- unsigned long start, unsigned long end, int migratetype)
+ unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags)
{
/* This function is based on compact_zone() from compaction.c. */
unsigned int nr_reclaimed;
@@ -6657,7 +6658,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
putback_movable_pages(&cc->migratepages);
}
- trace_mm_alloc_contig_migrate_range_info(start, end, migratetype,
+ trace_mm_alloc_contig_migrate_range_info(start, end, alloc_flags,
total_migrated,
total_reclaimed,
total_mapped);
@@ -6728,10 +6729,7 @@ static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
* @end: one-past-the-last PFN to allocate
- * @migratetype: migratetype of the underlying pageblocks (either
- * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
- * in range must have the same migratetype and it must
- * be either of the two.
+ * @alloc_flags: allocation information
* @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
* action and reclaim modifiers are supported. Reclaim modifiers
* control allocation behavior during compaction/migration/reclaim.
@@ -6748,7 +6746,7 @@ static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
* need to be freed with free_contig_range().
*/
int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask)
+ acr_flags_t alloc_flags, gfp_t gfp_mask)
{
unsigned long outer_start, outer_end;
int ret = 0;
@@ -6790,7 +6788,8 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
* put back to page allocator so that buddy can use them.
*/
- ret = start_isolate_page_range(start, end, migratetype, 0);
+ ret = start_isolate_page_range(start, end,
+ (alloc_flags & ACR_CMA) ? CMA_ALLOCATION : 0);
if (ret)
goto done;
@@ -6806,7 +6805,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
* allocated. So, if we fall through be sure to clear ret so that
* -EBUSY is not accidentally used or returned to caller.
*/
- ret = __alloc_contig_migrate_range(&cc, start, end, migratetype);
+ ret = __alloc_contig_migrate_range(&cc, start, end, alloc_flags);
if (ret && ret != -EBUSY)
goto done;
@@ -6898,8 +6897,7 @@ static int __alloc_contig_pages(unsigned long start_pfn,
{
unsigned long end_pfn = start_pfn + nr_pages;
- return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE,
- gfp_mask);
+ return alloc_contig_range_noprof(start_pfn, end_pfn, 0, gfp_mask);
}
static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 1edfef408faf..d1ec98fab6a4 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -31,7 +31,7 @@
*
*/
static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags)
+ isol_flags_t flags)
{
struct page *page = pfn_to_page(start_pfn);
struct zone *zone = page_zone(page);
@@ -46,7 +46,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
* isolate CMA pageblocks even when they are not movable in fact
* so consider them movable here.
*/
- if (is_migrate_cma(migratetype))
+ if (flags & CMA_ALLOCATION)
return NULL;
return page;
@@ -151,7 +151,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
* present in [start_pfn, end_pfn). The pageblock must intersect with
* [start_pfn, end_pfn).
*/
-static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags,
+static int set_migratetype_isolate(struct page *page, isol_flags_t isol_flags,
unsigned long start_pfn, unsigned long end_pfn)
{
struct zone *zone = page_zone(page);
@@ -186,7 +186,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
end_pfn);
unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
- migratetype, isol_flags);
+ isol_flags);
if (!unmovable) {
if (!pageblock_isolate_and_move_free_pages(zone, page)) {
spin_unlock_irqrestore(&zone->lock, flags);
@@ -296,7 +296,6 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* @isolate_before: isolate the pageblock before the boundary_pfn
* @skip_isolation: the flag to skip the pageblock isolation in second
* isolate_single_pageblock()
- * @migratetype: migrate type to set in error recovery.
*
* Free and in-use pages can be as big as MAX_PAGE_ORDER and contain more than one
* pageblock. When not all pageblocks within a page are isolated at the same
@@ -311,8 +310,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* either. The function handles this by splitting the free page or migrating
* the in-use page then splitting the free page.
*/
-static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
- bool isolate_before, bool skip_isolation, int migratetype)
+static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t flags,
+ bool isolate_before, bool skip_isolation)
{
unsigned long start_pfn;
unsigned long isolate_pageblock;
@@ -338,11 +337,9 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
zone->zone_start_pfn);
if (skip_isolation) {
- int mt __maybe_unused = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
-
- VM_BUG_ON(!is_migrate_isolate(mt));
+ VM_BUG_ON(!get_pageblock_isolate(pfn_to_page(isolate_pageblock)));
} else {
- ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype,
+ ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock),
flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
if (ret)
@@ -441,14 +438,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
* start_isolate_page_range() - mark page range MIGRATE_ISOLATE
* @start_pfn: The first PFN of the range to be isolated.
* @end_pfn: The last PFN of the range to be isolated.
- * @migratetype: Migrate type to set in error recovery.
- * @flags: The following flags are allowed (they can be combined in
- * a bit mask)
- * MEMORY_OFFLINE - isolate to offline (!allocate) memory
- * e.g., skip over PageHWPoison() pages
- * and PageOffline() pages.
- * REPORT_FAILURE - report details about the failure to
- * isolate the range
+ * @flags: isolation flags
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
@@ -481,7 +471,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
* Return: 0 on success and -EBUSY if any part of range cannot be isolated.
*/
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags)
+ isol_flags_t flags)
{
unsigned long pfn;
struct page *page;
@@ -493,7 +483,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
ret = isolate_single_pageblock(isolate_start, flags, false,
- skip_isolation, migratetype);
+ skip_isolation);
if (ret)
return ret;
@@ -502,7 +492,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
ret = isolate_single_pageblock(isolate_end, flags, true,
- skip_isolation, migratetype);
+ skip_isolation);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start));
return ret;
@@ -513,7 +503,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
pfn < isolate_end - pageblock_nr_pages;
pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
- if (page && set_migratetype_isolate(page, migratetype, flags,
+ if (page && set_migratetype_isolate(page, flags,
start_pfn, end_pfn)) {
undo_isolate_page_range(isolate_start, pfn);
unset_migratetype_isolate(
--
2.47.2
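For readers less familiar with the typed-flag idiom used above, the pattern boils down to the sketch below. In the kernel, __bitwise and __force are sparse annotations that catch accidental mixing of acr_flags_t with plain integers at static-analysis time; here they are reduced to no-ops so the snippet builds stand-alone, and request_contig() is a hypothetical stand-in rather than a kernel function.

#define __bitwise				/* sparse annotation; no-op here */
#define __force					/* sparse annotation; no-op here */
#define BIT(n)		(1u << (n))

typedef unsigned int __bitwise acr_flags_t;
#define ACR_CMA		((__force acr_flags_t)BIT(0))	/* allocate for CMA */

/* hypothetical caller: ACR_CMA for CMA ranges, 0 for everything else */
static int request_contig(unsigned long start, unsigned long end,
			  acr_flags_t alloc_flags)
{
	(void)start;
	(void)end;
	/* the flag only selects behaviour; it no longer encodes a migratetype */
	return (alloc_flags & ACR_CMA) ? 1 : 0;
}

int main(void)
{
	return request_contig(0, 16, ACR_CMA) ? 0 : 1;
}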
* Re: [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions.
2025-05-07 21:10 ` [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions Zi Yan
@ 2025-05-08 20:25 ` Zi Yan
2025-05-09 1:56 ` Zi Yan
2025-05-08 21:11 ` Johannes Weiner
1 sibling, 1 reply; 23+ messages in thread
From: Zi Yan @ 2025-05-08 20:25 UTC (permalink / raw)
To: Johannes Weiner, Andrew Morton
Cc: David Hildenbrand, linux-mm, Oscar Salvador, Vlastimil Babka,
Baolin Wang, Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel,
Zi Yan
On 7 May 2025, at 17:10, Zi Yan wrote:
> Since migratetype is no longer overwritten during pageblock isolation,
> start_isolate_page_range(), has_unmovable_pages(), and
> set_migratetype_isolate() no longer need to know which migratetype to
> restore on isolation failure.
>
> has_unmovable_pages() still needs to know whether the isolation is for a
> CMA allocation, so add CMA_ALLOCATION to the isolation flags to provide
> that information.
>
> alloc_contig_range() no longer needs migratetype. Replace it with a newly
> defined acr_flags_t that tells whether an allocation is for CMA, and do
> the same for __alloc_contig_migrate_range().
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> drivers/virtio/virtio_mem.c | 3 +--
> include/linux/gfp.h | 6 +++++-
> include/linux/page-isolation.h | 15 +++++++++++---
> include/trace/events/kmem.h | 14 +++++++------
> mm/cma.c | 2 +-
> mm/memory_hotplug.c | 1 -
> mm/page_alloc.c | 22 ++++++++++-----------
> mm/page_isolation.c | 36 ++++++++++++----------------------
> 8 files changed, 50 insertions(+), 49 deletions(-)
Here is the fixup 3/3 to address the type issue reported by kernel test robot.
From 3c439f1f09b03c8362b43c0ac05e5f174f1a6655 Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@nvidia.com>
Date: Thu, 8 May 2025 15:42:18 -0400
Subject: [PATCH] fixup for mm/page_isolation: remove migratetype parameter
from more functions.
1. fixed test_pages_isolated() and __test_page_isolated_in_pageblock()
signature by using the new isol_flags_t type.
2. fixed test_pages_isolated() doc: flags -> isol_flags
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/page-isolation.h | 2 +-
mm/page_isolation.c | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index c176c938da87..20c3f98b5afb 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -45,5 +45,5 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- int isol_flags);
+ isol_flags_t isol_flags);
#endif
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index a9d0e75db95d..5f00d7113766 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -563,7 +563,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
*/
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
- int flags)
+ isol_flags_t flags)
{
struct page *page;
@@ -602,7 +602,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
*
* This tests if all in the specified range are free.
*
- * If %MEMORY_OFFLINE is specified in @flags, it will consider
+ * If %MEMORY_OFFLINE is specified in @isol_flags, it will consider
* poisoned and offlined pages free as well.
*
* Caller must ensure the requested range doesn't span zones.
@@ -610,7 +610,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
* Returns 0 if true, -EBUSY if one or more pages are in use.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- int isol_flags)
+ isol_flags_t isol_flags)
{
unsigned long pfn, flags;
struct page *page;
--
2.47.2
Best Regards,
Yan, Zi
* Re: [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions.
2025-05-08 20:25 ` Zi Yan
@ 2025-05-09 1:56 ` Zi Yan
2025-05-09 16:01 ` Zi Yan
0 siblings, 1 reply; 23+ messages in thread
From: Zi Yan @ 2025-05-09 1:56 UTC (permalink / raw)
To: Johannes Weiner, Andrew Morton
Cc: David Hildenbrand, linux-mm, Oscar Salvador, Vlastimil Babka,
Baolin Wang, Kirill A . Shutemov, Mel Gorman, Suren Baghdasaryan,
Michal Hocko, Brendan Jackman, Richard Chang, linux-kernel,
Zi Yan
On 8 May 2025, at 16:25, Zi Yan wrote:
> On 7 May 2025, at 17:10, Zi Yan wrote:
>
>> Since migratetype is no longer overwritten during pageblock isolation,
>> start_isolate_page_range(), has_unmovable_pages(), and
>> set_migratetype_isolate() no longer need to know which migratetype to
>> restore on isolation failure.
>>
>> has_unmovable_pages() still needs to know whether the isolation is for a
>> CMA allocation, so add CMA_ALLOCATION to the isolation flags to provide
>> that information.
>>
>> alloc_contig_range() no longer needs migratetype. Replace it with a newly
>> defined acr_flags_t that tells whether an allocation is for CMA, and do
>> the same for __alloc_contig_migrate_range().
>>
>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>> ---
>> drivers/virtio/virtio_mem.c | 3 +--
>> include/linux/gfp.h | 6 +++++-
>> include/linux/page-isolation.h | 15 +++++++++++---
>> include/trace/events/kmem.h | 14 +++++++------
>> mm/cma.c | 2 +-
>> mm/memory_hotplug.c | 1 -
>> mm/page_alloc.c | 22 ++++++++++-----------
>> mm/page_isolation.c | 36 ++++++++++++----------------------
>> 8 files changed, 50 insertions(+), 49 deletions(-)
>
> Here is the fixup 3/3 to address the type issue reported by kernel test robot.
>
> From 3c439f1f09b03c8362b43c0ac05e5f174f1a6655 Mon Sep 17 00:00:00 2001
> From: Zi Yan <ziy@nvidia.com>
> Date: Thu, 8 May 2025 15:42:18 -0400
> Subject: [PATCH] fixup for mm/page_isolation: remove migratetype parameter
> from more functions.
>
> 1. fixed test_pages_isolated() and __test_page_isolated_in_pageblock()
> signature by using the new isol_flags_t type.
> 2. fixed test_pages_isolated() doc: flags -> isol_flags
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> include/linux/page-isolation.h | 2 +-
> mm/page_isolation.c | 6 +++---
> 2 files changed, 4 insertions(+), 4 deletions(-)
This is the second round of fixup 1/1 to address Johannes' comment on Patch 4.
From 760c00e808c74d62e8d879f281f38d6608c89296 Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@nvidia.com>
Date: Thu, 8 May 2025 20:54:40 -0400
Subject: [PATCH] fixup for fixup for mm/page_isolation: remove migratetype
parameter from more functions.
1. change MEMORY_OFFLINE and CMA_ALLOCATION to isolate_mode_t enums.
2. rename isol_flags_t to isolate_flags_t.
3. REPORT_FAILURE becomes the only isolate_flags_t flag.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/page-isolation.h | 26 +++++++++++++++++---------
mm/memory_hotplug.c | 2 +-
mm/page_alloc.c | 3 ++-
mm/page_isolation.c | 31 ++++++++++++++++++-------------
4 files changed, 38 insertions(+), 24 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 20c3f98b5afb..29b4ddcaea7a 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -22,17 +22,25 @@ static inline bool is_migrate_isolate(int migratetype)
}
#endif
+/*
+ * Isolation modes:
+ * ISOLATE_MODE_NONE - isolate for other purposes than those below
+ * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
+ * PageHWPoison() pages and PageOffline() pages.
+ * CMA_ALLOCATION - isolate for CMA allocations
+ */
+enum isolate_mode_t {
+ ISOLATE_MODE_NONE,
+ MEMORY_OFFLINE,
+ CMA_ALLOCATION,
+};
+
/*
* Isolation flags:
- * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
- * PageHWPoison() pages and PageOffline() pages.
* REPORT_FAILURE - report details about the failure to isolate the range
- * CMA_ALLOCATION - isolate for CMA allocations
*/
-typedef unsigned int __bitwise isol_flags_t;
-#define MEMORY_OFFLINE ((__force isol_flags_t)BIT(0))
-#define REPORT_FAILURE ((__force isol_flags_t)BIT(1))
-#define CMA_ALLOCATION ((__force isol_flags_t)BIT(2))
+typedef unsigned int __bitwise isolate_flags_t;
+#define REPORT_FAILURE ((__force isolate_flags_t)BIT(0))
void set_pageblock_migratetype(struct page *page, int migratetype);
@@ -40,10 +48,10 @@ bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t flags);
+ isolate_mode_t mode, isolate_flags_t flags);
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t isol_flags);
+ isolate_flags_t isol_flags);
#endif
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 155f0b4ff299..3dab006a537e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -2005,7 +2005,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
- MEMORY_OFFLINE | REPORT_FAILURE);
+ MEMORY_OFFLINE, REPORT_FAILURE);
if (ret) {
reason = "failure to isolate range";
goto failed_removal_pcplists_disabled;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 51d66f86b93d..3f208f8656f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6787,7 +6787,8 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
*/
ret = start_isolate_page_range(start, end,
- (alloc_flags & ACR_CMA) ? CMA_ALLOCATION : 0);
+ (alloc_flags & ACR_CMA) ? CMA_ALLOCATION : ISOLATE_MODE_NONE,
+ 0);
if (ret)
goto done;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 5f00d7113766..fd4818862654 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -48,7 +48,7 @@ static inline void set_pageblock_isolate(struct page *page)
*
*/
static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t flags)
+ isolate_mode_t mode, isolate_flags_t flags)
{
struct page *page = pfn_to_page(start_pfn);
struct zone *zone = page_zone(page);
@@ -63,7 +63,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
* isolate CMA pageblocks even when they are not movable in fact
* so consider them movable here.
*/
- if (flags & CMA_ALLOCATION)
+ if (mode == CMA_ALLOCATION)
return NULL;
return page;
@@ -168,8 +168,9 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
* present in [start_pfn, end_pfn). The pageblock must intersect with
* [start_pfn, end_pfn).
*/
-static int set_migratetype_isolate(struct page *page, isol_flags_t isol_flags,
- unsigned long start_pfn, unsigned long end_pfn)
+static int set_migratetype_isolate(struct page *page, isolate_mode_t mode,
+ isolate_flags_t isol_flags, unsigned long start_pfn,
+ unsigned long end_pfn)
{
struct zone *zone = page_zone(page);
struct page *unmovable;
@@ -203,7 +204,7 @@ static int set_migratetype_isolate(struct page *page, isol_flags_t isol_flags,
end_pfn);
unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
- isol_flags);
+ mode, isol_flags);
if (!unmovable) {
if (!pageblock_isolate_and_move_free_pages(zone, page)) {
spin_unlock_irqrestore(&zone->lock, flags);
@@ -309,6 +310,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* isolate_single_pageblock() -- tries to isolate a pageblock that might be
* within a free or in-use page.
* @boundary_pfn: pageblock-aligned pfn that a page might cross
+ * @mode: isolation mode
* @flags: isolation flags
* @isolate_before: isolate the pageblock before the boundary_pfn
* @skip_isolation: the flag to skip the pageblock isolation in second
@@ -327,7 +329,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* either. The function handles this by splitting the free page or migrating
* the in-use page then splitting the free page.
*/
-static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t flags,
+static int isolate_single_pageblock(unsigned long boundary_pfn,
+ isolate_mode_t mode, isolate_flags_t flags,
bool isolate_before, bool skip_isolation)
{
unsigned long start_pfn;
@@ -357,7 +360,8 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
VM_BUG_ON(!get_pageblock_isolate(pfn_to_page(isolate_pageblock)));
} else {
ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock),
- flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
+ mode, flags, isolate_pageblock,
+ isolate_pageblock + pageblock_nr_pages);
if (ret)
return ret;
@@ -455,6 +459,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
* start_isolate_page_range() - mark page range MIGRATE_ISOLATE
* @start_pfn: The first PFN of the range to be isolated.
* @end_pfn: The last PFN of the range to be isolated.
+ * @mode: isolation mode
* @flags: isolation flags
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
@@ -488,7 +493,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
* Return: 0 on success and -EBUSY if any part of range cannot be isolated.
*/
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t flags)
+ isolate_mode_t mode, isolate_flags_t flags)
{
unsigned long pfn;
struct page *page;
@@ -499,7 +504,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
bool skip_isolation = false;
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, flags, false,
+ ret = isolate_single_pageblock(isolate_start, mode, flags, false,
skip_isolation);
if (ret)
return ret;
@@ -508,7 +513,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
skip_isolation = true;
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, flags, true,
+ ret = isolate_single_pageblock(isolate_end, mode, flags, true,
skip_isolation);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start));
@@ -520,7 +525,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
pfn < isolate_end - pageblock_nr_pages;
pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
- if (page && set_migratetype_isolate(page, flags,
+ if (page && set_migratetype_isolate(page, mode, flags,
start_pfn, end_pfn)) {
undo_isolate_page_range(isolate_start, pfn);
unset_migratetype_isolate(
@@ -563,7 +568,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
*/
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
- isol_flags_t flags)
+ isolate_flags_t flags)
{
struct page *page;
@@ -610,7 +615,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
* Returns 0 if true, -EBUSY if one or more pages are in use.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t isol_flags)
+ isolate_flags_t isol_flags)
{
unsigned long pfn, flags;
struct page *page;
--
2.47.2
--
Best Regards,
Yan, Zi
* Re: [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions.
2025-05-09 1:56 ` Zi Yan
@ 2025-05-09 16:01 ` Zi Yan
0 siblings, 0 replies; 23+ messages in thread
From: Zi Yan @ 2025-05-09 16:01 UTC (permalink / raw)
To: Andrew Morton
Cc: Johannes Weiner, David Hildenbrand, linux-mm, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel, Zi Yan
On 8 May 2025, at 21:56, Zi Yan wrote:
> On 8 May 2025, at 16:25, Zi Yan wrote:
>
>> On 7 May 2025, at 17:10, Zi Yan wrote:
>>
>>> Since migratetype is no longer overwritten during pageblock isolation,
>>> start_isolate_page_range(), has_unmovable_pages(), and
>>> set_migratetype_isolate() no longer need to know which migratetype to
>>> restore on isolation failure.
>>>
>>> has_unmovable_pages() still needs to know whether the isolation is for a
>>> CMA allocation, so add CMA_ALLOCATION to the isolation flags to provide
>>> that information.
>>>
>>> alloc_contig_range() no longer needs migratetype. Replace it with a newly
>>> defined acr_flags_t that tells whether an allocation is for CMA, and do
>>> the same for __alloc_contig_migrate_range().
>>>
>>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>>> ---
>>> drivers/virtio/virtio_mem.c | 3 +--
>>> include/linux/gfp.h | 6 +++++-
>>> include/linux/page-isolation.h | 15 +++++++++++---
>>> include/trace/events/kmem.h | 14 +++++++------
>>> mm/cma.c | 2 +-
>>> mm/memory_hotplug.c | 1 -
>>> mm/page_alloc.c | 22 ++++++++++-----------
>>> mm/page_isolation.c | 36 ++++++++++++----------------------
>>> 8 files changed, 50 insertions(+), 49 deletions(-)
>>
>> Here is the fixup 3/3 to address the type issue reported by kernel test robot.
>>
>> From 3c439f1f09b03c8362b43c0ac05e5f174f1a6655 Mon Sep 17 00:00:00 2001
>> From: Zi Yan <ziy@nvidia.com>
>> Date: Thu, 8 May 2025 15:42:18 -0400
>> Subject: [PATCH] fixup for mm/page_isolation: remove migratetype parameter
>> from more functions.
>>
>> 1. fixed test_pages_isolated() and __test_page_isolated_in_pageblock()
>> signature by using the new isol_flags_t type.
>> 2. fixed test_pages_isolated() doc: flags -> isol_flags
>>
>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>> ---
>> include/linux/page-isolation.h | 2 +-
>> mm/page_isolation.c | 6 +++---
>> 2 files changed, 4 insertions(+), 4 deletions(-)
>
> This is the second round of fixup 1/1 to address Johannes' comment on Patch 4.
>
> From 760c00e808c74d62e8d879f281f38d6608c89296 Mon Sep 17 00:00:00 2001
> From: Zi Yan <ziy@nvidia.com>
> Date: Thu, 8 May 2025 20:54:40 -0400
> Subject: [PATCH] fixup for fixup for mm/page_isolation: remove migratetype
> parameter from more functions.
>
> 1. change MEMORY_OFFLINE and CMA_ALLOCATION to isolate_mode_t enums.
> 2. rename isol_flags_t to isolate_flags_t.
> 3. REPORT_FAILURE becomes the only isolate_flags_t flag.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> include/linux/page-isolation.h | 26 +++++++++++++++++---------
> mm/memory_hotplug.c | 2 +-
> mm/page_alloc.c | 3 ++-
> mm/page_isolation.c | 31 ++++++++++++++++++-------------
> 4 files changed, 38 insertions(+), 24 deletions(-)
>
This fixup has missing pieces. Let me send another one.
> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
> index 20c3f98b5afb..29b4ddcaea7a 100644
> --- a/include/linux/page-isolation.h
> +++ b/include/linux/page-isolation.h
> @@ -22,17 +22,25 @@ static inline bool is_migrate_isolate(int migratetype)
> }
> #endif
>
> +/*
> + * Isolation modes:
> + * ISOLATE_MODE_NONE - isolate for other purposes than those below
> + * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
> + * PageHWPoison() pages and PageOffline() pages.
> + * CMA_ALLOCATION - isolate for CMA allocations
> + */
> +enum isolate_mode_t {
> + ISOLATE_MODE_NONE,
> + MEMORY_OFFLINE,
> + CMA_ALLOCATION,
> +};
> +
> /*
> * Isolation flags:
> - * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
> - * PageHWPoison() pages and PageOffline() pages.
> * REPORT_FAILURE - report details about the failure to isolate the range
> - * CMA_ALLOCATION - isolate for CMA allocations
> */
> -typedef unsigned int __bitwise isol_flags_t;
> -#define MEMORY_OFFLINE ((__force isol_flags_t)BIT(0))
> -#define REPORT_FAILURE ((__force isol_flags_t)BIT(1))
> -#define CMA_ALLOCATION ((__force isol_flags_t)BIT(2))
> +typedef unsigned int __bitwise isolate_flags_t;
> +#define REPORT_FAILURE ((__force isolate_flags_t)BIT(0))
>
> void set_pageblock_migratetype(struct page *page, int migratetype);
>
> @@ -40,10 +48,10 @@ bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
> bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
>
> int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
> - isol_flags_t flags);
> + isolate_mode_t mode, isolate_flags_t flags);
>
> void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
>
> int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
> - isol_flags_t isol_flags);
> + isolate_flags_t isol_flags);
> #endif
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 155f0b4ff299..3dab006a537e 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -2005,7 +2005,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
>
> /* set above range as isolated */
> ret = start_isolate_page_range(start_pfn, end_pfn,
> - MEMORY_OFFLINE | REPORT_FAILURE);
> + MEMORY_OFFLINE, REPORT_FAILURE);
> if (ret) {
> reason = "failure to isolate range";
> goto failed_removal_pcplists_disabled;
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 51d66f86b93d..3f208f8656f4 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -6787,7 +6787,8 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
> */
>
> ret = start_isolate_page_range(start, end,
> - (alloc_flags & ACR_CMA) ? CMA_ALLOCATION : 0);
> + (alloc_flags & ACR_CMA) ? CMA_ALLOCATION : ISOLATE_MODE_NONE,
> + 0);
> if (ret)
> goto done;
>
> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
> index 5f00d7113766..fd4818862654 100644
> --- a/mm/page_isolation.c
> +++ b/mm/page_isolation.c
> @@ -48,7 +48,7 @@ static inline void set_pageblock_isolate(struct page *page)
> *
> */
> static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
> - isol_flags_t flags)
> + isolate_mode_t mode, isolate_flags_t flags)
> {
> struct page *page = pfn_to_page(start_pfn);
> struct zone *zone = page_zone(page);
> @@ -63,7 +63,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
> * isolate CMA pageblocks even when they are not movable in fact
> * so consider them movable here.
> */
> - if (flags & CMA_ALLOCATION)
> + if (mode == CMA_ALLOCATION)
> return NULL;
>
> return page;
> @@ -168,8 +168,9 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
> * present in [start_pfn, end_pfn). The pageblock must intersect with
> * [start_pfn, end_pfn).
> */
> -static int set_migratetype_isolate(struct page *page, isol_flags_t isol_flags,
> - unsigned long start_pfn, unsigned long end_pfn)
> +static int set_migratetype_isolate(struct page *page, isolate_mode_t mode,
> + isolate_flags_t isol_flags, unsigned long start_pfn,
> + unsigned long end_pfn)
> {
> struct zone *zone = page_zone(page);
> struct page *unmovable;
> @@ -203,7 +204,7 @@ static int set_migratetype_isolate(struct page *page, isol_flags_t isol_flags,
> end_pfn);
>
> unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
> - isol_flags);
> + mode, isol_flags);
> if (!unmovable) {
> if (!pageblock_isolate_and_move_free_pages(zone, page)) {
> spin_unlock_irqrestore(&zone->lock, flags);
> @@ -309,6 +310,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
> * isolate_single_pageblock() -- tries to isolate a pageblock that might be
> * within a free or in-use page.
> * @boundary_pfn: pageblock-aligned pfn that a page might cross
> + * @mode: isolation mode
> * @flags: isolation flags
> * @isolate_before: isolate the pageblock before the boundary_pfn
> * @skip_isolation: the flag to skip the pageblock isolation in second
> @@ -327,7 +329,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
> * either. The function handles this by splitting the free page or migrating
> * the in-use page then splitting the free page.
> */
> -static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t flags,
> +static int isolate_single_pageblock(unsigned long boundary_pfn,
> + isolate_mode_t mode, isolate_flags_t flags,
> bool isolate_before, bool skip_isolation)
> {
> unsigned long start_pfn;
> @@ -357,7 +360,8 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
> VM_BUG_ON(!get_pageblock_isolate(pfn_to_page(isolate_pageblock)));
> } else {
> ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock),
> - flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
> + mode, flags, isolate_pageblock,
> + isolate_pageblock + pageblock_nr_pages);
>
> if (ret)
> return ret;
> @@ -455,6 +459,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
> * start_isolate_page_range() - mark page range MIGRATE_ISOLATE
> * @start_pfn: The first PFN of the range to be isolated.
> * @end_pfn: The last PFN of the range to be isolated.
> + * @mode: isolation mode
> * @flags: isolation flags
> *
> * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
> @@ -488,7 +493,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
> * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
> */
> int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
> - isol_flags_t flags)
> + isolate_mode_t mode, isolate_flags_t flags)
> {
> unsigned long pfn;
> struct page *page;
> @@ -499,7 +504,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
> bool skip_isolation = false;
>
> /* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
> - ret = isolate_single_pageblock(isolate_start, flags, false,
> + ret = isolate_single_pageblock(isolate_start, mode, flags, false,
> skip_isolation);
> if (ret)
> return ret;
> @@ -508,7 +513,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
> skip_isolation = true;
>
> /* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
> - ret = isolate_single_pageblock(isolate_end, flags, true,
> + ret = isolate_single_pageblock(isolate_end, mode, flags, true,
> skip_isolation);
> if (ret) {
> unset_migratetype_isolate(pfn_to_page(isolate_start));
> @@ -520,7 +525,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
> pfn < isolate_end - pageblock_nr_pages;
> pfn += pageblock_nr_pages) {
> page = __first_valid_page(pfn, pageblock_nr_pages);
> - if (page && set_migratetype_isolate(page, flags,
> + if (page && set_migratetype_isolate(page, mode, flags,
> start_pfn, end_pfn)) {
> undo_isolate_page_range(isolate_start, pfn);
> unset_migratetype_isolate(
> @@ -563,7 +568,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
> */
> static unsigned long
> __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
> - isol_flags_t flags)
> + isolate_flags_t flags)
> {
> struct page *page;
>
> @@ -610,7 +615,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
> * Returns 0 if true, -EBUSY if one or more pages are in use.
> */
> int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
> - isol_flags_t isol_flags)
> + isolate_flags_t isol_flags)
> {
> unsigned long pfn, flags;
> struct page *page;
> --
> 2.47.2
>
>
>
> --
> Best Regards,
> Yan, Zi
--
Best Regards,
Yan, Zi
* Re: [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions.
2025-05-07 21:10 ` [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions Zi Yan
2025-05-08 20:25 ` Zi Yan
@ 2025-05-08 21:11 ` Johannes Weiner
2025-05-08 22:15 ` Zi Yan
1 sibling, 1 reply; 23+ messages in thread
From: Johannes Weiner @ 2025-05-08 21:11 UTC (permalink / raw)
To: Zi Yan
Cc: David Hildenbrand, linux-mm, Andrew Morton, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On Wed, May 07, 2025 at 05:10:59PM -0400, Zi Yan wrote:
> @@ -22,8 +22,17 @@ static inline bool is_migrate_isolate(int migratetype)
> }
> #endif
>
> -#define MEMORY_OFFLINE 0x1
> -#define REPORT_FAILURE 0x2
> +/*
> + * Isolation flags:
> + * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
> + * PageHWPoison() pages and PageOffline() pages.
> + * REPORT_FAILURE - report details about the failure to isolate the range
> + * CMA_ALLOCATION - isolate for CMA allocations
> + */
> +typedef unsigned int __bitwise isol_flags_t;
> +#define MEMORY_OFFLINE ((__force isol_flags_t)BIT(0))
> +#define REPORT_FAILURE ((__force isol_flags_t)BIT(1))
> +#define CMA_ALLOCATION ((__force isol_flags_t)BIT(2))
Should this be a mode enum instead? MEMORY_OFFLINE and CMA_ALLOCATION
are exclusive modes AFAICS. REPORT_FAILURE is a flag, but it's only
used by MEMORY_OFFLINE, so probably better to make it a part of that
instead of having both a mode and a flag field.
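One possible shape of that split, sketched as illustrative declarations only (the fixups posted elsewhere in this thread are the authoritative version): the mutually exclusive purposes become an enum, and genuine modifiers stay in a separate flags word.

/* mutually exclusive reasons to isolate */
typedef enum {
	ISOLATE_MODE_NONE,	/* isolation for some other purpose */
	MEMORY_OFFLINE,		/* isolate in order to offline memory */
	CMA_ALLOCATION,		/* isolate for a CMA allocation */
} isolate_mode_t;

/* independent modifiers */
typedef unsigned int isolate_flags_t;
#define REPORT_FAILURE	(1u << 0)	/* today only used when offlining */

int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     isolate_mode_t mode, isolate_flags_t flags);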
* Re: [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions.
2025-05-08 21:11 ` Johannes Weiner
@ 2025-05-08 22:15 ` Zi Yan
0 siblings, 0 replies; 23+ messages in thread
From: Zi Yan @ 2025-05-08 22:15 UTC (permalink / raw)
To: Johannes Weiner
Cc: David Hildenbrand, linux-mm, Andrew Morton, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On 8 May 2025, at 17:11, Johannes Weiner wrote:
> On Wed, May 07, 2025 at 05:10:59PM -0400, Zi Yan wrote:
>> @@ -22,8 +22,17 @@ static inline bool is_migrate_isolate(int migratetype)
>> }
>> #endif
>>
>> -#define MEMORY_OFFLINE 0x1
>> -#define REPORT_FAILURE 0x2
>> +/*
>> + * Isolation flags:
>> + * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
>> + * PageHWPoison() pages and PageOffline() pages.
>> + * REPORT_FAILURE - report details about the failure to isolate the range
>> + * CMA_ALLOCATION - isolate for CMA allocations
>> + */
>> +typedef unsigned int __bitwise isol_flags_t;
>> +#define MEMORY_OFFLINE ((__force isol_flags_t)BIT(0))
>> +#define REPORT_FAILURE ((__force isol_flags_t)BIT(1))
>> +#define CMA_ALLOCATION ((__force isol_flags_t)BIT(2))
>
> Should this be a mode enum instead? MEMORY_OFFLINE and CMA_ALLOCATION
> are exclusive modes AFAICS. REPORT_FAILURE is a flag, but it's only
> used by MEMORY_OFFLINE, so probably better to make it a part of that
> instead of having both a mode and a flag field.
Yes. Will use an enum for MEMORY_OFFLINE and CMA_ALLOCATION and
make REPORT_FAILURE a separate flag. Thanks for the feedback.
--
Best Regards,
Yan, Zi
* Re: [PATCH v3 0/4] Make MIGRATE_ISOLATE a standalone bit
2025-05-07 21:10 [PATCH v3 0/4] Make MIGRATE_ISOLATE a standalone bit Zi Yan
` (3 preceding siblings ...)
2025-05-07 21:10 ` [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter from more functions Zi Yan
@ 2025-05-08 4:38 ` Johannes Weiner
4 siblings, 0 replies; 23+ messages in thread
From: Johannes Weiner @ 2025-05-08 4:38 UTC (permalink / raw)
To: Zi Yan
Cc: David Hildenbrand, linux-mm, Andrew Morton, Oscar Salvador,
Vlastimil Babka, Baolin Wang, Kirill A . Shutemov, Mel Gorman,
Suren Baghdasaryan, Michal Hocko, Brendan Jackman, Richard Chang,
linux-kernel
On Wed, May 07, 2025 at 05:10:55PM -0400, Zi Yan wrote:
>
> Hi Johannes,
>
> I incorporated all your feedback on v2 (see Changelog below), except the
> "decoupling enum migratetype from the pageblock bits" one[1], since all
> 5 migratetypes (not MIGRATE_ISOLATE) are just values and
> "#define PB_migratetype_bits MIGRATE_TYPE_BITS" would take 5 bits
> for migratetypes, which only requires 3 bits. Let me know if I
> misunderstand your suggestion. Thanks.
Right, it's better to stick with enum values. My main worry was that
PB_migratetype_bits *usually* correspond to an enum migratetype, with
MIGRATE_ISOLATE being the precarious exception. But I think it's much
clearer with the special-casing in get/set_pageblock_migratetype()
instead of the lower pfnmask functions. Thanks for moving that!
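As a self-contained illustration of that special-casing (a user-space model with made-up constants, not the kernel code): the high-level accessor reports isolation from the standalone bit, while the underlying storage keeps the original migratetype for a later restore.

#include <assert.h>

#define MT_MASK			0x7u		/* 3 migratetype bits */
#define ISOLATE_BIT		(1u << 7)	/* standalone isolate bit */
#define MIGRATE_ISOLATE_MODEL	99		/* stand-in for MIGRATE_ISOLATE */

/* model of the special-cased accessor: isolation wins, storage untouched */
static int pageblock_migratetype_model(unsigned int pb_flags)
{
	if (pb_flags & ISOLATE_BIT)
		return MIGRATE_ISOLATE_MODEL;
	return (int)(pb_flags & MT_MASK);
}

int main(void)
{
	unsigned int pb = 2u | ISOLATE_BIT;	/* isolated; migratetype 2 underneath */

	assert(pageblock_migratetype_model(pb) == MIGRATE_ISOLATE_MODEL);
	assert((pb & MT_MASK) == 2u);		/* the low-level bits still say 2 */
	return 0;
}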