* [PATCH 1/5] mm: use inline helper functions instead of ugly macros
2026-02-24 1:56 [PATCH 0/5] support batched checking of the young flag for MGLRU Baolin Wang
@ 2026-02-24 1:56 ` Baolin Wang
2026-02-24 2:36 ` Rik van Riel
2026-02-24 7:09 ` Barry Song
2026-02-24 1:56 ` [PATCH 2/5] mm: rmap: add a ZONE_DEVICE folio warning in folio_referenced() Baolin Wang
` (3 subsequent siblings)
4 siblings, 2 replies; 11+ messages in thread
From: Baolin Wang @ 2026-02-24 1:56 UTC (permalink / raw)
To: akpm, david
Cc: catalin.marinas, will, lorenzo.stoakes, ryan.roberts,
Liam.Howlett, vbabka, rppt, surenb, mhocko, riel, harry.yoo,
jannh, willy, baohua, dev.jain, axelrasmussen, yuanchu, weixugc,
hannes, zhengqi.arch, shakeel.butt, baolin.wang, linux-mm,
linux-arm-kernel, linux-kernel
People have already complained that these *_clear_young_notify() related
macros are very ugly, so let's use inline helpers to make them more readable.
In addition, I cannot implement these inline helper functions in the
mmu_notifier.h file, because some arch-specific files will include the
mmu_notifier.h, which introduces header compilation dependencies and causes
build errors (e.g., arch/arm64/include/asm/tlbflush.h). Moreover, since
these functions are only used in the mm, implementing these inline helpers
in the mm/internal.h header seems reasonable.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
include/linux/mmu_notifier.h | 54 ------------------------------------
mm/internal.h | 53 +++++++++++++++++++++++++++++++++++
2 files changed, 53 insertions(+), 54 deletions(-)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 07a2bbaf86e9..93894b90c8c1 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -515,55 +515,6 @@ static inline void mmu_notifier_range_init_owner(
range->owner = owner;
}
-#define clear_flush_young_ptes_notify(__vma, __address, __ptep, __nr) \
-({ \
- int __young; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
- unsigned int ___nr = __nr; \
- __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
- __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
- ___address, \
- ___address + \
- ___nr * PAGE_SIZE); \
- __young; \
-})
-
-#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \
-({ \
- int __young; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
- __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
- __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
- ___address, \
- ___address + \
- PMD_SIZE); \
- __young; \
-})
-
-#define ptep_clear_young_notify(__vma, __address, __ptep) \
-({ \
- int __young; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
- __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
- __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
- ___address + PAGE_SIZE); \
- __young; \
-})
-
-#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
-({ \
- int __young; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
- __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
- __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
- ___address + PMD_SIZE); \
- __young; \
-})
-
#else /* CONFIG_MMU_NOTIFIER */
struct mmu_notifier_range {
@@ -651,11 +602,6 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
#define mmu_notifier_range_update_to_read_only(r) false
-#define clear_flush_young_ptes_notify clear_flush_young_ptes
-#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
-#define ptep_clear_young_notify ptep_test_and_clear_young
-#define pmdp_clear_young_notify pmdp_test_and_clear_young
-
static inline void mmu_notifier_synchronize(void)
{
}
diff --git a/mm/internal.h b/mm/internal.h
index e0ef192b0be3..1ba175b8d4f1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -11,6 +11,7 @@
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
+#include <linux/mmu_notifier.h>
#include <linux/pagemap.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
@@ -1789,4 +1790,56 @@ static inline int io_remap_pfn_range_complete(struct vm_area_struct *vma,
return remap_pfn_range_complete(vma, addr, pfn, size, prot);
}
+#ifdef CONFIG_MMU_NOTIFIER
+static inline int clear_flush_young_ptes_notify(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr)
+{
+ int young;
+
+ young = clear_flush_young_ptes(vma, addr, ptep, nr);
+ young |= mmu_notifier_clear_flush_young(vma->vm_mm, addr,
+ addr + nr * PAGE_SIZE);
+ return young;
+}
+
+static inline int pmdp_clear_flush_young_notify(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ int young;
+
+ young = pmdp_clear_flush_young(vma, addr, pmdp);
+ young |= mmu_notifier_clear_flush_young(vma->vm_mm, addr, addr + PMD_SIZE);
+ return young;
+}
+
+static inline int ptep_clear_young_notify(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ int young;
+
+ young = ptep_test_and_clear_young(vma, addr, ptep);
+ young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
+ return young;
+}
+
+static inline int pmdp_clear_young_notify(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ int young;
+
+ young = pmdp_test_and_clear_young(vma, addr, pmdp);
+ young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PMD_SIZE);
+ return young;
+}
+
+#else /* CONFIG_MMU_NOTIFIER */
+
+#define clear_flush_young_ptes_notify clear_flush_young_ptes
+#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_young_notify ptep_test_and_clear_young
+#define pmdp_clear_young_notify pmdp_test_and_clear_young
+
+#endif /* CONFIG_MMU_NOTIFIER */
+
#endif /* __MM_INTERNAL_H */
--
2.47.3
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH 1/5] mm: use inline helper functions instead of ugly macros
2026-02-24 1:56 ` [PATCH 1/5] mm: use inline helper functions instead of ugly macros Baolin Wang
@ 2026-02-24 2:36 ` Rik van Riel
2026-02-24 7:09 ` Barry Song
1 sibling, 0 replies; 11+ messages in thread
From: Rik van Riel @ 2026-02-24 2:36 UTC (permalink / raw)
To: Baolin Wang, akpm, david
Cc: catalin.marinas, will, lorenzo.stoakes, ryan.roberts,
Liam.Howlett, vbabka, rppt, surenb, mhocko, harry.yoo, jannh,
willy, baohua, dev.jain, axelrasmussen, yuanchu, weixugc, hannes,
zhengqi.arch, shakeel.butt, linux-mm, linux-arm-kernel,
linux-kernel
On Tue, 2026-02-24 at 09:56 +0800, Baolin Wang wrote:
> People have already complained that these *_clear_young_notify()
> related
> macros are very ugly, so let's use inline helpers to make them more
> readable.
>
> In addition, I cannot implement these inline helper functions in the
> mmu_notifier.h file, because some arch-specific files will include
> the
> mmu_notifier.h, which introduces header compilation dependencies and
> causes
> build errors (e.g., arch/arm64/include/asm/tlbflush.h). Moreover,
> since
> these functions are only used in the mm, implementing these inline
> helpers
> in the mm/internal.h header seems reasonable.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Rik van Riel <riel@surriel.com>
--
All Rights Reversed.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/5] mm: use inline helper functions instead of ugly macros
2026-02-24 1:56 ` [PATCH 1/5] mm: use inline helper functions instead of ugly macros Baolin Wang
2026-02-24 2:36 ` Rik van Riel
@ 2026-02-24 7:09 ` Barry Song
1 sibling, 0 replies; 11+ messages in thread
From: Barry Song @ 2026-02-24 7:09 UTC (permalink / raw)
To: Baolin Wang
Cc: akpm, david, catalin.marinas, will, lorenzo.stoakes,
ryan.roberts, Liam.Howlett, vbabka, rppt, surenb, mhocko, riel,
harry.yoo, jannh, willy, dev.jain, axelrasmussen, yuanchu,
weixugc, hannes, zhengqi.arch, shakeel.butt, linux-mm,
linux-arm-kernel, linux-kernel
On Tue, Feb 24, 2026 at 10:02 AM Baolin Wang
<baolin.wang@linux.alibaba.com> wrote:
>
> People have already complained that these *_clear_young_notify() related
> macros are very ugly, so let's use inline helpers to make them more readable.
>
> In addition, I cannot implement these inline helper functions in the
> mmu_notifier.h file, because some arch-specific files will include the
> mmu_notifier.h, which introduces header compilation dependencies and causes
> build errors (e.g., arch/arm64/include/asm/tlbflush.h). Moreover, since
> these functions are only used in the mm, implementing these inline helpers
> in the mm/internal.h header seems reasonable.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Thanks
Barry
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH 2/5] mm: rmap: add a ZONE_DEVICE folio warning in folio_referenced()
2026-02-24 1:56 [PATCH 0/5] support batched checking of the young flag for MGLRU Baolin Wang
2026-02-24 1:56 ` [PATCH 1/5] mm: use inline helper functions instead of ugly macros Baolin Wang
@ 2026-02-24 1:56 ` Baolin Wang
2026-02-24 2:38 ` Rik van Riel
2026-02-24 6:34 ` Alistair Popple
2026-02-24 1:56 ` [PATCH 3/5] mm: add a batched helper to clear the young flag for large folios Baolin Wang
` (2 subsequent siblings)
4 siblings, 2 replies; 11+ messages in thread
From: Baolin Wang @ 2026-02-24 1:56 UTC (permalink / raw)
To: akpm, david
Cc: catalin.marinas, will, lorenzo.stoakes, ryan.roberts,
Liam.Howlett, vbabka, rppt, surenb, mhocko, riel, harry.yoo,
jannh, willy, baohua, dev.jain, axelrasmussen, yuanchu, weixugc,
hannes, zhengqi.arch, shakeel.butt, baolin.wang, linux-mm,
linux-arm-kernel, linux-kernel
The folio_referenced() is used to test whether a folio was referenced during
reclaim. Moreover, ZONE_DEVICE folios are controlled by their device driver,
have a lifetime tied to that driver, and are never placed on the LRU list.
That means we should never try to reclaim ZONE_DEVICE folios, so add a warning
to catch this unexpected behavior in folio_referenced() to avoid confusion,
as discussed in the previous thread[1].
[1] https://lore.kernel.org/all/16fb7985-ec0f-4b56-91e7-404c5114f899@kernel.org/
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/rmap.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/mm/rmap.c b/mm/rmap.c
index bff8f222004e..be785dfc9336 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1065,6 +1065,7 @@ int folio_referenced(struct folio *folio, int is_locked,
.invalid_vma = invalid_folio_referenced_vma,
};
+ VM_WARN_ON_FOLIO(folio_is_zone_device(folio), folio);
*vm_flags = 0;
if (!pra.mapcount)
return 0;
--
2.47.3
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH 2/5] mm: rmap: add a ZONE_DEVICE folio warning in folio_referenced()
2026-02-24 1:56 ` [PATCH 2/5] mm: rmap: add a ZONE_DEVICE folio warning in folio_referenced() Baolin Wang
@ 2026-02-24 2:38 ` Rik van Riel
2026-02-24 5:49 ` Baolin Wang
2026-02-24 6:34 ` Alistair Popple
1 sibling, 1 reply; 11+ messages in thread
From: Rik van Riel @ 2026-02-24 2:38 UTC (permalink / raw)
To: Baolin Wang, akpm, david
Cc: catalin.marinas, will, lorenzo.stoakes, ryan.roberts,
Liam.Howlett, vbabka, rppt, surenb, mhocko, harry.yoo, jannh,
willy, baohua, dev.jain, axelrasmussen, yuanchu, weixugc, hannes,
zhengqi.arch, shakeel.butt, linux-mm, linux-arm-kernel,
linux-kernel
On Tue, 2026-02-24 at 09:56 +0800, Baolin Wang wrote:
>
> diff --git a/mm/rmap.c b/mm/rmap.c
> index bff8f222004e..be785dfc9336 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1065,6 +1065,7 @@ int folio_referenced(struct folio *folio, int
> is_locked,
> .invalid_vma = invalid_folio_referenced_vma,
> };
>
> + VM_WARN_ON_FOLIO(folio_is_zone_device(folio), folio);
> *vm_flags = 0;
> if (!pra.mapcount)
> return 0;
Should be a VM_WARN_ON_ONCE_FOLIO so we do not cause
a softlockup if we try to print information about a
million ZONE_DEVICE pages?
--
All Rights Reversed.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/5] mm: rmap: add a ZONE_DEVICE folio warning in folio_referenced()
2026-02-24 2:38 ` Rik van Riel
@ 2026-02-24 5:49 ` Baolin Wang
0 siblings, 0 replies; 11+ messages in thread
From: Baolin Wang @ 2026-02-24 5:49 UTC (permalink / raw)
To: Rik van Riel, akpm, david
Cc: catalin.marinas, will, lorenzo.stoakes, ryan.roberts,
Liam.Howlett, vbabka, rppt, surenb, mhocko, harry.yoo, jannh,
willy, baohua, dev.jain, axelrasmussen, yuanchu, weixugc, hannes,
zhengqi.arch, shakeel.butt, linux-mm, linux-arm-kernel,
linux-kernel
On 2/24/26 10:38 AM, Rik van Riel wrote:
> On Tue, 2026-02-24 at 09:56 +0800, Baolin Wang wrote:
>>
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index bff8f222004e..be785dfc9336 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -1065,6 +1065,7 @@ int folio_referenced(struct folio *folio, int
>> is_locked,
>> .invalid_vma = invalid_folio_referenced_vma,
>> };
>>
>> + VM_WARN_ON_FOLIO(folio_is_zone_device(folio), folio);
>> *vm_flags = 0;
>> if (!pra.mapcount)
>> return 0;
>
> Should be a VM_WARN_ON_ONCE_FOLIO so we do not cause
> a softlockup if we try to print information about a
> million ZONE_DEVICE pages?
Yes, sounds reasonable. Will do in next version. Thanks for reviewing.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/5] mm: rmap: add a ZONE_DEVICE folio warning in folio_referenced()
2026-02-24 1:56 ` [PATCH 2/5] mm: rmap: add a ZONE_DEVICE folio warning in folio_referenced() Baolin Wang
2026-02-24 2:38 ` Rik van Riel
@ 2026-02-24 6:34 ` Alistair Popple
1 sibling, 0 replies; 11+ messages in thread
From: Alistair Popple @ 2026-02-24 6:34 UTC (permalink / raw)
To: Baolin Wang
Cc: akpm, david, catalin.marinas, will, lorenzo.stoakes,
ryan.roberts, Liam.Howlett, vbabka, rppt, surenb, mhocko, riel,
harry.yoo, jannh, willy, baohua, dev.jain, axelrasmussen,
yuanchu, weixugc, hannes, zhengqi.arch, shakeel.butt, linux-mm,
linux-arm-kernel, linux-kernel
On 2026-02-24 at 12:56 +1100, Baolin Wang <baolin.wang@linux.alibaba.com> wrote...
> The folio_referenced() is used to test whether a folio was referenced during
> reclaim. Moreover, ZONE_DEVICE folios are controlled by their device driver,
> have a lifetime tied to that driver, and are never placed on the LRU list.
> That means we should never try to reclaim ZONE_DEVICE folios, so add a warning
> to catch this unexpected behavior in folio_referenced() to avoid confusion,
> as discussed in the previous thread[1].
Agree with this so once you've handled Rik's comment feel free to add:
Reviewed-by: Alistair Popple <apopple@nvidia.com>
> [1] https://lore.kernel.org/all/16fb7985-ec0f-4b56-91e7-404c5114f899@kernel.org/
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> mm/rmap.c | 1 +
> 1 file changed, 1 insertion(+)
>
> diff --git a/mm/rmap.c b/mm/rmap.c
> index bff8f222004e..be785dfc9336 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1065,6 +1065,7 @@ int folio_referenced(struct folio *folio, int is_locked,
> .invalid_vma = invalid_folio_referenced_vma,
> };
>
> + VM_WARN_ON_FOLIO(folio_is_zone_device(folio), folio);
> *vm_flags = 0;
> if (!pra.mapcount)
> return 0;
> --
> 2.47.3
>
>
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH 3/5] mm: add a batched helper to clear the young flag for large folios
2026-02-24 1:56 [PATCH 0/5] support batched checking of the young flag for MGLRU Baolin Wang
2026-02-24 1:56 ` [PATCH 1/5] mm: use inline helper functions instead of ugly macros Baolin Wang
2026-02-24 1:56 ` [PATCH 2/5] mm: rmap: add a ZONE_DEVICE folio warning in folio_referenced() Baolin Wang
@ 2026-02-24 1:56 ` Baolin Wang
2026-02-24 1:56 ` [PATCH 4/5] mm: support batched checking of the young flag for MGLRU Baolin Wang
2026-02-24 1:56 ` [PATCH 5/5] arm64: mm: implement the architecture-specific test_and_clear_young_ptes() Baolin Wang
4 siblings, 0 replies; 11+ messages in thread
From: Baolin Wang @ 2026-02-24 1:56 UTC (permalink / raw)
To: akpm, david
Cc: catalin.marinas, will, lorenzo.stoakes, ryan.roberts,
Liam.Howlett, vbabka, rppt, surenb, mhocko, riel, harry.yoo,
jannh, willy, baohua, dev.jain, axelrasmussen, yuanchu, weixugc,
hannes, zhengqi.arch, shakeel.butt, baolin.wang, linux-mm,
linux-arm-kernel, linux-kernel
Currently, MGLRU will call ptep_clear_young_notify() to check and clear the
young flag for each PTE sequentially, which is inefficient for large folios
reclamation.
Moreover, on Arm64 architecture, which supports contiguous PTEs, the Arm64-
specific ptep_test_and_clear_young() already implements an optimization to
clear the young flags for PTEs within a contiguous range. However, this is not
sufficient. Similar to the Arm64 specific clear_flush_young_ptes(), we can
extend this to perform batched operations for the entire large folio (which
might exceed the contiguous range: CONT_PTE_SIZE).
Thus, we can introduce a new batched helper: test_and_clear_young_ptes() and
its wrapper clear_young_ptes_notify(), to perform batched checking of the young
flags for large folios, which can help improve performance during large folio
reclamation when MGLRU is enabled. And it will be overridden by the architecture
that implements a more efficient batch operation in the following patches.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
include/linux/pgtable.h | 36 ++++++++++++++++++++++++++++++++++++
mm/internal.h | 23 ++++++++++++++++++-----
2 files changed, 54 insertions(+), 5 deletions(-)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 776993d4567b..0bcd3be524d3 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1103,6 +1103,42 @@ static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
}
#endif
+#ifndef test_and_clear_young_ptes
+/**
+ * test_and_clear_young_ptes - Mark PTEs that map consecutive pages of the same
+ * folio as old
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear access bit.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_test_and_clear_young().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline int test_and_clear_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr)
+{
+ int young = 0;
+
+ for (;;) {
+ young |= ptep_test_and_clear_young(vma, addr, ptep);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+
+ return young;
+}
+#endif
+
/*
* On some architectures hardware does not set page access bit when accessing
* memory page, it is responsibility of software setting this bit. It brings
diff --git a/mm/internal.h b/mm/internal.h
index 1ba175b8d4f1..1b59be99dc3f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1813,16 +1813,23 @@ static inline int pmdp_clear_flush_young_notify(struct vm_area_struct *vma,
return young;
}
-static inline int ptep_clear_young_notify(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline int clear_young_ptes_notify(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr)
{
int young;
- young = ptep_test_and_clear_young(vma, addr, ptep);
- young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
+ young = test_and_clear_young_ptes(vma, addr, ptep, nr);
+ young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + nr * PAGE_SIZE);
return young;
}
+static inline int ptep_clear_young_notify(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return clear_young_ptes_notify(vma, addr, ptep, 1);
+}
+
static inline int pmdp_clear_young_notify(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
@@ -1837,9 +1844,15 @@ static inline int pmdp_clear_young_notify(struct vm_area_struct *vma,
#define clear_flush_young_ptes_notify clear_flush_young_ptes
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
-#define ptep_clear_young_notify ptep_test_and_clear_young
+#define clear_young_ptes_notify test_and_clear_young_ptes
#define pmdp_clear_young_notify pmdp_test_and_clear_young
+static inline int ptep_clear_young_notify(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return test_and_clear_young_ptes(vma, addr, ptep, 1);
+}
+
#endif /* CONFIG_MMU_NOTIFIER */
#endif /* __MM_INTERNAL_H */
--
2.47.3
^ permalink raw reply [flat|nested] 11+ messages in thread* [PATCH 4/5] mm: support batched checking of the young flag for MGLRU
2026-02-24 1:56 [PATCH 0/5] support batched checking of the young flag for MGLRU Baolin Wang
` (2 preceding siblings ...)
2026-02-24 1:56 ` [PATCH 3/5] mm: add a batched helper to clear the young flag for large folios Baolin Wang
@ 2026-02-24 1:56 ` Baolin Wang
2026-02-24 1:56 ` [PATCH 5/5] arm64: mm: implement the architecture-specific test_and_clear_young_ptes() Baolin Wang
4 siblings, 0 replies; 11+ messages in thread
From: Baolin Wang @ 2026-02-24 1:56 UTC (permalink / raw)
To: akpm, david
Cc: catalin.marinas, will, lorenzo.stoakes, ryan.roberts,
Liam.Howlett, vbabka, rppt, surenb, mhocko, riel, harry.yoo,
jannh, willy, baohua, dev.jain, axelrasmussen, yuanchu, weixugc,
hannes, zhengqi.arch, shakeel.butt, baolin.wang, linux-mm,
linux-arm-kernel, linux-kernel
Use the batched helper clear_young_ptes_notify() to check and clear the
young flag to improve the performance during large folio reclamation when
MGLRU is enabled.
Meanwhile, we can also support batched checking the young and dirty flag
when MGLRU walks the mm's pagetable to update the folios' generation
counter. Since MGLRU also checks the PTE dirty bit, use folio_pte_batch_flags()
with FPB_MERGE_YOUNG_DIRTY set to detect batches of PTEs for a large folio.
Then we can remove the ptep_clear_young_notify() since it has no users now.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
include/linux/mmzone.h | 5 +++--
mm/internal.h | 12 ------------
mm/rmap.c | 30 ++++++++++++++++--------------
mm/vmscan.c | 37 +++++++++++++++++++++++++++++--------
4 files changed, 48 insertions(+), 36 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index db41b18a919d..de9fee4244d9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -630,7 +630,7 @@ struct lru_gen_memcg {
void lru_gen_init_pgdat(struct pglist_data *pgdat);
void lru_gen_init_lruvec(struct lruvec *lruvec);
-bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw, unsigned int batched);
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -649,7 +649,8 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
-static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw,
+ unsigned int batched)
{
return false;
}
diff --git a/mm/internal.h b/mm/internal.h
index 1b59be99dc3f..4e8d37570f46 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1824,12 +1824,6 @@ static inline int clear_young_ptes_notify(struct vm_area_struct *vma,
return young;
}
-static inline int ptep_clear_young_notify(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- return clear_young_ptes_notify(vma, addr, ptep, 1);
-}
-
static inline int pmdp_clear_young_notify(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
@@ -1847,12 +1841,6 @@ static inline int pmdp_clear_young_notify(struct vm_area_struct *vma,
#define clear_young_ptes_notify test_and_clear_young_ptes
#define pmdp_clear_young_notify pmdp_test_and_clear_young
-static inline int ptep_clear_young_notify(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- return test_and_clear_young_ptes(vma, addr, ptep, 1);
-}
-
#endif /* CONFIG_MMU_NOTIFIER */
#endif /* __MM_INTERNAL_H */
diff --git a/mm/rmap.c b/mm/rmap.c
index be785dfc9336..1c147251ae28 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -958,25 +958,21 @@ static bool folio_referenced_one(struct folio *folio,
return false;
}
+ if (pvmw.pte && folio_test_large(folio)) {
+ unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
+ unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
+ pte_t pteval = ptep_get(pvmw.pte);
+
+ nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
+ ptes += nr;
+ }
+
if (lru_gen_enabled() && pvmw.pte) {
- if (lru_gen_look_around(&pvmw))
+ if (lru_gen_look_around(&pvmw, nr))
referenced++;
} else if (pvmw.pte) {
- if (folio_test_large(folio)) {
- unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
- unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
- pte_t pteval = ptep_get(pvmw.pte);
-
- nr = folio_pte_batch(folio, pvmw.pte,
- pteval, max_nr);
- }
-
- ptes += nr;
if (clear_flush_young_ptes_notify(vma, address, pvmw.pte, nr))
referenced++;
- /* Skip the batched PTEs */
- pvmw.pte += nr - 1;
- pvmw.address += (nr - 1) * PAGE_SIZE;
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
if (pmdp_clear_flush_young_notify(vma, address,
pvmw.pmd))
@@ -995,6 +991,12 @@ static bool folio_referenced_one(struct folio *folio,
page_vma_mapped_walk_done(&pvmw);
break;
}
+
+ /* Skip the batched PTEs */
+ if (nr > 1) {
+ pvmw.pte += nr - 1;
+ pvmw.address += (nr - 1) * PAGE_SIZE;
+ }
}
if (referenced)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 728868c61750..d83962468b2e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3494,6 +3494,7 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
DEFINE_MAX_SEQ(walk->lruvec);
int gen = lru_gen_from_seq(max_seq);
+ unsigned int nr;
pmd_t pmdval;
pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl);
@@ -3512,11 +3513,13 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
lazy_mmu_mode_enable();
restart:
- for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
+ for (i = pte_index(start), addr = start; addr != end; i += nr, addr += nr * PAGE_SIZE) {
unsigned long pfn;
struct folio *folio;
- pte_t ptent = ptep_get(pte + i);
+ pte_t *ptep = pte + i;
+ pte_t ptent = ptep_get(ptep);
+ nr = 1;
total++;
walk->mm_stats[MM_LEAF_TOTAL]++;
@@ -3528,7 +3531,14 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
if (!folio)
continue;
- if (!ptep_clear_young_notify(args->vma, addr, pte + i))
+ if (folio_test_large(folio)) {
+ unsigned int max_nr = (end - addr) >> PAGE_SHIFT;
+
+ nr = folio_pte_batch_flags(folio, NULL, ptep, &ptent,
+ max_nr, FPB_MERGE_YOUNG_DIRTY);
+ }
+
+ if (!clear_young_ptes_notify(args->vma, addr, ptep, nr))
continue;
if (last != folio) {
@@ -4186,7 +4196,7 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
* the PTE table to the Bloom filter. This forms a feedback loop between the
* eviction and the aging.
*/
-bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw, unsigned int batched)
{
int i;
bool dirty;
@@ -4205,11 +4215,13 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
DEFINE_MAX_SEQ(lruvec);
int gen = lru_gen_from_seq(max_seq);
+ unsigned int nr;
+ pte_t *ptep;
lockdep_assert_held(pvmw->ptl);
VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
- if (!ptep_clear_young_notify(vma, addr, pte))
+ if (!clear_young_ptes_notify(vma, addr, pte, batched))
return false;
if (spin_is_contended(pvmw->ptl))
@@ -4243,10 +4255,12 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
pte -= (addr - start) / PAGE_SIZE;
- for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
+ for (i = 0, addr = start, ptep = pte; addr != end;
+ i += nr, ptep += nr, addr += nr * PAGE_SIZE) {
unsigned long pfn;
- pte_t ptent = ptep_get(pte + i);
+ pte_t ptent = ptep_get(ptep);
+ nr = 1;
pfn = get_pte_pfn(ptent, vma, addr, pgdat);
if (pfn == -1)
continue;
@@ -4255,7 +4269,14 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
if (!folio)
continue;
- if (!ptep_clear_young_notify(vma, addr, pte + i))
+ if (folio_test_large(folio)) {
+ unsigned int max_nr = (end - addr) >> PAGE_SHIFT;
+
+ nr = folio_pte_batch_flags(folio, NULL, ptep, &ptent,
+ max_nr, FPB_MERGE_YOUNG_DIRTY);
+ }
+
+ if (!clear_young_ptes_notify(vma, addr, ptep, nr))
continue;
if (last != folio) {
--
2.47.3
^ permalink raw reply [flat|nested] 11+ messages in thread* [PATCH 5/5] arm64: mm: implement the architecture-specific test_and_clear_young_ptes()
2026-02-24 1:56 [PATCH 0/5] support batched checking of the young flag for MGLRU Baolin Wang
` (3 preceding siblings ...)
2026-02-24 1:56 ` [PATCH 4/5] mm: support batched checking of the young flag for MGLRU Baolin Wang
@ 2026-02-24 1:56 ` Baolin Wang
4 siblings, 0 replies; 11+ messages in thread
From: Baolin Wang @ 2026-02-24 1:56 UTC (permalink / raw)
To: akpm, david
Cc: catalin.marinas, will, lorenzo.stoakes, ryan.roberts,
Liam.Howlett, vbabka, rppt, surenb, mhocko, riel, harry.yoo,
jannh, willy, baohua, dev.jain, axelrasmussen, yuanchu, weixugc,
hannes, zhengqi.arch, shakeel.butt, baolin.wang, linux-mm,
linux-arm-kernel, linux-kernel
Implement the Arm64 architecture-specific test_and_clear_young_ptes() to enable
batched checking of young flags, improving performance during large folio
reclamation when MGLRU is enabled.
While we're at it, simplify ptep_test_and_clear_young() by calling
test_and_clear_young_ptes(). Since callers guarantee that PTEs are present
before calling these functions, we can use pte_cont() to check the CONT_PTE
flag instead of pte_valid_cont().
Performance testing:
Enable MGLRU, then allocate 10G clean file-backed folios by mmap() in a memory
cgroup, and try to reclaim 8G file-backed folios via the memory.reclaim interface.
I can observe 60%+ performance improvement on my Arm64 32-core server (and about
15% improvement on my X86 machine).
W/o patchset:
real 0m0.470s
user 0m0.000s
sys 0m0.470s
W/ patchset:
real 0m0.180s
user 0m0.001s
sys 0m0.179s
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
arch/arm64/include/asm/pgtable.h | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aa4b13da6371..ab451d20e4c5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1812,16 +1812,22 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return __ptep_get_and_clear(mm, addr, ptep);
}
+#define test_and_clear_young_ptes test_and_clear_young_ptes
+static inline int test_and_clear_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr)
+{
+ if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
+ return __ptep_test_and_clear_young(vma, addr, ptep);
+
+ return contpte_test_and_clear_young_ptes(vma, addr, ptep, nr);
+}
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- pte_t orig_pte = __ptep_get(ptep);
-
- if (likely(!pte_valid_cont(orig_pte)))
- return __ptep_test_and_clear_young(vma, addr, ptep);
-
- return contpte_test_and_clear_young_ptes(vma, addr, ptep, 1);
+ return test_and_clear_young_ptes(vma, addr, ptep, 1);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
--
2.47.3
^ permalink raw reply [flat|nested] 11+ messages in thread