* [Patch v2] mm/khugepaged: use start_addr/addr for better reading
From: Wei Yang @ 2025-09-22 14:09 UTC
To: akpm, david, lorenzo.stoakes, ziy, baolin.wang, Liam.Howlett,
npache, ryan.roberts, dev.jain, baohua, lance.yang
Cc: linux-mm, Wei Yang
When collapsing a pmd, there are two addresses in use:

* the address pointing to the start of the pmd range
* the address pointing to each individual page

The current naming makes these two hard to distinguish and is error prone.

Considering the plan to collapse mTHP, rename the first one to start_addr
and the second one to addr for better readability and consistency.
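
To illustrate the convention, a minimal self-contained userspace sketch
(not khugepaged code; PAGE_SIZE and HPAGE_PMD_NR values are assumed as on
x86-64 with 4K base pages, and scan_range() is a made-up stand-in for the
scan/collapse loops): start_addr stays fixed at the pmd-aligned start of
the range, while addr walks each page within it.

#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed 4K base pages */
#define HPAGE_PMD_NR	512	/* assumed pages per pmd */

/* start_addr names the pmd-aligned start of the whole range. */
static void scan_range(unsigned long start_addr)
{
	unsigned long addr = start_addr;
	int i;

	/* addr advances page by page, like the per-pte loops below. */
	for (i = 0; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE)
		;	/* per-page work, e.g. vm_normal_page(vma, addr, pteval) */

	/* start_addr still refers to the range as a whole. */
	printf("scanned %d pages starting at 0x%lx\n", i, start_addr);
}

int main(void)
{
	scan_range(0x200000UL);	/* a pmd-aligned address */
	return 0;
}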
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Suggested-by: David Hildenbrand <david@redhat.com>
---
v2: use start_addr/addr instead
---
mm/khugepaged.c | 43 ++++++++++++++++++++++---------------------
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ec3f91a345a0..e3f7d1760567 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -537,18 +537,19 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
}
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
- unsigned long address,
+ unsigned long start_addr,
pte_t *pte,
struct collapse_control *cc,
struct list_head *compound_pagelist)
{
struct page *page = NULL;
struct folio *folio = NULL;
+ unsigned long addr = start_addr;
pte_t *_pte;
int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
- _pte++, address += PAGE_SIZE) {
+ _pte++, addr += PAGE_SIZE) {
pte_t pteval = ptep_get(_pte);
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
++none_or_zero;
@@ -570,7 +571,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
result = SCAN_PTE_UFFD_WP;
goto out;
}
- page = vm_normal_page(vma, address, pteval);
+ page = vm_normal_page(vma, addr, pteval);
if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
result = SCAN_PAGE_NULL;
goto out;
@@ -655,8 +656,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
*/
if (cc->is_khugepaged &&
(pte_young(pteval) || folio_test_young(folio) ||
- folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
- address)))
+ folio_test_referenced(folio) ||
+ mmu_notifier_test_young(vma->vm_mm, addr)))
referenced++;
}
@@ -985,21 +986,21 @@ static int check_pmd_still_valid(struct mm_struct *mm,
*/
static int __collapse_huge_page_swapin(struct mm_struct *mm,
struct vm_area_struct *vma,
- unsigned long haddr, pmd_t *pmd,
+ unsigned long start_addr, pmd_t *pmd,
int referenced)
{
int swapped_in = 0;
vm_fault_t ret = 0;
- unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+ unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
int result;
pte_t *pte = NULL;
spinlock_t *ptl;
- for (address = haddr; address < end; address += PAGE_SIZE) {
+ for (addr = start_addr; addr < end; addr += PAGE_SIZE) {
struct vm_fault vmf = {
.vma = vma,
- .address = address,
- .pgoff = linear_page_index(vma, address),
+ .address = addr,
+ .pgoff = linear_page_index(vma, addr),
.flags = FAULT_FLAG_ALLOW_RETRY,
.pmd = pmd,
};
@@ -1009,7 +1010,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
* Here the ptl is only used to check pte_same() in
* do_swap_page(), so readonly version is enough.
*/
- pte = pte_offset_map_ro_nolock(mm, pmd, address, &ptl);
+ pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
if (!pte) {
mmap_read_unlock(mm);
result = SCAN_PMD_NULL;
@@ -1252,7 +1253,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
struct vm_area_struct *vma,
- unsigned long address, bool *mmap_locked,
+ unsigned long start_addr, bool *mmap_locked,
struct collapse_control *cc)
{
pmd_t *pmd;
@@ -1261,26 +1262,26 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
int none_or_zero = 0, shared = 0;
struct page *page = NULL;
struct folio *folio = NULL;
- unsigned long _address;
+ unsigned long addr;
spinlock_t *ptl;
int node = NUMA_NO_NODE, unmapped = 0;
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
- result = find_pmd_or_thp_or_none(mm, address, &pmd);
+ result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
if (result != SCAN_SUCCEED)
goto out;
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
- pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+ pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
if (!pte) {
result = SCAN_PMD_NULL;
goto out;
}
- for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
- _pte++, _address += PAGE_SIZE) {
+ for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
+ _pte++, addr += PAGE_SIZE) {
pte_t pteval = ptep_get(_pte);
if (is_swap_pte(pteval)) {
++unmapped;
@@ -1328,7 +1329,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
goto out_unmap;
}
- page = vm_normal_page(vma, _address, pteval);
+ page = vm_normal_page(vma, addr, pteval);
if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
result = SCAN_PAGE_NULL;
goto out_unmap;
@@ -1397,7 +1398,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
if (cc->is_khugepaged &&
(pte_young(pteval) || folio_test_young(folio) ||
folio_test_referenced(folio) ||
- mmu_notifier_test_young(vma->vm_mm, _address)))
+ mmu_notifier_test_young(vma->vm_mm, addr)))
referenced++;
}
if (cc->is_khugepaged &&
@@ -1410,7 +1411,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
out_unmap:
pte_unmap_unlock(pte, ptl);
if (result == SCAN_SUCCEED) {
- result = collapse_huge_page(mm, address, referenced,
+ result = collapse_huge_page(mm, start_addr, referenced,
unmapped, cc);
/* collapse_huge_page will return with the mmap_lock released */
*mmap_locked = false;
--
2.34.1
* Re: [Patch v2] mm/khugepaged: use start_addr/addr for better reading
From: Dev Jain @ 2025-09-23 6:59 UTC
To: Wei Yang, akpm, david, lorenzo.stoakes, ziy, baolin.wang,
Liam.Howlett, npache, ryan.roberts, baohua, lance.yang
Cc: linux-mm
On 22/09/25 7:39 pm, Wei Yang wrote:
> When collapsing a pmd, there are two addresses in use:
>
> * the address pointing to the start of the pmd range
> * the address pointing to each individual page
>
> The current naming makes these two hard to distinguish and is error prone.
>
> Considering the plan to collapse mTHP, rename the first one to start_addr
> and the second one to addr for better readability and consistency.
>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> Suggested-by: David Hildenbrand <david@redhat.com>
>
> ---
Reviewed-by: Dev Jain <dev.jain@arm.com>
* Re: [Patch v2] mm/khugepaged: use start_addr/addr for better reading
From: Zi Yan @ 2025-09-23 16:11 UTC
To: Wei Yang
Cc: akpm, david, lorenzo.stoakes, baolin.wang, Liam.Howlett, npache,
ryan.roberts, dev.jain, baohua, lance.yang, linux-mm
On 22 Sep 2025, at 10:09, Wei Yang wrote:
> When collapsing a pmd, there are two addresses in use:
>
> * the address pointing to the start of the pmd range
> * the address pointing to each individual page
>
> The current naming makes these two hard to distinguish and is error prone.
>
> Considering the plan to collapse mTHP, rename the first one to start_addr
> and the second one to addr for better readability and consistency.
>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> Suggested-by: David Hildenbrand <david@redhat.com>
>
> ---
> v2: use start_addr/addr instead
> ---
> mm/khugepaged.c | 43 ++++++++++++++++++++++---------------------
> 1 file changed, 22 insertions(+), 21 deletions(-)
>
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
Best Regards,
Yan, Zi
* Re: [Patch v2] mm/khugepaged: use start_addr/addr for better reading
From: Nico Pache @ 2025-09-23 16:22 UTC
To: Wei Yang
Cc: akpm, david, lorenzo.stoakes, ziy, baolin.wang, Liam.Howlett,
ryan.roberts, dev.jain, baohua, lance.yang, linux-mm
On Mon, Sep 22, 2025 at 8:09 AM Wei Yang <richard.weiyang@gmail.com> wrote:
>
> When collapsing a pmd, there are two addresses in use:
>
> * the address pointing to the start of the pmd range
> * the address pointing to each individual page
>
> The current naming makes these two hard to distinguish and is error prone.
>
> Considering the plan to collapse mTHP, rename the first one to start_addr
> and the second one to addr for better readability and consistency.
>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> Suggested-by: David Hildenbrand <david@redhat.com>
>
LGTM!
Reviewed-by: Nico Pache <npache@redhat.com>
* Re: [Patch v2] mm/khugepaged: use start_addr/addr for better reading
From: David Hildenbrand @ 2025-09-23 16:36 UTC
To: Wei Yang, akpm, lorenzo.stoakes, ziy, baolin.wang, Liam.Howlett,
npache, ryan.roberts, dev.jain, baohua, lance.yang
Cc: linux-mm
On 22.09.25 16:09, Wei Yang wrote:
> When collapsing a pmd, there are two addresses in use:
>
> * the address pointing to the start of the pmd range
> * the address pointing to each individual page
>
> The current naming makes these two hard to distinguish and is error prone.
>
> Considering the plan to collapse mTHP, rename the first one to start_addr
> and the second one to addr for better readability and consistency.
>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> Suggested-by: David Hildenbrand <david@redhat.com>
>
> ---
Acked-by: David Hildenbrand <david@redhat.com>
--
Cheers
David / dhildenb