From: Nico Pache <npache@redhat.com>
To: Wei Yang <richard.weiyang@gmail.com>
Cc: akpm@linux-foundation.org, david@redhat.com,
lorenzo.stoakes@oracle.com, ziy@nvidia.com,
baolin.wang@linux.alibaba.com, Liam.Howlett@oracle.com,
ryan.roberts@arm.com, dev.jain@arm.com, baohua@kernel.org,
lance.yang@linux.dev, linux-mm@kvack.org
Subject: Re: [Patch v2] mm/khugepaged: use start_addr/addr for better reading
Date: Tue, 23 Sep 2025 10:22:57 -0600 [thread overview]
Message-ID: <CAA1CXcCjDZ9Q6d9X4-dXsUfLPHMNYehZUTARzMNmZJvaSP_t3A@mail.gmail.com> (raw)
In-Reply-To: <20250922140938.27343-1-richard.weiyang@gmail.com>
On Mon, Sep 22, 2025 at 8:09 AM Wei Yang <richard.weiyang@gmail.com> wrote:
>
> When collapsing a pmd, there are two addresses in use:
>
> * address points to the start of pmd
> * address points to each individual page
>
> The current naming makes it hard to distinguish these two and is error
> prone.
>
> Considering the plan to collapse mTHP, rename the first one to start_addr
> and the second one to addr for better readability and consistency.
>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> Suggested-by: David Hildenbrand <david@redhat.com>
>
LGTM!
Reviewed-by: Nico Pache <npache@redhat.com>
> ---
> v2: use start_addr/addr instead
> ---
> mm/khugepaged.c | 43 ++++++++++++++++++++++---------------------
> 1 file changed, 22 insertions(+), 21 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index ec3f91a345a0..e3f7d1760567 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -537,18 +537,19 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
> }
>
> static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> - unsigned long address,
> + unsigned long start_addr,
> pte_t *pte,
> struct collapse_control *cc,
> struct list_head *compound_pagelist)
> {
> struct page *page = NULL;
> struct folio *folio = NULL;
> + unsigned long addr = start_addr;
> pte_t *_pte;
> int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
>
> for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
> - _pte++, address += PAGE_SIZE) {
> + _pte++, addr += PAGE_SIZE) {
> pte_t pteval = ptep_get(_pte);
> if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
> ++none_or_zero;
> @@ -570,7 +571,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> result = SCAN_PTE_UFFD_WP;
> goto out;
> }
> - page = vm_normal_page(vma, address, pteval);
> + page = vm_normal_page(vma, addr, pteval);
> if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
> result = SCAN_PAGE_NULL;
> goto out;
> @@ -655,8 +656,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> */
> if (cc->is_khugepaged &&
> (pte_young(pteval) || folio_test_young(folio) ||
> - folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
> - address)))
> + folio_test_referenced(folio) ||
> + mmu_notifier_test_young(vma->vm_mm, addr)))
> referenced++;
> }
>
> @@ -985,21 +986,21 @@ static int check_pmd_still_valid(struct mm_struct *mm,
> */
> static int __collapse_huge_page_swapin(struct mm_struct *mm,
> struct vm_area_struct *vma,
> - unsigned long haddr, pmd_t *pmd,
> + unsigned long start_addr, pmd_t *pmd,
> int referenced)
> {
> int swapped_in = 0;
> vm_fault_t ret = 0;
> - unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
> + unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
> int result;
> pte_t *pte = NULL;
> spinlock_t *ptl;
>
> - for (address = haddr; address < end; address += PAGE_SIZE) {
> + for (addr = start_addr; addr < end; addr += PAGE_SIZE) {
> struct vm_fault vmf = {
> .vma = vma,
> - .address = address,
> - .pgoff = linear_page_index(vma, address),
> + .address = addr,
> + .pgoff = linear_page_index(vma, addr),
> .flags = FAULT_FLAG_ALLOW_RETRY,
> .pmd = pmd,
> };
> @@ -1009,7 +1010,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
> * Here the ptl is only used to check pte_same() in
> * do_swap_page(), so readonly version is enough.
> */
> - pte = pte_offset_map_ro_nolock(mm, pmd, address, &ptl);
> + pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
> if (!pte) {
> mmap_read_unlock(mm);
> result = SCAN_PMD_NULL;
> @@ -1252,7 +1253,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>
> static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> struct vm_area_struct *vma,
> - unsigned long address, bool *mmap_locked,
> + unsigned long start_addr, bool *mmap_locked,
> struct collapse_control *cc)
> {
> pmd_t *pmd;
> @@ -1261,26 +1262,26 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> int none_or_zero = 0, shared = 0;
> struct page *page = NULL;
> struct folio *folio = NULL;
> - unsigned long _address;
> + unsigned long addr;
> spinlock_t *ptl;
> int node = NUMA_NO_NODE, unmapped = 0;
>
> - VM_BUG_ON(address & ~HPAGE_PMD_MASK);
> + VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
>
> - result = find_pmd_or_thp_or_none(mm, address, &pmd);
> + result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
> if (result != SCAN_SUCCEED)
> goto out;
>
> memset(cc->node_load, 0, sizeof(cc->node_load));
> nodes_clear(cc->alloc_nmask);
> - pte = pte_offset_map_lock(mm, pmd, address, &ptl);
> + pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
> if (!pte) {
> result = SCAN_PMD_NULL;
> goto out;
> }
>
> - for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
> - _pte++, _address += PAGE_SIZE) {
> + for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
> + _pte++, addr += PAGE_SIZE) {
> pte_t pteval = ptep_get(_pte);
> if (is_swap_pte(pteval)) {
> ++unmapped;
> @@ -1328,7 +1329,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> goto out_unmap;
> }
>
> - page = vm_normal_page(vma, _address, pteval);
> + page = vm_normal_page(vma, addr, pteval);
> if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
> result = SCAN_PAGE_NULL;
> goto out_unmap;
> @@ -1397,7 +1398,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> if (cc->is_khugepaged &&
> (pte_young(pteval) || folio_test_young(folio) ||
> folio_test_referenced(folio) ||
> - mmu_notifier_test_young(vma->vm_mm, _address)))
> + mmu_notifier_test_young(vma->vm_mm, addr)))
> referenced++;
> }
> if (cc->is_khugepaged &&
> @@ -1410,7 +1411,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> out_unmap:
> pte_unmap_unlock(pte, ptl);
> if (result == SCAN_SUCCEED) {
> - result = collapse_huge_page(mm, address, referenced,
> + result = collapse_huge_page(mm, start_addr, referenced,
> unmapped, cc);
> /* collapse_huge_page will return with the mmap_lock released */
> *mmap_locked = false;
> --
> 2.34.1
>
next prev parent reply other threads:[~2025-09-23 16:23 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-22 14:09 Wei Yang
2025-09-23 6:59 ` Dev Jain
2025-09-23 16:11 ` Zi Yan
2025-09-23 16:22 ` Nico Pache [this message]
2025-09-23 16:36 ` David Hildenbrand
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=CAA1CXcCjDZ9Q6d9X4-dXsUfLPHMNYehZUTARzMNmZJvaSP_t3A@mail.gmail.com \
--to=npache@redhat.com \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=baohua@kernel.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=david@redhat.com \
--cc=dev.jain@arm.com \
--cc=lance.yang@linux.dev \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=richard.weiyang@gmail.com \
--cc=ryan.roberts@arm.com \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox