From: peng li <peng8420.li@gmail.com>
To: david@redhat.com
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
	"Liam R . Howlett" <Liam.Howlett@oracle.com>,
	Vlastimil Babka <vbabka@suse.cz>, Mike Rapoport <rppt@kernel.org>,
	Suren Baghdasaryan <surenb@google.com>,
	Michal Hocko <mhocko@suse.com>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	peng li <peng8420.li@gmail.com>
Subject: [PATCH] mm/pagewalk: Clean up folio_walk_start() by removing expose_page
Date: Thu, 12 Jun 2025 20:09:03 +0800
Message-ID: <20250612120903.2678910-1-peng8420.li@gmail.com>

The name expose_page is hard to understand. Judging from the code, it seems to
mean "this is a normal mapped page, so fw->page should point at the page at the
offset within the mapping". Renaming it to something like "normal_mapped_page"
would describe that behavior better, but the flag may not be needed at all:
fw->page can be filled in directly at each place where the page type is
determined, without a separate flag. A simplified user-space sketch of the
before/after pattern follows the list of changes below.

Key changes:
1. Remove expose_page and its conditional logic
2. Always set fw->page when a valid page is found
3. Add clarifying comments about offset calculation
4. Initialize fw->page to NULL at PMD/PTE levels
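
To illustrate the intent, here is a hypothetical user-space sketch (not the
kernel code; struct walk_result, lookup_with_flag() and lookup_direct() are
made up for this example). The "before" variant carries a flag to a common
exit label; the "after" variant fills the output field at the point where the
page kind is known, which is what the hunks below do with fw->page.

	/* Hypothetical user-space sketch of the pattern, not kernel code. */
	#include <stdio.h>

	struct walk_result {
		const char *page;		/* stands in for fw->page */
	};

	/* Before: a flag decides at the exit label whether to expose the page. */
	static const char *lookup_with_flag(struct walk_result *out, int kind)
	{
		int expose_page = 1;
		const char *page = NULL;

		out->page = NULL;
		if (kind == 0) {		/* "normal" page */
			page = "normal page";
			goto found;
		} else if (kind == 1) {		/* e.g. zero page */
			page = "zero page";
			expose_page = 0;	/* caller must not see it */
			goto found;
		}
		return NULL;
	found:
		out->page = expose_page ? page : NULL;
		return page;
	}

	/* After: each branch sets out->page itself; the flag disappears. */
	static const char *lookup_direct(struct walk_result *out, int kind)
	{
		const char *page = NULL;

		out->page = NULL;		/* like fw->page = NULL in the patch */
		if (kind == 0) {
			page = "normal page";
			out->page = page;	/* only the normal case is exposed */
			goto found;
		} else if (kind == 1) {
			page = "zero page";
			goto found;		/* out->page stays NULL */
		}
		return NULL;
	found:
		return page;
	}

	int main(void)
	{
		struct walk_result a, b;

		lookup_with_flag(&a, 1);
		lookup_direct(&b, 1);
		printf("with flag: %s, direct: %s\n",
		       a.page ? a.page : "(null)", b.page ? b.page : "(null)");
		return 0;
	}

Both variants behave the same; the second just avoids threading a boolean
through every goto path, which is what removing expose_page achieves in
folio_walk_start().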

Signed-off-by: peng li <peng8420.li@gmail.com>
---
 mm/pagewalk.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index e478777c86e1..15150c27b9cf 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -831,7 +831,6 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		folio_walk_flags_t flags)
 {
 	unsigned long entry_size;
-	bool expose_page = true;
 	struct page *page;
 	pud_t *pudp, pud;
 	pmd_t *pmdp, pmd;
@@ -884,6 +883,9 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		 * support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
 		 */
 		page = pud_page(pud);
+
+		/* Note: Offset from the mapped page, not the folio start. */
+		fw->page = nth_page(page, (addr & (entry_size - 1)) >> PAGE_SHIFT);
 		goto found;
 	}
 
@@ -902,6 +904,7 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		fw->level = FW_LEVEL_PMD;
 		fw->pmdp = pmdp;
 		fw->pmd = pmd;
+		fw->page = NULL;
 
 		if (pmd_none(pmd)) {
 			spin_unlock(ptl);
@@ -912,11 +915,12 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		} else if (pmd_present(pmd)) {
 			page = vm_normal_page_pmd(vma, addr, pmd);
 			if (page) {
+				/* Note: Offset from the mapped page, not the folio start. */
+				fw->page = nth_page(page, (addr & (entry_size - 1)) >> PAGE_SHIFT);
 				goto found;
 			} else if ((flags & FW_ZEROPAGE) &&
 				    is_huge_zero_pmd(pmd)) {
 				page = pfn_to_page(pmd_pfn(pmd));
-				expose_page = false;
 				goto found;
 			}
 		} else if ((flags & FW_MIGRATION) &&
@@ -924,7 +928,6 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 			swp_entry_t entry = pmd_to_swp_entry(pmd);
 
 			page = pfn_swap_entry_to_page(entry);
-			expose_page = false;
 			goto found;
 		}
 		spin_unlock(ptl);
@@ -942,15 +945,18 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 	fw->level = FW_LEVEL_PTE;
 	fw->ptep = ptep;
 	fw->pte = pte;
+	fw->page = NULL;
 
 	if (pte_present(pte)) {
 		page = vm_normal_page(vma, addr, pte);
-		if (page)
+		if (page) {
+			/* Note: Offset from the mapped page, not the folio start. */
+			fw->page = nth_page(page, (addr & (entry_size - 1)) >> PAGE_SHIFT);
 			goto found;
+		}
 		if ((flags & FW_ZEROPAGE) &&
 		    is_zero_pfn(pte_pfn(pte))) {
 			page = pfn_to_page(pte_pfn(pte));
-			expose_page = false;
 			goto found;
 		}
 	} else if (!pte_none(pte)) {
@@ -959,7 +965,6 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		if ((flags & FW_MIGRATION) &&
 		    is_migration_entry(entry)) {
 			page = pfn_swap_entry_to_page(entry);
-			expose_page = false;
 			goto found;
 		}
 	}
@@ -968,11 +973,6 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 	vma_pgtable_walk_end(vma);
 	return NULL;
 found:
-	if (expose_page)
-		/* Note: Offset from the mapped page, not the folio start. */
-		fw->page = nth_page(page, (addr & (entry_size - 1)) >> PAGE_SHIFT);
-	else
-		fw->page = NULL;
 	fw->ptl = ptl;
 	return page_folio(page);
 }

base-commit: 19272b37aa4f83ca52bdf9c16d5d81bdd1354494
-- 
2.25.1


