linux-mm.kvack.org archive mirror
* [PATCH] mm: use standard page table accessors
@ 2025-11-26  6:47 Wei Yang
  2025-11-26  8:15 ` Christophe Leroy (CS GROUP)
  2025-11-26  9:09 ` David Hildenbrand (Red Hat)
  0 siblings, 2 replies; 9+ messages in thread
From: Wei Yang @ 2025-11-26  6:47 UTC (permalink / raw)
  To: akpm, david, lorenzo.stoakes, Liam.Howlett, vbabka, rppt, surenb,
	mhocko, ziy, baolin.wang, npache, ryan.roberts, dev.jain, baohua,
	lance.yang
  Cc: linux-mm, Wei Yang

Use the standard page table accessors, i.e. pxdp_get(), to read the
values of page table entries instead of dereferencing the pointers
directly.
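
On architectures that do not provide their own implementation, these
accessors fall back to the generic versions in include/linux/pgtable.h,
which reduce to a READ_ONCE() of the entry. A minimal sketch of the
generic fallbacks (architectures may override them):

	/* generic fallbacks, sketched from include/linux/pgtable.h */
	#ifndef pmdp_get
	static inline pmd_t pmdp_get(pmd_t *pmdp)
	{
		return READ_ONCE(*pmdp);
	}
	#endif

	#ifndef pudp_get
	static inline pud_t pudp_get(pud_t *pudp)
	{
		return READ_ONCE(*pudp);
	}
	#endif

Going through the accessors guarantees a single, tear-free load of an
entry that may change under us, and lets architectures with
non-trivial entry formats hook in their own readers.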

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
---
 include/linux/pgtable.h | 2 +-
 mm/huge_memory.c        | 2 +-
 mm/memory.c             | 8 ++++----
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index b13b6f42be3c..a9efd58658bc 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1810,7 +1810,7 @@ static inline int pud_trans_unstable(pud_t *pud)
 {
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-	pud_t pudval = READ_ONCE(*pud);
+	pud_t pudval = pudp_get(pud);
 
 	if (pud_none(pudval) || pud_trans_huge(pudval))
 		return 1;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0d2ac331ccad..dd3577e40d16 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1486,7 +1486,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		}
 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 		ret = 0;
-		if (pmd_none(*vmf->pmd)) {
+		if (pmd_none(pmdp_get(vmf->pmd))) {
 			ret = check_stable_address_space(vma->vm_mm);
 			if (ret) {
 				spin_unlock(vmf->ptl);
diff --git a/mm/memory.c b/mm/memory.c
index 8933069948e5..39839bf0c3f5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6193,7 +6193,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 {
 	pte_t entry;
 
-	if (unlikely(pmd_none(*vmf->pmd))) {
+	if (unlikely(pmd_none(pmdp_get(vmf->pmd)))) {
 		/*
 		 * Leave __pte_alloc() until later: because vm_ops->fault may
 		 * want to allocate huge page, and if we expose page table
@@ -6309,13 +6309,13 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (!vmf.pud)
 		return VM_FAULT_OOM;
 retry_pud:
-	if (pud_none(*vmf.pud) &&
+	if (pud_none(pudp_get(vmf.pud)) &&
 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PUD_ORDER)) {
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
-		pud_t orig_pud = *vmf.pud;
+		pud_t orig_pud = pudp_get(vmf.pud);
 
 		barrier();
 		if (pud_trans_huge(orig_pud)) {
@@ -6343,7 +6343,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (pud_trans_unstable(vmf.pud))
 		goto retry_pud;
 
-	if (pmd_none(*vmf.pmd) &&
+	if (pmd_none(pmdp_get(vmf.pmd)) &&
 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
 		ret = create_huge_pmd(&vmf);
 		if (ret & VM_FAULT_FALLBACK)
-- 
2.34.1


Thread overview: 9+ messages
2025-11-26  6:47 [PATCH] mm: use standard page table accessors Wei Yang
2025-11-26  8:15 ` Christophe Leroy (CS GROUP)
2025-11-26 10:03   ` Wei Yang
2025-11-26 10:28     ` Christophe Leroy (CS GROUP)
2025-11-26 12:15       ` Wei Yang
2025-11-26  9:09 ` David Hildenbrand (Red Hat)
2025-11-26  9:42   ` Wei Yang
2025-11-26 10:19   ` Ryan Roberts
2025-11-26 11:32     ` Ryan Roberts
