linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Alexander Gordeev <agordeev@linux.ibm.com>
To: Kevin Brodsky <kevin.brodsky@arm.com>,
	David Hildenbrand <david@redhat.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Gerald Schaefer <gerald.schaefer@linux.ibm.com>,
	Heiko Carstens <hca@linux.ibm.com>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	Vasily Gorbik <gor@linux.ibm.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: linux-s390@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v2 3/6] s390/mm: Complete ptep_get() conversion
Date: Wed, 15 Apr 2026 17:01:21 +0200	[thread overview]
Message-ID: <7ad766612a3095c8c8d9a253ef0f484ef98196a5.1776264097.git.agordeev@linux.ibm.com> (raw)
In-Reply-To: <cover.1776264097.git.agordeev@linux.ibm.com>

Complete the conversion started by commit c33c794828f2 ("mm:
ptep_get() conversion") by replacing the remaining direct page
table entry dereferences with the proper accessors (ptep_get(),
pmdp_get(), etc.).

Override the default getter implementations even though they are
currently identical: pud_clear(), p4d_clear(), and pgd_clear()
require the corresponding architecture-specific getters, which
would otherwise not yet be defined at that point in the header.
Defining the getters locally avoids a dependency loop.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 arch/s390/boot/vmem.c           | 32 +++++++------
 arch/s390/include/asm/hugetlb.h |  2 +-
 arch/s390/include/asm/pgtable.h | 60 ++++++++++++++++++------
 arch/s390/mm/hugetlbpage.c      | 12 ++---
 arch/s390/mm/pageattr.c         | 42 +++++++++--------
 arch/s390/mm/vmem.c             | 82 ++++++++++++++++++---------------
 6 files changed, 138 insertions(+), 92 deletions(-)

diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 7d6cc4c85af0..ff6d58a476ba 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -338,7 +338,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
 
 	pte = pte_offset_kernel(pmd, addr);
 	for (; addr < end; addr += PAGE_SIZE, pte++) {
-		if (pte_none(*pte)) {
+		if (pte_none(ptep_get(pte))) {
 			if (kasan_pte_populate_zero_shadow(pte, mode))
 				continue;
 			entry = __pte(resolve_pa_may_alloc(addr, PAGE_SIZE, mode));
@@ -355,26 +355,27 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
 				 enum populate_mode mode)
 {
 	unsigned long pa, next, pages = 0;
-	pmd_t *pmd, entry;
+	pmd_t *pmd, entry, large_entry;
 	pte_t *pte;
 
 	pmd = pmd_offset(pud, addr);
 	for (; addr < end; addr = next, pmd++) {
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd)) {
+		entry = pmdp_get(pmd);
+		if (pmd_none(entry)) {
 			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
 				continue;
 			pa = try_get_large_pmd_pa(pmd, addr, next, mode);
 			if (pa != INVALID_PHYS_ADDR) {
-				entry = __pmd(pa);
-				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
-				set_pmd(pmd, entry);
+				large_entry = __pmd(pa);
+				large_entry = set_pmd_bit(large_entry, SEGMENT_KERNEL);
+				set_pmd(pmd, large_entry);
 				pages++;
 				continue;
 			}
 			pte = boot_pte_alloc();
 			pmd_populate(&init_mm, pmd, pte);
-		} else if (pmd_leaf(*pmd)) {
+		} else if (pmd_leaf(entry)) {
 			continue;
 		}
 		pgtable_pte_populate(pmd, addr, next, mode);
@@ -387,26 +388,27 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
 				 enum populate_mode mode)
 {
 	unsigned long pa, next, pages = 0;
-	pud_t *pud, entry;
+	pud_t *pud, entry, large_entry;
 	pmd_t *pmd;
 
 	pud = pud_offset(p4d, addr);
 	for (; addr < end; addr = next, pud++) {
 		next = pud_addr_end(addr, end);
-		if (pud_none(*pud)) {
+		entry = pudp_get(pud);
+		if (pud_none(entry)) {
 			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
 				continue;
 			pa = try_get_large_pud_pa(pud, addr, next, mode);
 			if (pa != INVALID_PHYS_ADDR) {
-				entry = __pud(pa);
-				entry = set_pud_bit(entry, REGION3_KERNEL);
-				set_pud(pud, entry);
+				large_entry = __pud(pa);
+				large_entry = set_pud_bit(large_entry, REGION3_KERNEL);
+				set_pud(pud, large_entry);
 				pages++;
 				continue;
 			}
 			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
 			pud_populate(&init_mm, pud, pmd);
-		} else if (pud_leaf(*pud)) {
+		} else if (pud_leaf(entry)) {
 			continue;
 		}
 		pgtable_pmd_populate(pud, addr, next, mode);
@@ -425,7 +427,7 @@ static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long e
 	p4d = p4d_offset(pgd, addr);
 	for (; addr < end; addr = next, p4d++) {
 		next = p4d_addr_end(addr, end);
-		if (p4d_none(*p4d)) {
+		if (p4d_none(p4dp_get(p4d))) {
 			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
 				continue;
 			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
@@ -451,7 +453,7 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
 	pgd = pgd_offset(&init_mm, addr);
 	for (; addr < end; addr = next, pgd++) {
 		next = pgd_addr_end(addr, end);
-		if (pgd_none(*pgd)) {
+		if (pgd_none(pgdp_get(pgd))) {
 			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
 				continue;
 			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 6983e52eaf81..e33a5b587ee4 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -41,7 +41,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 				  pte_t *ptep, unsigned long sz)
 {
-	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+	if ((pte_val(ptep_get(ptep)) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
 		set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY));
 	else
 		set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY));
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 67f5df20a57e..42688ea4337f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -983,22 +983,39 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	WRITE_ONCE(*ptep, pte);
 }
 
-static inline void pgd_clear(pgd_t *pgd)
+#define ptep_get ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
 {
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
-		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
+	return READ_ONCE(*ptep);
 }
 
-static inline void p4d_clear(p4d_t *p4d)
+#define pmdp_get pmdp_get
+static inline pmd_t pmdp_get(pmd_t *pmdp)
 {
-	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
-		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
+	return READ_ONCE(*pmdp);
 }
 
-static inline void pud_clear(pud_t *pud)
+#define pudp_get pudp_get
+static inline pud_t pudp_get(pud_t *pudp)
 {
-	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
+	return READ_ONCE(*pudp);
+}
+
+#define p4dp_get p4dp_get
+static inline p4d_t p4dp_get(p4d_t *p4dp)
+{
+	return READ_ONCE(*p4dp);
+}
+
+#define pgdp_get pgdp_get
+static inline pgd_t pgdp_get(pgd_t *pgdp)
+{
+	return READ_ONCE(*pgdp);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	set_pte(ptep, __pte(_PAGE_INVALID));
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -1006,9 +1023,22 @@ static inline void pmd_clear(pmd_t *pmdp)
 	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void pud_clear(pud_t *pud)
 {
-	set_pte(ptep, __pte(_PAGE_INVALID));
+	if ((pud_val(pudp_get(pud)) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
+}
+
+static inline void p4d_clear(p4d_t *p4d)
+{
+	if ((p4d_val(p4dp_get(p4d)) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
+}
+
+static inline void pgd_clear(pgd_t *pgd)
+{
+	if ((pgd_val(pgdp_get(pgd)) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
+		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
 }
 
 /*
@@ -1169,7 +1199,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)
 {
-	pte_t pte = *ptep;
+	pte_t pte = ptep_get(ptep);
 
 	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
 	return pte_young(pte);
@@ -1230,7 +1260,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	pte_t res;
 
 	if (full) {
-		res = *ptep;
+		res = ptep_get(ptep);
 		set_pte(ptep, __pte(_PAGE_INVALID));
 	} else {
 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
@@ -1262,7 +1292,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 static inline void ptep_set_wrprotect(struct mm_struct *mm,
 				      unsigned long addr, pte_t *ptep)
 {
-	pte_t pte = *ptep;
+	pte_t pte = ptep_get(ptep);
 
 	if (pte_write(pte))
 		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
@@ -1298,7 +1328,7 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
 	 * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
 	 * A local RDP can be used to do the flush.
 	 */
-	if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT))
+	if (cpu_has_rdp() && !(pte_val(ptep_get(ptep)) & _PAGE_PROTECT))
 		__ptep_rdp(address, ptep, 1);
 }
 #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 302ef5781b65..db35d8fe8609 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -143,7 +143,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 	rste = __pte_to_rste(pte);
 
 	/* Set correct table type for 2G hugepages */
-	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
+	if ((pte_val(ptep_get(ptep)) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
 		if (likely(pte_present(pte)))
 			rste |= _REGION3_ENTRY_LARGE;
 		rste |= _REGION_ENTRY_TYPE_R3;
@@ -161,7 +161,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	return __rste_to_pte(pte_val(*ptep));
+	return __rste_to_pte(pte_val(ptep_get(ptep)));
 }
 
 pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
@@ -171,7 +171,7 @@ pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
 	pmd_t *pmdp = (pmd_t *) ptep;
 	pud_t *pudp = (pud_t *) ptep;
 
-	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+	if ((pte_val(ptep_get(ptep)) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
 		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
 	else
 		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
@@ -209,13 +209,13 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 	pmd_t *pmdp = NULL;
 
 	pgdp = pgd_offset(mm, addr);
-	if (pgd_present(*pgdp)) {
+	if (pgd_present(pgdp_get(pgdp))) {
 		p4dp = p4d_offset(pgdp, addr);
-		if (p4d_present(*p4dp)) {
+		if (p4d_present(p4dp_get(p4dp))) {
 			pudp = pud_offset(p4dp, addr);
 			if (sz == PUD_SIZE)
 				return (pte_t *)pudp;
-			if (pud_present(*pudp))
+			if (pud_present(pudp_get(pudp)))
 				pmdp = pmd_offset(pudp, addr);
 		}
 	}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index bb29c38ae624..3a54860cb05f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -85,7 +85,7 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		return 0;
 	ptep = pte_offset_kernel(pmdp, addr);
 	do {
-		new = *ptep;
+		new = ptep_get(ptep);
 		if (pte_none(new))
 			return -EINVAL;
 		if (flags & SET_MEMORY_RO)
@@ -114,15 +114,16 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
 {
 	unsigned long pte_addr, prot;
 	pte_t *pt_dir, *ptep;
-	pmd_t new;
+	pmd_t new, pmd;
 	int i, ro, nx;
 
 	pt_dir = vmem_pte_alloc();
 	if (!pt_dir)
 		return -ENOMEM;
-	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
-	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
-	nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
+	pmd = pmdp_get(pmdp);
+	pte_addr = pmd_pfn(pmd) << PAGE_SHIFT;
+	ro = !!(pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
+	nx = !!(pmd_val(pmd) & _SEGMENT_ENTRY_NOEXEC);
 	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
 	if (!nx)
 		prot &= ~_PAGE_NOEXEC;
@@ -142,7 +143,7 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
 static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
 			    unsigned long flags)
 {
-	pmd_t new = *pmdp;
+	pmd_t new = pmdp_get(pmdp);
 
 	if (flags & SET_MEMORY_RO)
 		new = pmd_wrprotect(new);
@@ -165,16 +166,17 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
 			  unsigned long flags)
 {
 	unsigned long next;
+	pmd_t *pmdp, pmd;
 	int need_split;
-	pmd_t *pmdp;
 	int rc = 0;
 
 	pmdp = pmd_offset(pudp, addr);
 	do {
-		if (pmd_none(*pmdp))
+		pmd = pmdp_get(pmdp);
+		if (pmd_none(pmd))
 			return -EINVAL;
 		next = pmd_addr_end(addr, end);
-		if (pmd_leaf(*pmdp)) {
+		if (pmd_leaf(pmd)) {
 			need_split  = !!(flags & SET_MEMORY_4K);
 			need_split |= !!(addr & ~PMD_MASK);
 			need_split |= !!(addr + PMD_SIZE > next);
@@ -201,15 +203,16 @@ int split_pud_page(pud_t *pudp, unsigned long addr)
 {
 	unsigned long pmd_addr, prot;
 	pmd_t *pm_dir, *pmdp;
-	pud_t new;
+	pud_t new, pud;
 	int i, ro, nx;
 
 	pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
 	if (!pm_dir)
 		return -ENOMEM;
-	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
-	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
-	nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
+	pud = pudp_get(pudp);
+	pmd_addr = pud_pfn(pud) << PAGE_SHIFT;
+	ro = !!(pud_val(pud) & _REGION_ENTRY_PROTECT);
+	nx = !!(pud_val(pud) & _REGION_ENTRY_NOEXEC);
 	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
 	if (!nx)
 		prot &= ~_SEGMENT_ENTRY_NOEXEC;
@@ -229,7 +232,7 @@ int split_pud_page(pud_t *pudp, unsigned long addr)
 static void modify_pud_page(pud_t *pudp, unsigned long addr,
 			    unsigned long flags)
 {
-	pud_t new = *pudp;
+	pud_t new = pudp_get(pudp);
 
 	if (flags & SET_MEMORY_RO)
 		new = pud_wrprotect(new);
@@ -252,16 +255,17 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
 			  unsigned long flags)
 {
 	unsigned long next;
+	pud_t *pudp, pud;
 	int need_split;
-	pud_t *pudp;
 	int rc = 0;
 
 	pudp = pud_offset(p4d, addr);
 	do {
-		if (pud_none(*pudp))
+		pud = pudp_get(pudp);
+		if (pud_none(pud))
 			return -EINVAL;
 		next = pud_addr_end(addr, end);
-		if (pud_leaf(*pudp)) {
+		if (pud_leaf(pud)) {
 			need_split  = !!(flags & SET_MEMORY_4K);
 			need_split |= !!(addr & ~PUD_MASK);
 			need_split |= !!(addr + PUD_SIZE > next);
@@ -291,7 +295,7 @@ static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
 
 	p4dp = p4d_offset(pgd, addr);
 	do {
-		if (p4d_none(*p4dp))
+		if (p4d_none(p4dp_get(p4dp)))
 			return -EINVAL;
 		next = p4d_addr_end(addr, end);
 		rc = walk_pud_level(p4dp, addr, next, flags);
@@ -313,7 +317,7 @@ static int change_page_attr(unsigned long addr, unsigned long end,
 
 	pgdp = pgd_offset_k(addr);
 	do {
-		if (pgd_none(*pgdp))
+		if (pgd_none(pgdp_get(pgdp)))
 			break;
 		next = pgd_addr_end(addr, end);
 		rc = walk_p4d_level(pgdp, addr, next, flags);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index eeadff45e0e1..803099f3db73 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -171,18 +171,19 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 {
 	unsigned long prot, pages = 0;
 	int ret = -ENOMEM;
-	pte_t *pte;
+	pte_t *pte, entry;
 
 	prot = pgprot_val(PAGE_KERNEL);
 	pte = pte_offset_kernel(pmd, addr);
 	for (; addr < end; addr += PAGE_SIZE, pte++) {
+		entry = ptep_get(pte);
 		if (!add) {
-			if (pte_none(*pte))
+			if (pte_none(entry))
 				continue;
 			if (!direct)
-				vmem_free_pages((unsigned long)pfn_to_virt(pte_pfn(*pte)), get_order(PAGE_SIZE), altmap);
+				vmem_free_pages((unsigned long)pfn_to_virt(pte_pfn(entry)), get_order(PAGE_SIZE), altmap);
 			pte_clear(&init_mm, addr, pte);
-		} else if (pte_none(*pte)) {
+		} else if (pte_none(entry)) {
 			if (!direct) {
 				void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);
 
@@ -212,10 +213,10 @@ static void try_free_pte_table(pmd_t *pmd, unsigned long start)
 	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
 	pte = pte_offset_kernel(pmd, start);
 	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
-		if (!pte_none(*pte))
+		if (!pte_none(ptep_get(pte)))
 			return;
 	}
-	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
+	vmem_pte_free((unsigned long *)pmd_deref(pmdp_get(pmd)));
 	pmd_clear(pmd);
 }
 
@@ -226,6 +227,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 {
 	unsigned long next, prot, pages = 0;
 	int ret = -ENOMEM;
+	pmd_t entry;
 	pmd_t *pmd;
 	pte_t *pte;
 
@@ -233,23 +235,24 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 	pmd = pmd_offset(pud, addr);
 	for (; addr < end; addr = next, pmd++) {
 		next = pmd_addr_end(addr, end);
+		entry = pmdp_get(pmd);
 		if (!add) {
-			if (pmd_none(*pmd))
+			if (pmd_none(entry))
 				continue;
-			if (pmd_leaf(*pmd)) {
+			if (pmd_leaf(entry)) {
 				if (IS_ALIGNED(addr, PMD_SIZE) &&
 				    IS_ALIGNED(next, PMD_SIZE)) {
 					if (!direct)
-						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
+						vmem_free_pages(pmd_deref(entry), get_order(PMD_SIZE), altmap);
 					pmd_clear(pmd);
 					pages++;
 				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
-					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
+					vmem_free_pages(pmd_deref(entry), get_order(PMD_SIZE), altmap);
 					pmd_clear(pmd);
 				}
 				continue;
 			}
-		} else if (pmd_none(*pmd)) {
+		} else if (pmd_none(entry)) {
 			if (IS_ALIGNED(addr, PMD_SIZE) &&
 			    IS_ALIGNED(next, PMD_SIZE) &&
 			    cpu_has_edat1() && direct &&
@@ -281,7 +284,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 			if (!pte)
 				goto out;
 			pmd_populate(&init_mm, pmd, pte);
-		} else if (pmd_leaf(*pmd)) {
+		} else if (pmd_leaf(entry)) {
 			if (!direct)
 				vmemmap_use_sub_pmd(addr, next);
 			continue;
@@ -306,9 +309,9 @@ static void try_free_pmd_table(pud_t *pud, unsigned long start)
 
 	pmd = pmd_offset(pud, start);
 	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
-		if (!pmd_none(*pmd))
+		if (!pmd_none(pmdp_get(pmd)))
 			return;
-	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER, NULL);
+	vmem_free_pages(pud_deref(pudp_get(pud)), CRST_ALLOC_ORDER, NULL);
 	pud_clear(pud);
 }
 
@@ -317,21 +320,22 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 {
 	unsigned long next, prot, pages = 0;
 	int ret = -ENOMEM;
-	pud_t *pud;
+	pud_t *pud, entry;
 	pmd_t *pmd;
 
 	prot = pgprot_val(REGION3_KERNEL);
 	pud = pud_offset(p4d, addr);
 	for (; addr < end; addr = next, pud++) {
 		next = pud_addr_end(addr, end);
+		entry = pudp_get(pud);
 		if (!add) {
-			if (pud_none(*pud))
+			if (pud_none(entry))
 				continue;
-			if (pud_leaf(*pud)) {
+			if (pud_leaf(entry)) {
 				if (IS_ALIGNED(addr, PUD_SIZE) &&
 				    IS_ALIGNED(next, PUD_SIZE)) {
 					if (!direct)
-						vmem_free_pages(pud_deref(*pud), get_order(PUD_SIZE), altmap);
+						vmem_free_pages(pud_deref(entry), get_order(PUD_SIZE), altmap);
 					pud_clear(pud);
 					pages++;
 					continue;
@@ -339,7 +343,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 					split_pud_page(pud, addr & PUD_MASK);
 				}
 			}
-		} else if (pud_none(*pud)) {
+		} else if (pud_none(entry)) {
 			if (IS_ALIGNED(addr, PUD_SIZE) &&
 			    IS_ALIGNED(next, PUD_SIZE) &&
 			    cpu_has_edat2() && direct &&
@@ -352,7 +356,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 			if (!pmd)
 				goto out;
 			pud_populate(&init_mm, pud, pmd);
-		} else if (pud_leaf(*pud)) {
+		} else if (pud_leaf(entry)) {
 			continue;
 		}
 		ret = modify_pmd_table(pud, addr, next, add, direct, altmap);
@@ -375,10 +379,10 @@ static void try_free_pud_table(p4d_t *p4d, unsigned long start)
 
 	pud = pud_offset(p4d, start);
 	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
-		if (!pud_none(*pud))
+		if (!pud_none(pudp_get(pud)))
 			return;
 	}
-	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER, NULL);
+	vmem_free_pages(p4d_deref(p4dp_get(p4d)), CRST_ALLOC_ORDER, NULL);
 	p4d_clear(p4d);
 }
 
@@ -387,16 +391,17 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
 {
 	unsigned long next;
 	int ret = -ENOMEM;
-	p4d_t *p4d;
+	p4d_t *p4d, entry;
 	pud_t *pud;
 
 	p4d = p4d_offset(pgd, addr);
 	for (; addr < end; addr = next, p4d++) {
 		next = p4d_addr_end(addr, end);
+		entry = p4dp_get(p4d);
 		if (!add) {
-			if (p4d_none(*p4d))
+			if (p4d_none(entry))
 				continue;
-		} else if (p4d_none(*p4d)) {
+		} else if (p4d_none(entry)) {
 			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
 			if (!pud)
 				goto out;
@@ -420,10 +425,10 @@ static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
 
 	p4d = p4d_offset(pgd, start);
 	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
-		if (!p4d_none(*p4d))
+		if (!p4d_none(p4dp_get(p4d)))
 			return;
 	}
-	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER, NULL);
+	vmem_free_pages(pgd_deref(pgdp_get(pgd)), CRST_ALLOC_ORDER, NULL);
 	pgd_clear(pgd);
 }
 
@@ -432,7 +437,7 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 {
 	unsigned long addr, next;
 	int ret = -ENOMEM;
-	pgd_t *pgd;
+	pgd_t *pgd, entry;
 	p4d_t *p4d;
 
 	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
@@ -449,11 +454,12 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 	for (addr = start; addr < end; addr = next) {
 		next = pgd_addr_end(addr, end);
 		pgd = pgd_offset_k(addr);
+		entry = pgdp_get(pgd);
 
 		if (!add) {
-			if (pgd_none(*pgd))
+			if (pgd_none(entry))
 				continue;
-		} else if (pgd_none(*pgd)) {
+		} else if (pgd_none(entry)) {
 			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
 			if (!p4d)
 				goto out;
@@ -575,6 +581,8 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
 pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
 {
 	pte_t *ptep = NULL;
+	pud_t pud_entry;
+	pmd_t pmd_entry;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -582,7 +590,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
 	pte_t *pte;
 
 	pgd = pgd_offset_k(addr);
-	if (pgd_none(*pgd)) {
+	if (pgd_none(pgdp_get(pgd))) {
 		if (!alloc)
 			goto out;
 		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
@@ -591,7 +599,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
 		pgd_populate(&init_mm, pgd, p4d);
 	}
 	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d)) {
+	if (p4d_none(p4dp_get(p4d))) {
 		if (!alloc)
 			goto out;
 		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
@@ -600,25 +608,27 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
 		p4d_populate(&init_mm, p4d, pud);
 	}
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud)) {
+	pud_entry = pudp_get(pud);
+	if (pud_none(pud_entry)) {
 		if (!alloc)
 			goto out;
 		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
 		if (!pmd)
 			goto out;
 		pud_populate(&init_mm, pud, pmd);
-	} else if (WARN_ON_ONCE(pud_leaf(*pud))) {
+	} else if (WARN_ON_ONCE(pud_leaf(pud_entry))) {
 		goto out;
 	}
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd)) {
+	pmd_entry = pmdp_get(pmd);
+	if (pmd_none(pmd_entry)) {
 		if (!alloc)
 			goto out;
 		pte = vmem_pte_alloc();
 		if (!pte)
 			goto out;
 		pmd_populate(&init_mm, pmd, pte);
-	} else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
+	} else if (WARN_ON_ONCE(pmd_leaf(pmd_entry))) {
 		goto out;
 	}
 	ptep = pte_offset_kernel(pmd, addr);
-- 
2.51.0



  parent reply	other threads:[~2026-04-15 15:01 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-15 15:01 [PATCH v2 0/6] s390/mm: Batch PTE updates in lazy MMU mode Alexander Gordeev
2026-04-15 15:01 ` [PATCH v2 1/6] mm: Make lazy MMU mode context-aware Alexander Gordeev
2026-04-15 15:01 ` [PATCH v2 2/6] mm/pgtable: Fix bogus comment to clear_not_present_full_ptes() Alexander Gordeev
2026-04-15 15:01 ` Alexander Gordeev [this message]
2026-04-15 15:01 ` [PATCH v2 4/6] s390/mm: Make PTC and UV call order consistent Alexander Gordeev
2026-04-15 15:01 ` [PATCH v2 5/6] s390/mm: Batch PTE updates in lazy MMU mode Alexander Gordeev
2026-04-15 15:01 ` [PATCH v2 6/6] s390/mm: Allow lazy MMU mode disabling Alexander Gordeev

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=7ad766612a3095c8c8d9a253ef0f484ef98196a5.1776264097.git.agordeev@linux.ibm.com \
    --to=agordeev@linux.ibm.com \
    --cc=akpm@linux-foundation.org \
    --cc=borntraeger@linux.ibm.com \
    --cc=david@redhat.com \
    --cc=gerald.schaefer@linux.ibm.com \
    --cc=gor@linux.ibm.com \
    --cc=hca@linux.ibm.com \
    --cc=imbrenda@linux.ibm.com \
    --cc=kevin.brodsky@arm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-s390@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox