From: Anshuman Khandual <anshuman.khandual@arm.com>
To: linux-arm-kernel@lists.infradead.org
Cc: Anshuman Khandual <anshuman.khandual@arm.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>,
Ryan Roberts <ryan.roberts@arm.com>,
Mark Rutland <mark.rutland@arm.com>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Mike Rapoport <rppt@kernel.org>,
Linu Cherian <linu.cherian@arm.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
kasan-dev@googlegroups.com
Subject: [RFC V1 05/16] arm64/mm: Convert READ_ONCE() as pmdp_get() while accessing PMD
Date: Tue, 24 Feb 2026 10:41:42 +0530 [thread overview]
Message-ID: <20260224051153.3150613-6-anshuman.khandual@arm.com> (raw)
In-Reply-To: <20260224051153.3150613-1-anshuman.khandual@arm.com>
Convert all READ_ONCE() based PMD accesses to pmdp_get() instead, which will
support both the D64 and D128 translation regimes going forward.
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Cc: kasan-dev@googlegroups.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
arch/arm64/include/asm/pgtable.h | 12 +++--------
arch/arm64/mm/fault.c | 2 +-
arch/arm64/mm/fixmap.c | 2 +-
arch/arm64/mm/hugetlbpage.c | 2 +-
arch/arm64/mm/kasan_init.c | 4 ++--
arch/arm64/mm/mmu.c | 35 ++++++++++++++++++++++----------
arch/arm64/mm/pageattr.c | 2 +-
arch/arm64/mm/trans_pgd.c | 2 +-
8 files changed, 34 insertions(+), 27 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b3e58735c49b..4b5bc2c09bf2 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -852,7 +852,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
}
/* Find an entry in the third-level page table. */
-#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_phys(dir, addr) (pmd_page_paddr(pmdp_get(dir)) + \
+ pte_index(addr) * sizeof(pte_t))
#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
@@ -1328,14 +1329,7 @@ static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- /* Operation applies to PMD table entry only if FEAT_HAFT is enabled */
- VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft());
- return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
-}
+int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index be9dab2c7d6a..1389ba26ec74 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -177,7 +177,7 @@ static void show_pte(unsigned long addr)
break;
pmdp = pmd_offset(pudp, addr);
- pmd = READ_ONCE(*pmdp);
+ pmd = pmdp_get(pmdp);
pr_cont(", pmd=%016llx", pmd_val(pmd));
if (pmd_none(pmd) || pmd_bad(pmd))
break;
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
index c5c5425791da..7a4bbcb39094 100644
--- a/arch/arm64/mm/fixmap.c
+++ b/arch/arm64/mm/fixmap.c
@@ -42,7 +42,7 @@ static inline pte_t *fixmap_pte(unsigned long addr)
static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
{
- pmd_t pmd = READ_ONCE(*pmdp);
+ pmd_t pmd = pmdp_get(pmdp);
pte_t *ptep;
if (pmd_none(pmd)) {
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index a42c05cf5640..6117aca2bac7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -304,7 +304,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
addr &= CONT_PMD_MASK;
pmdp = pmd_offset(pudp, addr);
- pmd = READ_ONCE(*pmdp);
+ pmd = pmdp_get(pmdp);
if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
pmd_none(pmd))
return NULL;
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index abeb81bf6ebd..709e8ad15603 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -62,7 +62,7 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
bool early)
{
- if (pmd_none(READ_ONCE(*pmdp))) {
+ if (pmd_none(pmdp_get(pmdp))) {
phys_addr_t pte_phys = early ?
__pa_symbol(kasan_early_shadow_pte)
: kasan_alloc_zeroed_page(node);
@@ -138,7 +138,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
kasan_pte_populate(pmdp, addr, next, node, early);
- } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
+ } while (pmdp++, addr = next, addr != end && pmd_none(pmdp_get(pmdp)));
}
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a6a00accf4f9..dea1b595f237 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -201,7 +201,7 @@ static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
int flags)
{
unsigned long next;
- pmd_t pmd = READ_ONCE(*pmdp);
+ pmd_t pmd = pmdp_get(pmdp);
pte_t *ptep;
BUG_ON(pmd_sect(pmd));
@@ -257,7 +257,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
unsigned long next;
do {
- pmd_t old_pmd = READ_ONCE(*pmdp);
+ pmd_t old_pmd = pmdp_get(pmdp);
next = pmd_addr_end(addr, end);
@@ -271,7 +271,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
* only allow updates to the permission attributes.
*/
BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
- READ_ONCE(pmd_val(*pmdp))));
+ pmd_val(pmdp_get(pmdp))));
} else {
int ret;
@@ -281,7 +281,7 @@ static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
return ret;
BUG_ON(pmd_val(old_pmd) != 0 &&
- pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
+ pmd_val(old_pmd) != pmd_val(pmdp_get(pmdp)));
}
phys += next - addr;
} while (pmdp++, addr = next, addr != end);
@@ -1475,7 +1475,7 @@ static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
pmdp = pmd_offset(pudp, addr);
- pmd = READ_ONCE(*pmdp);
+ pmd = pmdp_get(pmdp);
if (pmd_none(pmd))
continue;
@@ -1623,7 +1623,7 @@ static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
pmdp = pmd_offset(pudp, addr);
- pmd = READ_ONCE(*pmdp);
+ pmd = pmdp_get(pmdp);
if (pmd_none(pmd))
continue;
@@ -1644,7 +1644,7 @@ static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
*/
pmdp = pmd_offset(pudp, 0UL);
for (i = 0; i < PTRS_PER_PMD; i++) {
- if (!pmd_none(READ_ONCE(pmdp[i])))
+ if (!pmd_none(pmdp_get(pmdp + i)))
return;
}
@@ -1763,7 +1763,7 @@ int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
{
vmemmap_verify((pte_t *)pmdp, node, addr, next);
- return pmd_sect(READ_ONCE(*pmdp));
+ return pmd_sect(pmdp_get(pmdp));
}
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
@@ -1810,7 +1810,7 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
/* Only allow permission changes for now */
- if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
+ if (!pgattr_change_is_safe(pmd_val(pmdp_get(pmdp)),
pmd_val(new_pmd)))
return 0;
@@ -1835,7 +1835,7 @@ int pud_clear_huge(pud_t *pudp)
int pmd_clear_huge(pmd_t *pmdp)
{
- if (!pmd_sect(READ_ONCE(*pmdp)))
+ if (!pmd_sect(pmdp_get(pmdp)))
return 0;
pmd_clear(pmdp);
return 1;
@@ -1847,7 +1847,7 @@ static int __pmd_free_pte_page(pmd_t *pmdp, unsigned long addr,
pte_t *table;
pmd_t pmd;
- pmd = READ_ONCE(*pmdp);
+ pmd = pmdp_get(pmdp);
if (!pmd_table(pmd)) {
VM_WARN_ON(1);
@@ -2245,4 +2245,17 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long i
return 0;
}
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
+int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmdval = pmdp_get(pmdp);
+
+ /* Operation applies to PMD table entry only if FEAT_HAFT is enabled */
+ VM_WARN_ON(pmd_table(pmdval) && !system_supports_haft());
+ return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
+
#endif
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 358d1dc9a576..ed1eec4c757d 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -408,7 +408,7 @@ bool kernel_page_present(struct page *page)
return true;
pmdp = pmd_offset(pudp, addr);
- pmd = READ_ONCE(*pmdp);
+ pmd = pmdp_get(pmdp);
if (pmd_none(pmd))
return false;
if (pmd_sect(pmd))
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 18543b603c77..ddde0f2983b0 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -100,7 +100,7 @@ static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp,
src_pmdp = pmd_offset(src_pudp, start);
do {
- pmd_t pmd = READ_ONCE(*src_pmdp);
+ pmd_t pmd = pmdp_get(src_pmdp);
next = pmd_addr_end(addr, end);
if (pmd_none(pmd))
--
2.43.0
next prev parent reply other threads:[~2026-02-24 5:12 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-24 5:11 [RFC V1 00/16] arm64/mm: Enable 128 bit page table entries Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 01/16] mm: Abstract printing of pxd_val() Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 02/16] mm: Add read-write accessors for vm_page_prot Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 03/16] mm: Replace READ_ONCE() in pud_trans_unstable() Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 04/16] perf/events: Replace READ_ONCE() with standard pgtable accessors Anshuman Khandual
2026-02-24 8:48 ` Peter Zijlstra
2026-02-24 5:11 ` Anshuman Khandual [this message]
2026-02-24 5:11 ` [RFC V1 06/16] arm64/mm: Convert READ_ONCE() as pudp_get() while accessing PUD Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 07/16] arm64/mm: Convert READ_ONCE() as p4dp_get() while accessing P4D Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 08/16] arm64/mm: Convert READ_ONCE() as pgdp_get() while accessing PGD Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 09/16] arm64/mm: Route all pgtable reads via ptdesc_get() Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 10/16] arm64/mm: Route all pgtable writes via ptdesc_set() Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 11/16] arm64/mm: Route all pgtable atomics to central helpers Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 12/16] arm64/mm: Abstract printing of pxd_val() Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 13/16] arm64/mm: Override read-write accessors for vm_page_prot Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 14/16] arm64/mm: Enable fixmap with 5 level page table Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 15/16] arm64/mm: Add macros __tlb_asid_level and __tlb_range Anshuman Khandual
2026-02-24 5:11 ` [RFC V1 16/16] arm64/mm: Add initial support for FEAT_D128 page tables Anshuman Khandual
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260224051153.3150613-6-anshuman.khandual@arm.com \
--to=anshuman.khandual@arm.com \
--cc=akpm@linux-foundation.org \
--cc=catalin.marinas@arm.com \
--cc=david@kernel.org \
--cc=kasan-dev@googlegroups.com \
--cc=linu.cherian@arm.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mark.rutland@arm.com \
--cc=rppt@kernel.org \
--cc=ryan.roberts@arm.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox