From: Ryan Roberts <ryan.roberts@arm.com>
To: Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
"David S. Miller" <davem@davemloft.net>,
Andreas Larsson <andreas@gaisler.com>,
Juergen Gross <jgross@suse.com>,
Ajay Kaher <ajay.kaher@broadcom.com>,
Alexey Makhalov <alexey.makhalov@broadcom.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
"Aneesh Kumar K.V" <aneesh.kumar@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
Peter Zijlstra <peterz@infradead.org>,
Arnd Bergmann <arnd@arndb.de>,
David Hildenbrand <david@redhat.com>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Vlastimil Babka <vbabka@suse.cz>, Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>,
Alexei Starovoitov <ast@kernel.org>,
Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
sparclinux@vger.kernel.org, virtualization@lists.linux.dev,
xen-devel@lists.xenproject.org, linux-mm@kvack.org
Subject: [RFC PATCH v1 4/6] mm: Introduce arch_in_lazy_mmu_mode()
Date: Fri, 30 May 2025 15:04:42 +0100 [thread overview]
Message-ID: <20250530140446.2387131-5-ryan.roberts@arm.com> (raw)
In-Reply-To: <20250530140446.2387131-1-ryan.roberts@arm.com>
Introduce new arch_in_lazy_mmu_mode() API, which returns true if the
calling context is currently in lazy mmu mode or false otherwise. Each
arch that supports lazy mmu mode must provide an implementation of this
API.
The API will shortly be used to prevent accidental lazy mmu mode nesting
when performing an allocation, and will additionally be used to ensure
pte modification vs tlb flushing order does not get inadvertently
swapped.
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
arch/arm64/include/asm/pgtable.h | 8 ++++++++
.../powerpc/include/asm/book3s/64/tlbflush-hash.h | 15 +++++++++++++++
arch/sparc/include/asm/tlbflush_64.h | 1 +
arch/sparc/mm/tlb.c | 12 ++++++++++++
arch/x86/include/asm/paravirt.h | 5 +++++
arch/x86/include/asm/paravirt_types.h | 1 +
arch/x86/kernel/paravirt.c | 6 ++++++
arch/x86/xen/mmu_pv.c | 6 ++++++
include/linux/pgtable.h | 1 +
9 files changed, 55 insertions(+)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5285757ee0c1..add75dee49f5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -119,6 +119,14 @@ static inline void arch_leave_lazy_mmu_mode(void)
clear_thread_flag(TIF_LAZY_MMU);
}
+static inline bool arch_in_lazy_mmu_mode(void)
+{
+ if (in_interrupt())
+ return false;
+
+ return test_thread_flag(TIF_LAZY_MMU);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 146287d9580f..4123a9da32cc 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -57,6 +57,21 @@ static inline void arch_leave_lazy_mmu_mode(void)
#define arch_flush_lazy_mmu_mode() do {} while (0)
+static inline bool arch_in_lazy_mmu_mode(void)
+{
+ struct ppc64_tlb_batch *batch;
+ bool active;
+
+ if (radix_enabled())
+ return false;
+
+ batch = get_cpu_ptr(&ppc64_tlb_batch);
+ active = batch->active;
+ put_cpu_ptr(&ppc64_tlb_batch);
+
+ return active;
+}
+
extern void hash__tlbiel_all(unsigned int action);
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 8b8cdaa69272..204bc957df9e 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -45,6 +45,7 @@ void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode() do {} while (0)
+bool arch_in_lazy_mmu_mode(void);
/* Local cpu only. */
void __flush_tlb_all(void);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index a35ddcca5e76..83ab4ba4f4fb 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -69,6 +69,18 @@ void arch_leave_lazy_mmu_mode(void)
preempt_enable();
}
+bool arch_in_lazy_mmu_mode(void)
+{
+ struct tlb_batch *tb;
+ bool active;
+
+ tb = get_cpu_ptr(&tlb_batch);
+ active = tb->active;
+ put_cpu_ptr(&tlb_batch);
+
+ return active;
+}
+
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
bool exec, unsigned int hugepage_shift)
{
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b5e59a7ba0d0..c7ea3ccb8a41 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -542,6 +542,11 @@ static inline void arch_flush_lazy_mmu_mode(void)
PVOP_VCALL0(mmu.lazy_mode.flush);
}
+static inline bool arch_in_lazy_mmu_mode(void)
+{
+ return PVOP_CALL0(bool, mmu.lazy_mode.in);
+}
+
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 37a8627d8277..41001ca9d010 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -46,6 +46,7 @@ struct pv_lazy_ops {
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
+ bool (*in)(void);
} __no_randomize_layout;
#endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ab3e172dcc69..9af1a04a47fd 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -106,6 +106,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
native_set_debugreg(regno, val);
}
+
+static noinstr bool paravirt_retfalse(void)
+{
+ return false;
+}
#endif
struct pv_info pv_info = {
@@ -228,6 +233,7 @@ struct paravirt_patch_template pv_ops = {
.enter = paravirt_nop,
.leave = paravirt_nop,
.flush = paravirt_nop,
+ .in = paravirt_retfalse,
},
.mmu.set_fixmap = native_set_fixmap,
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2a4a8deaf612..74f7a8537911 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2147,6 +2147,11 @@ static void xen_flush_lazy_mmu(void)
preempt_enable();
}
+static bool xen_in_lazy_mmu(void)
+{
+ return xen_get_lazy_mode() == XEN_LAZY_MMU;
+}
+
static void __init xen_post_allocator_init(void)
{
pv_ops.mmu.set_pte = xen_set_pte;
@@ -2230,6 +2235,7 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
.enter = xen_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
.flush = xen_flush_lazy_mmu,
+ .in = xen_in_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index b50447ef1c92..580d9971f435 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -235,6 +235,7 @@ static inline int pmd_dirty(pmd_t pmd)
#define arch_enter_lazy_mmu_mode() do {} while (0)
#define arch_leave_lazy_mmu_mode() do {} while (0)
#define arch_flush_lazy_mmu_mode() do {} while (0)
+#define arch_in_lazy_mmu_mode() false
#endif
#ifndef pte_batch_hint
--
2.43.0
next prev parent reply other threads:[~2025-05-30 14:05 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-30 14:04 [RFC PATCH v1 0/6] Lazy mmu mode fixes and improvements Ryan Roberts
2025-05-30 14:04 ` [RFC PATCH v1 1/6] fs/proc/task_mmu: Fix pte update and tlb maintenance ordering in pagemap_scan_pmd_entry() Ryan Roberts
2025-05-30 16:26 ` Jann Horn
2025-05-30 16:45 ` Ryan Roberts
2025-05-30 16:48 ` Jann Horn
2025-05-30 14:04 ` [RFC PATCH v1 2/6] mm: Fix pte update and tlb maintenance ordering in migrate_vma_collect_pmd() Ryan Roberts
2025-05-30 14:04 ` [RFC PATCH v1 3/6] mm: Avoid calling page allocator from apply_to_page_range() Ryan Roberts
2025-05-30 16:23 ` Liam R. Howlett
2025-05-30 16:50 ` Ryan Roberts
2025-05-30 19:08 ` Liam R. Howlett
2025-05-30 14:04 ` Ryan Roberts [this message]
2025-05-30 14:04 ` [RFC PATCH v1 5/6] mm: Avoid calling page allocator while in lazy mmu mode Ryan Roberts
2025-05-30 14:04 ` [RFC PATCH v1 6/6] Revert "arm64/mm: Permit lazy_mmu_mode to be nested" Ryan Roberts
2025-05-30 14:47 ` [RFC PATCH v1 0/6] Lazy mmu mode fixes and improvements Lorenzo Stoakes
2025-05-30 15:55 ` Ryan Roberts
2025-05-31 7:46 ` Mike Rapoport
2025-06-02 10:31 ` Ryan Roberts
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250530140446.2387131-5-ryan.roberts@arm.com \
--to=ryan.roberts@arm.com \
--cc=Liam.Howlett@oracle.com \
--cc=ajay.kaher@broadcom.com \
--cc=akpm@linux-foundation.org \
--cc=alexey.makhalov@broadcom.com \
--cc=andreas@gaisler.com \
--cc=aneesh.kumar@kernel.org \
--cc=arnd@arndb.de \
--cc=ast@kernel.org \
--cc=boris.ostrovsky@oracle.com \
--cc=bp@alien8.de \
--cc=catalin.marinas@arm.com \
--cc=christophe.leroy@csgroup.eu \
--cc=dave.hansen@linux.intel.com \
--cc=davem@davemloft.net \
--cc=david@redhat.com \
--cc=hpa@zytor.com \
--cc=jgross@suse.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=maddy@linux.ibm.com \
--cc=mhocko@suse.com \
--cc=mingo@redhat.com \
--cc=mpe@ellerman.id.au \
--cc=npiggin@gmail.com \
--cc=peterz@infradead.org \
--cc=rppt@kernel.org \
--cc=ryabinin.a.a@gmail.com \
--cc=sparclinux@vger.kernel.org \
--cc=surenb@google.com \
--cc=tglx@linutronix.de \
--cc=vbabka@suse.cz \
--cc=virtualization@lists.linux.dev \
--cc=will@kernel.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox