From: Xu Lu <luxu.kernel@bytedance.com>
To: pjw@kernel.org, palmer@dabbelt.com, aou@eecs.berkeley.edu,
alex@ghiti.fr, kees@kernel.org, mingo@redhat.com,
peterz@infradead.org, juri.lelli@redhat.com,
vincent.guittot@linaro.org, akpm@linux-foundation.org,
david@redhat.com, apatel@ventanamicro.com, guoren@kernel.org
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, Xu Lu <luxu.kernel@bytedance.com>
Subject: [RFC PATCH v2 5/9] riscv: mm: Introduce arch_do_shoot_lazy_tlb
Date: Thu, 27 Nov 2025 22:11:13 +0800
Message-ID: <20251127141117.87420-6-luxu.kernel@bytedance.com>
In-Reply-To: <20251127141117.87420-1-luxu.kernel@bytedance.com>

When an active_mm is shot down, switch the CPU still lazily running it
over to init_mm, evict the mm from the per-CPU active mm array, clear
the CPU from mm_cpumask(), drop the lazy mm reference, and flush the
mm's ASID from the local TLB.

Signed-off-by: Xu Lu <luxu.kernel@bytedance.com>
---
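Reviewer note (not part of the commit message): this hook is the
arch-side counterpart of the override point added in patch 4/9. The
sketch below only illustrates how the generic lazy-TLB shootdown path is
expected to reach it; apart from arch_do_shoot_lazy_tlb() itself, the
names and call sites are assumptions, not the actual kernel/fork.c code.

	/* Illustrative sketch of the generic side (not the real code). */
	#ifndef arch_do_shoot_lazy_tlb
	static void do_shoot_lazy_tlb(void *arg)
	{
		struct mm_struct *mm = arg;

		/* Kick this CPU off the dying mm and run on init_mm instead. */
		if (current->active_mm == mm) {
			WARN_ON_ONCE(current->mm);
			current->active_mm = &init_mm;
			switch_mm(mm, &init_mm, current);
		}
	}
	#define arch_do_shoot_lazy_tlb do_shoot_lazy_tlb
	#endif

	static void cleanup_lazy_tlbs(struct mm_struct *mm)
	{
		/* IPI every CPU that may still hold mm as a lazy active_mm. */
		on_each_cpu_mask(mm_cpumask(mm), arch_do_shoot_lazy_tlb,
				 (void *)mm, 1);
	}

With the override defined, arch_do_shoot_lazy_tlb() below additionally
evicts the mm from this CPU's tlbinfo and flushes its ASID via
local_flush_tlb_mm().
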
arch/riscv/include/asm/mmu_context.h | 5 ++++
arch/riscv/include/asm/tlbflush.h | 11 +++++++++
arch/riscv/mm/context.c | 19 ++++++++++++++++
arch/riscv/mm/tlbflush.c | 34 ++++++++++++++++++++++++----
4 files changed, 64 insertions(+), 5 deletions(-)
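
A second reviewer note: local_flush_tlb_mm() in the last hunk walks the
per-CPU tlb_info bookkeeping introduced earlier in the series. The
definitions below are an illustrative reconstruction from how the fields
are used in this patch (names match the series; the exact layout and any
additional members may differ):

	struct tlb_context {
		struct mm_struct	*mm;	/* mm whose ASID is loaded on this CPU */
		unsigned long		gen;	/* generation counter, reset to 0 on eviction */
	};

	struct tlb_info {
		struct tlb_context	contexts[MAX_LOADED_MM]; /* lazily loaded mms */
		struct mm_struct	*active_mm;	/* mm currently active on this CPU */
		rwlock_t		rwlock;		/* protects contexts[] updates */
	};

	DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_info, tlbinfo);

Evicting an entry means clearing its contexts[] slot under the write
lock, clearing this CPU in mm_cpumask(), dropping the lazy mm reference,
and finally flushing the ASID.
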
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 8c4bc49a3a0f5..bc73cc3262ae6 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -16,6 +16,11 @@
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *task);
+#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH
+#define arch_do_shoot_lazy_tlb arch_do_shoot_lazy_tlb
+void arch_do_shoot_lazy_tlb(void *arg);
+#endif
+
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 3f83fd5ef36db..e7365a53265a6 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -15,6 +15,11 @@
#define FLUSH_TLB_NO_ASID ((unsigned long)-1)
#ifdef CONFIG_MMU
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+ return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
+}
+
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
@@ -86,11 +91,17 @@ struct tlb_info {
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_info, tlbinfo);
void local_load_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_mm(struct mm_struct *mm);
#else /* CONFIG_RISCV_LAZY_TLB_FLUSH */
static inline void local_load_tlb_mm(struct mm_struct *mm) {}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ local_flush_tlb_all_asid(get_mm_asid(mm));
+}
+
#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
#else /* CONFIG_MMU */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index a7cf36ad34678..3335080e5f720 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -274,6 +274,25 @@ static int __init asids_init(void)
return 0;
}
early_initcall(asids_init);
+
+#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH
+void arch_do_shoot_lazy_tlb(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (current->active_mm == mm) {
+ WARN_ON_ONCE(current->mm);
+ current->active_mm = &init_mm;
+ switch_mm(mm, &init_mm, current);
+ }
+
+ if (!static_branch_unlikely(&use_asid_allocator) || !mm)
+ return;
+
+ local_flush_tlb_mm(mm);
+}
+#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
+
#else
static inline void set_mm(struct mm_struct *prev,
struct mm_struct *next, unsigned int cpu)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 4b2ce06cbe6bd..a47bacf5801ab 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -164,11 +164,6 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static inline unsigned long get_mm_asid(struct mm_struct *mm)
-{
- return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
-}
-
static void __flush_tlb_range(struct mm_struct *mm,
const struct cpumask *cmask,
unsigned long start, unsigned long size,
@@ -352,4 +347,33 @@ void local_load_tlb_mm(struct mm_struct *mm)
}
}
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ struct tlb_info *info = this_cpu_ptr(&tlbinfo);
+ struct tlb_context *contexts = info->contexts;
+ unsigned long asid = get_mm_asid(mm);
+ unsigned int i;
+
+ if (!mm || mm == info->active_mm) {
+ local_flush_tlb_all_asid(asid);
+ return;
+ }
+
+ for (i = 0; i < MAX_LOADED_MM; i++) {
+ if (contexts[i].mm != mm)
+ continue;
+
+ write_lock(&info->rwlock);
+ contexts[i].mm = NULL;
+ contexts[i].gen = 0;
+ write_unlock(&info->rwlock);
+
+ cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(mm));
+ mmdrop_lazy_mm(mm);
+ break;
+ }
+
+ local_flush_tlb_all_asid(asid);
+}
+
#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
--
2.20.1