linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Xu Lu <luxu.kernel@bytedance.com>
To: pjw@kernel.org, palmer@dabbelt.com, aou@eecs.berkeley.edu,
	alex@ghiti.fr, kees@kernel.org, mingo@redhat.com,
	peterz@infradead.org, juri.lelli@redhat.com,
	vincent.guittot@linaro.org, akpm@linux-foundation.org,
	david@redhat.com, apatel@ventanamicro.com, guoren@kernel.org
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, Xu Lu <luxu.kernel@bytedance.com>
Subject: [RFC PATCH v2 2/9] riscv: mm: Apply a threshold to the number of active ASIDs on each CPU
Date: Thu, 27 Nov 2025 22:11:10 +0800	[thread overview]
Message-ID: <20251127141117.87420-3-luxu.kernel@bytedance.com> (raw)
In-Reply-To: <20251127141117.87420-1-luxu.kernel@bytedance.com>

Since each CPU has a limited number of TLB entries, only a limited
number of ASIDs can be active in each CPU's TLB at the same time. Thus
we apply a threshold here. When
a mm_struct is loaded, we mark its ASID as active. If the number of
active ASIDs exceeds the threshold, we evict the mm_struct that has not
been used for the longest time, flush its TLB entries, mark its ASID
inactive, and clear current CPU in its mm_cpumask.

Signed-off-by: Xu Lu <luxu.kernel@bytedance.com>
---
 arch/riscv/include/asm/tlbflush.h | 27 +++++++++++++
 arch/riscv/mm/context.c           |  1 +
 arch/riscv/mm/tlbflush.c          | 66 +++++++++++++++++++++++++++++++
 3 files changed, 94 insertions(+)

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index eed0abc405143..3f83fd5ef36db 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -66,6 +66,33 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 extern unsigned long tlb_flush_all_threshold;
+
+#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH
+
+/*
+ * Maximum number of mm_structs whose ASIDs may stay loaded (i.e. may
+ * still have live TLB entries) on one CPU before the least recently
+ * used one is evicted.
+ */
+#define MAX_LOADED_MM					6
+
+/* One loaded mm and its LRU generation stamp; gen == 0 marks the slot unused. */
+struct tlb_context {
+	struct mm_struct *mm;
+	unsigned int gen;
+};
+
+/*
+ * Per-CPU table of the mms loaded on this CPU. The local CPU is the
+ * only writer; presumably remote CPUs take rwlock for reading — the
+ * read side is introduced later in this series, verify there.
+ */
+struct tlb_info {
+	rwlock_t rwlock;
+	struct mm_struct *active_mm;
+	unsigned int next_gen;
+	struct tlb_context contexts[MAX_LOADED_MM];
+};
+
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_info, tlbinfo);
+
+/* Mark @mm's ASID as active on this CPU, evicting the LRU mm if needed. */
+void local_load_tlb_mm(struct mm_struct *mm);
+
+#else /* CONFIG_RISCV_LAZY_TLB_FLUSH */
+
+static inline void local_load_tlb_mm(struct mm_struct *mm) {}
+
+#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
+
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()			do { } while (0)
 #endif /* CONFIG_MMU */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 55c20ad1f7444..a7cf36ad34678 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -217,6 +217,7 @@ static inline void set_mm(struct mm_struct *prev,
 	 */
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 	if (static_branch_unlikely(&use_asid_allocator)) {
+		local_load_tlb_mm(next);
 		set_mm_asid(next, cpu);
 	} else {
 		cpumask_clear_cpu(cpu, mm_cpumask(prev));
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 8404530ec00f9..0b1c21c7aafb8 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -103,6 +103,15 @@ struct flush_tlb_range_data {
 	unsigned long stride;
 };
 
+#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH
+/*
+ * Per-CPU TLB context table. next_gen starts at 1 so that gen == 0
+ * always denotes an unused slot in contexts[].
+ */
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_info, tlbinfo) = {
+	.rwlock = __RW_LOCK_UNLOCKED(tlbinfo.rwlock),
+	.active_mm = NULL,
+	.next_gen = 1,
+	.contexts = { { NULL, 0, }, },
+};
+#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
+
 static void __ipi_flush_tlb_range_asid(void *info)
 {
 	struct flush_tlb_range_data *d = info;
@@ -240,3 +249,60 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 	cpumask_clear(&batch->cpumask);
 }
+
+#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH
+
+/*
+ * Hand out the next LRU generation number for this CPU's TLB context
+ * table. Called by local_load_tlb_mm() with info->rwlock held for
+ * writing.
+ *
+ * If next_gen wraps to 0, every in-use slot is collapsed to gen 1 and
+ * the counter restarts at 1; relative LRU order is lost at that point,
+ * which at worst causes a sub-optimal eviction later. Slots with
+ * gen == 0 are left at 0, keeping them marked as unused.
+ */
+static inline unsigned int new_tlb_gen(struct tlb_info *info)
+{
+	unsigned int gen = info->next_gen++;
+	unsigned int i;
+
+	/* Handle 32-bit wrap-around of the generation counter. */
+	if (unlikely(!info->next_gen)) {
+		for (i = 0; i < MAX_LOADED_MM; i++) {
+			if (info->contexts[i].gen)
+				info->contexts[i].gen = 1;
+		}
+		info->next_gen = 1;
+		gen = info->next_gen++;
+	}
+
+	return gen;
+}
+
+/*
+ * Mark @mm as the mm now loaded on this CPU, inserting it into the
+ * per-CPU TLB context table and evicting the least-recently-used
+ * entry when the table is full.
+ *
+ * The lookup / victim-selection scan below runs without the lock: the
+ * table is per-CPU and only the local CPU ever writes it, so the scan
+ * cannot race with another writer; the write_lock only orders the
+ * update of active_mm and the slots against remote readers.
+ *
+ * On eviction, the victim's ASID is flushed from the local TLB and
+ * this CPU is removed from the victim's mm_cpumask.
+ * NOTE(review): the cpumask bit is cleared before the local flush
+ * completes; a remote flush of the victim in that window would skip
+ * this CPU while stale entries may still be present — confirm later
+ * patches in the series make this ordering safe.
+ */
+void local_load_tlb_mm(struct mm_struct *mm)
+{
+	struct tlb_info *info = this_cpu_ptr(&tlbinfo);
+	struct tlb_context *contexts = info->contexts;
+	struct mm_struct *victim = NULL;
+	unsigned int i, pos = 0, min = UINT_MAX;
+
+	/*
+	 * Find @mm's existing slot, or remember the slot with the
+	 * smallest generation (the LRU entry, or an unused slot since
+	 * gen == 0 sorts below every live generation).
+	 */
+	for (i = 0; i < MAX_LOADED_MM; i++) {
+		if (contexts[i].mm == mm) {
+			pos = i;
+			break;
+		}
+		if (min > contexts[i].gen) {
+			min = contexts[i].gen;
+			pos = i;
+		}
+	}
+
+	write_lock(&info->rwlock);
+
+	info->active_mm = mm;
+
+	/* Taking over another mm's slot: evict the previous owner. */
+	if (contexts[pos].mm != mm) {
+		victim = contexts[pos].mm;
+		contexts[pos].mm = mm;
+	}
+	/* Stamp @mm as most recently used. */
+	contexts[pos].gen = new_tlb_gen(info);
+
+	write_unlock(&info->rwlock);
+
+	if (victim) {
+		cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(victim));
+		local_flush_tlb_all_asid(get_mm_asid(victim));
+	}
+}
+
+#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
-- 
2.20.1



  parent reply	other threads:[~2025-11-27 14:11 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-11-27 14:11 [RFC PATCH v2 0/9] riscv: mm: Introduce lazy tlb flush Xu Lu
2025-11-27 14:11 ` [RFC PATCH v2 1/9] riscv: Introduce RISCV_LAZY_TLB_FLUSH config Xu Lu
2025-11-27 14:11 ` Xu Lu [this message]
2025-11-27 14:11 ` [RFC PATCH v2 3/9] riscv: mm: Grab mm_count to avoid mm getting released Xu Lu
2025-11-27 14:11 ` [RFC PATCH v2 4/9] fork: Add arch override for do_shoot_lazy_tlb() Xu Lu
2025-11-27 14:11 ` [RFC PATCH v2 5/9] riscv: mm: Introduce arch_do_shoot_lazy_tlb Xu Lu
2025-11-27 14:11 ` [RFC PATCH v2 6/9] riscv: mm: Introduce percpu TLB Flush queue Xu Lu
2025-11-27 14:11 ` [RFC PATCH v2 7/9] riscv: mm: Defer the TLB Flush to switch_mm Xu Lu
2025-11-27 14:11 ` [RFC PATCH v2 8/9] riscv: mm: Clear mm_cpumask during local_flush_tlb_all_asid() Xu Lu
2025-11-27 14:11 ` [RFC PATCH v2 9/9] riscv: mm: Clear mm_cpumask during local_flush_tlb_all() Xu Lu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251127141117.87420-3-luxu.kernel@bytedance.com \
    --to=luxu.kernel@bytedance.com \
    --cc=akpm@linux-foundation.org \
    --cc=alex@ghiti.fr \
    --cc=aou@eecs.berkeley.edu \
    --cc=apatel@ventanamicro.com \
    --cc=david@redhat.com \
    --cc=guoren@kernel.org \
    --cc=juri.lelli@redhat.com \
    --cc=kees@kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=mingo@redhat.com \
    --cc=palmer@dabbelt.com \
    --cc=peterz@infradead.org \
    --cc=pjw@kernel.org \
    --cc=vincent.guittot@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox