From: Samuel Holland <samuel@sholland.org>
To: Palmer Dabbelt <palmer@dabbelt.com>,
Alexandre Ghiti <alexghiti@rivosinc.com>,
linux-riscv@lists.infradead.org
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Samuel Holland <samuel@sholland.org>
Subject: [PATCH 6/7] riscv: mm: Always flush a single MM context by ASID
Date: Sat, 9 Sep 2023 15:16:34 -0500
Message-ID: <20230909201727.10909-7-samuel@sholland.org>
In-Reply-To: <20230909201727.10909-1-samuel@sholland.org>

Even if ASIDs are not supported, using the single-ASID variant of the
sfence.vma instruction preserves TLB entries for global (kernel) pages.
So it is always most efficient to use the single-ASID code path.
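
For background, the two flavours of local flush referenced above boil
down to roughly the following (an illustrative sketch only, not the
exact in-tree helpers; local_flush_tlb_all() and
local_flush_tlb_all_asid() also carry errata alternatives and other
details omitted here):

	/* Invalidates non-global *and* global (kernel) entries on this hart. */
	static inline void demo_local_flush_all(void)
	{
		__asm__ __volatile__ ("sfence.vma" : : : "memory");
	}

	/*
	 * Invalidates only entries tagged with @asid; global mappings are
	 * preserved. When the ASID allocator is disabled, the hardware ASID
	 * stays 0, so flushing ASID 0 is still correct and keeps kernel TLB
	 * entries intact.
	 */
	static inline void demo_local_flush_all_asid(unsigned long asid)
	{
		__asm__ __volatile__ ("sfence.vma x0, %0" : : "r" (asid) : "memory");
	}

This is why the change below can unconditionally pass the mm's ASID to
the flush helpers, even on systems without ASID support.
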
Signed-off-by: Samuel Holland <samuel@sholland.org>
---
 arch/riscv/include/asm/mmu_context.h |  2 -
 arch/riscv/include/asm/tlbflush.h    | 11 +++--
 arch/riscv/mm/context.c              |  3 +-
 arch/riscv/mm/tlbflush.c             | 68 ++++++----------------------
 4 files changed, 24 insertions(+), 60 deletions(-)

diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 7030837adc1a..b0659413a080 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -33,8 +33,6 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
-DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
-
 #include <asm-generic/mmu_context.h>
 
 #endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index e55831edfc19..ba27cf68b170 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -54,13 +54,18 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
 
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	unsigned long asid = cntx2asid(atomic_long_read(&mm->context.id));
+
+	local_flush_tlb_all_asid(asid);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	local_flush_tlb_all();
+	flush_tlb_mm(vma->vm_mm);
 }
-
-#define flush_tlb_mm(mm) flush_tlb_all()
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 /* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 3ca9b653df7d..20057085ab8a 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,8 +18,7 @@
 
 #ifdef CONFIG_MMU
 
-DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
-
+static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 static unsigned long num_asids;
 
 static atomic_long_t current_version;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 54c3e70ccd81..56c2d40681a2 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -6,15 +6,6 @@
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
 
-static inline void local_flush_tlb_range(unsigned long start,
-		unsigned long size, unsigned long stride)
-{
-	if (size <= stride)
-		local_flush_tlb_page(start);
-	else
-		local_flush_tlb_all();
-}
-
 static inline void local_flush_tlb_range_asid(unsigned long start,
 		unsigned long size, unsigned long stride, unsigned long asid)
 {
@@ -51,62 +42,33 @@ static void __ipi_flush_tlb_range_asid(void *info)
 	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __ipi_flush_tlb_range(void *info)
-{
-	struct flush_tlb_range_data *d = info;
-
-	local_flush_tlb_range(d->start, d->size, d->stride);
-}
-
 static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long size, unsigned long stride)
 {
+	unsigned long asid = cntx2asid(atomic_long_read(&mm->context.id));
 	struct flush_tlb_range_data ftd;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
-	bool broadcast;
 
 	if (cpumask_empty(cmask))
 		return;
 
 	cpuid = get_cpu();
 	/* check if the tlbflush needs to be sent to other CPUs */
-	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-	if (static_branch_unlikely(&use_asid_allocator)) {
-		unsigned long asid = cntx2asid(atomic_long_read(&mm->context.id));
-
-		if (broadcast) {
-			if (riscv_use_ipi_for_rfence()) {
-				ftd.asid = asid;
-				ftd.start = start;
-				ftd.size = size;
-				ftd.stride = stride;
-				on_each_cpu_mask(cmask,
-						 __ipi_flush_tlb_range_asid,
-						 &ftd, 1);
-			} else
-				sbi_remote_sfence_vma_asid(cmask,
-							   start, size, asid);
-		} else {
-			local_flush_tlb_range_asid(start, size, stride, asid);
-		}
-	} else {
-		if (broadcast) {
-			if (riscv_use_ipi_for_rfence()) {
-				ftd.asid = 0;
-				ftd.start = start;
-				ftd.size = size;
-				ftd.stride = stride;
-				on_each_cpu_mask(cmask,
-						 __ipi_flush_tlb_range,
-						 &ftd, 1);
-			} else
-				sbi_remote_sfence_vma(cmask, start, size);
-		} else {
-			local_flush_tlb_range(start, size, stride);
-		}
-	}
-
+	if (cpumask_any_but(cmask, cpuid) < nr_cpu_ids) {
+		if (riscv_use_ipi_for_rfence()) {
+			ftd.asid = asid;
+			ftd.start = start;
+			ftd.size = size;
+			ftd.stride = stride;
+			on_each_cpu_mask(cmask,
+					 __ipi_flush_tlb_range_asid,
+					 &ftd, 1);
+		} else
+			sbi_remote_sfence_vma_asid(cmask,
+						   start, size, asid);
+	} else
+		local_flush_tlb_range_asid(start, size, stride, asid);
 	put_cpu();
 }
 
--
2.41.0

Thread overview: 13+ messages
2023-09-09 20:16 [PATCH 0/7] riscv: ASID-related and UP-related TLB flush enhancements Samuel Holland
2023-09-09 20:16 ` [PATCH 1/7] riscv: Apply SiFive CIP-1200 workaround to single-ASID sfence.vma Samuel Holland
2023-09-09 20:16 ` [PATCH 2/7] riscv: mm: Introduce cntx2asid/cntx2version helper macros Samuel Holland
2023-09-09 20:16 ` [PATCH 3/7] riscv: mm: Use a fixed layout for the MM context ID Samuel Holland
2023-09-09 20:16 ` [PATCH 4/7] riscv: mm: Make asid_bits a local variable Samuel Holland
2023-09-09 20:16 ` [PATCH 5/7] riscv: mm: Preserve global TLB entries when switching contexts Samuel Holland
2023-09-09 20:16 ` Samuel Holland [this message]
2023-09-10 19:46 ` [PATCH 6/7] riscv: mm: Always flush a single MM context by ASID Conor Dooley
2023-10-26 15:53 ` Palmer Dabbelt
2023-09-09 20:16 ` [PATCH 7/7] riscv: mm: Combine the SMP and non-SMP TLB flushing code Samuel Holland
2023-09-09 23:02 ` kernel test robot
2023-09-11 22:08 ` kernel test robot
2023-09-12 2:03 ` kernel test robot