From: Rik van Riel <riel@surriel.com>
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, bp@alien8.de, peterz@infradead.org,
dave.hansen@linux.intel.com, zhengqi.arch@bytedance.com,
nadav.amit@gmail.com, thomas.lendacky@amd.com,
kernel-team@meta.com, linux-mm@kvack.org,
akpm@linux-foundation.org, jackmanb@google.com, jannh@google.com,
mhklinux@outlook.com, andrew.cooper3@citrix.com,
Manali.Shukla@amd.com, mingo@kernel.org,
Rik van Riel <riel@surriel.com>
Subject: [PATCH v14 07/13] x86/mm: add global ASID allocation helper functions
Date: Tue, 25 Feb 2025 22:00:42 -0500
Message-ID: <20250226030129.530345-8-riel@surriel.com>
In-Reply-To: <20250226030129.530345-1-riel@surriel.com>

Add functions to manage global ASID space. Multithreaded processes that
are simultaneously active on 4 or more CPUs can get a global ASID,
resulting in the same PCID being used for that process on every CPU.
This in turn will allow the kernel to use hardware-assisted TLB flushing
through AMD INVLPGB or Intel RAR for these processes.
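
As a rough illustration of how these pieces are intended to fit together
(the actual call site is added later in this series; the function name
below is hypothetical, only mm_global_asid(), mm_active_cpus_exceeds()
and use_global_asid() come from this patch):

	/* Sketch only: decide whether an mm should move to a global ASID. */
	static void example_consider_global_asid(struct mm_struct *mm)
	{
		/* Already using broadcast TLB invalidation. */
		if (mm_global_asid(mm))
			return;

		/* "4 or more CPUs" from the changelog: a threshold of 3. */
		if (!mm_active_cpus_exceeds(mm, 3))
			return;

		/* Takes global_asid_lock and handles racing threads. */
		use_global_asid(mm);
	}
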
Signed-off-by: Rik van Riel <riel@surriel.com>
Tested-by: Manali Shukla <Manali.Shukla@amd.com>
Tested-by: Brendan Jackman <jackmanb@google.com>
Tested-by: Michael Kelley <mhklinux@outlook.com>
---
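
A note on the ordering in mm_assign_global_asid()/mm_global_asid(): the
asid_transition flag is written before the smp_store_release() of
global_asid, and readers use smp_load_acquire(), so any thread that
observes a non-zero global ASID also observes asid_transition == true.
A minimal sketch of the reader side (the real consumer,
finish_asid_transition(), is added by a later patch in this series;
the function below is illustrative only):

	static void example_broadcast_flush_path(struct mm_struct *mm)
	{
		/* Acquire: pairs with the release in mm_assign_global_asid(). */
		u16 global_asid = mm_global_asid(mm);

		if (!global_asid)
			return;	/* still on per-CPU dynamic ASIDs */

		/*
		 * Seeing a non-zero global_asid guarantees asid_transition
		 * was already set to true before the release store.
		 */
		if (READ_ONCE(mm->context.asid_transition)) {
			/* ... make every CPU switch to the global ASID ... */
		}

		/* ... broadcast-invalidate entries tagged with global_asid ... */
	}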
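
The PCID encoding described in the updated tlb.c comment is unchanged;
only the range of ASIDs grows. A small worked example (the helper names
below are made up for illustration and are not the kernel's actual
conversion functions):

	static inline u16 example_kern_pcid(u16 asid)
	{
		return asid + 1;			/* PCID 0 is reserved */
	}

	static inline u16 example_user_pcid(u16 asid)
	{
		return example_kern_pcid(asid) + 2048;	/* KPTI user half */
	}

	/* e.g. a global ASID of 100 gives kPCID 101 and uPCID 2149. */
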
arch/x86/include/asm/mmu.h | 11 +++
arch/x86/include/asm/mmu_context.h | 2 +
arch/x86/include/asm/tlbflush.h | 43 +++++++++
arch/x86/mm/tlb.c | 146 ++++++++++++++++++++++++++++-
4 files changed, 199 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 3b496cdcb74b..edb5942d4829 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -69,6 +69,17 @@ typedef struct {
u16 pkey_allocation_map;
s16 execute_only_pkey;
#endif
+
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+ /*
+ * The global ASID will be a non-zero value when the process has
+ * the same ASID across all CPUs, allowing it to make use of
+ * hardware-assisted remote TLB invalidation like AMD INVLPGB.
+ */
+ u16 global_asid;
+ /* The process is transitioning to a new global ASID number. */
+ bool asid_transition;
+#endif
} mm_context_t;
#define INIT_MM_CONTEXT(mm) \
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 795fdd53bd0a..a2c70e495b1b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+extern void mm_free_global_asid(struct mm_struct *mm);
+
/*
* Init a new mm. Used on mm copies, like at fork()
* and on mm's that are brand-new, like at execve().
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 855c13da2045..8e7df0ed7005 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -6,6 +6,7 @@
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
+#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
@@ -234,6 +235,48 @@ void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
+static inline bool is_dyn_asid(u16 asid)
+{
+ return asid < TLB_NR_DYN_ASIDS;
+}
+
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+static inline u16 mm_global_asid(struct mm_struct *mm)
+{
+ u16 asid;
+
+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ return 0;
+
+ asid = smp_load_acquire(&mm->context.global_asid);
+
+ /* mm->context.global_asid is either 0, or a global ASID */
+ VM_WARN_ON_ONCE(asid && is_dyn_asid(asid));
+
+ return asid;
+}
+
+static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
+{
+ /*
+ * Notably flush_tlb_mm_range() -> broadcast_tlb_flush() ->
+ * finish_asid_transition() needs to observe asid_transition = true
+ * once it observes global_asid.
+ */
+ mm->context.asid_transition = true;
+ smp_store_release(&mm->context.global_asid, asid);
+}
+#else
+static inline u16 mm_global_asid(struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
+{
+}
+#endif
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 1cc25e83bd34..9b1652c02452 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -74,13 +74,15 @@
* use different names for each of them:
*
* ASID - [0, TLB_NR_DYN_ASIDS-1]
- * the canonical identifier for an mm
+ * the canonical identifier for an mm, dynamically allocated on each CPU
+ * [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
+ * the canonical, global identifier for an mm, identical across all CPUs
*
- * kPCID - [1, TLB_NR_DYN_ASIDS]
+ * kPCID - [1, MAX_ASID_AVAILABLE]
* the value we write into the PCID part of CR3; corresponds to the
* ASID+1, because PCID 0 is special.
*
- * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
* for KPTI each mm has two address spaces and thus needs two
* PCID values, but we can still do with a single ASID denomination
* for each mm. Corresponds to kPCID + 2048.
@@ -251,6 +253,144 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
*need_flush = true;
}
+/*
+ * Global ASIDs are allocated for multi-threaded processes that are
+ * active on multiple CPUs simultaneously, giving each of those
+ * processes the same PCID on every CPU, for use with hardware-assisted
+ * TLB shootdown on remote CPUs, like AMD INVLPGB or Intel RAR.
+ *
+ * These global ASIDs are held for the lifetime of the process.
+ */
+static DEFINE_RAW_SPINLOCK(global_asid_lock);
+static u16 last_global_asid = MAX_ASID_AVAILABLE;
+static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE);
+static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE);
+static int global_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
+
+/*
+ * When the search for a free ASID in the global ASID space reaches
+ * MAX_ASID_AVAILABLE, a global TLB flush guarantees that previously
+ * freed global ASIDs are safe to re-use.
+ *
+ * This way the global flush only needs to happen at ASID rollover
+ * time, and not at ASID allocation time.
+ */
+static void reset_global_asid_space(void)
+{
+ lockdep_assert_held(&global_asid_lock);
+
+ invlpgb_flush_all_nonglobals();
+
+ /*
+ * The TLB flush above makes it safe to re-use the previously
+ * freed global ASIDs.
+ */
+ bitmap_andnot(global_asid_used, global_asid_used,
+ global_asid_freed, MAX_ASID_AVAILABLE);
+ bitmap_clear(global_asid_freed, 0, MAX_ASID_AVAILABLE);
+
+ /* Restart the search from the start of global ASID space. */
+ last_global_asid = TLB_NR_DYN_ASIDS;
+}
+
+static u16 allocate_global_asid(void)
+{
+ u16 asid;
+
+ lockdep_assert_held(&global_asid_lock);
+
+ /* The previous allocation hit the edge of available address space */
+ if (last_global_asid >= MAX_ASID_AVAILABLE - 1)
+ reset_global_asid_space();
+
+ asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, last_global_asid);
+
+ if (asid >= MAX_ASID_AVAILABLE && !global_asid_available) {
+ /* This should never happen. */
+ VM_WARN_ONCE(1, "Unable to allocate global ASID despite %d available\n",
+ global_asid_available);
+ return 0;
+ }
+
+ /* Claim this global ASID. */
+ __set_bit(asid, global_asid_used);
+ last_global_asid = asid;
+ global_asid_available--;
+ return asid;
+}
+
+/*
+ * Check whether a process is currently active on more than @threshold CPUs.
+ * This is a cheap estimation on whether or not it may make sense to assign
+ * a global ASID to this process, and use broadcast TLB invalidation.
+ */
+static bool mm_active_cpus_exceeds(struct mm_struct *mm, int threshold)
+{
+ int count = 0;
+ int cpu;
+
+ /* This quick check should eliminate most single threaded programs. */
+ if (cpumask_weight(mm_cpumask(mm)) <= threshold)
+ return false;
+
+ /* Slower check to make sure. */
+ for_each_cpu(cpu, mm_cpumask(mm)) {
+ /* Skip the CPUs that aren't really running this process. */
+ if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
+ continue;
+
+ if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+ continue;
+
+ if (++count > threshold)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Assign a global ASID to the current process, protecting against
+ * races between multiple threads in the process.
+ */
+static void use_global_asid(struct mm_struct *mm)
+{
+ u16 asid;
+
+ guard(raw_spinlock_irqsave)(&global_asid_lock);
+
+ /* This process is already using broadcast TLB invalidation. */
+ if (mm_global_asid(mm))
+ return;
+
+ /* The last global ASID was consumed while waiting for the lock. */
+ if (!global_asid_available) {
+ VM_WARN_ONCE(1, "Ran out of global ASIDs\n");
+ return;
+ }
+
+ asid = allocate_global_asid();
+ if (!asid)
+ return;
+
+ mm_assign_global_asid(mm, asid);
+}
+
+void mm_free_global_asid(struct mm_struct *mm)
+{
+ if (!mm_global_asid(mm))
+ return;
+
+ guard(raw_spinlock_irqsave)(&global_asid_lock);
+
+ /* The global ASID can be re-used only after flush at wrap-around. */
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+ __set_bit(mm->context.global_asid, global_asid_freed);
+
+ mm->context.global_asid = 0;
+ global_asid_available++;
+#endif
+}
+
/*
* Given an ASID, flush the corresponding user ASID. We can delay this
* until the next time we switch to it.
--
2.47.1