From: Rik van Riel <riel@surriel.com>
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, kernel-team@meta.com,
	dave.hansen@linux.intel.com, luto@kernel.org,
	peterz@infradead.org, tglx@linutronix.de, mingo@redhat.com,
	bp@alien8.de, hpa@zytor.com, akpm@linux-foundation.org,
	nadav.amit@gmail.com, zhengqi.arch@bytedance.com,
	linux-mm@kvack.org, Rik van Riel <riel@surriel.com>
Subject: [PATCH 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Date: Mon, 30 Dec 2024 12:53:10 -0500	[thread overview]
Message-ID: <20241230175550.4046587-10-riel@surriel.com> (raw)
In-Reply-To: <20241230175550.4046587-1-riel@surriel.com>

Use broadcast TLB invalidation, with the INVLPGB instruction, on AMD EPYC 3
and newer CPUs.

To avoid exhausting PCID space, and to keep TLB flushes local for
single-threaded processes, only hand out broadcast ASIDs to processes that are
active on 3 or more CPUs, and gradually raise that threshold as broadcast ASID
space is depleted.

Signed-off-by: Rik van Riel <riel@surriel.com>
---
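
Not part of the patch: a minimal, stand-alone sketch of how the CPU count
threshold scales as the broadcast ASID space fills up. MAX_ASIDS and
asid_threshold() below are illustrative stand-ins; the real logic is
meets_broadcast_asid_threshold() further down, and the real space size is
MAX_ASID_AVAILABLE.

/* Illustration only; mirrors the scaling policy, not the kernel code. */
#include <stdio.h>

#define MAX_ASIDS          2048	/* assumed size of the broadcast ASID space */
#define HALFFULL_THRESHOLD 8	/* CPU threshold when the space is half full */

/* How many active, non-lazy CPUs a process must exceed to get a broadcast ASID. */
static int asid_threshold(int avail)
{
	int threshold = HALFFULL_THRESHOLD;

	if (avail > MAX_ASIDS * 3 / 4) {
		/* Mostly empty: hand out broadcast ASIDs at 3+ active CPUs. */
		threshold = HALFFULL_THRESHOLD / 4;
	} else if (avail > MAX_ASIDS / 2) {
		/* Less than half used: require 5+ active CPUs. */
		threshold = HALFFULL_THRESHOLD / 2;
	} else if (avail < MAX_ASIDS / 3) {
		/* Getting tight: keep doubling the threshold as space empties. */
		do {
			avail *= 2;
			threshold *= 2;
		} while (avail + threshold < MAX_ASIDS / 2);
	}

	return threshold;
}

int main(void)
{
	int avail;

	for (avail = MAX_ASIDS; avail >= 64; avail /= 2)
		printf("%4d ASIDs free -> need more than %d active CPUs\n",
		       avail, asid_threshold(avail));
	return 0;
}

With 8 as the half-full threshold this gives processes with 3 or more active
CPUs a broadcast ASID while the space is mostly empty, and demands ever more
active CPUs as the space is depleted, so the space should never run out
entirely.
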
 arch/x86/include/asm/mmu.h         |   6 +
 arch/x86/include/asm/mmu_context.h |  12 ++
 arch/x86/include/asm/tlbflush.h    |  17 ++
 arch/x86/mm/tlb.c                  | 310 ++++++++++++++++++++++++++++-
 4 files changed, 336 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 3b496cdcb74b..a8e8dfa5a520 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -48,6 +48,12 @@ typedef struct {
 	unsigned long flags;
 #endif
 
+#ifdef CONFIG_CPU_SUP_AMD
+	struct list_head broadcast_asid_list;
+	u16 broadcast_asid;
+	bool asid_transition;
+#endif
+
 #ifdef CONFIG_ADDRESS_MASKING
 	/* Active LAM mode:  X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
 	unsigned long lam_cr3_mask;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 795fdd53bd0a..0dc446c427d2 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
 #define enter_lazy_tlb enter_lazy_tlb
 extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
+extern void destroy_context_free_broadcast_asid(struct mm_struct *mm);
+
 /*
  * Init a new mm.  Used on mm copies, like at fork()
  * and on mm's that are brand-new, like at execve().
@@ -161,6 +163,13 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.execute_only_pkey = -1;
 	}
 #endif
+
+#ifdef CONFIG_CPU_SUP_AMD
+	INIT_LIST_HEAD(&mm->context.broadcast_asid_list);
+	mm->context.broadcast_asid = 0;
+	mm->context.asid_transition = false;
+#endif
+
 	mm_reset_untag_mask(mm);
 	init_new_context_ldt(mm);
 	return 0;
@@ -170,6 +179,9 @@ static inline int init_new_context(struct task_struct *tsk,
 static inline void destroy_context(struct mm_struct *mm)
 {
 	destroy_context_ldt(mm);
+#ifdef CONFIG_CPU_SUP_AMD
+	destroy_context_free_broadcast_asid(mm);
+#endif
 }
 
 extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 20074f17fbcd..5e9956af98d1 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -65,6 +65,23 @@ static inline void cr4_clear_bits(unsigned long mask)
  */
 #define TLB_NR_DYN_ASIDS	6
 
+#ifdef CONFIG_CPU_SUP_AMD
+#define is_dyn_asid(asid) ((asid) < TLB_NR_DYN_ASIDS)
+#define is_broadcast_asid(asid) ((asid) >= TLB_NR_DYN_ASIDS)
+#define in_asid_transition(info) ((info)->mm && (info)->mm->context.asid_transition)
+#define mm_broadcast_asid(mm) ((mm)->context.broadcast_asid)
+#else
+#define is_dyn_asid(asid) true
+#define is_broadcast_asid(asid) false
+#define in_asid_transition(info) false
+#define mm_broadcast_asid(mm) 0
+
+static inline bool needs_broadcast_asid_reload(struct mm_struct *next, u16 prev_asid)
+{
+	return false;
+}
+#endif
+
 struct tlb_context {
 	u64 ctx_id;
 	u64 tlb_gen;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 64f1679c37e1..eb83391385ce 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -74,13 +74,15 @@
  * use different names for each of them:
  *
  * ASID  - [0, TLB_NR_DYN_ASIDS-1]
- *         the canonical identifier for an mm
+ *         the canonical identifier for an mm, dynamically allocated on each CPU
+ *         [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
+ *         the canonical, global identifier for an mm, identical across all CPUs
  *
- * kPCID - [1, TLB_NR_DYN_ASIDS]
+ * kPCID - [1, MAX_ASID_AVAILABLE]
  *         the value we write into the PCID part of CR3; corresponds to the
  *         ASID+1, because PCID 0 is special.
  *
- * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
  *         for KPTI each mm has two address spaces and thus needs two
  *         PCID values, but we can still do with a single ASID denomination
  *         for each mm. Corresponds to kPCID + 2048.
@@ -225,6 +227,18 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 		return;
 	}
 
+	/*
+	 * TLB consistency for this ASID is maintained with INVLPGB;
+	 * TLB flushes happen even while the process isn't running.
+	 */
+#ifdef CONFIG_CPU_SUP_AMD
+	if (static_cpu_has(X86_FEATURE_INVLPGB) && mm_broadcast_asid(next)) {
+		*new_asid = mm_broadcast_asid(next);
+		*need_flush = false;
+		return;
+	}
+#endif
+
 	if (this_cpu_read(cpu_tlbstate.invalidate_other))
 		clear_asid_other();
 
@@ -251,6 +265,245 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 	*need_flush = true;
 }
 
+#ifdef CONFIG_CPU_SUP_AMD
+/*
+ * Logic for AMD INVLPGB support.
+ */
+static DEFINE_RAW_SPINLOCK(broadcast_asid_lock);
+static u16 last_broadcast_asid = TLB_NR_DYN_ASIDS;
+static DECLARE_BITMAP(broadcast_asid_used, MAX_ASID_AVAILABLE) = { 0 };
+static LIST_HEAD(broadcast_asid_list);
+static int broadcast_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
+
+static void reset_broadcast_asid_space(void)
+{
+	mm_context_t *context;
+
+	lockdep_assert_held(&broadcast_asid_lock);
+
+	/*
+	 * Flush once when we wrap around the ASID space, so we won't need
+	 * to flush every time we allocate an ASID for broadcast flushing.
+	 */
+	invlpgb_flush_all_nonglobals();
+	tlbsync();
+
+	/*
+	 * Leave the currently used broadcast ASIDs set in the bitmap, since
+	 * those cannot be reused before the next wraparound and flush.
+	 */
+	bitmap_clear(broadcast_asid_used, 0, MAX_ASID_AVAILABLE);
+	list_for_each_entry(context, &broadcast_asid_list, broadcast_asid_list)
+		__set_bit(context->broadcast_asid, broadcast_asid_used);
+
+	last_broadcast_asid = TLB_NR_DYN_ASIDS;
+}
+
+static u16 get_broadcast_asid(void)
+{
+	lockdep_assert_held(&broadcast_asid_lock);
+
+	do {
+		u16 start = last_broadcast_asid;
+		u16 asid = find_next_zero_bit(broadcast_asid_used, MAX_ASID_AVAILABLE, start);
+
+		if (asid >= MAX_ASID_AVAILABLE) {
+			reset_broadcast_asid_space();
+			continue;
+		}
+
+		/* Try claiming this broadcast ASID. */
+		if (!test_and_set_bit(asid, broadcast_asid_used)) {
+			last_broadcast_asid = asid;
+			return asid;
+		}
+	} while (1);
+}
+
+/*
+ * Returns true if the mm is transitioning from a CPU-local ASID to a broadcast
+ * (INVLPGB) ASID, or the other way around.
+ */
+static bool needs_broadcast_asid_reload(struct mm_struct *next, u16 prev_asid)
+{
+	u16 broadcast_asid = mm_broadcast_asid(next);
+
+	if (broadcast_asid && prev_asid != broadcast_asid)
+		return true;
+
+	if (!broadcast_asid && is_broadcast_asid(prev_asid))
+		return true;
+
+	return false;
+}
+
+void destroy_context_free_broadcast_asid(struct mm_struct *mm)
+{
+	if (!mm->context.broadcast_asid)
+		return;
+
+	guard(raw_spinlock_irqsave)(&broadcast_asid_lock);
+	mm->context.broadcast_asid = 0;
+	list_del(&mm->context.broadcast_asid_list);
+	broadcast_asid_available++;
+}
+
+static bool mm_active_cpus_exceeds(struct mm_struct *mm, int threshold)
+{
+	int count = 0;
+	int cpu;
+
+	if (cpumask_weight(mm_cpumask(mm)) <= threshold)
+		return false;
+
+	for_each_cpu(cpu, mm_cpumask(mm)) {
+		/* Skip the CPUs that aren't really running this process. */
+		if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
+			continue;
+
+		if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+			continue;
+
+		if (++count > threshold)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * Assign a broadcast ASID to the current process, protecting against
+ * races between multiple threads in the process.
+ */
+static void use_broadcast_asid(struct mm_struct *mm)
+{
+	guard(raw_spinlock_irqsave)(&broadcast_asid_lock);
+
+	/* This process is already using broadcast TLB invalidation. */
+	if (mm->context.broadcast_asid)
+		return;
+
+	mm->context.broadcast_asid = get_broadcast_asid();
+	mm->context.asid_transition = true;
+	list_add(&mm->context.broadcast_asid_list, &broadcast_asid_list);
+	broadcast_asid_available--;
+}
+
+/*
+ * Figure out whether to assign a broadcast (global) ASID to a process.
+ * We vary the threshold by how empty or full broadcast ASID space is.
+ * 1/4 full: >= 4 active threads
+ * 1/2 full: >= 8 active threads
+ * 3/4 full: >= 16 active threads
+ * 7/8 full: >= 32 active threads
+ * etc
+ *
+ * This way we should never exhaust the broadcast ASID space, even on very
+ * large systems, and the processes with the largest number of active
+ * threads should be able to use broadcast TLB invalidation.
+ */
+#define HALFFULL_THRESHOLD 8
+static bool meets_broadcast_asid_threshold(struct mm_struct *mm)
+{
+	int avail = broadcast_asid_available;
+	int threshold = HALFFULL_THRESHOLD;
+
+	if (!avail)
+		return false;
+
+	if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
+		threshold = HALFFULL_THRESHOLD / 4;
+	} else if (avail > MAX_ASID_AVAILABLE / 2) {
+		threshold = HALFFULL_THRESHOLD / 2;
+	} else if (avail < MAX_ASID_AVAILABLE / 3) {
+		do {
+			avail *= 2;
+			threshold *= 2;
+		} while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
+	}
+
+	return mm_active_cpus_exceeds(mm, threshold);
+}
+
+static void count_tlb_flush(struct mm_struct *mm)
+{
+	if (!static_cpu_has(X86_FEATURE_INVLPGB))
+		return;
+
+	/* Check every once in a while. */
+	if ((current->pid & 0x1f) != (jiffies & 0x1f))
+		return;
+
+	if (meets_broadcast_asid_threshold(mm))
+		use_broadcast_asid(mm);
+}
+
+static void finish_asid_transition(struct flush_tlb_info *info)
+{
+	struct mm_struct *mm = info->mm;
+	int bc_asid = mm_broadcast_asid(mm);
+	int cpu;
+
+	if (!mm->context.asid_transition)
+		return;
+
+	for_each_cpu(cpu, mm_cpumask(mm)) {
+		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
+			continue;
+
+		/*
+		 * If at least one CPU is not using the broadcast ASID yet,
+		 * send a TLB flush IPI. The IPI should cause stragglers
+		 * to transition soon.
+		 */
+		if (per_cpu(cpu_tlbstate.loaded_mm_asid, cpu) != bc_asid) {
+			flush_tlb_multi(mm_cpumask(info->mm), info);
+			return;
+		}
+	}
+
+	/* All the CPUs running this process are using the broadcast ASID. */
+	mm->context.asid_transition = false;
+}
+
+static void broadcast_tlb_flush(struct flush_tlb_info *info)
+{
+	bool pmd = info->stride_shift == PMD_SHIFT;
+	unsigned long maxnr = invlpgb_count_max;
+	unsigned long asid = info->mm->context.broadcast_asid;
+	unsigned long addr = info->start;
+	unsigned long nr;
+
+	/* Flushing multiple pages at once is not supported with 1GB pages. */
+	if (info->stride_shift > PMD_SHIFT)
+		maxnr = 1;
+
+	if (info->end == TLB_FLUSH_ALL) {
+		invlpgb_flush_single_pcid(kern_pcid(asid));
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_single_pcid(user_pcid(asid));
+	} else do {
+		/*
+		 * Calculate how many pages can be flushed at once; if the
+		 * remainder of the range is less than one page, flush one.
+		 */
+		nr = min(maxnr, (info->end - addr) >> info->stride_shift);
+		nr = max(nr, 1UL);
+
+		invlpgb_flush_user_nr(kern_pcid(asid), addr, nr, pmd);
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_user_nr(user_pcid(asid), addr, nr, pmd);
+		addr += nr << info->stride_shift;
+	} while (addr < info->end);
+
+	finish_asid_transition(info);
+
+	/* Wait for the INVLPGBs kicked off above to finish. */
+	tlbsync();
+}
+#endif /* CONFIG_CPU_SUP_AMD */
+
 /*
  * Given an ASID, flush the corresponding user ASID.  We can delay this
  * until the next time we switch to it.
@@ -556,8 +809,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 	 */
 	if (prev == next) {
 		/* Not actually switching mm's */
-		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
-			   next->context.ctx_id);
+		if (is_dyn_asid(prev_asid))
+			VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+				   next->context.ctx_id);
 
 		/*
 		 * If this races with another thread that enables lam, 'new_lam'
@@ -573,6 +827,23 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
 			cpumask_set_cpu(cpu, mm_cpumask(next));
 
+		/*
+		 * Check if the current mm is transitioning to a new ASID.
+		 */
+		if (needs_broadcast_asid_reload(next, prev_asid)) {
+			next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+
+			choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+			goto reload_tlb;
+		}
+
+		/*
+		 * Broadcast TLB invalidation keeps this PCID up to date
+		 * all the time.
+		 */
+		if (is_broadcast_asid(prev_asid))
+			return;
+
 		/*
 		 * If the CPU is not in lazy TLB mode, we are just switching
 		 * from one thread in a process to another thread in the same
@@ -626,8 +897,10 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 		barrier();
 	}
 
+reload_tlb:
 	new_lam = mm_lam_cr3_mask(next);
 	if (need_flush) {
+		VM_BUG_ON(is_broadcast_asid(new_asid));
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		load_new_mm_cr3(next->pgd, new_asid, new_lam, true);
@@ -746,7 +1019,7 @@ static void flush_tlb_func(void *info)
 	const struct flush_tlb_info *f = info;
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+	u64 local_tlb_gen;
 	bool local = smp_processor_id() == f->initiating_cpu;
 	unsigned long nr_invalidate = 0;
 	u64 mm_tlb_gen;
@@ -769,6 +1042,16 @@ static void flush_tlb_func(void *info)
 	if (unlikely(loaded_mm == &init_mm))
 		return;
 
+	/* Reload the ASID if transitioning into or out of a broadcast ASID */
+	if (needs_broadcast_asid_reload(loaded_mm, loaded_mm_asid)) {
+		switch_mm_irqs_off(NULL, loaded_mm, NULL);
+		loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	}
+
+	/* Broadcast ASIDs are always kept up to date with INVLPGB. */
+	if (is_broadcast_asid(loaded_mm_asid))
+		return;
+
 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
 		   loaded_mm->context.ctx_id);
 
@@ -786,6 +1069,8 @@ static void flush_tlb_func(void *info)
 		return;
 	}
 
+	local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
 	if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
 		     f->new_tlb_gen <= local_tlb_gen)) {
 		/*
@@ -953,7 +1238,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
 	 * up on the new contents of what used to be page tables, while
 	 * doing a speculative memory access.
 	 */
-	if (info->freed_tables)
+	if (info->freed_tables || in_asid_transition(info))
 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
 	else
 		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
@@ -1026,14 +1311,18 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				bool freed_tables)
 {
 	struct flush_tlb_info *info;
+	unsigned long threshold = tlb_single_page_flush_ceiling;
 	u64 new_tlb_gen;
 	int cpu;
 
+	if (static_cpu_has(X86_FEATURE_INVLPGB))
+		threshold *= invlpgb_count_max;
+
 	cpu = get_cpu();
 
 	/* Should we flush just the requested range? */
 	if ((end == TLB_FLUSH_ALL) ||
-	    ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
+	    ((end - start) >> stride_shift) > threshold) {
 		start = 0;
 		end = TLB_FLUSH_ALL;
 	}
@@ -1049,9 +1338,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	 * a local TLB flush is needed. Optimize this use-case by calling
 	 * flush_tlb_func_local() directly in this case.
 	 */
-	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+	if (IS_ENABLED(CONFIG_CPU_SUP_AMD) && mm_broadcast_asid(mm)) {
+		broadcast_tlb_flush(info);
+	} else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
 		info->trim_cpumask = should_trim_cpumask(mm);
 		flush_tlb_multi(mm_cpumask(mm), info);
+		count_tlb_flush(mm);
 	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
 		lockdep_assert_irqs_enabled();
 		local_irq_disable();
-- 
2.47.1



Thread overview: 89+ messages
2024-12-30 17:53 [PATCH v3 00/12] AMD broadcast TLB invalidation Rik van Riel
2024-12-30 17:53 ` [PATCH 01/12] x86/mm: make MMU_GATHER_RCU_TABLE_FREE unconditional Rik van Riel
2024-12-30 18:41   ` Borislav Petkov
2024-12-31 16:11     ` Rik van Riel
2024-12-31 16:19       ` Borislav Petkov
2024-12-31 16:30         ` Rik van Riel
2025-01-02 11:52           ` Borislav Petkov
2025-01-02 19:56       ` Peter Zijlstra
2025-01-03 12:18         ` Borislav Petkov
2025-01-04 16:27           ` Peter Zijlstra
2025-01-06 15:54             ` Dave Hansen
2025-01-06 15:47           ` Rik van Riel
2024-12-30 17:53 ` [PATCH 02/12] x86/mm: remove pv_ops.mmu.tlb_remove_table call Rik van Riel
2024-12-31  3:18   ` Qi Zheng
2024-12-30 17:53 ` [PATCH 03/12] x86/mm: add X86_FEATURE_INVLPGB definition Rik van Riel
2025-01-02 12:04   ` Borislav Petkov
2025-01-03 18:27     ` Rik van Riel
2025-01-03 21:07       ` Borislav Petkov
2024-12-30 17:53 ` [PATCH 04/12] x86/mm: get INVLPGB count max from CPUID Rik van Riel
2025-01-02 12:15   ` Borislav Petkov
2025-01-10 18:44   ` Tom Lendacky
2025-01-10 20:27     ` Rik van Riel
2025-01-10 20:31       ` Tom Lendacky
2025-01-10 20:34       ` Borislav Petkov
2024-12-30 17:53 ` [PATCH 05/12] x86/mm: add INVLPGB support code Rik van Riel
2025-01-02 12:42   ` Borislav Petkov
2025-01-06 16:50     ` Dave Hansen
2025-01-06 17:32       ` Rik van Riel
2025-01-06 18:14       ` Borislav Petkov
2025-01-14 19:50     ` Rik van Riel
2025-01-03 12:44   ` Borislav Petkov
2024-12-30 17:53 ` [PATCH 06/12] x86/mm: use INVLPGB for kernel TLB flushes Rik van Riel
2025-01-03 12:39   ` Borislav Petkov
2025-01-06 17:21   ` Dave Hansen
2025-01-09 20:16     ` Rik van Riel
2025-01-09 21:18       ` Dave Hansen
2025-01-10  5:31         ` Rik van Riel
2025-01-10  6:07         ` Nadav Amit
2025-01-10 15:14           ` Dave Hansen
2025-01-10 16:08             ` Rik van Riel
2025-01-10 16:29               ` Dave Hansen
2025-01-10 16:36                 ` Rik van Riel
2025-01-10 18:53   ` Tom Lendacky
2025-01-10 20:29     ` Rik van Riel
2024-12-30 17:53 ` [PATCH 07/12] x86/tlb: use INVLPGB in flush_tlb_all Rik van Riel
2025-01-06 17:29   ` Dave Hansen
2025-01-06 17:35     ` Rik van Riel
2025-01-06 17:54       ` Dave Hansen
2024-12-30 17:53 ` [PATCH 08/12] x86/mm: use broadcast TLB flushing for page reclaim TLB flushing Rik van Riel
2024-12-30 17:53 ` Rik van Riel [this message]
2024-12-30 19:24   ` [PATCH 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes Nadav Amit
2025-01-01  4:42     ` Rik van Riel
2025-01-01 15:20       ` Nadav Amit
2025-01-01 16:15         ` Karim Manaouil
2025-01-01 16:23           ` Rik van Riel
2025-01-02  0:06             ` Nadav Amit
2025-01-03 17:36   ` Jann Horn
2025-01-04  2:55     ` Rik van Riel
2025-01-06 13:04       ` Jann Horn
2025-01-06 14:26         ` Rik van Riel
2025-01-06 14:52   ` Nadav Amit
2025-01-06 16:03     ` Rik van Riel
2025-01-06 18:40   ` Dave Hansen
2025-01-12  2:36     ` Rik van Riel
2024-12-30 17:53 ` [PATCH 10/12] x86,tlb: do targeted broadcast flushing from tlbbatch code Rik van Riel
2024-12-30 17:53 ` [PATCH 11/12] x86/mm: enable AMD translation cache extensions Rik van Riel
2024-12-30 18:25   ` Nadav Amit
2024-12-30 18:27     ` Rik van Riel
2025-01-03 17:49   ` Jann Horn
2025-01-04  3:08     ` Rik van Riel
2025-01-06 13:10       ` Jann Horn
2025-01-06 18:29         ` Sean Christopherson
2025-01-10 19:34   ` Tom Lendacky
2025-01-10 19:45     ` Rik van Riel
2025-01-10 19:58       ` Borislav Petkov
2025-01-10 20:43         ` Rik van Riel
2024-12-30 17:53 ` [PATCH 12/12] x86/mm: only invalidate final translations with INVLPGB Rik van Riel
2025-01-03 18:40   ` Jann Horn
2025-01-12  2:39     ` Rik van Riel
2025-01-06 19:03 ` [PATCH v3 00/12] AMD broadcast TLB invalidation Dave Hansen
2025-01-12  2:46   ` Rik van Riel
2025-01-06 22:49 ` Yosry Ahmed
2025-01-07  3:25   ` Rik van Riel
2025-01-08  1:36     ` Yosry Ahmed
2025-01-09  2:25       ` Andrew Cooper
2025-01-09  2:47       ` Andrew Cooper
2025-01-09 21:32         ` Yosry Ahmed
2025-01-09 23:00           ` Andrew Cooper
2025-01-09 23:26             ` Yosry Ahmed
