From: Rik van Riel <riel@surriel.com>
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, kernel-team@meta.com,
dave.hansen@linux.intel.com, luto@kernel.org,
peterz@infradead.org, tglx@linutronix.de, mingo@redhat.com,
bp@alien8.de, hpa@zytor.com, akpm@linux-foundation.org,
nadav.amit@gmail.com, zhengqi.arch@bytedance.com,
linux-mm@kvack.org, Rik van Riel <riel@surriel.com>
Subject: [PATCH 10/12] x86,tlb: do targeted broadcast flushing from tlbbatch code
Date: Mon, 30 Dec 2024 12:53:11 -0500
Message-ID: <20241230175550.4046587-11-riel@surriel.com>
In-Reply-To: <20241230175550.4046587-1-riel@surriel.com>

Instead of doing a system-wide TLB flush from arch_tlbbatch_flush,
queue up asynchronous, targeted flushes from arch_tlbbatch_add_pending.

This also avoids adding the CPUs of processes that use broadcast
flushing to batch->cpumask, which should further reduce TLB flushing
from the reclaim and compaction paths.

Signed-off-by: Rik van Riel <riel@surriel.com>
---
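[ Illustrative sketch, not part of the patch: how a reclaim-style
  caller ends up exercising this API after the change. The loop and
  the page helpers below are hypothetical stand-ins for the real
  rmap code; arch_tlbbatch_add_pending() and arch_tlbbatch_flush()
  are the functions modified below. ]

static void example_reclaim_flow(void)
{
	struct arch_tlbflush_unmap_batch batch = { };
	struct page *page;

	/* hypothetical loop, standing in for try_to_unmap() */
	for_each_reclaim_page(page) {
		struct mm_struct *mm = page_mm(page);     /* hypothetical */
		unsigned long uaddr = page_uaddr(page);   /* hypothetical */

		/*
		 * For an mm with a broadcast ASID on an INVLPGB-capable
		 * CPU, this now issues an asynchronous, targeted INVLPGB
		 * immediately; otherwise it records the mm's CPUs in
		 * batch.cpumask for a later IPI flush.
		 */
		arch_tlbbatch_add_pending(&batch, mm, uaddr);
	}

	/*
	 * One TLBSYNC to wait for any queued broadcasts (on the same
	 * CPU that issued them), plus at most one IPI round for the
	 * non-broadcast mms, instead of a system-wide flush.
	 */
	arch_tlbbatch_flush(&batch);
}
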
 arch/x86/include/asm/tlbbatch.h |  1 +
 arch/x86/include/asm/tlbflush.h | 12 +++------
 arch/x86/mm/tlb.c               | 48 ++++++++++++++++++++++++++-------
 3 files changed, 42 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/tlbbatch.h b/arch/x86/include/asm/tlbbatch.h
index 1ad56eb3e8a8..f9a17edf63ad 100644
--- a/arch/x86/include/asm/tlbbatch.h
+++ b/arch/x86/include/asm/tlbbatch.h
@@ -10,6 +10,7 @@ struct arch_tlbflush_unmap_batch {
* the PFNs being flushed..
*/
struct cpumask cpumask;
+ bool used_invlpgb;
};
#endif /* _ARCH_X86_TLBBATCH_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 5e9956af98d1..17ec1b169ebd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -297,21 +297,15 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
return atomic64_inc_return(&mm->context.tlb_gen);
}
-static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm,
- unsigned long uaddr)
-{
- inc_mm_tlb_gen(mm);
- cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
- mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
-}
-
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
flush_tlb_mm(mm);
}
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm,
+ unsigned long uaddr);
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index eb83391385ce..454a370494d3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1602,16 +1602,7 @@ EXPORT_SYMBOL_GPL(__flush_tlb_all);
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
struct flush_tlb_info *info;
- int cpu;
-
- if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
- guard(preempt)();
- invlpgb_flush_all_nonglobals();
- tlbsync();
- return;
- }
-
- cpu = get_cpu();
+ int cpu = get_cpu();
info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
TLB_GENERATION_INVALID);
@@ -1629,12 +1620,49 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
local_irq_enable();
}
+ /*
+ * If we issued (asynchronous) INVLPGB flushes, wait for them here.
+ * The cpumask above contains only CPUs that were running tasks
+ * not using broadcast TLB flushing.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB) && batch->used_invlpgb) {
+ tlbsync();
+ migrate_enable();
+ batch->used_invlpgb = false;
+ }
+
cpumask_clear(&batch->cpumask);
put_flush_tlb_info();
put_cpu();
}
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm,
+ unsigned long uaddr)
+{
+ if (static_cpu_has(X86_FEATURE_INVLPGB) && mm_broadcast_asid(mm)) {
+ u16 asid = mm_broadcast_asid(mm);
+ /*
+ * Queue up an asynchronous invalidation. The corresponding
+ * TLBSYNC is done in arch_tlbbatch_flush(), and must be done
+ * on the same CPU.
+ */
+ if (!batch->used_invlpgb) {
+ batch->used_invlpgb = true;
+ migrate_disable();
+ }
+ invlpgb_flush_user_nr(kern_pcid(asid), uaddr, 1, 0);
+ /* Do any CPUs supporting INVLPGB need PTI? */
+ if (static_cpu_has(X86_FEATURE_PTI))
+ invlpgb_flush_user_nr(user_pcid(asid), uaddr, 1, 0);
+ } else {
+ inc_mm_tlb_gen(mm);
+ cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+ }
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
+}
+
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or
--
2.47.1