From: Dave Hansen <dave.hansen@intel.com>
To: Borislav Petkov <bp@alien8.de>, Rik van Riel <riel@surriel.com>
Cc: x86@kernel.org, linux-kernel@vger.kernel.org,
	peterz@infradead.org, dave.hansen@linux.intel.com,
	zhengqi.arch@bytedance.com, nadav.amit@gmail.com,
	thomas.lendacky@amd.com, kernel-team@meta.com,
	linux-mm@kvack.org, akpm@linux-foundation.org,
	jackmanb@google.com, jannh@google.com, mhklinux@outlook.com,
	andrew.cooper3@citrix.com, Manali.Shukla@amd.com,
	mingo@kernel.org
Subject: Re: [PATCH v14 03/13] x86/mm: add INVLPGB support code
Date: Mon, 3 Mar 2025 10:41:44 -0800
Message-ID: <30c721e0-338d-4172-989c-5226d584bcbc@intel.com>
In-Reply-To: <20250228194734.GGZ8IS1iFVpPzmEyYl@fat_crate.local>

[-- Attachment #1: Type: text/plain, Size: 870 bytes --]

On 2/28/25 11:47, Borislav Petkov wrote:
> @@ -157,11 +140,14 @@ index 77f52bc1578a..91c9a4da3ace 100644
>  +/* Flush all mappings for all PCIDs except globals. */
>  +static inline void invlpgb_flush_all_nonglobals(void)
>  +{
> ++	/*
> ++	 * @addr=0 means both rax[1] (valid PCID) and rax[2] (valid ASID) are clear
> ++	 * so flush *any* PCID and ASID.
> ++	 */
>  +	__invlpgb(0, 0, 0, 1, 0, 0);
>  +	__tlbsync();
>  +}

I had a bit of an allergic reaction to all of the magic numbers.

Could we do something like the attached where we give a _few_ of the
magic numbers some symbolic names?

For instance, instead of passing around a bool for pmd_stride, this uses
an enum. It also explicitly separates callers that set pmd_stride=0 to
say "this is a 4k stride" from callers that set pmd_stride=0 for
operations that don't _have_ a stride in the first place.
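
To save reading a diff-of-a-diff, here's roughly what the naming ends up
looking like (a sketch consolidated from the attached patch; the
per-enumerator comments are my gloss, not patch text):

	enum invlpgb_stride {
		NO_STRIDE  = 0,	/* operation has no stride at all */
		PTE_STRIDE = 0,	/* 4k stride */
		PMD_STRIDE = 1	/* 2M (pmd-level) stride */
	};

	#define INVLPGB_FLAG_VA			BIT(0)
	#define INVLPGB_FLAG_PCID		BIT(1)
	#define INVLPGB_FLAG_ASID		BIT(2)
	#define INVLPGB_FLAG_INCLUDE_GLOBAL	BIT(3)
	#define INVLPGB_FLAG_FINAL_ONLY		BIT(4)
	#define INVLPGB_FLAG_INCLUDE_NESTED	BIT(5)

	/* The implied mode when all bits are clear: */
	#define INVLPGB_MODE_ALL_NONGLOBALS	0UL

so that, for example, the flush-everything-nonglobal call goes from

	__invlpgb(0, 0, 0, 1, 0, 0);

to

	__invlpgb(0, 0, 0, 1, NO_STRIDE, INVLPGB_MODE_ALL_NONGLOBALS);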

[-- Attachment #2: supportcode.patch --]
[-- Type: text/x-patch, Size: 4430 bytes --]

--- c83449680170170f55a0ab2eb498b92ce97c0624.patch	2025-03-03 10:35:47.422277335 -0800
+++ 11ce4b22643be.patch	2025-03-03 10:38:05.692509993 -0800
@@ -1,8 +1,8 @@
-commit c83449680170170f55a0ab2eb498b92ce97c0624
+commit 11ce4b22643be54b2c70cf6b4743e6b73b461814
 Author: Rik van Riel <riel@surriel.com>
 Date:   Fri Feb 28 20:32:30 2025 +0100
 
-     x86/mm: Add INVLPGB support code
+    x86/mm: Add INVLPGB support code
     
     Add helper functions and definitions needed to use broadcast TLB
     invalidation on AMD CPUs.
@@ -17,7 +17,7 @@
     Link: https://lore.kernel.org/r/20250226030129.530345-4-riel@surriel.com
 
 diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
-index 77f52bc1578a7..5375145eb9596 100644
+index 77f52bc1578a7..3bd617c204346 100644
 --- a/arch/x86/include/asm/tlb.h
 +++ b/arch/x86/include/asm/tlb.h
 @@ -6,6 +6,9 @@
@@ -30,10 +30,15 @@
  
  static inline void tlb_flush(struct mmu_gather *tlb)
  {
-@@ -25,4 +28,110 @@ static inline void invlpg(unsigned long addr)
+@@ -25,4 +28,119 @@ static inline void invlpg(unsigned long addr)
  	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
  }
  
++enum invlpgb_stride {
++	NO_STRIDE  = 0,
++	PTE_STRIDE = 0,
++	PMD_STRIDE = 1
++};
 +
 +/*
 + * INVLPGB does broadcast TLB invalidation across all the CPUs in the system.
@@ -54,10 +59,10 @@
 + */
 +static inline void __invlpgb(unsigned long asid, unsigned long pcid,
 +			     unsigned long addr, u16 nr_pages,
-+			     bool pmd_stride, u8 flags)
++			     enum invlpgb_stride stride, u8 flags)
 +{
 +	u32 edx = (pcid << 16) | asid;
-+	u32 ecx = (pmd_stride << 31) | (nr_pages - 1);
++	u32 ecx = (stride << 31) | (nr_pages - 1);
 +	u64 rax = addr | flags;
 +
 +	/* The low bits in rax are for flags. Verify addr is clean. */
@@ -84,33 +89,37 @@
 +/*
 + * INVLPGB can be targeted by virtual address, PCID, ASID, or any combination
 + * of the three. For example:
-+ * - INVLPGB_VA | INVLPGB_INCLUDE_GLOBAL: invalidate all TLB entries at the address
-+ * - INVLPGB_PCID:			  invalidate all TLB entries matching the PCID
++ * - FLAG_VA | FLAG_INCLUDE_GLOBAL: invalidate all TLB entries at the address
++ * - FLAG_PCID:			    invalidate all TLB entries matching the PCID
 + *
-+ * The first can be used to invalidate (kernel) mappings at a particular
++ * The first is used to invalidate (kernel) mappings at a particular
 + * address across all processes.
 + *
 + * The latter invalidates all TLB entries matching a PCID.
 + */
-+#define INVLPGB_VA			BIT(0)
-+#define INVLPGB_PCID			BIT(1)
-+#define INVLPGB_ASID			BIT(2)
-+#define INVLPGB_INCLUDE_GLOBAL		BIT(3)
-+#define INVLPGB_FINAL_ONLY		BIT(4)
-+#define INVLPGB_INCLUDE_NESTED		BIT(5)
++#define INVLPGB_FLAG_VA			BIT(0)
++#define INVLPGB_FLAG_PCID		BIT(1)
++#define INVLPGB_FLAG_ASID		BIT(2)
++#define INVLPGB_FLAG_INCLUDE_GLOBAL	BIT(3)
++#define INVLPGB_FLAG_FINAL_ONLY		BIT(4)
++#define INVLPGB_FLAG_INCLUDE_NESTED	BIT(5)
++
++/* The implied mode when all bits are clear: */
++#define INVLPGB_MODE_ALL_NONGLOBALS	0UL
 +
 +static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
 +						unsigned long addr,
 +						u16 nr,
 +						bool pmd_stride)
 +{
-+	__invlpgb(0, pcid, addr, nr, pmd_stride, INVLPGB_PCID | INVLPGB_VA);
++	__invlpgb(0, pcid, addr, nr, pmd_stride, INVLPGB_FLAG_PCID |
++		  				 INVLPGB_FLAG_VA);
 +}
 +
 +/* Flush all mappings for a given PCID, not including globals. */
 +static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
 +{
-+	__invlpgb(0, pcid, 0, 1, 0, INVLPGB_PCID);
++	__invlpgb(0, pcid, 0, 1, NO_STRIDE, INVLPGB_FLAG_PCID);
 +}
 +
 +/* Flush all mappings, including globals, for all PCIDs. */
@@ -123,21 +132,21 @@
 +	 * as it is cheaper.
 +	 */
 +	guard(preempt)();
-+	__invlpgb(0, 0, 0, 1, 0, INVLPGB_INCLUDE_GLOBAL);
++	__invlpgb(0, 0, 0, 1, NO_STRIDE, INVLPGB_FLAG_INCLUDE_GLOBAL);
 +	__tlbsync();
 +}
 +
 +/* Flush addr, including globals, for all PCIDs. */
 +static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
 +{
-+	__invlpgb(0, 0, addr, nr, 0, INVLPGB_INCLUDE_GLOBAL);
++	__invlpgb(0, 0, addr, nr, PTE_STRIDE, INVLPGB_FLAG_INCLUDE_GLOBAL);
 +}
 +
 +/* Flush all mappings for all PCIDs except globals. */
 +static inline void invlpgb_flush_all_nonglobals(void)
 +{
 +	guard(preempt)();
-+	__invlpgb(0, 0, 0, 1, 0, 0);
++	__invlpgb(0, 0, 0, 1, NO_STRIDE, INVLPGB_MODE_ALL_NONGLOBALS);
 +	__tlbsync();
 +}
  #endif /* _ASM_X86_TLB_H */


Thread overview: 64+ messages
2025-02-26  3:00 [PATCH v14 00/13] AMD broadcast TLB invalidation Rik van Riel
2025-02-26  3:00 ` [PATCH v14 01/13] x86/mm: consolidate full flush threshold decision Rik van Riel
2025-09-02 15:44   ` [BUG] x86/mm: regression after 4a02ed8e1cc3 Giovanni Cabiddu
2025-09-02 15:50     ` Dave Hansen
2025-09-02 16:08       ` Nadav Amit
2025-09-02 16:11         ` Dave Hansen
2025-09-03 14:00       ` Rik van Riel
2025-09-02 16:05     ` Jann Horn
2025-09-02 16:13       ` Jann Horn
2025-09-03 14:18       ` Nadav Amit
2025-09-03 14:42         ` Jann Horn
2025-09-02 16:31     ` Jann Horn
2025-09-02 16:57       ` Giovanni Cabiddu
2025-02-26  3:00 ` [PATCH v14 02/13] x86/mm: get INVLPGB count max from CPUID Rik van Riel
2025-02-28 16:21   ` Borislav Petkov
2025-02-28 19:27   ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 03/13] x86/mm: add INVLPGB support code Rik van Riel
2025-02-28 18:46   ` Borislav Petkov
2025-02-28 18:51   ` Dave Hansen
2025-02-28 19:47   ` Borislav Petkov
2025-03-03 18:41     ` Dave Hansen [this message]
2025-03-03 19:23       ` Dave Hansen
2025-03-04 11:00         ` Borislav Petkov
2025-03-04 15:10           ` Dave Hansen
2025-03-04 16:19             ` Borislav Petkov
2025-03-04 16:57               ` Dave Hansen
2025-03-04 21:12                 ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 04/13] x86/mm: use INVLPGB for kernel TLB flushes Rik van Riel
2025-02-28 19:00   ` Dave Hansen
2025-02-28 21:43   ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 05/13] x86/mm: use INVLPGB in flush_tlb_all Rik van Riel
2025-02-28 19:18   ` Dave Hansen
2025-03-01 12:20     ` Borislav Petkov
2025-03-01 15:54       ` Rik van Riel
2025-02-28 22:20   ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 06/13] x86/mm: use broadcast TLB flushing for page reclaim TLB flushing Rik van Riel
2025-02-28 18:57   ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 07/13] x86/mm: add global ASID allocation helper functions Rik van Riel
2025-03-02  7:06   ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 08/13] x86/mm: global ASID context switch & TLB flush handling Rik van Riel
2025-03-02  7:58   ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 09/13] x86/mm: global ASID process exit helpers Rik van Riel
2025-03-02 12:38   ` Borislav Petkov
2025-03-02 13:53     ` Rik van Riel
2025-03-03 10:16       ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 10/13] x86/mm: enable broadcast TLB invalidation for multi-threaded processes Rik van Riel
2025-03-03 10:57   ` Borislav Petkov
2025-02-26  3:00 ` [PATCH v14 11/13] x86/mm: do targeted broadcast flushing from tlbbatch code Rik van Riel
2025-03-03 11:46   ` Borislav Petkov
2025-03-03 21:47     ` Dave Hansen
2025-03-04 11:52       ` Borislav Petkov
2025-03-04 15:24         ` Dave Hansen
2025-03-04 12:52       ` Brendan Jackman
2025-03-04 14:11         ` Borislav Petkov
2025-03-04 15:33           ` Brendan Jackman
2025-03-04 17:51             ` Dave Hansen
2025-02-26  3:00 ` [PATCH v14 12/13] x86/mm: enable AMD translation cache extensions Rik van Riel
2025-02-26  3:00 ` [PATCH v14 13/13] x86/mm: only invalidate final translations with INVLPGB Rik van Riel
2025-03-03 22:40   ` Dave Hansen
2025-03-04 11:53     ` Borislav Petkov
2025-03-03 12:42 ` [PATCH v14 00/13] AMD broadcast TLB invalidation Borislav Petkov
2025-03-03 13:29   ` Borislav Petkov
2025-03-04 12:04 ` [PATCH] x86/mm: Always set the ASID valid bit for the INVLPGB instruction Borislav Petkov
2025-03-04 12:43   ` Borislav Petkov
