From: Ryan Roberts <ryan.roberts@arm.com>
To: Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	Muchun Song <muchun.song@linux.dev>,
	Pasha Tatashin <pasha.tatashin@soleen.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Uladzislau Rezki <urezki@gmail.com>,
	Christoph Hellwig <hch@infradead.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Ard Biesheuvel <ardb@kernel.org>,
	Anshuman Khandual <anshuman.khandual@arm.com>,
	Dev Jain <dev.jain@arm.com>,
	Alexandre Ghiti <alexghiti@rivosinc.com>,
	Steve Capper <steve.capper@linaro.org>,
	Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>,
	linux-arm-kernel@lists.infradead.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v1 16/16] arm64/mm: Defer barriers when updating kernel mappings
Date: Wed,  5 Feb 2025 15:09:56 +0000
Message-ID: <20250205151003.88959-17-ryan.roberts@arm.com>
In-Reply-To: <20250205151003.88959-1-ryan.roberts@arm.com>

Because the kernel can't tolerate page faults for kernel mappings, when
setting a valid kernel-space pte (or pmd/pud/p4d/pgd), the kernel emits
a dsb(ishst) to ensure that the store to the pgtable is observed by the
table walker immediately. Additionally it emits an isb() to ensure that
any fault already speculatively determined from the old, invalid mapping
gets canceled.
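
For reference, the barrier pair in question (factored into the new
emit_pte_barriers() helper below) is:

	dsb(ishst);	/* make the pgtable store visible to the table walker */
	isb();		/* discard any stale speculatively-determined fault */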

We can improve the performance of vmalloc operations by batching these
barriers until the end of a set of entry updates. The newly added
arch_update_kernel_mappings_begin() / arch_update_kernel_mappings_end()
hooks provide the required batching points.
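
As a sketch of the intended calling pattern only (the actual call sites
live in core-mm, added earlier in this series, and may differ in
detail):

	arch_update_kernel_mappings_begin(start, end);	/* sets TIF_KMAP_UPDATE_ACTIVE */

	/* Any number of entry stores; each valid kernel entry now calls
	 * queue_pte_barriers(), which defers instead of emitting. */
	__set_pte(ptep, pte);

	arch_update_kernel_mappings_end(start, end, mask); /* one dsb(ishst); isb() if pending */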

vmalloc performance improves by up to 30% as a result.

Two new TIF_ flags are created: TIF_KMAP_UPDATE_ACTIVE tells us if we
are in batch mode and can therefore defer any barriers until the end of
the batch. TIF_KMAP_UPDATE_PENDING tells us if barriers are queued to be
emitted at the end of the batch. If a task is preempted while barriers
are pending, __switch_to() completes them on the outgoing CPU.
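
On each kernel pgtable entry store the decision therefore becomes (as
implemented by queue_pte_barriers() in the hunk below; the PENDING test
avoids a redundant atomic set):

	if (test_thread_flag(TIF_KMAP_UPDATE_ACTIVE)) {
		/* batching: record that barriers are owed at _end() */
		if (!test_thread_flag(TIF_KMAP_UPDATE_PENDING))
			set_thread_flag(TIF_KMAP_UPDATE_PENDING);
	} else {
		/* not batching: emit immediately, as before this patch */
		emit_pte_barriers();
	}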

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 arch/arm64/include/asm/pgtable.h     | 65 +++++++++++++++++++---------
 arch/arm64/include/asm/thread_info.h |  2 +
 arch/arm64/kernel/process.c          | 20 +++++++--
 3 files changed, 63 insertions(+), 24 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ff358d983583..1ee9b9588502 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -39,6 +39,41 @@
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 #include <linux/page_table_check.h>
+#include <linux/pgtable_modmask.h>
+
+static inline void emit_pte_barriers(void)
+{
+	dsb(ishst);
+	isb();
+}
+
+static inline void queue_pte_barriers(void)
+{
+	if (test_thread_flag(TIF_KMAP_UPDATE_ACTIVE)) {
+		if (!test_thread_flag(TIF_KMAP_UPDATE_PENDING))
+			set_thread_flag(TIF_KMAP_UPDATE_PENDING);
+	} else
+		emit_pte_barriers();
+}
+
+#define arch_update_kernel_mappings_begin arch_update_kernel_mappings_begin
+static inline void arch_update_kernel_mappings_begin(unsigned long start,
+						     unsigned long end)
+{
+	set_thread_flag(TIF_KMAP_UPDATE_ACTIVE);
+}
+
+#define arch_update_kernel_mappings_end arch_update_kernel_mappings_end
+static inline void arch_update_kernel_mappings_end(unsigned long start,
+						   unsigned long end,
+						   pgtbl_mod_mask mask)
+{
+	if (test_thread_flag(TIF_KMAP_UPDATE_PENDING))
+		emit_pte_barriers();
+
+	clear_thread_flag(TIF_KMAP_UPDATE_PENDING);
+	clear_thread_flag(TIF_KMAP_UPDATE_ACTIVE);
+}
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
@@ -323,10 +358,8 @@ static inline void __set_pte_complete(pte_t pte)
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
 	 * or update_mmu_cache() have the necessary barriers.
 	 */
-	if (pte_valid_not_user(pte)) {
-		dsb(ishst);
-		isb();
-	}
+	if (pte_valid_not_user(pte))
+		queue_pte_barriers();
 }
 
 static inline void __set_pte(pte_t *ptep, pte_t pte)
@@ -791,10 +824,8 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 
 	WRITE_ONCE(*pmdp, pmd);
 
-	if (pmd_valid_not_user(pmd)) {
-		dsb(ishst);
-		isb();
-	}
+	if (pmd_valid_not_user(pmd))
+		queue_pte_barriers();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -869,10 +900,8 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 
 	WRITE_ONCE(*pudp, pud);
 
-	if (pud_valid_not_user(pud)) {
-		dsb(ishst);
-		isb();
-	}
+	if (pud_valid_not_user(pud))
+		queue_pte_barriers();
 }
 
 static inline void pud_clear(pud_t *pudp)
@@ -960,10 +989,8 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 
 	WRITE_ONCE(*p4dp, p4d);
 
-	if (p4d_valid_not_user(p4d)) {
-		dsb(ishst);
-		isb();
-	}
+	if (p4d_valid_not_user(p4d))
+		queue_pte_barriers();
 }
 
 static inline void p4d_clear(p4d_t *p4dp)
@@ -1098,10 +1125,8 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 
 	WRITE_ONCE(*pgdp, pgd);
 
-	if (pgd_valid_not_user(pgd)) {
-		dsb(ishst);
-		isb();
-	}
+	if (pgd_valid_not_user(pgd))
+		queue_pte_barriers();
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 1114c1c3300a..382d2121261e 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -82,6 +82,8 @@ void arch_setup_new_exec(void);
 #define TIF_SME_VL_INHERIT	28	/* Inherit SME vl_onexec across exec */
 #define TIF_KERNEL_FPSTATE	29	/* Task is in a kernel mode FPSIMD section */
 #define TIF_TSC_SIGSEGV		30	/* SIGSEGV on counter-timer access */
+#define TIF_KMAP_UPDATE_ACTIVE	31	/* kernel map update in progress */
+#define TIF_KMAP_UPDATE_PENDING	32	/* kernel map updated with deferred barriers */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 42faebb7b712..1367ec6407d1 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -680,10 +680,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	gcs_thread_switch(next);
 
 	/*
-	 * Complete any pending TLB or cache maintenance on this CPU in case
-	 * the thread migrates to a different CPU.
-	 * This full barrier is also required by the membarrier system
-	 * call.
+	 * Complete any pending TLB or cache maintenance on this CPU in case the
+	 * thread migrates to a different CPU. This full barrier is also
+	 * required by the membarrier system call. Additionally it is required
+	 * for TIF_KMAP_UPDATE_PENDING, see below.
 	 */
 	dsb(ish);
 
@@ -696,6 +696,18 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	/* avoid expensive SCTLR_EL1 accesses if no change */
 	if (prev->thread.sctlr_user != next->thread.sctlr_user)
 		update_sctlr_el1(next->thread.sctlr_user);
+	else if (unlikely(test_thread_flag(TIF_KMAP_UPDATE_PENDING))) {
+		/*
+		 * In the unlikely event that a kernel map update is in
+		 * progress when preemption occurs, we must emit any pending
+		 * barriers. emit_pte_barriers() is "dsb(ishst); isb();". The
+		 * dsb is already handled by the dsb(ish) above. The isb() is
+		 * handled if update_sctlr_el1() was called, so we only need
+		 * to emit an isb() here if it wasn't.
+		 */
+		isb();
+		clear_thread_flag(TIF_KMAP_UPDATE_PENDING);
+	}
 
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);
-- 
2.43.0


