linux-mm.kvack.org archive mirror
From: Yu Zhao <yuzhao@google.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	 Marc Zyngier <maz@kernel.org>,
	Muchun Song <muchun.song@linux.dev>,
	 Thomas Gleixner <tglx@linutronix.de>,
	Will Deacon <will@kernel.org>
Cc: Douglas Anderson <dianders@chromium.org>,
	Mark Rutland <mark.rutland@arm.com>,
	 Nanyong Sun <sunnanyong@huawei.com>,
	linux-arm-kernel@lists.infradead.org,
	 linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Yu Zhao <yuzhao@google.com>
Subject: [PATCH v1 1/6] mm/hugetlb_vmemmap: batch update PTEs
Date: Sun, 20 Oct 2024 22:22:13 -0600
Message-ID: <20241021042218.746659-2-yuzhao@google.com>
In-Reply-To: <20241021042218.746659-1-yuzhao@google.com>

Convert vmemmap_remap_walk->remap_pte to ->remap_pte_range so that
vmemmap remap walks can batch update PTEs.

The goal of this conversion is to allow architectures to implement
their own optimizations where possible, e.g., on arm64, stopping
remote CPUs only once per batch when updating the vmemmap. It is not
intended to change the remap workflow, nor should it by itself have
any side effects on performance.
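
For illustration only (not part of the patch itself): below is a minimal
userspace C sketch of the idea behind the conversion. Every name in it
(toy_walk, fake_pte_t, expensive_sync, ...) is a hypothetical stand-in
rather than a kernel API; it only shows that a per-range callback lets a
costly step, such as stopping remote CPUs, be paid once per batch rather
than once per PTE.

/*
 * Illustration only, not kernel code: all names below are hypothetical.
 * The sketch models why a per-range callback helps: a costly step (here
 * a stand-in for stopping remote CPUs) is paid once per batch instead
 * of once per PTE.
 */
#include <stdio.h>

typedef unsigned long fake_pte_t;
#define TOY_PAGE_SIZE	4096UL
#define TOY_NR_PTES	512UL

struct toy_walk {
	/* old style: invoked once per PTE */
	void (*remap_pte)(fake_pte_t *pte, unsigned long addr,
			  struct toy_walk *walk);
	/* new style: invoked once per contiguous range of PTEs */
	void (*remap_pte_range)(fake_pte_t *pte, unsigned long start,
				unsigned long end, struct toy_walk *walk);
	unsigned long nr_expensive_ops;
};

/* Stand-in for a costly synchronization, e.g. pausing remote CPUs. */
static void expensive_sync(struct toy_walk *walk)
{
	walk->nr_expensive_ops++;
}

static void remap_one(fake_pte_t *pte, unsigned long addr,
		      struct toy_walk *walk)
{
	expensive_sync(walk);		/* paid for every PTE */
	*pte = addr | 1;		/* pretend to rewrite the entry */
}

static void remap_range(fake_pte_t *pte, unsigned long start,
			unsigned long end, struct toy_walk *walk)
{
	unsigned long addr;

	expensive_sync(walk);		/* paid once for the whole batch */
	for (addr = start; addr < end; addr += TOY_PAGE_SIZE)
		*pte++ = addr | 1;
}

int main(void)
{
	static fake_pte_t ptes[TOY_NR_PTES];
	unsigned long addr, start = 0, end = TOY_NR_PTES * TOY_PAGE_SIZE;
	struct toy_walk per_pte = { .remap_pte = remap_one };
	struct toy_walk per_range = { .remap_pte_range = remap_range };

	for (addr = start; addr < end; addr += TOY_PAGE_SIZE)
		per_pte.remap_pte(&ptes[addr / TOY_PAGE_SIZE], addr, &per_pte);
	per_range.remap_pte_range(ptes, start, end, &per_range);

	printf("per-PTE callback:   %lu expensive ops\n",
	       per_pte.nr_expensive_ops);
	printf("per-range callback: %lu expensive ops\n",
	       per_range.nr_expensive_ops);
	return 0;
}

Built with any C99 compiler, the per-PTE variant of this sketch reports
512 expensive operations for the range while the per-range variant
reports one.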

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 mm/hugetlb_vmemmap.c | 163 ++++++++++++++++++++++++-------------------
 1 file changed, 91 insertions(+), 72 deletions(-)

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 57b7f591eee8..46befab48d41 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -22,7 +22,7 @@
 /**
  * struct vmemmap_remap_walk - walk vmemmap page table
  *
- * @remap_pte:		called for each lowest-level entry (PTE).
+ * @remap_pte_range:	called on a range of PTEs.
  * @nr_walked:		the number of walked pte.
  * @reuse_page:		the page which is reused for the tail vmemmap pages.
  * @reuse_addr:		the virtual address of the @reuse_page page.
@@ -32,8 +32,8 @@
  *			operations.
  */
 struct vmemmap_remap_walk {
-	void			(*remap_pte)(pte_t *pte, unsigned long addr,
-					     struct vmemmap_remap_walk *walk);
+	void			(*remap_pte_range)(pte_t *pte, unsigned long start,
+					unsigned long end, struct vmemmap_remap_walk *walk);
 	unsigned long		nr_walked;
 	struct page		*reuse_page;
 	unsigned long		reuse_addr;
@@ -101,10 +101,6 @@ static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct page *head;
 	struct vmemmap_remap_walk *vmemmap_walk = walk->private;
 
-	/* Only splitting, not remapping the vmemmap pages. */
-	if (!vmemmap_walk->remap_pte)
-		walk->action = ACTION_CONTINUE;
-
 	spin_lock(&init_mm.page_table_lock);
 	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
 	/*
@@ -129,33 +125,36 @@ static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr,
 			ret = -ENOTSUPP;
 	}
 	spin_unlock(&init_mm.page_table_lock);
-	if (!head || ret)
+	if (ret)
 		return ret;
 
-	return vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk);
-}
+	if (head) {
+		ret = vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk);
+		if (ret)
+			return ret;
+	}
 
-static int vmemmap_pte_entry(pte_t *pte, unsigned long addr,
-			     unsigned long next, struct mm_walk *walk)
-{
-	struct vmemmap_remap_walk *vmemmap_walk = walk->private;
+	if (vmemmap_walk->remap_pte_range) {
+		pte_t *pte = pte_offset_kernel(pmd, addr);
 
-	/*
-	 * The reuse_page is found 'first' in page table walking before
-	 * starting remapping.
-	 */
-	if (!vmemmap_walk->reuse_page)
-		vmemmap_walk->reuse_page = pte_page(ptep_get(pte));
-	else
-		vmemmap_walk->remap_pte(pte, addr, vmemmap_walk);
-	vmemmap_walk->nr_walked++;
+		vmemmap_walk->nr_walked += (next - addr) / PAGE_SIZE;
+		/*
+		 * The reuse_page is found 'first' in page table walking before
+		 * starting remapping.
+		 */
+		if (!vmemmap_walk->reuse_page) {
+			vmemmap_walk->reuse_page = pte_page(ptep_get(pte));
+			pte++;
+			addr += PAGE_SIZE;
+		}
+		vmemmap_walk->remap_pte_range(pte, addr, next, vmemmap_walk);
+	}
 
 	return 0;
 }
 
 static const struct mm_walk_ops vmemmap_remap_ops = {
 	.pmd_entry	= vmemmap_pmd_entry,
-	.pte_entry	= vmemmap_pte_entry,
 };
 
 static int vmemmap_remap_range(unsigned long start, unsigned long end,
@@ -172,7 +171,7 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
 	if (ret)
 		return ret;
 
-	if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
+	if (walk->remap_pte_range && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
 		flush_tlb_kernel_range(start, end);
 
 	return 0;
@@ -204,33 +203,45 @@ static void free_vmemmap_page_list(struct list_head *list)
 		free_vmemmap_page(page);
 }
 
-static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
-			      struct vmemmap_remap_walk *walk)
+static void vmemmap_remap_pte_range(pte_t *pte, unsigned long start, unsigned long end,
+				    struct vmemmap_remap_walk *walk)
 {
-	/*
-	 * Remap the tail pages as read-only to catch illegal write operation
-	 * to the tail pages.
-	 */
-	pgprot_t pgprot = PAGE_KERNEL_RO;
-	struct page *page = pte_page(ptep_get(pte));
-	pte_t entry;
-
-	/* Remapping the head page requires r/w */
-	if (unlikely(addr == walk->reuse_addr)) {
-		pgprot = PAGE_KERNEL;
-		list_del(&walk->reuse_page->lru);
+	int i;
+	struct page *page;
+	int nr_pages = (end - start) / PAGE_SIZE;
 
+	for (i = 0; i < nr_pages; i++) {
+		page = pte_page(ptep_get(pte + i));
+
+		list_add(&page->lru, walk->vmemmap_pages);
+	}
+
+	page = walk->reuse_page;
+
+	if (start == walk->reuse_addr) {
+		list_del(&page->lru);
+		copy_page(page_to_virt(page), (void *)walk->reuse_addr);
 		/*
-		 * Makes sure that preceding stores to the page contents from
-		 * vmemmap_remap_free() become visible before the set_pte_at()
-		 * write.
+		 * Makes sure that preceding stores to the page contents become
+		 * visible before set_pte_at().
 		 */
 		smp_wmb();
 	}
 
-	entry = mk_pte(walk->reuse_page, pgprot);
-	list_add(&page->lru, walk->vmemmap_pages);
-	set_pte_at(&init_mm, addr, pte, entry);
+	for (i = 0; i < nr_pages; i++) {
+		pte_t val;
+
+		/*
+		 * The head page must be mapped read-write; the tail pages are
+		 * mapped read-only to catch illegal modifications.
+		 */
+		if (!i && start == walk->reuse_addr)
+			val = mk_pte(page, PAGE_KERNEL);
+		else
+			val = mk_pte(page, PAGE_KERNEL_RO);
+
+		set_pte_at(&init_mm, start + PAGE_SIZE * i, pte + i, val);
+	}
 }
 
 /*
@@ -252,27 +263,39 @@ static inline void reset_struct_pages(struct page *start)
 	memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
 }
 
-static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
-				struct vmemmap_remap_walk *walk)
+static void vmemmap_restore_pte_range(pte_t *pte, unsigned long start, unsigned long end,
+				      struct vmemmap_remap_walk *walk)
 {
-	pgprot_t pgprot = PAGE_KERNEL;
+	int i;
 	struct page *page;
-	void *to;
-
-	BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page);
+	int nr_pages = (end - start) / PAGE_SIZE;
 
 	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
-	list_del(&page->lru);
-	to = page_to_virt(page);
-	copy_page(to, (void *)walk->reuse_addr);
-	reset_struct_pages(to);
+
+	for (i = 0; i < nr_pages; i++) {
+		BUG_ON(pte_page(ptep_get(pte + i)) != walk->reuse_page);
+
+		copy_page(page_to_virt(page), (void *)walk->reuse_addr);
+		reset_struct_pages(page_to_virt(page));
+
+		page = list_next_entry(page, lru);
+	}
 
 	/*
 	 * Makes sure that preceding stores to the page contents become visible
-	 * before the set_pte_at() write.
+	 * before set_pte_at().
 	 */
 	smp_wmb();
-	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
+
+	for (i = 0; i < nr_pages; i++) {
+		pte_t val;
+
+		page = list_first_entry(walk->vmemmap_pages, struct page, lru);
+		list_del(&page->lru);
+
+		val = mk_pte(page, PAGE_KERNEL);
+		set_pte_at(&init_mm, start + PAGE_SIZE * i, pte + i, val);
+	}
 }
 
 /**
@@ -290,7 +313,6 @@ static int vmemmap_remap_split(unsigned long start, unsigned long end,
 			       unsigned long reuse)
 {
 	struct vmemmap_remap_walk walk = {
-		.remap_pte	= NULL,
 		.flags		= VMEMMAP_SPLIT_NO_TLB_FLUSH,
 	};
 
@@ -322,10 +344,10 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
 {
 	int ret;
 	struct vmemmap_remap_walk walk = {
-		.remap_pte	= vmemmap_remap_pte,
-		.reuse_addr	= reuse,
-		.vmemmap_pages	= vmemmap_pages,
-		.flags		= flags,
+		.remap_pte_range	= vmemmap_remap_pte_range,
+		.reuse_addr		= reuse,
+		.vmemmap_pages		= vmemmap_pages,
+		.flags			= flags,
 	};
 	int nid = page_to_nid((struct page *)reuse);
 	gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
@@ -340,8 +362,6 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
 	 */
 	walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
 	if (walk.reuse_page) {
-		copy_page(page_to_virt(walk.reuse_page),
-			  (void *)walk.reuse_addr);
 		list_add(&walk.reuse_page->lru, vmemmap_pages);
 		memmap_pages_add(1);
 	}
@@ -371,10 +391,9 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
 		 * They will be restored in the following call.
 		 */
 		walk = (struct vmemmap_remap_walk) {
-			.remap_pte	= vmemmap_restore_pte,
-			.reuse_addr	= reuse,
-			.vmemmap_pages	= vmemmap_pages,
-			.flags		= 0,
+			.remap_pte_range	= vmemmap_restore_pte_range,
+			.reuse_addr		= reuse,
+			.vmemmap_pages		= vmemmap_pages,
 		};
 
 		vmemmap_remap_range(reuse, end, &walk);
@@ -425,10 +444,10 @@ static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
 {
 	LIST_HEAD(vmemmap_pages);
 	struct vmemmap_remap_walk walk = {
-		.remap_pte	= vmemmap_restore_pte,
-		.reuse_addr	= reuse,
-		.vmemmap_pages	= &vmemmap_pages,
-		.flags		= flags,
+		.remap_pte_range	= vmemmap_restore_pte_range,
+		.reuse_addr		= reuse,
+		.vmemmap_pages		= &vmemmap_pages,
+		.flags			= flags,
 	};
 
 	/* See the comment in the vmemmap_remap_free(). */
-- 
2.47.0.rc1.288.g06298d1525-goog



Thread overview: 18+ messages
2024-10-21  4:22 [PATCH v1 0/6] mm/arm64: re-enable HVO Yu Zhao
2024-10-21  4:22 ` Yu Zhao [this message]
2024-10-21  4:22 ` [PATCH v1 2/6] mm/hugetlb_vmemmap: add arch-independent helpers Yu Zhao
2024-10-21  4:22 ` [PATCH v1 3/6] irqchip/gic-v3: support SGI broadcast Yu Zhao
2024-10-22  0:24   ` kernel test robot
2024-10-22 15:03   ` Marc Zyngier
2024-10-25  5:07     ` Yu Zhao
2024-10-25 16:14       ` Marc Zyngier
2024-10-25 17:31         ` Yu Zhao
2024-10-29 19:02           ` Marc Zyngier
2024-10-29 19:53             ` Yu Zhao
2024-10-21  4:22 ` [PATCH v1 4/6] arm64: broadcast IPIs to pause remote CPUs Yu Zhao
2024-10-22 16:15   ` Marc Zyngier
2024-10-28 22:11     ` Yu Zhao
2024-10-29 19:36       ` Marc Zyngier
2024-10-31 18:10         ` Yu Zhao
2024-10-21  4:22 ` [PATCH v1 5/6] arm64: pause remote CPUs to update vmemmap Yu Zhao
2024-10-21  4:22 ` [PATCH v1 6/6] arm64: select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP Yu Zhao
