linux-mm.kvack.org archive mirror
From: Ankur Arora <ankur.a.arora@oracle.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org, x86@kernel.org
Cc: torvalds@linux-foundation.org, akpm@linux-foundation.org,
	mike.kravetz@oracle.com, mingo@kernel.org, luto@kernel.org,
	tglx@linutronix.de, bp@alien8.de, peterz@infradead.org,
	ak@linux.intel.com, arnd@arndb.de, jgg@nvidia.com,
	jon.grimm@amd.com, boris.ostrovsky@oracle.com,
	konrad.wilk@oracle.com, joao.martins@oracle.com,
	ankur.a.arora@oracle.com
Subject: [PATCH v3 04/21] mm, clear_huge_page: support clear_user_pages()
Date: Mon,  6 Jun 2022 20:20:52 +0000	[thread overview]
Message-ID: <20220606202109.1306034-5-ankur.a.arora@oracle.com> (raw)
In-Reply-To: <20220606202109.1306034-1-ankur.a.arora@oracle.com>

process_huge_page() now operates on page extents, with the
process_subpages() callback handling the page-level operations for each
extent.

The process_subpages() workers, clear_subpages() and copy_subpages(),
respectively chunk the clearing in units of clear_page_unit, and
continue to copy one page at a time.
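
As an illustration, with a hypothetical clear_page_unit of 8, clearing
the subpage range [0, 17] would be issued as extents of 8, 8 and 2
pages, whereas the equivalent copy still proceeds as 18 individual
copy_user_highpage() calls.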

Relatedly, define clear_user_extent(), which calls
clear_user_highpages() to either funnel through to clear_user_pages()
or fall back to page-at-a-time clearing via clear_user_highpage().
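
To make the fallback concrete, here is a minimal sketch of the
page-at-a-time path that a generic clear_user_highpages() could take
when ARCH_MAX_CLEAR_PAGES == 1; the actual helper is added in patch
03/21 of this series and may differ in detail:

static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
					unsigned int npages)
{
	unsigned int i;

	/*
	 * Page-at-a-time fallback. An architecture advertising
	 * ARCH_MAX_CLEAR_PAGES > 1 is instead expected to funnel this
	 * into a single clear_user_pages() call.
	 */
	for (i = 0; i < npages; i++, vaddr += PAGE_SIZE)
		clear_user_highpage(page + i, vaddr);
}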

clear_page_unit, the clearing unit size, is defined to be:
   1 << min(MAX_ORDER - 1, ARCH_MAX_CLEAR_PAGES_ORDER).
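
For instance, with the common MAX_ORDER of 11 and a hypothetical
ARCH_MAX_CLEAR_PAGES_ORDER of 3, this evaluates to 1 << min(10, 3) = 8
pages (32KB with 4KB pages) per clearing extent; with
ARCH_MAX_CLEAR_PAGES_ORDER == 0 it stays at the single-page default.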

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 mm/memory.c | 95 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 69 insertions(+), 26 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 2c86d79c9d98..fbc7bc70dc3d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5563,6 +5563,31 @@ EXPORT_SYMBOL(__might_fault);
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 
+static unsigned int __ro_after_init clear_page_unit = 1;
+static int __init setup_clear_page_params(void)
+{
+	clear_page_unit = 1 << min(MAX_ORDER - 1, ARCH_MAX_CLEAR_PAGES_ORDER);
+	return 0;
+}
+
+/*
+ * cacheinfo is set up via device_initcall() and we want to run after
+ * that. Use the default value until then.
+ */
+late_initcall(setup_clear_page_params);
+
+/*
+ * Clear a page extent.
+ *
+ * With ARCH_MAX_CLEAR_PAGES == 1, clear_user_highpages() drops down to
+ * page-at-a-time mode; otherwise it funnels through to clear_user_pages().
+ */
+static void clear_user_extent(struct page *start_page, unsigned long vaddr,
+			      unsigned int npages)
+{
+	clear_user_highpages(start_page, vaddr, npages);
+}
+
 struct subpage_arg {
 	struct page *dst;
 	struct page *src;
@@ -5576,34 +5601,29 @@ struct subpage_arg {
  */
 static inline void process_huge_page(struct subpage_arg *sa,
 	unsigned long addr_hint, unsigned int pages_per_huge_page,
-	void (*process_subpage)(struct subpage_arg *sa,
-				unsigned long base_addr, int idx))
+	void (*process_subpages)(struct subpage_arg *sa,
+				 unsigned long base_addr, int lidx, int ridx))
 {
 	int i, n, base, l;
 	unsigned long addr = addr_hint &
 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
 	/* Process target subpage last to keep its cache lines hot */
-	might_sleep();
 	n = (addr_hint - addr) / PAGE_SIZE;
+
 	if (2 * n <= pages_per_huge_page) {
 		/* If target subpage in first half of huge page */
 		base = 0;
 		l = n;
 		/* Process subpages at the end of huge page */
-		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
-			cond_resched();
-			process_subpage(sa, addr, i);
-		}
+		process_subpages(sa, addr, 2*n, pages_per_huge_page-1);
 	} else {
 		/* If target subpage in second half of huge page */
 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
 		l = pages_per_huge_page - n;
+
 		/* Process subpages at the begin of huge page */
-		for (i = 0; i < base; i++) {
-			cond_resched();
-			process_subpage(sa, addr, i);
-		}
+		process_subpages(sa, addr, 0, base);
 	}
 	/*
 	 * Process remaining subpages in left-right-left-right pattern
@@ -5613,15 +5633,13 @@ static inline void process_huge_page(struct subpage_arg *sa,
 		int left_idx = base + i;
 		int right_idx = base + 2 * l - 1 - i;
 
-		cond_resched();
-		process_subpage(sa, addr, left_idx);
-		cond_resched();
-		process_subpage(sa, addr, right_idx);
+		process_subpages(sa, addr, left_idx, left_idx);
+		process_subpages(sa, addr, right_idx, right_idx);
 	}
 }
 
 static void clear_gigantic_page(struct page *page,
-				unsigned long addr,
+				unsigned long base_addr,
 				unsigned int pages_per_huge_page)
 {
 	int i;
@@ -5629,18 +5647,35 @@ static void clear_gigantic_page(struct page *page,
 
 	might_sleep();
 	for (i = 0; i < pages_per_huge_page;
-	     i++, p = mem_map_next(p, page, i)) {
+	     i += clear_page_unit, p = mem_map_offset(page, i)) {
+		/*
+		 * clear_page_unit is a factor of 1<<MAX_ORDER which
+		 * guarantees that p[0] and p[clear_page_unit-1]
+		 * never straddle a mem_map discontiguity.
+		 */
+		clear_user_extent(p, base_addr + i * PAGE_SIZE, clear_page_unit);
 		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
 	}
 }
 
-static void clear_subpage(struct subpage_arg *sa,
-			  unsigned long base_addr, int idx)
+static void clear_subpages(struct subpage_arg *sa,
+			   unsigned long base_addr, int lidx, int ridx)
 {
 	struct page *page = sa->dst;
+	int i, n;
 
-	clear_user_highpage(page + idx, base_addr + idx * PAGE_SIZE);
+	might_sleep();
+
+	for (i = lidx; i <= ridx; ) {
+		unsigned int remaining = (unsigned int) ridx - i + 1;
+
+		n = min(clear_page_unit, remaining);
+
+		clear_user_extent(page + i, base_addr + i * PAGE_SIZE, n);
+		i += n;
+
+		cond_resched();
+	}
 }
 
 void clear_huge_page(struct page *page,
@@ -5659,7 +5694,7 @@ void clear_huge_page(struct page *page,
 		return;
 	}
 
-	process_huge_page(&sa, addr_hint, pages_per_huge_page, clear_subpage);
+	process_huge_page(&sa, addr_hint, pages_per_huge_page, clear_subpages);
 }
 
 static void copy_user_gigantic_page(struct page *dst, struct page *src,
@@ -5681,11 +5716,19 @@ static void copy_user_gigantic_page(struct page *dst, struct page *src,
 	}
 }
 
-static void copy_subpage(struct subpage_arg *copy_arg,
-			 unsigned long base_addr, int idx)
+static void copy_subpages(struct subpage_arg *copy_arg,
+			  unsigned long base_addr, int lidx, int ridx)
 {
-	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
+	int idx;
+
+	might_sleep();
+
+	for (idx = lidx; idx <= ridx; idx++) {
+		copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
 			   base_addr + idx * PAGE_SIZE, copy_arg->vma);
+
+		cond_resched();
+	}
 }
 
 void copy_user_huge_page(struct page *dst, struct page *src,
@@ -5706,7 +5749,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 		return;
 	}
 
-	process_huge_page(&sa, addr_hint, pages_per_huge_page, copy_subpage);
+	process_huge_page(&sa, addr_hint, pages_per_huge_page, copy_subpages);
 }
 
 long copy_huge_page_from_user(struct page *dst_page,
-- 
2.31.1




Thread overview: 35+ messages
2022-06-06 20:20 [PATCH v3 00/21] huge page clearing optimizations Ankur Arora
2022-06-06 20:20 ` [PATCH v3 01/21] mm, huge-page: reorder arguments to process_huge_page() Ankur Arora
2022-06-06 20:20 ` [PATCH v3 02/21] mm, huge-page: refactor process_subpage() Ankur Arora
2022-06-06 20:20 ` [PATCH v3 03/21] clear_page: add generic clear_user_pages() Ankur Arora
2022-06-06 20:20 ` Ankur Arora [this message]
2022-06-06 20:37 ` [PATCH v3 05/21] mm/huge_page: generalize process_huge_page() Ankur Arora
2022-06-06 20:37 ` [PATCH v3 06/21] x86/clear_page: add clear_pages() Ankur Arora
2022-06-06 20:37 ` [PATCH v3 07/21] x86/asm: add memset_movnti() Ankur Arora
2022-06-06 20:37 ` [PATCH v3 08/21] perf bench: " Ankur Arora
2022-06-06 20:37 ` [PATCH v3 09/21] x86/asm: add clear_pages_movnt() Ankur Arora
2022-06-10 22:11   ` Noah Goldstein
2022-06-10 22:15     ` Noah Goldstein
2022-06-12 11:18       ` Ankur Arora
2022-06-06 20:37 ` [PATCH v3 10/21] x86/asm: add clear_pages_clzero() Ankur Arora
2022-06-06 20:37 ` [PATCH v3 11/21] x86/cpuid: add X86_FEATURE_MOVNT_SLOW Ankur Arora
2022-06-06 20:37 ` [PATCH v3 12/21] sparse: add address_space __incoherent Ankur Arora
2022-06-06 20:37 ` [PATCH v3 13/21] clear_page: add generic clear_user_pages_incoherent() Ankur Arora
2022-06-08  0:01   ` Luc Van Oostenryck
2022-06-12 11:19     ` Ankur Arora
2022-06-06 20:37 ` [PATCH v3 14/21] x86/clear_page: add clear_pages_incoherent() Ankur Arora
2022-06-06 20:37 ` [PATCH v3 15/21] mm/clear_page: add clear_page_non_caching_threshold() Ankur Arora
2022-06-06 20:37 ` [PATCH v3 16/21] x86/clear_page: add arch_clear_page_non_caching_threshold() Ankur Arora
2022-06-06 20:37 ` [PATCH v3 17/21] clear_huge_page: use non-cached clearing Ankur Arora
2022-06-06 20:37 ` [PATCH v3 18/21] gup: add FOLL_HINT_BULK, FAULT_FLAG_NON_CACHING Ankur Arora
2022-06-06 20:37 ` [PATCH v3 19/21] gup: hint non-caching if clearing large regions Ankur Arora
2022-06-06 20:37 ` [PATCH v3 20/21] vfio_iommu_type1: specify FOLL_HINT_BULK to pin_user_pages() Ankur Arora
2022-06-06 20:37 ` [PATCH v3 21/21] x86/cpu/intel: set X86_FEATURE_MOVNT_SLOW for Skylake Ankur Arora
2022-06-06 21:53 ` [PATCH v3 00/21] huge page clearing optimizations Linus Torvalds
2022-06-07 15:08   ` Ankur Arora
2022-06-07 17:56     ` Linus Torvalds
2022-06-08 19:24       ` Ankur Arora
2022-06-08 19:39         ` Linus Torvalds
2022-06-08 20:21           ` Ankur Arora
2022-06-08 19:49       ` Matthew Wilcox
2022-06-08 19:51         ` Matthew Wilcox
