From: Ankur Arora <ankur.a.arora@oracle.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org, x86@kernel.org
Cc: akpm@linux-foundation.org, luto@kernel.org, bp@alien8.de,
dave.hansen@linux.intel.com, hpa@zytor.com, mingo@redhat.com,
juri.lelli@redhat.com, vincent.guittot@linaro.org,
willy@infradead.org, mgorman@suse.de, peterz@infradead.org,
rostedt@goodmis.org, tglx@linutronix.de, jon.grimm@amd.com,
bharata@amd.com, raghavendra.kt@amd.com,
boris.ostrovsky@oracle.com, konrad.wilk@oracle.com,
Ankur Arora <ankur.a.arora@oracle.com>
Subject: [PATCH v2 3/9] mm/huge_page: cleanup clear_/copy_subpage()
Date: Wed, 30 Aug 2023 11:49:52 -0700
Message-ID: <20230830184958.2333078-4-ankur.a.arora@oracle.com>
In-Reply-To: <20230830184958.2333078-1-ankur.a.arora@oracle.com>
clear_subpage() and copy_subpage():

  static int clear_subpage(unsigned long addr, int idx, void *arg)
  static int copy_subpage(unsigned long addr, int idx, void *arg)

take as parameters an index, a virtual address that has already been
offset by that index, and either a struct page * (clear_subpage()) or
an opaque structure (copy_subpage()), which the callee then offsets
by the same index. Indexing twice like this is redundant, and the
void * argument hides the type.

Fold clear_subpage() back into its caller, and clean up the indexing
in copy_subpage(): pass the typed argument and the base address, and
let copy_subpage() compute the offset itself.
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
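[ Not part of the patch: a minimal userspace sketch of the visit
  order that __clear_huge_page() and __copy_huge_page() both use,
  converging on the target subpage from both ends so it is touched
  last and its cache lines stay hot. NPAGES and visit_order() are
  illustrative names, not kernel ones. ]

  #include <stdio.h>

  #define NPAGES 8    /* toy stand-in for pages_per_huge_page */

  /* Print the order in which subpages 0..NPAGES-1 are processed. */
  static void visit_order(int target)
  {
          int i, n = target, base, l;

          if (2 * n <= NPAGES) {
                  /* Target in first half: process the tail first. */
                  base = 0;
                  l = n;
                  for (i = NPAGES - 1; i >= 2 * n; i--)
                          printf("%d ", i);
          } else {
                  /* Target in second half: process the head first. */
                  base = NPAGES - 2 * (NPAGES - n);
                  l = NPAGES - n;
                  for (i = 0; i < base; i++)
                          printf("%d ", i);
          }

          /* Converge on the target from both ends; target comes last. */
          for (i = 0; i < l; i++) {
                  printf("%d ", base + i);                /* left_idx */
                  printf("%d ", base + 2 * l - 1 - i);    /* right_idx */
          }
          printf("\n");
  }

  int main(void)
  {
          for (int t = 0; t < NPAGES; t++) {
                  printf("target=%d: ", t);
                  visit_order(t);
          }
          return 0;
  }

[ For NPAGES=8, target=3 this prints "7 6 0 5 1 4 2 3": farthest
  subpages first, the faulting subpage last. ]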
mm/memory.c | 58 +++++++++++++++++------------------------------------
1 file changed, 18 insertions(+), 40 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6e005b787608..4f1ce3ad0af5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5914,24 +5914,14 @@ static void clear_gigantic_page(struct page *page,
}
}
-static int clear_subpage(unsigned long addr, int idx, void *arg)
-{
- struct page *page = arg;
-
- clear_user_highpage(page + idx, addr);
- return 0;
-}
-
/*
* Clear subpages of the specified huge page. The target subpage will be
* processed last to keep its cache lines hot.
*/
-static int __clear_huge_page(
- unsigned long addr_hint, unsigned int pages_per_huge_page,
- int (*process_subpage)(unsigned long addr, int idx, void *arg),
- void *arg)
+static void __clear_huge_page(struct page *page,
+ unsigned long addr_hint, unsigned int pages_per_huge_page)
{
- int i, n, base, l, ret;
+ int i, n, base, l;
unsigned long addr = addr_hint &
~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
@@ -5945,9 +5935,7 @@ static int __clear_huge_page(
/* Process subpages at the end of huge page */
for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
cond_resched();
- ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
- if (ret)
- return ret;
+ clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
} else {
/* If target subpage in second half of huge page */
@@ -5956,9 +5944,7 @@ static int __clear_huge_page(
/* Process subpages at the begin of huge page */
for (i = 0; i < base; i++) {
cond_resched();
- ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
- if (ret)
- return ret;
+ clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
/*
@@ -5970,15 +5956,11 @@ static int __clear_huge_page(
int right_idx = base + 2 * l - 1 - i;
cond_resched();
- ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
- if (ret)
- return ret;
+ clear_user_highpage(page + left_idx, addr + left_idx * PAGE_SIZE);
+
cond_resched();
- ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
- if (ret)
- return ret;
+ clear_user_highpage(page + right_idx, addr + right_idx * PAGE_SIZE);
}
- return 0;
}
__weak void clear_huge_page(struct page *page,
@@ -5993,7 +5975,7 @@ __weak void clear_huge_page(struct page *page,
return;
}
- __clear_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+ __clear_huge_page(page, addr_hint, pages_per_huge_page);
}
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
@@ -6025,12 +6007,10 @@ struct copy_subpage_arg {
struct vm_area_struct *vma;
};
-static int copy_subpage(unsigned long addr, int idx, void *arg)
+static int copy_subpage(struct copy_subpage_arg *copy_arg, unsigned long addr, int idx)
{
- struct copy_subpage_arg *copy_arg = arg;
-
if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
- addr, copy_arg->vma)) {
+ addr + idx * PAGE_SIZE, copy_arg->vma)) {
memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0);
return -EHWPOISON;
}
@@ -6041,10 +6021,8 @@ static int copy_subpage(unsigned long addr, int idx, void *arg)
* Copy subpages of the specified huge page. The target subpage will be
* processed last to keep its cache lines hot.
*/
-static int __copy_huge_page(
- unsigned long addr_hint, unsigned int pages_per_huge_page,
- int (*process_subpage)(unsigned long addr, int idx, void *arg),
- void *arg)
+static int __copy_huge_page(struct copy_subpage_arg *arg,
+ unsigned long addr_hint, unsigned int pages_per_huge_page)
{
int i, n, base, l, ret;
unsigned long addr = addr_hint &
@@ -6060,7 +6038,7 @@ static int __copy_huge_page(
/* Process subpages at the end of huge page */
for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
cond_resched();
- ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+ ret = copy_subpage(arg, addr, i);
if (ret)
return ret;
}
@@ -6071,7 +6049,7 @@ static int __copy_huge_page(
/* Process subpages at the begin of huge page */
for (i = 0; i < base; i++) {
cond_resched();
- ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+ ret = copy_subpage(arg, addr, i);
if (ret)
return ret;
}
@@ -6085,11 +6063,11 @@ static int __copy_huge_page(
int right_idx = base + 2 * l - 1 - i;
cond_resched();
- ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+ ret = copy_subpage(arg, addr, left_idx);
if (ret)
return ret;
cond_resched();
- ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+ ret = copy_subpage(arg, addr, right_idx);
if (ret)
return ret;
}
@@ -6112,7 +6090,7 @@ int copy_user_large_folio(struct folio *dst, struct folio *src,
return copy_user_gigantic_page(dst, src, addr, vma,
pages_per_huge_page);
- return __copy_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+ return __copy_huge_page(&arg, addr_hint, pages_per_huge_page);
}
long copy_folio_from_user(struct folio *dst_folio,
--
2.31.1