From: Ankur Arora <ankur.a.arora@oracle.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org, x86@kernel.org
Cc: torvalds@linux-foundation.org, akpm@linux-foundation.org,
luto@kernel.org, bp@alien8.de, dave.hansen@linux.intel.com,
hpa@zytor.com, mingo@redhat.com, juri.lelli@redhat.com,
willy@infradead.org, mgorman@suse.de, peterz@infradead.org,
rostedt@goodmis.org, tglx@linutronix.de,
vincent.guittot@linaro.org, jon.grimm@amd.com, bharata@amd.com,
boris.ostrovsky@oracle.com, konrad.wilk@oracle.com,
ankur.a.arora@oracle.com
Subject: [PATCH 2/9] huge_page: get rid of {clear,copy}_subpage()
Date: Sun, 2 Apr 2023 22:22:26 -0700
Message-ID: <20230403052233.1880567-3-ankur.a.arora@oracle.com>
In-Reply-To: <20230403052233.1880567-1-ankur.a.arora@oracle.com>
clear/copy_subpage():

  static void clear_subpage(unsigned long addr, int idx, void *arg)
  static void copy_subpage(unsigned long addr, int idx, void *arg)

take as parameters an index, the post-indexing virtual address, and a
base struct page *, which is then resolved using that index.

Instead, just call clear/copy_user_highpage() directly.
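To illustrate, a simplified sketch of the resulting call pattern (not
the exact left/right interleaved loop that clear_huge_page() uses, just
the shape of the direct calls that replace the callback indirection):

	/*
	 * Simplified: clear each subpage directly, yielding between
	 * pages, instead of bouncing through clear_subpage(addr, i, page).
	 */
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}

The copy path is analogous, calling
copy_user_highpage(dst + i, src + i, addr + i * PAGE_SIZE, vma).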
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
mm/memory.c | 46 ++++++++++++----------------------------------
1 file changed, 12 insertions(+), 34 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d54bc27a35ca..6da97e6c7d21 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5645,13 +5645,6 @@ static void clear_gigantic_page(struct page *page,
}
}
-static void clear_subpage(unsigned long addr, int idx, void *arg)
-{
- struct page *page = arg;
-
- clear_user_highpage(page + idx, addr);
-}
-
void clear_huge_page(struct page *page,
unsigned long addr_hint, unsigned int pages_per_huge_page)
{
@@ -5674,7 +5667,7 @@ void clear_huge_page(struct page *page,
/* Process subpages at the end of huge page */
for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
cond_resched();
- clear_subpage(addr + i * PAGE_SIZE, i, (void *)page);
+ clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
} else {
/* If target subpage in second half of huge page */
@@ -5683,7 +5676,7 @@ void clear_huge_page(struct page *page,
/* Process subpages at the begin of huge page */
for (i = 0; i < base; i++) {
cond_resched();
- clear_subpage(addr + i * PAGE_SIZE, i, (void *)page);
+ clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
/*
@@ -5695,9 +5688,9 @@ void clear_huge_page(struct page *page,
int right_idx = base + 2 * l - 1 - i;
cond_resched();
- clear_subpage(addr + left_idx * PAGE_SIZE, left_idx, (void *)page);
+ clear_user_highpage(page + left_idx, addr + left_idx * PAGE_SIZE);
cond_resched();
- clear_subpage(addr + right_idx * PAGE_SIZE, right_idx, (void *)page);
+ clear_user_highpage(page + right_idx, addr + right_idx * PAGE_SIZE);
}
}
@@ -5719,31 +5712,12 @@ static void copy_user_gigantic_page(struct page *dst, struct page *src,
}
}
-struct copy_subpage_arg {
- struct page *dst;
- struct page *src;
- struct vm_area_struct *vma;
-};
-
-static void copy_subpage(unsigned long addr, int idx, void *arg)
-{
- struct copy_subpage_arg *copy_arg = arg;
-
- copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
- addr, copy_arg->vma);
-}
-
void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr_hint, struct vm_area_struct *vma,
unsigned int pages_per_huge_page)
{
unsigned long addr = addr_hint &
~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
- struct copy_subpage_arg arg = {
- .dst = dst,
- .src = src,
- .vma = vma,
- };
int i, n, base, l;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
@@ -5762,7 +5736,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
/* Process subpages at the end of huge page */
for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
cond_resched();
- copy_subpage(addr + i * PAGE_SIZE, i, &arg);
+ copy_user_highpage(dst + i, src + i,
+ addr + i*PAGE_SIZE, vma);
}
} else {
/* If target subpage in second half of huge page */
@@ -5771,7 +5746,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
/* Process subpages at the begin of huge page */
for (i = 0; i < base; i++) {
cond_resched();
- copy_subpage(addr + i * PAGE_SIZE, i, &arg);
+ copy_user_highpage(dst + i, src + i,
+ addr + i*PAGE_SIZE, vma);
}
}
/*
@@ -5783,9 +5759,11 @@ void copy_user_huge_page(struct page *dst, struct page *src,
int right_idx = base + 2 * l - 1 - i;
cond_resched();
- copy_subpage(addr + left_idx * PAGE_SIZE, left_idx, &arg);
+ copy_user_highpage(dst + left_idx, src + left_idx,
+ addr + left_idx*PAGE_SIZE, vma);
cond_resched();
- copy_subpage(addr + right_idx * PAGE_SIZE, right_idx, &arg);
+ copy_user_highpage(dst + right_idx, src + right_idx,
+ addr + right_idx*PAGE_SIZE, vma);
}
}
--
2.31.1
Thread overview (30+ messages):
2023-04-03 5:22 [PATCH 0/9] x86/clear_huge_page: multi-page clearing Ankur Arora
2023-04-03 5:22 ` [PATCH 1/9] huge_pages: get rid of process_huge_page() Ankur Arora
2023-04-03 5:22 ` Ankur Arora [this message]
2023-04-03 5:22 ` [PATCH 3/9] huge_page: allow arch override for clear/copy_huge_page() Ankur Arora
2023-04-03 5:22 ` [PATCH 4/9] x86/clear_page: parameterize clear_page*() to specify length Ankur Arora
2023-04-06 8:19 ` Peter Zijlstra
2023-04-07 3:03 ` Ankur Arora
2023-04-03 5:22 ` [PATCH 5/9] x86/clear_pages: add clear_pages() Ankur Arora
2023-04-06 8:23 ` Peter Zijlstra
2023-04-07 0:50 ` Ankur Arora
2023-04-07 10:34 ` Peter Zijlstra
2023-04-09 13:26 ` Matthew Wilcox
2023-04-03 5:22 ` [PATCH 6/9] mm/clear_huge_page: use multi-page clearing Ankur Arora
2023-04-03 5:22 ` [PATCH 7/9] sched: define TIF_ALLOW_RESCHED Ankur Arora
2023-04-05 20:07 ` Peter Zijlstra
2023-04-03 5:22 ` [PATCH 8/9] irqentry: define irqentry_exit_allow_resched() Ankur Arora
2023-04-04 9:38 ` Thomas Gleixner
2023-04-05 5:29 ` Ankur Arora
2023-04-05 20:22 ` Peter Zijlstra
2023-04-06 16:56 ` Ankur Arora
2023-04-06 20:13 ` Peter Zijlstra
2023-04-06 20:16 ` Peter Zijlstra
2023-04-07 2:29 ` Ankur Arora
2023-04-07 10:23 ` Peter Zijlstra
2023-04-03 5:22 ` [PATCH 9/9] x86/clear_huge_page: make clear_contig_region() preemptible Ankur Arora
2023-04-05 20:27 ` Peter Zijlstra
2023-04-06 17:00 ` Ankur Arora
2023-04-05 19:48 ` [PATCH 0/9] x86/clear_huge_page: multi-page clearing Raghavendra K T
2023-04-08 22:46 ` Ankur Arora
2023-04-10 6:26 ` Raghavendra K T