From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Muchun Song <muchun.song@linux.dev>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
David Hildenbrand <david@redhat.com>, <linux-mm@kvack.org>,
Huang Ying <ying.huang@intel.com>,
Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: [PATCH v2 3/4] mm: memory: improve copy_user_large_folio()
Date: Tue, 18 Jun 2024 17:12:41 +0800
Message-ID: <20240618091242.2140164-4-wangkefeng.wang@huawei.com>
In-Reply-To: <20240618091242.2140164-1-wangkefeng.wang@huawei.com>
Use nr_pages instead of pages_per_huge_page, and move the address
alignment from copy_user_large_folio() into the callers, since the
alignment is only needed when the caller does not know which address
within the folio will be accessed.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
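A note for reviewers, an illustrative sketch (not part of the diff) of
why the callers can do the alignment themselves: the open-coded mask
removed from copy_user_large_folio(),

	addr_hint & ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1)

rounds down to the folio's base address, which is exactly what
ALIGN_DOWN() computes, since huge_page_size(h) equals
pages_per_huge_page(h) << PAGE_SHIFT and is a power of two:

	/*
	 * A caller with no single faulting address to hint at (as in
	 * the copy_hugetlb_page_range() hunk below) passes the folio's
	 * base address, so the whole folio is copied from its start.
	 */
	unsigned long sz = huge_page_size(h);	/* e.g. 2M for PMD-sized pages */

	ret = copy_user_large_folio(new_folio, pte_folio,
				    ALIGN_DOWN(addr, sz), dst_vma);

Callers that do know the faulting address can keep passing it
unaligned, and process_huge_page() will still use the hint to copy the
faulting subpage last for cache locality.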
mm/hugetlb.c | 18 ++++++++----------
mm/memory.c | 11 ++++-------
2 files changed, 12 insertions(+), 17 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 58d8703a1065..a41afeeb2188 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5488,9 +5488,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				ret = PTR_ERR(new_folio);
 				break;
 			}
-			ret = copy_user_large_folio(new_folio,
-						    pte_folio,
-						    addr, dst_vma);
+			ret = copy_user_large_folio(new_folio, pte_folio,
+						    ALIGN_DOWN(addr, sz), dst_vma);
 			folio_put(pte_folio);
 			if (ret) {
 				folio_put(new_folio);
@@ -6680,7 +6679,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	struct hstate *h = hstate_vma(dst_vma);
 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
-	unsigned long size;
+	unsigned long size = huge_page_size(h);
 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
 	pte_t _dst_pte;
 	spinlock_t *ptl;
@@ -6699,8 +6698,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 		}
 
 		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
-		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
-				huge_page_size(h));
+		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
 
 		/* No need to invalidate - it was non-present before */
 		update_mmu_cache(dst_vma, dst_addr, dst_pte);
@@ -6774,7 +6772,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
+		ret = copy_user_large_folio(folio, *foliop,
+					    ALIGN_DOWN(dst_addr, size), dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 		if (ret) {
@@ -6801,9 +6800,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 
 	/* Add shared, newly allocated pages to the page cache. */
 	if (vm_shared && !is_continue) {
-		size = i_size_read(mapping->host) >> huge_page_shift(h);
 		ret = -EFAULT;
-		if (idx >= size)
+		if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
 			goto out_release_nounlock;
 
 		/*
@@ -6860,7 +6858,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 	if (wp_enabled)
 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
 
-	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
+	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
 
 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
 
diff --git a/mm/memory.c b/mm/memory.c
index a48a790a2b5b..12115e45dc24 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6496,20 +6496,17 @@ static int copy_subpage(unsigned long addr, int idx, void *arg)
 int copy_user_large_folio(struct folio *dst, struct folio *src,
 			  unsigned long addr_hint, struct vm_area_struct *vma)
 {
-	unsigned int pages_per_huge_page = folio_nr_pages(dst);
-	unsigned long addr = addr_hint &
-		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+	unsigned int nr_pages = folio_nr_pages(dst);
 	struct copy_subpage_arg arg = {
 		.dst = dst,
 		.src = src,
 		.vma = vma,
 	};
 
-	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
-		return copy_user_gigantic_page(dst, src, addr, vma,
-					       pages_per_huge_page);
+	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
 
-	return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
 }
 
 long copy_folio_from_user(struct folio *dst_folio,
--
2.27.0