From: Peter Xu <peterx@redhat.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
"Kirill A . Shutemov" <kirill@shutemov.name>,
Lorenzo Stoakes <lstoakes@gmail.com>,
Axel Rasmussen <axelrasmussen@google.com>,
Matthew Wilcox <willy@infradead.org>,
John Hubbard <jhubbard@nvidia.com>,
Mike Rapoport <rppt@kernel.org>,
peterx@redhat.com, Hugh Dickins <hughd@google.com>,
David Hildenbrand <david@redhat.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Rik van Riel <riel@surriel.com>,
James Houghton <jthoughton@google.com>,
Yang Shi <shy828301@gmail.com>, Jason Gunthorpe <jgg@nvidia.com>,
Vlastimil Babka <vbabka@suse.cz>,
Andrew Morton <akpm@linux-foundation.org>
Subject: [PATCH RFC 09/12] mm/gup: Handle huge pud for follow_pud_mask()
Date: Wed, 15 Nov 2023 20:29:05 -0500
Message-ID: <20231116012908.392077-10-peterx@redhat.com>
In-Reply-To: <20231116012908.392077-1-peterx@redhat.com>

Teach follow_pud_mask() to handle normal PUD pages like hugetlb.

Rename follow_devmap_pud() to follow_huge_pud(), and move it out of
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD; let that config option cover
only the devmap-special operations, like the pgmap handling.

In the new follow_huge_pud(), take care of possible CoR for hugetlb if
necessary.

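Concretely, that is the gup_must_unshare() check in the new helper (the
same lines appear in the huge_memory.c hunk below); it fails the walk
with -EMLINK so that GUP can resolve it via an unshare fault first:

	if (!pud_devmap(pud) && !pud_write(pud) &&
	    gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);
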
While at it, optimize the non-present check by adding an early
pud_present() check before taking the pgtable lock, failing
follow_page() early if the PUD is not present: that is required by both
devmap and hugetlb.  Use pud_huge() so that the pud_devmap() case is
also covered.

We need to export "struct follow_page_context" along the way (moving it
from mm/gup.c into huge_mm.h), so that huge_memory.c can use it.

One more trivial thing to mention: introduce a local "pud_t pud" in the
code paths along the way, so the code doesn't dereference *pudp multiple
times.  Not only does the repeated dereference read less
straightforwardly, but it is also unclear, when the entry is being
modified concurrently, whether the multiple reads could race and observe
different *pudp values.

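As a minimal sketch of the pattern (the real change is in the
follow_pud_mask() hunk below):

	pud_t *pudp, pud;

	pudp = pud_offset(p4dp, address);
	pud = *pudp;	/* read the entry exactly once */

	/* All later checks see one consistent snapshot */
	if (pud_none(pud) || !pud_present(pud))
		return no_page_table(vma, flags, address);
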
Set ctx->page_mask properly for a PUD entry.  As a side effect, this
should also let devmap GUP on a PUD jump over the whole PUD range,
though that is not yet verified.

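For reference, the consumer side in __get_user_pages() uses page_mask
to decide how many pages it can step over at once; roughly (quoting the
existing logic in mm/gup.c):

	page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
	if (page_increm > nr_pages)
		page_increm = nr_pages;
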
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 include/linux/huge_mm.h | 17 +++----
 mm/gup.c                | 22 ++++-----
 mm/huge_memory.c        | 98 +++++++++++++++++++++++------------------
 3 files changed, 73 insertions(+), 64 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ec463410aecc..84815012d3cf 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -7,6 +7,11 @@
#include <linux/fs.h> /* only for vma_is_dax() */
+struct follow_page_context {
+ struct dev_pagemap *pgmap;
+ unsigned int page_mask;
+};
+
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
@@ -222,8 +227,6 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
-struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, int flags, struct dev_pagemap **pgmap);
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
@@ -372,18 +375,16 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
return NULL;
}
-static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
- unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
-{
- return NULL;
-}
-
static inline bool thp_migration_supported(void)
{
return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+struct page *follow_huge_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, int flags,
+ struct follow_page_context *ctx);
+
static inline int split_folio_to_list(struct folio *folio,
struct list_head *list)
{
diff --git a/mm/gup.c b/mm/gup.c
index 89c1584d68f0..55a2ae55f00f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -25,11 +25,6 @@
#include "internal.h"
-struct follow_page_context {
- struct dev_pagemap *pgmap;
- unsigned int page_mask;
-};
-
static inline void sanity_check_pinned_pages(struct page **pages,
unsigned long npages)
{
@@ -751,24 +746,25 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
unsigned int flags,
struct follow_page_context *ctx)
{
- pud_t *pud;
+ pud_t *pudp, pud;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
- pud = pud_offset(p4dp, address);
- if (pud_none(*pud))
+ pudp = pud_offset(p4dp, address);
+ pud = *pudp;
+ if (pud_none(pud) || !pud_present(pud))
return no_page_table(vma, flags, address);
- if (pud_devmap(*pud)) {
- ptl = pud_lock(mm, pud);
- page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
+ if (pud_huge(pud)) {
+ ptl = pud_lock(mm, pudp);
+ page = follow_huge_pud(vma, address, pudp, flags, ctx);
spin_unlock(ptl);
return page;
}
- if (unlikely(pud_bad(*pud)))
+ if (unlikely(pud_bad(pud)))
return no_page_table(vma, flags, address);
- return follow_pmd_mask(vma, address, pud, flags, ctx);
+ return follow_pmd_mask(vma, address, pudp, flags, ctx);
}
static struct page *follow_p4d_mask(struct vm_area_struct *vma,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6eb55f97a3d2..6748ef5f3fd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1207,49 +1207,6 @@ static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
update_mmu_cache_pud(vma, addr, pud);
}
-struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, int flags, struct dev_pagemap **pgmap)
-{
- unsigned long pfn = pud_pfn(*pud);
- struct mm_struct *mm = vma->vm_mm;
- struct page *page;
- int ret;
-
- assert_spin_locked(pud_lockptr(mm, pud));
-
- if (flags & FOLL_WRITE && !pud_write(*pud))
- return NULL;
-
- if (pud_present(*pud) && pud_devmap(*pud))
- /* pass */;
- else
- return NULL;
-
- if (flags & FOLL_TOUCH)
- touch_pud(vma, addr, pud, flags & FOLL_WRITE);
-
- /*
- * device mapped pages can only be returned if the
- * caller will manage the page reference count.
- *
- * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
- */
- if (!(flags & (FOLL_GET | FOLL_PIN)))
- return ERR_PTR(-EEXIST);
-
- pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
- *pgmap = get_dev_pagemap(pfn, *pgmap);
- if (!*pgmap)
- return ERR_PTR(-EFAULT);
- page = pfn_to_page(pfn);
-
- ret = try_grab_page(page, flags);
- if (ret)
- page = ERR_PTR(ret);
-
- return page;
-}
-
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
struct vm_area_struct *vma)
@@ -1305,6 +1262,61 @@ void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+struct page *follow_huge_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pudp, int flags,
+ struct follow_page_context *ctx)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct page *page;
+ pud_t pud = *pudp;
+ unsigned long pfn = pud_pfn(pud);
+ int ret;
+
+ assert_spin_locked(pud_lockptr(mm, pudp));
+
+ if ((flags & FOLL_WRITE) && !pud_write(pud))
+ return NULL;
+
+ if (!pud_present(pud))
+ return NULL;
+
+ pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ if (pud_devmap(pud)) {
+ /*
+ * device mapped pages can only be returned if the caller
+ * will manage the page reference count.
+ *
+ * At least one of FOLL_GET | FOLL_PIN must be set, so
+ * assert that here:
+ */
+ if (!(flags & (FOLL_GET | FOLL_PIN)))
+ return ERR_PTR(-EEXIST);
+
+ if (flags & FOLL_TOUCH)
+ touch_pud(vma, addr, pudp, flags & FOLL_WRITE);
+
+ ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
+ if (!ctx->pgmap)
+ return ERR_PTR(-EFAULT);
+ }
+#endif
+ page = pfn_to_page(pfn);
+
+ if (!pud_devmap(pud) && !pud_write(pud) &&
+ gup_must_unshare(vma, flags, page))
+ return ERR_PTR(-EMLINK);
+
+ ret = try_grab_page(page, flags);
+ if (ret)
+ page = ERR_PTR(ret);
+ else
+ ctx->page_mask = HPAGE_PUD_NR - 1;
+
+ return page;
+}
+
void huge_pmd_set_accessed(struct vm_fault *vmf)
{
bool write = vmf->flags & FAULT_FLAG_WRITE;
--
2.41.0