From: Peter Xu <peterx@redhat.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
"Kirill A . Shutemov" <kirill@shutemov.name>,
Lorenzo Stoakes <lstoakes@gmail.com>,
Axel Rasmussen <axelrasmussen@google.com>,
Matthew Wilcox <willy@infradead.org>,
John Hubbard <jhubbard@nvidia.com>,
Mike Rapoport <rppt@kernel.org>,
peterx@redhat.com, Hugh Dickins <hughd@google.com>,
David Hildenbrand <david@redhat.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Rik van Riel <riel@surriel.com>,
James Houghton <jthoughton@google.com>,
Yang Shi <shy828301@gmail.com>, Jason Gunthorpe <jgg@nvidia.com>,
Vlastimil Babka <vbabka@suse.cz>,
Andrew Morton <akpm@linux-foundation.org>
Subject: [PATCH RFC 08/12] mm/gup: Handle hugetlb for no_page_table()
Date: Wed, 15 Nov 2023 20:29:04 -0500
Message-ID: <20231116012908.392077-9-peterx@redhat.com>
In-Reply-To: <20231116012908.392077-1-peterx@redhat.com>

no_page_table() is not yet used by the hugetlb code paths. Prepare it for
that. The major difference for hugetlb is that -EFAULT is returned as long
as the page cache does not exist, even for VM_SHARED mappings. See
hugetlb_follow_page_mask().
Pass "address" into no_page_table() too, as hugetlb will need it.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
mm/gup.c | 40 ++++++++++++++++++++++++----------------
1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 69dae51f3eb1..89c1584d68f0 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -501,19 +501,27 @@ static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
- unsigned int flags)
+ unsigned int flags, unsigned long address)
{
+ if (!(flags & FOLL_DUMP))
+ return NULL;
+
/*
- * When core dumping an enormous anonymous area that nobody
- * has touched so far, we don't want to allocate unnecessary pages or
+ * When core dumping, we don't want to allocate unnecessary pages or
* page tables. Return error instead of NULL to skip handle_mm_fault,
* then get_dump_page() will return NULL to leave a hole in the dump.
* But we can only make this optimization where a hole would surely
* be zero-filled if handle_mm_fault() actually did handle it.
*/
- if ((flags & FOLL_DUMP) &&
- (vma_is_anonymous(vma) || !vma->vm_ops->fault))
+ if (is_vm_hugetlb_page(vma)) {
+ struct hstate *h = hstate_vma(vma);
+
+ if (!hugetlbfs_pagecache_present(h, vma, address))
+ return ERR_PTR(-EFAULT);
+ } else if (vma_is_anonymous(vma) || !vma->vm_ops->fault) {
return ERR_PTR(-EFAULT);
+ }
+
return NULL;
}
@@ -593,7 +601,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!ptep)
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
pte = ptep_get(ptep);
if (!pte_present(pte))
goto no_page;
@@ -685,7 +693,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return NULL;
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
}
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
@@ -701,9 +709,9 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
pmd = pmd_offset(pudp, address);
pmdval = pmdp_get_lockless(pmd);
if (pmd_none(pmdval))
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
if (!pmd_present(pmdval))
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
if (pmd_devmap(pmdval)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
@@ -714,12 +722,12 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_present(*pmd))) {
spin_unlock(ptl);
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
}
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
@@ -750,7 +758,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
pud = pud_offset(p4dp, address);
if (pud_none(*pud))
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
if (pud_devmap(*pud)) {
ptl = pud_lock(mm, pud);
page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
@@ -758,7 +766,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
return page;
}
if (unlikely(pud_bad(*pud)))
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
return follow_pmd_mask(vma, address, pud, flags, ctx);
}
@@ -772,10 +780,10 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
p4d = p4d_offset(pgdp, address);
if (p4d_none(*p4d))
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
BUILD_BUG_ON(p4d_huge(*p4d));
if (unlikely(p4d_bad(*p4d)))
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
return follow_pud_mask(vma, address, p4d, flags, ctx);
}
@@ -825,7 +833,7 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return no_page_table(vma, flags);
+ return no_page_table(vma, flags, address);
return follow_p4d_mask(vma, address, pgd, flags, ctx);
}
--
2.41.0