From: James Houghton <jthoughton@google.com>
To: Mike Kravetz <mike.kravetz@oracle.com>,
Muchun Song <songmuchun@bytedance.com>,
Peter Xu <peterx@redhat.com>,
Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@redhat.com>,
David Rientjes <rientjes@google.com>,
Axel Rasmussen <axelrasmussen@google.com>,
Mina Almasry <almasrymina@google.com>,
"Zach O'Keefe" <zokeefe@google.com>,
Manish Mishra <manish.mishra@nutanix.com>,
Naoya Horiguchi <naoya.horiguchi@nec.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Vlastimil Babka <vbabka@suse.cz>,
Baolin Wang <baolin.wang@linux.alibaba.com>,
Miaohe Lin <linmiaohe@huawei.com>,
Yang Shi <shy828301@gmail.com>,
Frank van der Linden <fvdl@google.com>,
Jiaqi Yan <jiaqiyan@google.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
James Houghton <jthoughton@google.com>
Subject: [PATCH v2 20/46] hugetlb: add HGM support to follow_hugetlb_page
Date: Sat, 18 Feb 2023 00:27:53 +0000 [thread overview]
Message-ID: <20230218002819.1486479-21-jthoughton@google.com> (raw)
In-Reply-To: <20230218002819.1486479-1-jthoughton@google.com>
Enable high-granularity mapping support in GUP.
To avoid confusion: pfn_offset is the offset, in PAGE_SIZE units, of
vaddr within the subpage that hpte points to.
Signed-off-by: James Houghton <jthoughton@google.com>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7321c6602d6f..c26b040f4fb5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6634,11 +6634,9 @@ static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
}
static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
- unsigned int flags, pte_t *pte,
+ unsigned int flags, pte_t pteval,
bool *unshare)
{
- pte_t pteval = huge_ptep_get(pte);
-
*unshare = false;
if (is_swap_pte(pteval))
return true;
@@ -6713,11 +6711,13 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
int err = -EFAULT, refs;
while (vaddr < vma->vm_end && remainder) {
- pte_t *pte;
+ pte_t *ptep, pte;
spinlock_t *ptl = NULL;
bool unshare = false;
int absent;
- struct page *page;
+ unsigned long pages_per_hpte;
+ struct page *page, *subpage;
+ struct hugetlb_pte hpte;
/*
* If we have a pending SIGKILL, don't keep faulting pages and
@@ -6734,13 +6734,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* each hugepage. We have to make sure we get the
* first, for the page indexing below to work.
*
- * Note that page table lock is not held when pte is null.
+ * hugetlb_full_walk will mask the address appropriately.
+ *
+ * Note that page table lock is not held when ptep is null.
*/
- pte = hugetlb_walk(vma, vaddr & huge_page_mask(h),
- huge_page_size(h));
- if (pte)
- ptl = huge_pte_lock(h, mm, pte);
- absent = !pte || huge_pte_none(huge_ptep_get(pte));
+ if (hugetlb_full_walk(&hpte, vma, vaddr)) {
+ ptep = NULL;
+ absent = true;
+ } else {
+ ptl = hugetlb_pte_lock(&hpte);
+ ptep = hpte.ptep;
+ pte = huge_ptep_get(ptep);
+ absent = huge_pte_none(pte);
+ }
/*
* When coredumping, it suits get_dump_page if we just return
@@ -6751,13 +6757,21 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
if (absent && (flags & FOLL_DUMP) &&
!hugetlbfs_pagecache_present(h, vma, vaddr)) {
- if (pte)
+ if (ptep)
spin_unlock(ptl);
hugetlb_vma_unlock_read(vma);
remainder = 0;
break;
}
+ if (!absent && pte_present(pte) &&
+ !hugetlb_pte_present_leaf(&hpte, pte)) {
+ /* We raced with someone splitting the PTE, so retry. */
+ spin_unlock(ptl);
+ hugetlb_vma_unlock_read(vma);
+ continue;
+ }
+
/*
* We need call hugetlb_fault for both hugepages under migration
* (in which case hugetlb_fault waits for the migration,) and
@@ -6773,7 +6787,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
vm_fault_t ret;
unsigned int fault_flags = 0;
- if (pte)
+ if (ptep)
spin_unlock(ptl);
hugetlb_vma_unlock_read(vma);
@@ -6822,8 +6836,10 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
continue;
}
- pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
- page = pte_page(huge_ptep_get(pte));
+ pfn_offset = (vaddr & ~hugetlb_pte_mask(&hpte)) >> PAGE_SHIFT;
+ subpage = pte_page(pte);
+ pages_per_hpte = hugetlb_pte_size(&hpte) / PAGE_SIZE;
+ page = compound_head(subpage);
VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
!PageAnonExclusive(page), page);
@@ -6833,22 +6849,22 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* and skip the same_page loop below.
*/
if (!pages && !vmas && !pfn_offset &&
- (vaddr + huge_page_size(h) < vma->vm_end) &&
- (remainder >= pages_per_huge_page(h))) {
- vaddr += huge_page_size(h);
- remainder -= pages_per_huge_page(h);
- i += pages_per_huge_page(h);
+ (vaddr + hugetlb_pte_size(&hpte) < vma->vm_end) &&
+ (remainder >= pages_per_hpte)) {
+ vaddr += hugetlb_pte_size(&hpte);
+ remainder -= pages_per_hpte;
+ i += pages_per_hpte;
spin_unlock(ptl);
hugetlb_vma_unlock_read(vma);
continue;
}
/* vaddr may not be aligned to PAGE_SIZE */
- refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+ refs = min3(pages_per_hpte - pfn_offset, remainder,
(vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
if (pages || vmas)
- record_subpages_vmas(nth_page(page, pfn_offset),
+ record_subpages_vmas(nth_page(subpage, pfn_offset),
vma, refs,
likely(pages) ? pages + i : NULL,
vmas ? vmas + i : NULL);
--
2.39.2.637.g21b0678d19-goog
next prev parent reply other threads:[~2023-02-18 0:29 UTC|newest]
Thread overview: 96+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-18 0:27 [PATCH v2 00/46] hugetlb: introduce HugeTLB high-granularity mapping James Houghton
2023-02-18 0:27 ` [PATCH v2 01/46] hugetlb: don't set PageUptodate for UFFDIO_CONTINUE James Houghton
2023-02-18 0:41 ` Mina Almasry
2023-02-21 15:59 ` James Houghton
2023-02-21 19:33 ` Mike Kravetz
2023-02-21 19:58 ` James Houghton
2023-02-18 0:27 ` [PATCH v2 02/46] hugetlb: remove mk_huge_pte; it is unused James Houghton
2023-02-18 0:27 ` [PATCH v2 03/46] hugetlb: remove redundant pte_mkhuge in migration path James Houghton
2023-02-18 0:27 ` [PATCH v2 04/46] hugetlb: only adjust address ranges when VMAs want PMD sharing James Houghton
2023-02-18 1:10 ` Mina Almasry
2023-02-18 0:27 ` [PATCH v2 05/46] rmap: hugetlb: switch from page_dup_file_rmap to page_add_file_rmap James Houghton
2023-03-02 1:06 ` Jiaqi Yan
2023-03-02 15:44 ` James Houghton
2023-03-02 16:43 ` James Houghton
2023-03-02 19:22 ` Mike Kravetz
2023-02-18 0:27 ` [PATCH v2 06/46] hugetlb: add CONFIG_HUGETLB_HIGH_GRANULARITY_MAPPING James Houghton
2023-02-18 0:27 ` [PATCH v2 07/46] mm: add VM_HUGETLB_HGM VMA flag James Houghton
2023-02-24 22:35 ` Mike Kravetz
2023-02-18 0:27 ` [PATCH v2 08/46] hugetlb: add HugeTLB HGM enablement helpers James Houghton
2023-02-18 1:40 ` Mina Almasry
2023-02-21 16:16 ` James Houghton
2023-02-24 23:08 ` Mike Kravetz
2023-02-18 0:27 ` [PATCH v2 09/46] mm: add MADV_SPLIT to enable HugeTLB HGM James Houghton
2023-02-18 1:58 ` Mina Almasry
2023-02-21 16:33 ` James Houghton
2023-02-24 23:25 ` Mike Kravetz
2023-02-27 15:14 ` James Houghton
2023-02-18 0:27 ` [PATCH v2 10/46] hugetlb: make huge_pte_lockptr take an explicit shift argument James Houghton
2023-02-18 0:27 ` [PATCH v2 11/46] hugetlb: add hugetlb_pte to track HugeTLB page table entries James Houghton
2023-02-18 5:24 ` Mina Almasry
2023-02-21 16:36 ` James Houghton
2023-02-25 0:09 ` Mike Kravetz
2023-02-18 0:27 ` [PATCH v2 12/46] hugetlb: add hugetlb_alloc_pmd and hugetlb_alloc_pte James Houghton
2023-02-18 17:46 ` kernel test robot
2023-02-27 19:16 ` Mike Kravetz
2023-02-27 19:31 ` James Houghton
2023-02-18 0:27 ` [PATCH v2 13/46] hugetlb: add hugetlb_hgm_walk and hugetlb_walk_step James Houghton
2023-02-18 7:43 ` kernel test robot
2023-02-18 18:07 ` kernel test robot
2023-02-21 17:09 ` James Houghton
2023-02-28 22:14 ` Mike Kravetz
2023-02-28 23:03 ` James Houghton
2023-02-18 0:27 ` [PATCH v2 14/46] hugetlb: split PTE markers when doing HGM walks James Houghton
2023-02-18 19:49 ` kernel test robot
2023-02-28 22:48 ` Mike Kravetz
2023-02-18 0:27 ` [PATCH v2 15/46] hugetlb: add make_huge_pte_with_shift James Houghton
2023-02-22 21:14 ` Mina Almasry
2023-02-22 22:53 ` James Houghton
2023-02-18 0:27 ` [PATCH v2 16/46] hugetlb: make default arch_make_huge_pte understand small mappings James Houghton
2023-02-22 21:17 ` Mina Almasry
2023-02-22 22:52 ` James Houghton
2023-02-28 23:02 ` Mike Kravetz
2023-02-18 0:27 ` [PATCH v2 17/46] hugetlbfs: do a full walk to check if vma maps a page James Houghton
2023-02-22 15:46 ` James Houghton
2023-02-28 23:52 ` Mike Kravetz
2023-02-18 0:27 ` [PATCH v2 18/46] hugetlb: add HGM support to __unmap_hugepage_range James Houghton
2023-02-18 0:27 ` [PATCH v2 19/46] hugetlb: add HGM support to hugetlb_change_protection James Houghton
2023-02-18 0:27 ` James Houghton [this message]
2023-02-18 0:27 ` [PATCH v2 21/46] hugetlb: add HGM support to hugetlb_follow_page_mask James Houghton
2023-02-18 0:27 ` [PATCH v2 22/46] hugetlb: add HGM support to copy_hugetlb_page_range James Houghton
2023-02-24 17:39 ` James Houghton
2023-02-18 0:27 ` [PATCH v2 23/46] hugetlb: add HGM support to move_hugetlb_page_tables James Houghton
2023-02-18 0:27 ` [PATCH v2 24/46] hugetlb: add HGM support to hugetlb_fault and hugetlb_no_page James Houghton
2023-02-18 0:27 ` [PATCH v2 25/46] hugetlb: use struct hugetlb_pte for walk_hugetlb_range James Houghton
2023-02-18 0:27 ` [PATCH v2 26/46] mm: rmap: provide pte_order in page_vma_mapped_walk James Houghton
2023-02-18 0:28 ` [PATCH v2 27/46] mm: rmap: update try_to_{migrate,unmap} to handle mapcount for HGM James Houghton
2023-02-18 0:28 ` [PATCH v2 28/46] mm: rmap: in try_to_{migrate,unmap}, check head page for hugetlb page flags James Houghton
2023-02-18 0:28 ` [PATCH v2 29/46] hugetlb: update page_vma_mapped to do high-granularity walks James Houghton
2023-02-18 0:28 ` [PATCH v2 30/46] hugetlb: add high-granularity migration support James Houghton
2023-02-18 0:28 ` [PATCH v2 31/46] hugetlb: sort hstates in hugetlb_init_hstates James Houghton
2023-02-18 0:28 ` [PATCH v2 32/46] hugetlb: add for_each_hgm_shift James Houghton
2023-02-18 0:28 ` [PATCH v2 33/46] hugetlb: userfaultfd: add support for high-granularity UFFDIO_CONTINUE James Houghton
2023-02-18 0:28 ` [PATCH v2 34/46] hugetlb: add MADV_COLLAPSE for hugetlb James Houghton
2023-02-18 0:28 ` [PATCH v2 35/46] hugetlb: add check to prevent refcount overflow via HGM James Houghton
2023-02-24 17:42 ` James Houghton
2023-02-24 18:05 ` James Houghton
2023-02-18 0:28 ` [PATCH v2 36/46] hugetlb: remove huge_pte_lock and huge_pte_lockptr James Houghton
2023-02-18 0:28 ` [PATCH v2 37/46] hugetlb: replace make_huge_pte with make_huge_pte_with_shift James Houghton
2023-02-18 0:28 ` [PATCH v2 38/46] mm: smaps: add stats for HugeTLB mapping size James Houghton
2023-02-18 0:28 ` [PATCH v2 39/46] hugetlb: x86: enable high-granularity mapping for x86_64 James Houghton
2023-02-18 0:28 ` [PATCH v2 40/46] docs: hugetlb: update hugetlb and userfaultfd admin-guides with HGM info James Houghton
2023-02-18 0:28 ` [PATCH v2 41/46] docs: proc: include information about HugeTLB HGM James Houghton
2023-02-18 0:28 ` [PATCH v2 42/46] selftests/mm: add HugeTLB HGM to userfaultfd selftest James Houghton
2023-02-18 0:28 ` [PATCH v2 43/46] KVM: selftests: add HugeTLB HGM to KVM demand paging selftest James Houghton
2023-02-18 0:28 ` [PATCH v2 44/46] selftests/mm: add anon and shared hugetlb to migration test James Houghton
2023-02-18 0:28 ` [PATCH v2 45/46] selftests/mm: add hugetlb HGM test to migration selftest James Houghton
2023-02-18 0:28 ` [PATCH v2 46/46] selftests/mm: add HGM UFFDIO_CONTINUE and hwpoison tests James Houghton
2023-02-24 17:37 ` James Houghton
2023-02-21 21:46 ` [PATCH v2 00/46] hugetlb: introduce HugeTLB high-granularity mapping Mike Kravetz
2023-02-22 15:48 ` David Hildenbrand
2023-02-22 20:57 ` Mina Almasry
2023-02-23 9:07 ` David Hildenbrand
2023-02-23 15:53 ` James Houghton
2023-02-23 16:17 ` David Hildenbrand
2023-02-23 18:33 ` Dr. David Alan Gilbert
2023-02-23 18:25 ` Mike Kravetz
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230218002819.1486479-21-jthoughton@google.com \
--to=jthoughton@google.com \
--cc=akpm@linux-foundation.org \
--cc=almasrymina@google.com \
--cc=axelrasmussen@google.com \
--cc=baolin.wang@linux.alibaba.com \
--cc=david@redhat.com \
--cc=dgilbert@redhat.com \
--cc=fvdl@google.com \
--cc=jiaqiyan@google.com \
--cc=linmiaohe@huawei.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=manish.mishra@nutanix.com \
--cc=mike.kravetz@oracle.com \
--cc=naoya.horiguchi@nec.com \
--cc=peterx@redhat.com \
--cc=rientjes@google.com \
--cc=shy828301@gmail.com \
--cc=songmuchun@bytedance.com \
--cc=vbabka@suse.cz \
--cc=willy@infradead.org \
--cc=zokeefe@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox