From: Zi Yan <zi.yan@sent.com>
To: linux-mm@kvack.org
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
Roman Gushchin <guro@fb.com>, Rik van Riel <riel@surriel.com>,
Matthew Wilcox <willy@infradead.org>,
Shakeel Butt <shakeelb@google.com>,
Yang Shi <shy828301@gmail.com>, Jason Gunthorpe <jgg@nvidia.com>,
Mike Kravetz <mike.kravetz@oracle.com>,
Michal Hocko <mhocko@suse.com>,
David Hildenbrand <david@redhat.com>,
William Kucharski <william.kucharski@oracle.com>,
Andrea Arcangeli <aarcange@redhat.com>,
John Hubbard <jhubbard@nvidia.com>,
David Nellans <dnellans@nvidia.com>,
linux-kernel@vger.kernel.org, Zi Yan <ziy@nvidia.com>
Subject: [RFC PATCH v2 20/30] mm: page_vma_walk: teach it about PMD-mapped PUD THP.
Date: Mon, 28 Sep 2020 13:54:18 -0400
Message-ID: <20200928175428.4110504-21-zi.yan@sent.com>
In-Reply-To: <20200928175428.4110504-1-zi.yan@sent.com>
From: Zi Yan <ziy@nvidia.com>
We now have PMD-mapped and PTE-mapped PUD THPs, so page_vma_mapped_walk()
should handle both of them properly.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
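(Note, not part of the commit: below is a minimal, self-contained userspace
sketch of the control flow this patch introduces. check_pmd() classifies a
PMD entry three ways, and page_vma_mapped_walk() then either reports a leaf
mapping, descends to the PTE level, or advances to the next PMD. The enum
mirrors the patch; check_pmd_model() is a hypothetical stand-in, since the
real classification needs live page-table state.)

#include <stdio.h>

enum check_pmd_result {
	PVM_NOT_MAPPED = 0,	/* entry does not map the page */
	PVM_LEAF_ENTRY,		/* huge or migration PMD maps the page */
	PVM_NONLEAF_ENTRY,	/* PMD points to a PTE page table page */
};

/* hypothetical stand-in for check_pmd(): just echoes the entry kind */
static enum check_pmd_result check_pmd_model(int entry_kind)
{
	return (enum check_pmd_result)entry_kind;
}

int main(void)
{
	for (int kind = PVM_NOT_MAPPED; kind <= PVM_NONLEAF_ENTRY; kind++) {
		switch (check_pmd_model(kind)) {
		case PVM_LEAF_ENTRY:
			puts("leaf entry: report the PMD mapping (return true)");
			break;
		case PVM_NONLEAF_ENTRY:
			puts("non-leaf entry: drop to pte_level and map_pte()");
			break;
		case PVM_NOT_MAPPED:
			puts("not mapped: try next_pmd (PMD-mapped PUD THP only)");
			break;
		}
	}
	return 0;
}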
mm/page_vma_mapped.c | 152 +++++++++++++++++++++++++++++++++----------
1 file changed, 118 insertions(+), 34 deletions(-)
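(Also not part of the commit: a toy model of the next_pmd stepping in the
diff below. Inside a PMD-mapped PUD THP the walker advances in
HPAGE_PMD_SIZE steps and restarts once the address crosses a PUD_SIZE
boundary, because the next entry then lives in a different PMD page table
page. The 2MB/1GB sizes assume x86_64 defaults and the start address is
made up.)

#include <stdio.h>

#define HPAGE_PMD_SIZE	(2UL << 20)	/* 2MB, assumed x86_64 default */
#define PUD_SIZE	(1UL << 30)	/* 1GB, assumed x86_64 default */

int main(void)
{
	/* hypothetical mapping start, deliberately not PUD-aligned */
	unsigned long start = 0x7f0040000000UL - 4 * HPAGE_PMD_SIZE;
	unsigned long end = start + PUD_SIZE;	/* a PUD THP spans 1GB */
	unsigned long addr = start;
	int steps = 0;

	for (;;) {
		addr += HPAGE_PMD_SIZE;
		steps++;
		if (addr >= end) {
			puts("walked past the THP: not_found()");
			break;
		}
		if (addr % PUD_SIZE == 0) {
			printf("0x%lx after %d steps: PUD boundary, reset pmd and restart\n",
			       addr, steps);
			break;	/* the walker would goto restart here */
		}
		/* otherwise: pvmw->pmd++ and re-check the next entry */
	}
	return 0;
}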
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index f88e845ad5e6..5a3c1b561ff5 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -7,6 +7,12 @@
#include "internal.h"
+enum check_pmd_result {
+ PVM_NOT_MAPPED = 0,
+ PVM_LEAF_ENTRY,
+ PVM_NONLEAF_ENTRY,
+};
+
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
page_vma_mapped_walk_done(pvmw);
@@ -52,6 +58,22 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
return true;
}
+static bool map_pmd(struct page_vma_mapped_walk *pvmw)
+{
+ pmd_t pmde;
+
+ pvmw->pmd = pmd_offset(pvmw->pud, pvmw->address);
+ pmde = READ_ONCE(*pvmw->pmd);
+ if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+ pvmw->ptl = pmd_lock(pvmw->vma->vm_mm, pvmw->pmd);
+ return true;
+ } else if (!pmd_present(pmde))
+ return false;
+
+ pvmw->ptl = pmd_lock(pvmw->vma->vm_mm, pvmw->pmd);
+ return true;
+}
+
static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
unsigned long page_pfn = page_to_pfn(page);
@@ -115,6 +137,57 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
return pfn_is_match(pvmw->page, pfn);
}
+/**
+ * check_pmd - check if @pvmw->page is mapped at @pvmw->pmd
+ *
+ * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
+ * mapped. check_pmd() has to validate this.
+ *
+ * @pvmw->pmd may point to an empty PMD, a migration PMD, a PMD mapping a
+ * huge page, or a PMD pointing to a PTE page table page.
+ *
+ * If the PVMW_MIGRATION flag is set, returns PVM_LEAF_ENTRY if @pvmw->pmd
+ * contains a migration entry that points to @pvmw->page.
+ *
+ * If the PVMW_MIGRATION flag is not set, returns PVM_LEAF_ENTRY if
+ * @pvmw->pmd points to @pvmw->page.
+ *
+ * If @pvmw->pmd points to a PTE page table page, returns PVM_NONLEAF_ENTRY.
+ *
+ * Otherwise, returns PVM_NOT_MAPPED.
+ *
+ */
+static enum check_pmd_result check_pmd(struct page_vma_mapped_walk *pvmw)
+{
+ unsigned long pfn;
+
+ if (likely(pmd_trans_huge(*pvmw->pmd))) {
+ if (pvmw->flags & PVMW_MIGRATION)
+			return PVM_NOT_MAPPED;
+ pfn = pmd_pfn(*pvmw->pmd);
+ if (!pfn_is_match(pvmw->page, pfn))
+ return PVM_NOT_MAPPED;
+ return PVM_LEAF_ENTRY;
+ } else if (!pmd_present(*pvmw->pmd)) {
+ if (thp_migration_supported()) {
+ if (!(pvmw->flags & PVMW_MIGRATION))
+				return PVM_NOT_MAPPED;
+ if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
+ swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
+
+ pfn = migration_entry_to_pfn(entry);
+ if (!pfn_is_match(pvmw->page, pfn))
+ return PVM_NOT_MAPPED;
+ return PVM_LEAF_ENTRY;
+ }
+ }
+		return PVM_NOT_MAPPED;
+ }
+ /* THP pmd was split under us: handle on pte level */
+ spin_unlock(pvmw->ptl);
+ pvmw->ptl = NULL;
+ return PVM_NONLEAF_ENTRY;
+}
/**
* page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
* @pvmw->address
@@ -146,14 +219,14 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
pgd_t *pgd;
p4d_t *p4d;
pud_t pude;
- pmd_t pmde;
+ enum check_pmd_result pmd_check_res;
if (!pvmw->pte && !pvmw->pmd && pvmw->pud)
return not_found(pvmw);
/* The only possible pmd mapping has been handled on last iteration */
if (pvmw->pmd && !pvmw->pte)
- return not_found(pvmw);
+ goto next_pmd;
if (pvmw->pte)
goto next_pte;
@@ -202,42 +275,47 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
} else if (!pud_present(pude))
return false;
- pvmw->pmd = pmd_offset(pvmw->pud, pvmw->address);
- /*
- * Make sure the pmd value isn't cached in a register by the
- * compiler and used as a stale value after we've observed a
- * subsequent update.
- */
- pmde = READ_ONCE(*pvmw->pmd);
- if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
- pvmw->ptl = pmd_lock(mm, pvmw->pmd);
- if (likely(pmd_trans_huge(*pvmw->pmd))) {
- if (pvmw->flags & PVMW_MIGRATION)
- return not_found(pvmw);
- if (pmd_page(*pvmw->pmd) != page)
- return not_found(pvmw);
+ if (!map_pmd(pvmw))
+ goto next_pmd;
+	/* the pmd lock is held after a successful map_pmd() */
+ while (1) {
+ pmd_check_res = check_pmd(pvmw);
+ if (pmd_check_res == PVM_LEAF_ENTRY)
return true;
- } else if (!pmd_present(*pvmw->pmd)) {
- if (thp_migration_supported()) {
- if (!(pvmw->flags & PVMW_MIGRATION))
- return not_found(pvmw);
- if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
- swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
-
- if (migration_entry_to_page(entry) != page)
- return not_found(pvmw);
- return true;
+ else if (pmd_check_res == PVM_NONLEAF_ENTRY)
+ goto pte_level;
+next_pmd:
+		/* Only a PMD-mapped PUD THP has a next pmd to check. */
+ if (!(PageTransHuge(pvmw->page) && compound_order(pvmw->page) == HPAGE_PUD_ORDER))
+ return not_found(pvmw);
+ do {
+ pvmw->address += HPAGE_PMD_SIZE;
+ if (pvmw->address >= pvmw->vma->vm_end ||
+ pvmw->address >=
+ __vma_address(pvmw->page, pvmw->vma) +
+ thp_nr_pages(pvmw->page) * PAGE_SIZE)
+ return not_found(pvmw);
+ /* Did we cross page table boundary? */
+ if (pvmw->address % PUD_SIZE == 0) {
+ /*
+				 * Reset pmd here, so we will not stay at the
+				 * PMD level after restart.
+ */
+ pvmw->pmd = NULL;
+ if (pvmw->ptl) {
+ spin_unlock(pvmw->ptl);
+ pvmw->ptl = NULL;
}
+ goto restart;
+ } else {
+ pvmw->pmd++;
}
- return not_found(pvmw);
- } else {
- /* THP pmd was split under us: handle on pte level */
- spin_unlock(pvmw->ptl);
- pvmw->ptl = NULL;
- }
- } else if (!pmd_present(pmde)) {
- return false;
+ } while (pmd_none(*pvmw->pmd));
+
+ if (!pvmw->ptl)
+ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
}
+pte_level:
if (!map_pte(pvmw))
goto next_pte;
while (1) {
@@ -257,6 +335,12 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
/* Did we cross page table boundary? */
if (pvmw->address % PMD_SIZE == 0) {
pte_unmap(pvmw->pte);
+ /*
+			 * In the case of a PTE-mapped PUD THP, the next
+			 * entry can be a PMD. Reset pte here, so we will
+			 * not stay at the PTE level after restart.
+ */
+ pvmw->pte = NULL;
if (pvmw->ptl) {
spin_unlock(pvmw->ptl);
pvmw->ptl = NULL;
--
2.28.0