From: Oscar Salvador <osalvador@suse.de>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
Peter Xu <peterx@redhat.com>, Muchun Song <muchun.song@linux.dev>,
David Hildenbrand <david@redhat.com>,
SeongJae Park <sj@kernel.org>, Miaohe Lin <linmiaohe@huawei.com>,
Michal Hocko <mhocko@suse.com>,
Matthew Wilcox <willy@infradead.org>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
Oscar Salvador <osalvador@suse.de>
Subject: [PATCH 16/45] fs/proc: Enable pagemap_scan_pmd_entry to handle hugetlb vmas
Date: Thu, 4 Jul 2024 06:31:03 +0200 [thread overview]
Message-ID: <20240704043132.28501-17-osalvador@suse.de> (raw)
In-Reply-To: <20240704043132.28501-1-osalvador@suse.de>
PMD-mapped hugetlb vmas will now also reach pagemap_scan_pmd_entry.
Add the required code so it knows how to handle them there: guard the
helpers by CONFIG_PGTABLE_HAS_HUGE_LEAVES instead of
CONFIG_TRANSPARENT_HUGEPAGE, take the lock via pmd_huge_lock(), do not
try to derive PAGE_IS_FILE from the swap entry for hugetlb vmas, flush
hugetlb ranges with flush_hugetlb_tlb_range(), and handle partially
covered ranges, which cannot be split for hugetlb.
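The interesting part is the partially covered range: a THP can be split
and re-scanned at PTE level, while a hugetlb leaf cannot be split, so
the walk simply stops at 'start'. Condensed sketch of that branch
(illustrative only, the exact change is in the hunk below):

	if (end != start + HPAGE_SIZE) {
		pagemap_scan_backout_range(p, start, end);
		if (!is_vm_hugetlb_page(vma)) {
			/* THP: split and report as if there was no THP */
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, start);
			return -ENOENT;
		}
		/* hugetlb leaf cannot be split: stop the walk here */
		p->arg.walk_end = start;
		ret = 0;
		goto out_unlock;
	}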
Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
fs/proc/task_mmu.c | 41 ++++++++++++++++++++++++++---------------
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 22200018371d..df649f69ea2c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2263,8 +2263,8 @@ static void make_uffd_wp_pte(struct vm_area_struct *vma,
}
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
+static unsigned long pagemap_pmd_category(struct pagemap_scan_private *p,
struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd)
{
@@ -2296,7 +2296,8 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
if (pmd_swp_soft_dirty(pmd))
categories |= PAGE_IS_SOFT_DIRTY;
- if (p->masks_of_interest & PAGE_IS_FILE) {
+ if ((p->masks_of_interest & PAGE_IS_FILE) &&
+ !is_vm_hugetlb_page(vma)) {
swp = pmd_to_swp_entry(pmd);
if (is_pfn_swap_entry(swp) &&
!folio_test_anon(pfn_swap_entry_folio(swp)))
@@ -2321,7 +2322,7 @@ static void make_uffd_wp_pmd(struct vm_area_struct *vma,
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long pagemap_hugetlb_category(pte_t pte)
@@ -2522,22 +2523,22 @@ static int pagemap_scan_output(unsigned long categories,
return ret;
}
-static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
+static int pagemap_scan_huge_entry(pmd_t *pmd, unsigned long start,
unsigned long end, struct mm_walk *walk)
{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
struct pagemap_scan_private *p = walk->private;
struct vm_area_struct *vma = walk->vma;
unsigned long categories;
spinlock_t *ptl;
int ret = 0;
- ptl = pmd_trans_huge_lock(pmd, vma);
+ ptl = pmd_huge_lock(pmd, vma);
if (!ptl)
return -ENOENT;
categories = p->cur_vma_category |
- pagemap_thp_category(p, vma, start, *pmd);
+ pagemap_pmd_category(p, vma, start, *pmd);
if (!pagemap_scan_is_interesting_page(categories, p))
goto out_unlock;
@@ -2556,19 +2557,29 @@ static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
* needs to be performed on a portion of the huge page.
*/
if (end != start + HPAGE_SIZE) {
- spin_unlock(ptl);
- split_huge_pmd(vma, pmd, start);
pagemap_scan_backout_range(p, start, end);
- /* Report as if there was no THP */
- return -ENOENT;
+ if (!is_vm_hugetlb_page(vma)) {
+ /* Report as if there was no THP */
+ spin_unlock(ptl);
+ split_huge_pmd(vma, pmd, start);
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = 0;
+ p->arg.walk_end = start;
+ goto out_unlock;
}
make_uffd_wp_pmd(vma, start, pmd);
- flush_tlb_range(vma, start, end);
+ if (is_vm_hugetlb_page(vma))
+ flush_hugetlb_tlb_range(vma, start, end);
+ else
+ flush_tlb_range(vma, start, end);
out_unlock:
spin_unlock(ptl);
+out:
return ret;
-#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
+#else /* !CONFIG_PGTABLE_HAS_HUGE_LEAVES */
return -ENOENT;
#endif
}
@@ -2585,7 +2596,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
arch_enter_lazy_mmu_mode();
- ret = pagemap_scan_thp_entry(pmd, start, end, walk);
+ ret = pagemap_scan_huge_entry(pmd, start, end, walk);
if (ret != -ENOENT) {
arch_leave_lazy_mmu_mode();
return ret;
--
2.26.2