From: Oscar Salvador <osalvador@suse.de>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
Peter Xu <peterx@redhat.com>, Muchun Song <muchun.song@linux.dev>,
David Hildenbrand <david@redhat.com>,
SeongJae Park <sj@kernel.org>, Miaohe Lin <linmiaohe@huawei.com>,
Michal Hocko <mhocko@suse.com>,
Matthew Wilcox <willy@infradead.org>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
Oscar Salvador <osalvador@suse.de>
Subject: [PATCH 22/45] mm/mempolicy: Enable queue_folios_pmd to handle hugetlb vmas
Date: Thu, 4 Jul 2024 06:31:09 +0200 [thread overview]
Message-ID: <20240704043132.28501-23-osalvador@suse.de> (raw)
In-Reply-To: <20240704043132.28501-1-osalvador@suse.de>
PMD-mapped hugetlb vmas will also reach queue_folios_pmd.
Add the required code so queue_folios_pmd knows how to handle them,
and teach migrate_folio_add to isolate hugetlb folios via
isolate_hugetlb, skipping shared PMD mappings unless MPOL_MF_MOVE_ALL
is set.
Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
include/linux/mm_inline.h | 7 +++++++
mm/mempolicy.c | 42 ++++++++++++++++++++++++---------------
2 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 93e3eb86ef4e..521a001429d2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -591,6 +591,13 @@ static inline bool vma_has_recency(struct vm_area_struct *vma)
return true;
}
+static inline bool is_shared_pmd(pmd_t *pmd, struct vm_area_struct *vma)
+{
+ if (!is_vm_hugetlb_page(vma))
+ return false;
+ return hugetlb_pmd_shared((pte_t *)pmd);
+}
+
static inline spinlock_t *pmd_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
spinlock_t *ptl;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f8703feb68b7..5baf29da198c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -455,7 +455,8 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
};
static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
- unsigned long flags);
+ unsigned long flags, struct vm_area_struct *vma,
+ bool shared);
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
pgoff_t ilx, int *nid);
@@ -518,7 +519,8 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
return;
if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
!vma_migratable(walk->vma) ||
- !migrate_folio_add(folio, qp->pagelist, qp->flags))
+ !migrate_folio_add(folio, qp->pagelist, qp->flags, walk->vma,
+ is_shared_pmd(pmd, walk->vma)))
qp->nr_failed++;
}
@@ -543,7 +545,7 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
pte_t ptent;
spinlock_t *ptl;
- ptl = pmd_trans_huge_lock(pmd, vma);
+ ptl = pmd_huge_lock(pmd, vma);
if (ptl) {
queue_folios_pmd(pmd, walk);
spin_unlock(ptl);
@@ -598,7 +600,7 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
}
if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
!vma_migratable(vma) ||
- !migrate_folio_add(folio, qp->pagelist, flags)) {
+ !migrate_folio_add(folio, qp->pagelist, flags, vma, false)) {
qp->nr_failed++;
if (strictly_unmovable(flags))
break;
@@ -1025,8 +1027,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
#ifdef CONFIG_MIGRATION
static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
- unsigned long flags)
+ unsigned long flags, struct vm_area_struct *vma,
+ bool shared)
{
+ bool ret = true;
+ bool is_hugetlb = is_vm_hugetlb_page(vma);
/*
* Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
* Choosing not to migrate a shared folio is not counted as a failure.
@@ -1034,23 +1039,27 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
* See folio_likely_mapped_shared() on possible imprecision when we
* cannot easily detect if a folio is shared.
*/
- if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) {
- if (folio_isolate_lru(folio)) {
- list_add_tail(&folio->lru, foliolist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
- } else {
+ if ((flags & MPOL_MF_MOVE_ALL) ||
+ (!folio_likely_mapped_shared(folio) && !shared)) {
+ if (is_hugetlb)
+ return isolate_hugetlb(folio, foliolist);
+
+ ret = folio_isolate_lru(folio);
+ if (!ret)
/*
* Non-movable folio may reach here. And, there may be
* temporary off LRU folios or non-LRU movable folios.
* Treat them as unmovable folios since they can't be
* isolated, so they can't be moved at the moment.
*/
- return false;
- }
+ return ret;
+
+ list_add_tail(&folio->lru, foliolist);
+ node_stat_mod_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ folio_nr_pages(folio));
}
- return true;
+ return ret;
}
/*
@@ -1239,7 +1248,8 @@ static struct folio *alloc_migration_target_by_mpol(struct folio *src,
#else
static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
- unsigned long flags)
+ unsigned long flags, struct vm_area_struct *vma,
+ bool shared)
{
return false;
}
--
2.26.2
next prev parent reply other threads:[~2024-07-04 4:32 UTC|newest]
Thread overview: 66+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-07-04 4:30 [PATCH 00/45] hugetlb pagewalk unification Oscar Salvador
2024-07-04 4:30 ` [PATCH 01/45] arch/x86: Drop own definition of pgd,p4d_leaf Oscar Salvador
2024-07-04 4:30 ` [PATCH 02/45] mm: Add {pmd,pud}_huge_lock helper Oscar Salvador
2024-07-04 15:02 ` Peter Xu
2024-07-04 4:30 ` [PATCH 03/45] mm/pagewalk: Move vma_pgtable_walk_begin and vma_pgtable_walk_end upfront Oscar Salvador
2024-07-04 4:30 ` [PATCH 04/45] mm/pagewalk: Only call pud_entry when we have a pud leaf Oscar Salvador
2024-07-04 4:30 ` [PATCH 05/45] mm/pagewalk: Enable walk_pmd_range to handle cont-pmds Oscar Salvador
2024-07-04 15:41 ` David Hildenbrand
2024-07-05 16:56 ` kernel test robot
2024-07-04 4:30 ` [PATCH 06/45] mm/pagewalk: Do not try to split non-thp pud or pmd leafs Oscar Salvador
2024-07-04 4:30 ` [PATCH 07/45] arch/s390: Enable __s390_enable_skey_pmd to handle hugetlb vmas Oscar Salvador
2024-07-04 4:30 ` [PATCH 08/45] fs/proc: Enable smaps_pmd_entry to handle PMD-mapped " Oscar Salvador
2024-07-04 4:30 ` [PATCH 09/45] mm: Implement pud-version functions for swap and vm_normal_page_pud Oscar Salvador
2024-07-04 4:30 ` [PATCH 10/45] fs/proc: Create smaps_pud_range to handle PUD-mapped hugetlb vmas Oscar Salvador
2024-07-04 4:30 ` [PATCH 11/45] fs/proc: Enable smaps_pte_entry to handle cont-pte mapped " Oscar Salvador
2024-07-04 10:30 ` David Hildenbrand
2024-07-04 4:30 ` [PATCH 12/45] fs/proc: Enable pagemap_pmd_range to handle " Oscar Salvador
2024-07-04 4:31 ` [PATCH 13/45] mm: Implement pud-version uffd functions Oscar Salvador
2024-07-05 15:48 ` kernel test robot
2024-07-05 15:48 ` kernel test robot
2024-07-04 4:31 ` [PATCH 14/45] fs/proc: Create pagemap_pud_range to handle PUD-mapped hugetlb vmas Oscar Salvador
2024-07-04 4:31 ` [PATCH 15/45] fs/proc: Adjust pte_to_pagemap_entry for " Oscar Salvador
2024-07-04 4:31 ` [PATCH 16/45] fs/proc: Enable pagemap_scan_pmd_entry to handle " Oscar Salvador
2024-07-04 4:31 ` [PATCH 17/45] mm: Implement pud-version for pud_mkinvalid and pudp_establish Oscar Salvador
2024-07-04 4:31 ` [PATCH 18/45] fs/proc: Create pagemap_scan_pud_entry to handle PUD-mapped hugetlb vmas Oscar Salvador
2024-07-04 4:31 ` [PATCH 19/45] fs/proc: Enable gather_pte_stats to handle " Oscar Salvador
2024-07-04 4:31 ` [PATCH 20/45] fs/proc: Enable gather_pte_stats to handle cont-pte mapped " Oscar Salvador
2024-07-04 4:31 ` [PATCH 21/45] fs/proc: Create gather_pud_stats to handle PUD-mapped hugetlb pages Oscar Salvador
2024-07-04 4:31 ` Oscar Salvador [this message]
2024-07-04 4:31 ` [PATCH 23/45] mm/mempolicy: Create queue_folios_pud to handle PUD-mapped hugetlb vmas Oscar Salvador
2024-07-04 4:31 ` [PATCH 24/45] mm/memory_failure: Enable check_hwpoisoned_pmd_entry to handle " Oscar Salvador
2024-07-04 4:31 ` [PATCH 25/45] mm/memory-failure: Create check_hwpoisoned_pud_entry to handle PUD-mapped " Oscar Salvador
2024-07-04 4:31 ` [PATCH 26/45] mm/damon: Enable damon_young_pmd_entry to handle " Oscar Salvador
2024-07-04 4:31 ` [PATCH 27/45] mm/damon: Create damon_young_pud_entry to handle PUD-mapped " Oscar Salvador
2024-07-04 4:31 ` [PATCH 28/45] mm/damon: Enable damon_mkold_pmd_entry to handle " Oscar Salvador
2024-07-04 11:03 ` David Hildenbrand
2024-07-04 4:31 ` [PATCH 29/45] mm/damon: Create damon_mkold_pud_entry to handle PUD-mapped " Oscar Salvador
2024-07-04 4:31 ` [PATCH 30/45] mm,mincore: Enable mincore_pte_range to handle " Oscar Salvador
2024-07-04 4:31 ` [PATCH 31/45] mm/mincore: Create mincore_pud_range to handle PUD-mapped " Oscar Salvador
2024-07-04 4:31 ` [PATCH 32/45] mm/hmm: Enable hmm_vma_walk_pmd, to handle " Oscar Salvador
2024-07-04 4:31 ` [PATCH 33/45] mm/hmm: Enable hmm_vma_walk_pud to handle PUD-mapped " Oscar Salvador
2024-07-04 4:31 ` [PATCH 34/45] arch/powerpc: Skip hugetlb vmas in subpage_mark_vma_nohuge Oscar Salvador
2024-07-04 4:31 ` [PATCH 35/45] arch/s390: Skip hugetlb vmas in thp_split_mm Oscar Salvador
2024-07-04 4:31 ` [PATCH 36/45] fs/proc: Make clear_refs_test_walk skip hugetlb vmas Oscar Salvador
2024-07-04 4:31 ` [PATCH 37/45] mm/lock: Make mlock_test_walk " Oscar Salvador
2024-07-04 4:31 ` [PATCH 38/45] mm/madvise: Make swapin_test_walk " Oscar Salvador
2024-07-04 4:31 ` [PATCH 39/45] mm/madvise: Make madvise_cold_test_walk " Oscar Salvador
2024-07-04 4:31 ` [PATCH 40/45] mm/madvise: Make madvise_free_test_walk " Oscar Salvador
2024-07-04 4:31 ` [PATCH 41/45] mm/migrate_device: Make migrate_vma_test_walk " Oscar Salvador
2024-07-04 4:31 ` [PATCH 42/45] mm/memcontrol: Make mem_cgroup_move_test_walk " Oscar Salvador
2024-07-04 4:31 ` [PATCH 43/45] mm/memcontrol: Make mem_cgroup_count_test_walk " Oscar Salvador
2024-07-04 4:31 ` [PATCH 44/45] mm/hugetlb_vmemmap: Make vmemmap_test_walk " Oscar Salvador
2024-07-04 4:31 ` [PATCH 45/45] mm: Delete all hugetlb_entry entries Oscar Salvador
2024-07-04 10:13 ` [PATCH 00/45] hugetlb pagewalk unification Oscar Salvador
2024-07-04 10:44 ` David Hildenbrand
2024-07-04 14:30 ` Peter Xu
2024-07-04 15:23 ` David Hildenbrand
2024-07-04 16:43 ` Peter Xu
2024-07-08 8:18 ` Oscar Salvador
2024-07-08 14:28 ` Jason Gunthorpe
2024-07-10 3:52 ` David Hildenbrand
2024-07-10 11:26 ` Oscar Salvador
2024-07-11 0:15 ` David Hildenbrand
2024-07-11 4:48 ` Oscar Salvador
2024-07-11 4:53 ` David Hildenbrand
2024-07-08 14:35 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240704043132.28501-23-osalvador@suse.de \
--to=osalvador@suse.de \
--cc=akpm@linux-foundation.org \
--cc=christophe.leroy@csgroup.eu \
--cc=david@redhat.com \
--cc=linmiaohe@huawei.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mhocko@suse.com \
--cc=muchun.song@linux.dev \
--cc=peterx@redhat.com \
--cc=sj@kernel.org \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox