From: mpenttil@redhat.com
To: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org,
"Mika Penttilä" <mpenttil@redhat.com>,
"David Hildenbrand" <david@redhat.com>,
"Jason Gunthorpe" <jgg@nvidia.com>,
"Leon Romanovsky" <leonro@nvidia.com>,
"Alistair Popple" <apopple@nvidia.com>,
"Balbir Singh" <balbirs@nvidia.com>, "Zi Yan" <ziy@nvidia.com>,
"Matthew Brost" <matthew.brost@intel.com>
Subject: [PATCH 3/3] mm/migrate_device.c: remove migrate_vma_collect_*() functions
Date: Wed, 14 Jan 2026 11:19:23 +0200
Message-ID: <20260114091923.3950465-4-mpenttil@redhat.com>
In-Reply-To: <20260114091923.3950465-1-mpenttil@redhat.com>
From: Mika Penttilä <mpenttil@redhat.com>
With the unified fault handling and migrate path, the
migrate_vma_collect_*() functions are no longer used; remove them.
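
For context, here is a minimal sketch (not part of this patch) of the
driver-facing migrate_vma flow, which is assumed to stay unchanged by this
series; only the internal collection step behind migrate_vma_setup() moves
to the unified pagewalk. The helper example_migrate_one_page() is made up
for illustration; the struct migrate_vma fields and the
migrate_vma_setup()/migrate_vma_pages()/migrate_vma_finalize() calls are
the existing API from include/linux/migrate.h.

/* Hypothetical example; one page only, error handling trimmed. */
static int example_migrate_one_page(struct vm_area_struct *vma,
				    unsigned long addr, void *pgmap_owner)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	/* Collection of source pages now happens via the unified walk. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/*
	 * A driver would allocate its destination page here and set
	 * dst_pfn = migrate_pfn(page_to_pfn(dpage)) for every source entry
	 * that has MIGRATE_PFN_MIGRATE set; entries left at 0 are simply
	 * restored by migrate_vma_finalize().
	 */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}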
Cc: David Hildenbrand <david@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Leon Romanovsky <leonro@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Mika Penttilä <mpenttil@redhat.com>
---
mm/migrate_device.c | 508 --------------------------------------------
1 file changed, 508 deletions(-)
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index d89efdfca8f6..c8f5a0615a5e 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -18,514 +18,6 @@
#include <asm/tlbflush.h>
#include "internal.h"
-static int migrate_vma_collect_skip(unsigned long start,
- unsigned long end,
- struct mm_walk *walk)
-{
- struct migrate_vma *migrate = walk->private;
- unsigned long addr;
-
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- migrate->dst[migrate->npages] = 0;
- migrate->src[migrate->npages++] = 0;
- }
-
- return 0;
-}
-
-static int migrate_vma_collect_hole(unsigned long start,
- unsigned long end,
- __always_unused int depth,
- struct mm_walk *walk)
-{
- struct migrate_vma *migrate = walk->private;
- unsigned long addr;
-
- /* Only allow populating anonymous memory. */
- if (!vma_is_anonymous(walk->vma))
- return migrate_vma_collect_skip(start, end, walk);
-
- if (thp_migration_supported() &&
- (migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
- (IS_ALIGNED(start, HPAGE_PMD_SIZE) &&
- IS_ALIGNED(end, HPAGE_PMD_SIZE))) {
- migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE |
- MIGRATE_PFN_COMPOUND;
- migrate->dst[migrate->npages] = 0;
- migrate->npages++;
- migrate->cpages++;
-
- /*
- * Collect the remaining entries as holes, in case we
- * need to split later
- */
- return migrate_vma_collect_skip(start + PAGE_SIZE, end, walk);
- }
-
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
- migrate->dst[migrate->npages] = 0;
- migrate->npages++;
- migrate->cpages++;
- }
-
- return 0;
-}
-
-/**
- * migrate_vma_split_folio() - Helper function to split a THP folio
- * @folio: the folio to split
- * @fault_page: struct page associated with the fault if any
- *
- * Returns 0 on success
- */
-static int migrate_vma_split_folio(struct folio *folio,
- struct page *fault_page)
-{
- int ret;
- struct folio *fault_folio = fault_page ? page_folio(fault_page) : NULL;
- struct folio *new_fault_folio = NULL;
-
- if (folio != fault_folio) {
- folio_get(folio);
- folio_lock(folio);
- }
-
- ret = split_folio(folio);
- if (ret) {
- if (folio != fault_folio) {
- folio_unlock(folio);
- folio_put(folio);
- }
- return ret;
- }
-
- new_fault_folio = fault_page ? page_folio(fault_page) : NULL;
-
- /*
- * Ensure the lock is held on the correct
- * folio after the split
- */
- if (!new_fault_folio) {
- folio_unlock(folio);
- folio_put(folio);
- } else if (folio != new_fault_folio) {
- if (new_fault_folio != fault_folio) {
- folio_get(new_fault_folio);
- folio_lock(new_fault_folio);
- }
- folio_unlock(folio);
- folio_put(folio);
- }
-
- return 0;
-}
-
-/** migrate_vma_collect_huge_pmd - collect THP pages without splitting the
- * folio for device private pages.
- * @pmdp: pointer to pmd entry
- * @start: start address of the range for migration
- * @end: end address of the range for migration
- * @walk: mm_walk callback structure
- * @fault_folio: folio associated with the fault if any
- *
- * Collect the huge pmd entry at @pmdp for migration and set the
- * MIGRATE_PFN_COMPOUND flag in the migrate src entry to indicate that
- * migration will occur at HPAGE_PMD granularity
- */
-static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
- unsigned long end, struct mm_walk *walk,
- struct folio *fault_folio)
-{
- struct mm_struct *mm = walk->mm;
- struct folio *folio;
- struct migrate_vma *migrate = walk->private;
- spinlock_t *ptl;
- int ret;
- unsigned long write = 0;
-
- ptl = pmd_lock(mm, pmdp);
- if (pmd_none(*pmdp)) {
- spin_unlock(ptl);
- return migrate_vma_collect_hole(start, end, -1, walk);
- }
-
- if (pmd_trans_huge(*pmdp)) {
- if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
- spin_unlock(ptl);
- return migrate_vma_collect_skip(start, end, walk);
- }
-
- folio = pmd_folio(*pmdp);
- if (is_huge_zero_folio(folio)) {
- spin_unlock(ptl);
- return migrate_vma_collect_hole(start, end, -1, walk);
- }
- if (pmd_write(*pmdp))
- write = MIGRATE_PFN_WRITE;
- } else if (!pmd_present(*pmdp)) {
- const softleaf_t entry = softleaf_from_pmd(*pmdp);
-
- folio = softleaf_to_folio(entry);
-
- if (!softleaf_is_device_private(entry) ||
- !(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
- (folio->pgmap->owner != migrate->pgmap_owner)) {
- spin_unlock(ptl);
- return migrate_vma_collect_skip(start, end, walk);
- }
-
- if (softleaf_is_migration(entry)) {
- migration_entry_wait_on_locked(entry, ptl);
- spin_unlock(ptl);
- return -EAGAIN;
- }
-
- if (softleaf_is_device_private_write(entry))
- write = MIGRATE_PFN_WRITE;
- } else {
- spin_unlock(ptl);
- return -EAGAIN;
- }
-
- folio_get(folio);
- if (folio != fault_folio && unlikely(!folio_trylock(folio))) {
- spin_unlock(ptl);
- folio_put(folio);
- return migrate_vma_collect_skip(start, end, walk);
- }
-
- if (thp_migration_supported() &&
- (migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
- (IS_ALIGNED(start, HPAGE_PMD_SIZE) &&
- IS_ALIGNED(end, HPAGE_PMD_SIZE))) {
-
- struct page_vma_mapped_walk pvmw = {
- .ptl = ptl,
- .address = start,
- .pmd = pmdp,
- .vma = walk->vma,
- };
-
- unsigned long pfn = page_to_pfn(folio_page(folio, 0));
-
- migrate->src[migrate->npages] = migrate_pfn(pfn) | write
- | MIGRATE_PFN_MIGRATE
- | MIGRATE_PFN_COMPOUND;
- migrate->dst[migrate->npages++] = 0;
- migrate->cpages++;
- ret = set_pmd_migration_entry(&pvmw, folio_page(folio, 0));
- if (ret) {
- migrate->npages--;
- migrate->cpages--;
- migrate->src[migrate->npages] = 0;
- migrate->dst[migrate->npages] = 0;
- goto fallback;
- }
- migrate_vma_collect_skip(start + PAGE_SIZE, end, walk);
- spin_unlock(ptl);
- return 0;
- }
-
-fallback:
- spin_unlock(ptl);
- if (!folio_test_large(folio))
- goto done;
- ret = split_folio(folio);
- if (fault_folio != folio)
- folio_unlock(folio);
- folio_put(folio);
- if (ret)
- return migrate_vma_collect_skip(start, end, walk);
- if (pmd_none(pmdp_get_lockless(pmdp)))
- return migrate_vma_collect_hole(start, end, -1, walk);
-
-done:
- return -ENOENT;
-}
-
-static int migrate_vma_collect_pmd(pmd_t *pmdp,
- unsigned long start,
- unsigned long end,
- struct mm_walk *walk)
-{
- struct migrate_vma *migrate = walk->private;
- struct vm_area_struct *vma = walk->vma;
- struct mm_struct *mm = vma->vm_mm;
- unsigned long addr = start, unmapped = 0;
- spinlock_t *ptl;
- struct folio *fault_folio = migrate->fault_page ?
- page_folio(migrate->fault_page) : NULL;
- pte_t *ptep;
-
-again:
- if (pmd_trans_huge(*pmdp) || !pmd_present(*pmdp)) {
- int ret = migrate_vma_collect_huge_pmd(pmdp, start, end, walk, fault_folio);
-
- if (ret == -EAGAIN)
- goto again;
- if (ret == 0)
- return 0;
- }
-
- ptep = pte_offset_map_lock(mm, pmdp, start, &ptl);
- if (!ptep)
- goto again;
- arch_enter_lazy_mmu_mode();
- ptep += (addr - start) / PAGE_SIZE;
-
- for (; addr < end; addr += PAGE_SIZE, ptep++) {
- struct dev_pagemap *pgmap;
- unsigned long mpfn = 0, pfn;
- struct folio *folio;
- struct page *page;
- softleaf_t entry;
- pte_t pte;
-
- pte = ptep_get(ptep);
-
- if (pte_none(pte)) {
- if (vma_is_anonymous(vma)) {
- mpfn = MIGRATE_PFN_MIGRATE;
- migrate->cpages++;
- }
- goto next;
- }
-
- if (!pte_present(pte)) {
- /*
- * Only care about unaddressable device page special
- * page table entry. Other special swap entries are not
- * migratable, and we ignore regular swapped page.
- */
- entry = softleaf_from_pte(pte);
- if (!softleaf_is_device_private(entry))
- goto next;
-
- page = softleaf_to_page(entry);
- pgmap = page_pgmap(page);
- if (!(migrate->flags &
- MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
- pgmap->owner != migrate->pgmap_owner)
- goto next;
-
- folio = page_folio(page);
- if (folio_test_large(folio)) {
- int ret;
-
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(ptep, ptl);
- ret = migrate_vma_split_folio(folio,
- migrate->fault_page);
-
- if (ret) {
- if (unmapped)
- flush_tlb_range(walk->vma, start, end);
-
- return migrate_vma_collect_skip(addr, end, walk);
- }
-
- goto again;
- }
-
- mpfn = migrate_pfn(page_to_pfn(page)) |
- MIGRATE_PFN_MIGRATE;
- if (softleaf_is_device_private_write(entry))
- mpfn |= MIGRATE_PFN_WRITE;
- } else {
- pfn = pte_pfn(pte);
- if (is_zero_pfn(pfn) &&
- (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
- mpfn = MIGRATE_PFN_MIGRATE;
- migrate->cpages++;
- goto next;
- }
- page = vm_normal_page(migrate->vma, addr, pte);
- if (page && !is_zone_device_page(page) &&
- !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
- goto next;
- } else if (page && is_device_coherent_page(page)) {
- pgmap = page_pgmap(page);
-
- if (!(migrate->flags &
- MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
- pgmap->owner != migrate->pgmap_owner)
- goto next;
- }
- folio = page ? page_folio(page) : NULL;
- if (folio && folio_test_large(folio)) {
- int ret;
-
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(ptep, ptl);
- ret = migrate_vma_split_folio(folio,
- migrate->fault_page);
-
- if (ret) {
- if (unmapped)
- flush_tlb_range(walk->vma, start, end);
-
- return migrate_vma_collect_skip(addr, end, walk);
- }
-
- goto again;
- }
- mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
- mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
- }
-
- if (!page || !page->mapping) {
- mpfn = 0;
- goto next;
- }
-
- /*
- * By getting a reference on the folio we pin it and that blocks
- * any kind of migration. Side effect is that it "freezes" the
- * pte.
- *
- * We drop this reference after isolating the folio from the lru
- * for non device folio (device folio are not on the lru and thus
- * can't be dropped from it).
- */
- folio = page_folio(page);
- folio_get(folio);
-
- /*
- * We rely on folio_trylock() to avoid deadlock between
- * concurrent migrations where each is waiting on the others
- * folio lock. If we can't immediately lock the folio we fail this
- * migration as it is only best effort anyway.
- *
- * If we can lock the folio it's safe to set up a migration entry
- * now. In the common case where the folio is mapped once in a
- * single process setting up the migration entry now is an
- * optimisation to avoid walking the rmap later with
- * try_to_migrate().
- */
- if (fault_folio == folio || folio_trylock(folio)) {
- bool anon_exclusive;
- pte_t swp_pte;
-
- flush_cache_page(vma, addr, pte_pfn(pte));
- anon_exclusive = folio_test_anon(folio) &&
- PageAnonExclusive(page);
- if (anon_exclusive) {
- pte = ptep_clear_flush(vma, addr, ptep);
-
- if (folio_try_share_anon_rmap_pte(folio, page)) {
- set_pte_at(mm, addr, ptep, pte);
- if (fault_folio != folio)
- folio_unlock(folio);
- folio_put(folio);
- mpfn = 0;
- goto next;
- }
- } else {
- pte = ptep_get_and_clear(mm, addr, ptep);
- }
-
- migrate->cpages++;
-
- /* Set the dirty flag on the folio now the pte is gone. */
- if (pte_dirty(pte))
- folio_mark_dirty(folio);
-
- /* Setup special migration page table entry */
- if (mpfn & MIGRATE_PFN_WRITE)
- entry = make_writable_migration_entry(
- page_to_pfn(page));
- else if (anon_exclusive)
- entry = make_readable_exclusive_migration_entry(
- page_to_pfn(page));
- else
- entry = make_readable_migration_entry(
- page_to_pfn(page));
- if (pte_present(pte)) {
- if (pte_young(pte))
- entry = make_migration_entry_young(entry);
- if (pte_dirty(pte))
- entry = make_migration_entry_dirty(entry);
- }
- swp_pte = swp_entry_to_pte(entry);
- if (pte_present(pte)) {
- if (pte_soft_dirty(pte))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pte))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
- } else {
- if (pte_swp_soft_dirty(pte))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_swp_uffd_wp(pte))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
- }
- set_pte_at(mm, addr, ptep, swp_pte);
-
- /*
- * This is like regular unmap: we remove the rmap and
- * drop the folio refcount. The folio won't be freed, as
- * we took a reference just above.
- */
- folio_remove_rmap_pte(folio, page, vma);
- folio_put(folio);
-
- if (pte_present(pte))
- unmapped++;
- } else {
- folio_put(folio);
- mpfn = 0;
- }
-
-next:
- migrate->dst[migrate->npages] = 0;
- migrate->src[migrate->npages++] = mpfn;
- }
-
- /* Only flush the TLB if we actually modified any entries */
- if (unmapped)
- flush_tlb_range(walk->vma, start, end);
-
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(ptep - 1, ptl);
-
- return 0;
-}
-
-static const struct mm_walk_ops migrate_vma_walk_ops = {
- .pmd_entry = migrate_vma_collect_pmd,
- .pte_hole = migrate_vma_collect_hole,
- .walk_lock = PGWALK_RDLOCK,
-};
-
-/*
- * migrate_vma_collect() - collect pages over a range of virtual addresses
- * @migrate: migrate struct containing all migration information
- *
- * This will walk the CPU page table. For each virtual address backed by a
- * valid page, it updates the src array and takes a reference on the page, in
- * order to pin the page until we lock it and unmap it.
- */
-static void migrate_vma_collect(struct migrate_vma *migrate)
-{
- struct mmu_notifier_range range;
-
- /*
- * Note that the pgmap_owner is passed to the mmu notifier callback so
- * that the registered device driver can skip invalidating device
- * private page mappings that won't be migrated.
- */
- mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
- migrate->vma->vm_mm, migrate->start, migrate->end,
- migrate->pgmap_owner);
- mmu_notifier_invalidate_range_start(&range);
-
- walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
- &migrate_vma_walk_ops, migrate);
-
- mmu_notifier_invalidate_range_end(&range);
- migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
-}
-
/*
* migrate_vma_check_page() - check if page is pinned or not
* @page: struct page to check
--
2.50.0
Thread overview: 4+ messages
2026-01-14 9:19 [PATCH 0/3] Migrate on fault for device pages mpenttil
2026-01-14 9:19 ` [PATCH 1/3] mm: unified hmm fault and migrate device pagewalk paths mpenttil
2026-01-14 9:19 ` [PATCH 2/3] mm: add new testcase for the migrate on fault case mpenttil
2026-01-14 9:19 ` mpenttil [this message]