From: Jordan Niethe <jniethe@nvidia.com>
To: linux-mm@kvack.org
Cc: balbirs@nvidia.com, matthew.brost@intel.com,
akpm@linux-foundation.org, linux-kernel@vger.kernel.org,
dri-devel@lists.freedesktop.org, david@redhat.com,
ziy@nvidia.com, apopple@nvidia.com, lorenzo.stoakes@oracle.com,
lyude@redhat.com, dakr@kernel.org, airlied@gmail.com,
simona@ffwll.ch, rcampbell@nvidia.com, mpenttil@redhat.com,
jgg@nvidia.com, willy@infradead.org,
linuxppc-dev@lists.ozlabs.org, intel-xe@lists.freedesktop.org,
jgg@ziepe.ca, Felix.Kuehling@amd.com
Subject: [PATCH v1 3/8] mm: Add helpers to create migration entries from struct pages
Date: Wed, 31 Dec 2025 15:31:49 +1100 [thread overview]
Message-ID: <20251231043154.42931-4-jniethe@nvidia.com> (raw)
In-Reply-To: <20251231043154.42931-1-jniethe@nvidia.com>
To create a new migration entry for a given struct page, that page is
first converted to its pfn, before passing the pfn to
make_readable_migration_entry() (and friends).
A future change will remove device private pages from the physical
address space. This will mean that device private pages no longer have a
pfn and must be handled separately.
Prepare for this with a new set of helpers:
- make_readable_migration_entry_from_page()
- make_readable_exclusive_migration_entry_from_page()
- make_writable_migration_entry_from_page()
These helpers take a struct page as a parameter instead of a pfn. This
will allow more flexibility for handling the swap offset field
differently for device private pages.
Signed-off-by: Jordan Niethe <jniethe@nvidia.com>
---
v1:
- New to series
---
include/linux/swapops.h | 25 +++++++++++++++++++++++++
mm/huge_memory.c | 18 +++++++++---------
mm/migrate_device.c | 12 ++++++------
mm/rmap.c | 12 ++++++------
4 files changed, 46 insertions(+), 21 deletions(-)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8cfc966eae48..72aa636fdb48 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -173,16 +173,31 @@ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
return swp_entry(SWP_MIGRATION_READ, offset);
}
+static inline swp_entry_t make_readable_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(SWP_MIGRATION_READ, page_to_pfn(page));
+}
+
static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}
+static inline swp_entry_t make_readable_exclusive_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, page_to_pfn(page));
+}
+
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
return swp_entry(SWP_MIGRATION_WRITE, offset);
}
+static inline swp_entry_t make_writable_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(SWP_MIGRATION_WRITE, page_to_pfn(page));
+}
+
/*
* Returns whether the host has large enough swap offset field to support
* carrying over pgtable A/D bits for page migrations. The result is
@@ -222,11 +237,21 @@ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
return swp_entry(0, 0);
}
+static inline swp_entry_t make_readable_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
+static inline swp_entry_t make_readable_exclusive_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40cf59301c21..08c68e2e3f06 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3183,14 +3183,14 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
if (write)
- swp_entry = make_writable_migration_entry(
- page_to_pfn(page + i));
+ swp_entry = make_writable_migration_entry_from_page(
+ page + i);
else if (anon_exclusive)
- swp_entry = make_readable_exclusive_migration_entry(
- page_to_pfn(page + i));
+ swp_entry = make_readable_exclusive_migration_entry_from_page(
+ page + i);
else
- swp_entry = make_readable_migration_entry(
- page_to_pfn(page + i));
+ swp_entry = make_readable_migration_entry_from_page(
+ page + i);
if (young)
swp_entry = make_migration_entry_young(swp_entry);
if (dirty)
@@ -4890,11 +4890,11 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
if (pmd_dirty(pmdval))
folio_mark_dirty(folio);
if (pmd_write(pmdval))
- entry = make_writable_migration_entry(page_to_pfn(page));
+ entry = make_writable_migration_entry_from_page(page);
else if (anon_exclusive)
- entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
+ entry = make_readable_exclusive_migration_entry_from_page(page);
else
- entry = make_readable_migration_entry(page_to_pfn(page));
+ entry = make_readable_migration_entry_from_page(page);
if (pmd_young(pmdval))
entry = make_migration_entry_young(entry);
if (pmd_dirty(pmdval))
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 5d108ddf1a97..7eef21d63364 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -438,14 +438,14 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
/* Setup special migration page table entry */
if (mpfn & MIGRATE_PFN_WRITE)
- entry = make_writable_migration_entry(
- page_to_pfn(page));
+ entry = make_writable_migration_entry_from_page(
+ page);
else if (anon_exclusive)
- entry = make_readable_exclusive_migration_entry(
- page_to_pfn(page));
+ entry = make_readable_exclusive_migration_entry_from_page(
+ page);
else
- entry = make_readable_migration_entry(
- page_to_pfn(page));
+ entry = make_readable_migration_entry_from_page(
+ page);
if (pte_present(pte)) {
if (pte_young(pte))
entry = make_migration_entry_young(entry);
diff --git a/mm/rmap.c b/mm/rmap.c
index 79a2478b4aa9..bb881b0c4b06 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2539,14 +2539,14 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* pte is removed and then restart fault handling.
*/
if (writable)
- entry = make_writable_migration_entry(
- page_to_pfn(subpage));
+ entry = make_writable_migration_entry_from_page(
+ subpage);
else if (anon_exclusive)
- entry = make_readable_exclusive_migration_entry(
- page_to_pfn(subpage));
+ entry = make_readable_exclusive_migration_entry_from_page(
+ subpage);
else
- entry = make_readable_migration_entry(
- page_to_pfn(subpage));
+ entry = make_readable_migration_entry_from_page(
+ subpage);
if (likely(pte_present(pteval))) {
if (pte_young(pteval))
entry = make_migration_entry_young(entry);
--
2.34.1
next prev parent reply other threads:[~2025-12-31 4:32 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-12-31 4:31 [PATCH v1 0/8] Remove device private pages from physical address space Jordan Niethe
2025-12-31 4:31 ` [PATCH v1 1/8] mm/migrate_device: Add migrate PFN flag to track device private pages Jordan Niethe
2025-12-31 17:03 ` Kuehling, Felix
2025-12-31 4:31 ` [PATCH v1 2/8] mm/page_vma_mapped: Add flags to page_vma_mapped_walk::pfn " Jordan Niethe
2025-12-31 4:31 ` Jordan Niethe [this message]
2025-12-31 4:31 ` [PATCH v1 4/8] mm: Add a new swap type for migration entries of " Jordan Niethe
2025-12-31 4:31 ` [PATCH v1 5/8] mm: Add helpers to create device private entries from struct pages Jordan Niethe
2025-12-31 4:31 ` [PATCH v1 6/8] mm/util: Add flag to track device private pages in page snapshots Jordan Niethe
2025-12-31 4:31 ` [PATCH v1 7/8] mm/hmm: Add flag to track device private pages Jordan Niethe
2025-12-31 4:31 ` [PATCH v1 8/8] mm: Remove device private pages from the physical address space Jordan Niethe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251231043154.42931-4-jniethe@nvidia.com \
--to=jniethe@nvidia.com \
--cc=Felix.Kuehling@amd.com \
--cc=airlied@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=apopple@nvidia.com \
--cc=balbirs@nvidia.com \
--cc=dakr@kernel.org \
--cc=david@redhat.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=jgg@nvidia.com \
--cc=jgg@ziepe.ca \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=lyude@redhat.com \
--cc=matthew.brost@intel.com \
--cc=mpenttil@redhat.com \
--cc=rcampbell@nvidia.com \
--cc=simona@ffwll.ch \
--cc=willy@infradead.org \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox