From: Jordan Niethe <jniethe@nvidia.com>
To: linux-mm@kvack.org
Cc: balbirs@nvidia.com, matthew.brost@intel.com,
akpm@linux-foundation.org, linux-kernel@vger.kernel.org,
dri-devel@lists.freedesktop.org, david@redhat.com,
ziy@nvidia.com, apopple@nvidia.com, lorenzo.stoakes@oracle.com,
lyude@redhat.com, dakr@kernel.org, airlied@gmail.com,
simona@ffwll.ch, rcampbell@nvidia.com, mpenttil@redhat.com,
jgg@nvidia.com, willy@infradead.org,
linuxppc-dev@lists.ozlabs.org, intel-xe@lists.freedesktop.org,
jgg@ziepe.ca, Felix.Kuehling@amd.com, jniethe@nvidia.com,
jhubbard@nvidia.com
Subject: [PATCH v3 10/13] mm: Add helpers to create device private entries from struct pages
Date: Fri, 23 Jan 2026 17:23:06 +1100 [thread overview]
Message-ID: <20260123062309.23090-11-jniethe@nvidia.com> (raw)
In-Reply-To: <20260123062309.23090-1-jniethe@nvidia.com>
To create a new device private entry for a given struct page, that page
is first converted to its pfn, which is then passed to
make_writable_device_private_entry() (and friends).
A future change will remove device private pages from the physical
address space. This will mean that device private pages no longer have a
pfn and must be handled separately.
Prepare for this with a new set of helpers:
- make_readable_device_private_entry_from_page()
- make_writable_device_private_entry_from_page()
These helpers take a struct page as a parameter instead of a pfn. This
will allow more flexibility for handling the swap offset field
differently for device private pages.
Signed-off-by: Jordan Niethe <jniethe@nvidia.com>
---
v1:
- New to series
v2:
- Add flag param
v3:
- No change
---
include/linux/swapops.h | 24 ++++++++++++++++++++++++
mm/huge_memory.c | 14 ++++++--------
mm/migrate.c | 6 ++----
mm/migrate_device.c | 12 ++++--------
4 files changed, 36 insertions(+), 20 deletions(-)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 2591ed914d22..b42c105e60c4 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -138,11 +138,23 @@ static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
return swp_entry(SWP_DEVICE_READ, offset);
}
+static inline swp_entry_t make_readable_device_private_entry_from_page(struct page *page,
+ pgoff_t flags)
+{
+ return swp_entry(SWP_DEVICE_READ, page_to_pfn(page) | flags);
+}
+
static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
return swp_entry(SWP_DEVICE_WRITE, offset);
}
+static inline swp_entry_t make_writable_device_private_entry_from_page(struct page *page,
+ pgoff_t flags)
+{
+ return swp_entry(SWP_DEVICE_WRITE, page_to_pfn(page) | flags);
+}
+
static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
@@ -169,11 +181,23 @@ static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
return swp_entry(0, 0);
}
+static inline swp_entry_t make_readable_device_private_entry_from_page(struct page *page,
+ pgoff_t flags)
+{
+ return swp_entry(0, 0);
+}
+
static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
+static inline swp_entry_t make_writable_device_private_entry_from_page(struct page *page,
+ pgoff_t flags)
+{
+ return swp_entry(0, 0);
+}
+
static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
return swp_entry(0, 0);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e3a448cdb34d..03f1f13bb24c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3219,11 +3219,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
* is false.
*/
if (write)
- swp_entry = make_writable_device_private_entry(
- page_to_pfn(page + i));
+ swp_entry = make_writable_device_private_entry_from_page(
+ page + i, 0);
else
- swp_entry = make_readable_device_private_entry(
- page_to_pfn(page + i));
+ swp_entry = make_readable_device_private_entry_from_page(
+ page + i, 0);
/*
* Young and dirty bits are not progated via swp_entry
*/
@@ -4950,11 +4950,9 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
swp_entry_t entry;
if (pmd_write(pmde))
- entry = make_writable_device_private_entry(
- page_to_pfn(new));
+ entry = make_writable_device_private_entry_from_page(new, 0);
else
- entry = make_readable_device_private_entry(
- page_to_pfn(new));
+ entry = make_readable_device_private_entry_from_page(new, 0);
pmde = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*pvmw->pmd))
diff --git a/mm/migrate.c b/mm/migrate.c
index 5169f9717f60..6cc6c989ab6b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -399,11 +399,9 @@ static bool remove_migration_pte(struct folio *folio,
if (unlikely(is_device_private_page(new))) {
if (pte_write(pte))
- entry = make_writable_device_private_entry(
- page_to_pfn(new));
+ entry = make_writable_device_private_entry_from_page(new, 0);
else
- entry = make_readable_device_private_entry(
- page_to_pfn(new));
+ entry = make_readable_device_private_entry_from_page(new, 0);
pte = softleaf_to_pte(entry);
if (pte_swp_soft_dirty(old_pte))
pte = pte_swp_mksoft_dirty(pte);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index c876526ac6a3..0ca6f78df0e2 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -836,11 +836,9 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
swp_entry_t swp_entry;
if (vma->vm_flags & VM_WRITE)
- swp_entry = make_writable_device_private_entry(
- page_to_pfn(page));
+ swp_entry = make_writable_device_private_entry_from_page(page, 0);
else
- swp_entry = make_readable_device_private_entry(
- page_to_pfn(page));
+ swp_entry = make_readable_device_private_entry_from_page(page, 0);
entry = swp_entry_to_pmd(swp_entry);
} else {
if (folio_is_zone_device(folio) &&
@@ -1033,11 +1031,9 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
swp_entry_t swp_entry;
if (vma->vm_flags & VM_WRITE)
- swp_entry = make_writable_device_private_entry(
- page_to_pfn(page));
+ swp_entry = make_writable_device_private_entry_from_page(page, 0);
else
- swp_entry = make_readable_device_private_entry(
- page_to_pfn(page));
+ swp_entry = make_readable_device_private_entry_from_page(page, 0);
entry = swp_entry_to_pte(swp_entry);
} else {
if (folio_is_zone_device(folio) &&
--
2.34.1
next prev parent reply other threads:[~2026-01-23 6:24 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-23 6:22 [PATCH v3 00/13] Remove device private pages from physical address space Jordan Niethe
2026-01-23 6:22 ` [PATCH v3 01/13] mm/migrate_device: Introduce migrate_pfn_from_page() helper Jordan Niethe
2026-01-28 5:07 ` Kuehling, Felix
2026-01-29 1:06 ` Jordan Niethe
2026-01-23 6:22 ` [PATCH v3 02/13] drm/amdkfd: Use migrate pfns internally Jordan Niethe
2026-01-27 23:15 ` Balbir Singh
2026-01-28 5:08 ` Kuehling, Felix
2026-01-23 6:22 ` [PATCH v3 03/13] mm/migrate_device: Make migrate_device_{pfns,range}() take mpfns Jordan Niethe
2026-01-23 6:23 ` [PATCH v3 04/13] mm/migrate_device: Add migrate PFN flag to track device private pages Jordan Niethe
2026-01-28 5:09 ` Kuehling, Felix
2026-01-23 6:23 ` [PATCH v3 05/13] mm/page_vma_mapped: Add flag to page_vma_mapped_walk::flags " Jordan Niethe
2026-01-27 21:01 ` Zi Yan
2026-01-23 6:23 ` [PATCH v3 06/13] mm: Add helpers to create migration entries from struct pages Jordan Niethe
2026-01-23 6:23 ` [PATCH v3 07/13] mm: Add a new swap type for migration entries of device private pages Jordan Niethe
2026-01-23 6:23 ` [PATCH v3 08/13] mm: Add softleaf support for device private migration entries Jordan Niethe
2026-01-23 6:23 ` [PATCH v3 09/13] mm: Begin creating " Jordan Niethe
2026-01-23 6:23 ` Jordan Niethe [this message]
2026-01-23 6:23 ` [PATCH v3 11/13] mm/util: Add flag to track device private pages in page snapshots Jordan Niethe
2026-01-23 6:23 ` [PATCH v3 12/13] mm/hmm: Add flag to track device private pages Jordan Niethe
2026-01-23 6:23 ` [PATCH v3 13/13] mm: Remove device private pages from the physical address space Jordan Niethe
2026-01-27 0:29 ` Jordan Niethe
2026-01-27 21:12 ` Zi Yan
2026-01-27 23:26 ` Jordan Niethe
2026-01-28 5:10 ` Kuehling, Felix
2026-01-29 13:49 ` [PATCH v3 00/13] Remove device private pages from " Huang, Ying
2026-01-29 23:26 ` Alistair Popple
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260123062309.23090-11-jniethe@nvidia.com \
--to=jniethe@nvidia.com \
--cc=Felix.Kuehling@amd.com \
--cc=airlied@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=apopple@nvidia.com \
--cc=balbirs@nvidia.com \
--cc=dakr@kernel.org \
--cc=david@redhat.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=jgg@nvidia.com \
--cc=jgg@ziepe.ca \
--cc=jhubbard@nvidia.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=lyude@redhat.com \
--cc=matthew.brost@intel.com \
--cc=mpenttil@redhat.com \
--cc=rcampbell@nvidia.com \
--cc=simona@ffwll.ch \
--cc=willy@infradead.org \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox