From: Jordan Niethe <jniethe@nvidia.com>
To: linux-mm@kvack.org
Cc: balbirs@nvidia.com, matthew.brost@intel.com,
akpm@linux-foundation.org, linux-kernel@vger.kernel.org,
dri-devel@lists.freedesktop.org, david@redhat.com,
ziy@nvidia.com, apopple@nvidia.com, lorenzo.stoakes@oracle.com,
lyude@redhat.com, dakr@kernel.org, airlied@gmail.com,
simona@ffwll.ch, rcampbell@nvidia.com, mpenttil@redhat.com,
jgg@nvidia.com, willy@infradead.org,
linuxppc-dev@lists.ozlabs.org, intel-xe@lists.freedesktop.org,
jgg@ziepe.ca, Felix.Kuehling@amd.com, jniethe@nvidia.com,
jhubbard@nvidia.com, maddy@linux.ibm.com, mpe@ellerman.id.au
Subject: [PATCH v5 03/13] mm/migrate_device: Make migrate_device_{pfns,range}() take mpfns
Date: Fri, 30 Jan 2026 22:10:40 +1100
Message-ID: <20260130111050.53670-4-jniethe@nvidia.com>
In-Reply-To: <20260130111050.53670-1-jniethe@nvidia.com>

A future change will remove device private pages from the physical
address space. This will mean that device private pages will no longer
have a pfn.

This is a problem for migrate_device_{pfns,range}(), which take pfn
parameters: whether the device is MEMORY_DEVICE_PRIVATE or
MEMORY_DEVICE_COHERENT affects how those parameters must be
interpreted.

A subsequent change will introduce a MIGRATE_PFN flag that
distinguishes mpfns containing a pfn from those containing an offset
into device private memory; take advantage of that here.

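For reference, an mpfn packs the pfn above a small set of flag bits;
the existing helpers in include/linux/migrate.h look roughly like this:

	/* Encode a pfn as an mpfn (simplified). */
	static inline unsigned long migrate_pfn(unsigned long pfn)
	{
		return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
	}

	/* Decode: returns NULL if the entry carries no valid pfn. */
	static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
	{
		if (!(mpfn & MIGRATE_PFN_VALID))
			return NULL;
		return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
	}

Because the pfn sits above MIGRATE_PFN_SHIFT, stepping an mpfn by
1 << MIGRATE_PFN_SHIFT advances the encoded pfn by one page while
preserving the flag bits, which the reworked migrate_device_range()
loop relies on.
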
Update migrate_device_{pfns,range}() to take an mpfn instead of a pfn,
and update their users to pass in an mpfn.

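For example, in the lib/test_hmm.c hunk below, the starting pfn is now
wrapped before being passed:

	/* before */
	migrate_device_range(src_pfns, start_pfn, npages);
	/* after */
	migrate_device_range(src_pfns, migrate_pfn(start_pfn), npages);
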
To support this change, update
drm_pagemap_devmem_ops::populate_devmem_pfn() to instead return mpfns,
and rename it accordingly.

Signed-off-by: Jordan Niethe <jniethe@nvidia.com>
---
v2: New to series
v3: No change
---
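Usage sketch of the new convention for migrate_device_pfns(): this
assumes a driver-owned array of device private pages, and that
migrate_pfn_from_page() from patch 01 of this series returns a page's
mpfn encoding.

	/* Sketch: migrate a non-contiguous set of device private pages. */
	for (i = 0; i < npages; i++)
		src_mpfns[i] = migrate_pfn_from_page(device_pages[i]);
	migrate_device_pfns(src_mpfns, npages);
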
drivers/gpu/drm/drm_pagemap.c | 9 +++---
drivers/gpu/drm/nouveau/nouveau_dmem.c | 5 +--
drivers/gpu/drm/xe/xe_svm.c | 9 +++---
include/drm/drm_pagemap.h | 8 ++---
lib/test_hmm.c | 2 +-
mm/migrate_device.c | 45 ++++++++++++++------------
6 files changed, 41 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 526105aa4b05..13072c8665b9 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -505,7 +505,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
mmap_assert_locked(mm);
- if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
+ if (!ops->populate_devmem_mpfn || !ops->copy_to_devmem ||
!ops->copy_to_ram)
return -EOPNOTSUPP;
@@ -590,14 +590,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
goto err_aborted_migration;
}
- err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
+ err = ops->populate_devmem_mpfn(devmem_allocation, npages, migrate.dst);
if (err)
goto err_aborted_migration;
own_pages = 0;
for (i = 0; i < npages; ++i) {
- struct page *page = pfn_to_page(migrate.dst[i]);
+ struct page *page = migrate_pfn_to_page(migrate.dst[i]);
struct page *src_page = migrate_pfn_to_page(migrate.src[i]);
cur.start = i;
@@ -624,7 +624,6 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
cur.device = dpagemap->drm->dev;
pages[i] = page;
}
- migrate.dst[i] = migrate_pfn(migrate.dst[i]);
drm_pagemap_get_devmem_page(page, zdd);
/* If we switched the migrating drm_pagemap, migrate previous pages now */
@@ -979,7 +978,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
pagemap_addr = buf + (2 * sizeof(*src) * npages);
pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
- err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
+ err = ops->populate_devmem_mpfn(devmem_allocation, npages, src);
if (err)
goto err_free;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index a7edcdca9701..bd3f7102c3f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -483,8 +483,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
dma_info = kvcalloc(npages, sizeof(*dma_info), GFP_KERNEL | __GFP_NOFAIL);
- migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
- npages);
+ migrate_device_range(src_pfns,
+ migrate_pfn(chunk->pagemap.range.start >> PAGE_SHIFT),
+ npages);
for (i = 0; i < npages; i++) {
if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 213f0334518a..fbf5fd284616 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -10,6 +10,7 @@
#include <drm/drm_pagemap.h>
#include <drm/drm_pagemap_util.h>
+#include <linux/migrate.h>
#include "xe_bo.h"
#include "xe_exec_queue_types.h"
#include "xe_gt_stats.h"
@@ -752,8 +753,8 @@ static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
return &vram->ttm.mm;
}
-static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
- unsigned long npages, unsigned long *pfn)
+static int xe_svm_populate_devmem_mpfn(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
struct ttm_resource *res = bo->ttm.resource;
@@ -769,7 +770,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
int i;
for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
- pfn[j++] = block_pfn + i;
+ pfn[j++] = migrate_pfn(block_pfn + i);
}
return 0;
@@ -777,7 +778,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
.devmem_release = xe_svm_devmem_release,
- .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
+ .populate_devmem_mpfn = xe_svm_populate_devmem_mpfn,
.copy_to_devmem = xe_svm_copy_to_devmem,
.copy_to_ram = xe_svm_copy_to_ram,
};
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 2baf0861f78f..bffc7fd5bef3 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -192,17 +192,17 @@ struct drm_pagemap_devmem_ops {
void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);
/**
- * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+ * @populate_devmem_mpfn: Populate device memory migrate PFNs (required for migration)
* @devmem_allocation: device memory allocation
* @npages: Number of pages to populate
- * @pfn: Array of page frame numbers to populate
+ * @pfn: Array of migrate page frame numbers (mpfns) to populate
*
* Populate device memory page frame numbers (PFN).
*
* Return: 0 on success, a negative error code on failure.
*/
- int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
- unsigned long npages, unsigned long *pfn);
+ int (*populate_devmem_mpfn)(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn);
/**
* @copy_to_devmem: Copy to device memory (required for migration)
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 7e5248404d00..a6ff292596f3 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1389,7 +1389,7 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
- migrate_device_range(src_pfns, start_pfn, npages);
+ migrate_device_range(src_pfns, migrate_pfn(start_pfn), npages);
for (i = 0; i < npages; i++) {
struct page *dpage, *spage;
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 1a2067f830da..a2baaa2a81f9 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -1354,11 +1354,11 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
}
EXPORT_SYMBOL(migrate_vma_finalize);
-static unsigned long migrate_device_pfn_lock(unsigned long pfn)
+static unsigned long migrate_device_pfn_lock(unsigned long mpfn)
{
struct folio *folio;
- folio = folio_get_nontail_page(pfn_to_page(pfn));
+ folio = folio_get_nontail_page(migrate_pfn_to_page(mpfn));
if (!folio)
return 0;
@@ -1367,13 +1367,14 @@ static unsigned long migrate_device_pfn_lock(unsigned long pfn)
return 0;
}
- return migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
+ return mpfn | MIGRATE_PFN_MIGRATE;
}
/**
* migrate_device_range() - migrate device private pfns to normal memory.
- * @src_pfns: array large enough to hold migrating source device private pfns.
- * @start: starting pfn in the range to migrate.
+ * @src_mpfns: array large enough to hold migrating source device private
+ * migrate pfns.
+ * @start: starting migrate pfn in the range to migrate.
* @npages: number of pages to migrate.
*
* migrate_device_range() is similar in concept to migrate_vma_setup() except that
@@ -1389,28 +1390,29 @@ static unsigned long migrate_device_pfn_lock(unsigned long pfn)
* allocate destination pages and start copying data from the device to CPU
* memory before calling migrate_device_pages().
*/
-int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+int migrate_device_range(unsigned long *src_mpfns, unsigned long start,
unsigned long npages)
{
- unsigned long i, j, pfn;
+ unsigned long i, j, mpfn;
- for (pfn = start, i = 0; i < npages; pfn++, i++) {
- struct page *page = pfn_to_page(pfn);
+ for (mpfn = start, i = 0; i < npages; i++) {
+ struct page *page = migrate_pfn_to_page(mpfn);
struct folio *folio = page_folio(page);
unsigned int nr = 1;
- src_pfns[i] = migrate_device_pfn_lock(pfn);
+ src_mpfns[i] = migrate_device_pfn_lock(mpfn);
nr = folio_nr_pages(folio);
if (nr > 1) {
- src_pfns[i] |= MIGRATE_PFN_COMPOUND;
+ src_mpfns[i] |= MIGRATE_PFN_COMPOUND;
for (j = 1; j < nr; j++)
- src_pfns[i+j] = 0;
+ src_mpfns[i+j] = 0;
i += j - 1;
- pfn += j - 1;
+ mpfn += (j - 1) << MIGRATE_PFN_SHIFT;
}
+ mpfn += 1 << MIGRATE_PFN_SHIFT;
}
- migrate_device_unmap(src_pfns, npages, NULL);
+ migrate_device_unmap(src_mpfns, npages, NULL);
return 0;
}
@@ -1418,32 +1420,33 @@ EXPORT_SYMBOL(migrate_device_range);
/**
* migrate_device_pfns() - migrate device private pfns to normal memory.
- * @src_pfns: pre-popluated array of source device private pfns to migrate.
+ * @src_mpfns: pre-populated array of source device private migrate pfns to
+ * migrate.
* @npages: number of pages to migrate.
*
* Similar to migrate_device_range() but supports non-contiguous pre-populated
* array of device pages to migrate.
*/
-int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages)
+int migrate_device_pfns(unsigned long *src_mpfns, unsigned long npages)
{
unsigned long i, j;
for (i = 0; i < npages; i++) {
- struct page *page = pfn_to_page(src_pfns[i]);
+ struct page *page = migrate_pfn_to_page(src_mpfns[i]);
struct folio *folio = page_folio(page);
unsigned int nr = 1;
- src_pfns[i] = migrate_device_pfn_lock(src_pfns[i]);
+ src_mpfns[i] = migrate_device_pfn_lock(src_mpfns[i]);
nr = folio_nr_pages(folio);
if (nr > 1) {
- src_pfns[i] |= MIGRATE_PFN_COMPOUND;
+ src_mpfns[i] |= MIGRATE_PFN_COMPOUND;
for (j = 1; j < nr; j++)
- src_pfns[i+j] = 0;
+ src_mpfns[i+j] = 0;
i += j - 1;
}
}
- migrate_device_unmap(src_pfns, npages, NULL);
+ migrate_device_unmap(src_mpfns, npages, NULL);
return 0;
}
--
2.34.1