* [PATCH v3 1/6] drm/gem-shmem: Use obj directly where appropriate in fault handler
2026-02-09 13:27 [PATCH v3 0/6] drm/gem-shmem: Track page accessed/dirty status Thomas Zimmermann
@ 2026-02-09 13:27 ` Thomas Zimmermann
2026-02-09 14:10 ` Boris Brezillon
2026-02-09 13:27 ` [PATCH v3 2/6] drm/gem-shmem: Test for existence of page in mmap " Thomas Zimmermann
` (4 subsequent siblings)
5 siblings, 1 reply; 13+ messages in thread
From: Thomas Zimmermann @ 2026-02-09 13:27 UTC (permalink / raw)
To: boris.brezillon, loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona
Cc: dri-devel, linux-mm, Thomas Zimmermann
Replace shmem->base with obj in several places. It is the same value,
but the latter is easier to read.
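For context, this works because the shmem object embeds the GEM object;
a minimal sketch of the relationship (field layout as declared in
include/drm/drm_gem_shmem_helper.h, comments are ours):

        struct drm_gem_shmem_object {
                struct drm_gem_object base;     /* embedded GEM object */
                /* ... */
        };

        /* to_drm_gem_shmem_obj() is a container_of() cast, so with
         * shmem = to_drm_gem_shmem_obj(obj), the expressions obj and
         * &shmem->base name the same object. */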
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 3871a6d92f77..5bced7db0f1f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -584,7 +584,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
/* Offset to faulty address in the VMA. */
page_offset = vmf->pgoff - vma->vm_pgoff;
- dma_resv_lock(shmem->base.resv, NULL);
+ dma_resv_lock(obj->resv, NULL);
if (page_offset >= num_pages ||
drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
@@ -602,7 +602,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
ret = vmf_insert_pfn(vma, vmf->address, pfn);
out:
- dma_resv_unlock(shmem->base.resv);
+ dma_resv_unlock(obj->resv);
return ret;
}
--
2.52.0
* Re: [PATCH v3 1/6] drm/gem-shmem: Use obj directly where appropriate in fault handler
2026-02-09 13:27 ` [PATCH v3 1/6] drm/gem-shmem: Use obj directly where appropriate in fault handler Thomas Zimmermann
@ 2026-02-09 14:10 ` Boris Brezillon
0 siblings, 0 replies; 13+ messages in thread
From: Boris Brezillon @ 2026-02-09 14:10 UTC (permalink / raw)
To: Thomas Zimmermann
Cc: loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona, dri-devel, linux-mm
On Mon, 9 Feb 2026 14:27:10 +0100
Thomas Zimmermann <tzimmermann@suse.de> wrote:
> Replace shmem->base with obj in several places. It is the same value,
> but the latter is easier to read.
>
> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 3871a6d92f77..5bced7db0f1f 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -584,7 +584,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> /* Offset to faulty address in the VMA. */
> page_offset = vmf->pgoff - vma->vm_pgoff;
>
> - dma_resv_lock(shmem->base.resv, NULL);
> + dma_resv_lock(obj->resv, NULL);
>
> if (page_offset >= num_pages ||
> drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
> @@ -602,7 +602,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> ret = vmf_insert_pfn(vma, vmf->address, pfn);
>
> out:
> - dma_resv_unlock(shmem->base.resv);
> + dma_resv_unlock(obj->resv);
>
> return ret;
> }
* [PATCH v3 2/6] drm/gem-shmem: Test for existence of page in mmap fault handler
2026-02-09 13:27 [PATCH v3 0/6] drm/gem-shmem: Track page accessed/dirty status Thomas Zimmermann
2026-02-09 13:27 ` [PATCH v3 1/6] drm/gem-shmem: Use obj directly where appropriate in fault handler Thomas Zimmermann
@ 2026-02-09 13:27 ` Thomas Zimmermann
2026-02-09 14:10 ` Boris Brezillon
2026-02-09 13:27 ` [PATCH v3 3/6] drm/gem-shmem: Return vm_fault_t from drm_gem_shmem_try_map_pmd() Thomas Zimmermann
` (3 subsequent siblings)
5 siblings, 1 reply; 13+ messages in thread
From: Thomas Zimmermann @ 2026-02-09 13:27 UTC (permalink / raw)
To: boris.brezillon, loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona
Cc: dri-devel, linux-mm, Thomas Zimmermann
Not having a page pointer in the mmap fault handler is an error. Test
for this situation and return VM_FAULT_SIGBUS if so. Also replace several
lookups of the page with a local variable.
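The resulting control flow, distilled (a sketch of the pattern only, not
the exact helper code):

        vm_fault_t ret = VM_FAULT_SIGBUS;       /* default to the error case */

        dma_resv_lock(obj->resv, NULL);
        if (page_offset >= num_pages || !shmem->pages || shmem->madv < 0)
                goto out;               /* returns VM_FAULT_SIGBUS */
        page = shmem->pages[page_offset];
        if (!page)
                goto out;               /* new: missing page is an error, too */
        /* ... insert the page into the page tables, set ret ... */
out:
        dma_resv_unlock(obj->resv);
        return ret;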
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5bced7db0f1f..3ee54c24e535 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -574,31 +574,31 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
- vm_fault_t ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
struct page **pages = shmem->pages;
- pgoff_t page_offset;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+ struct page *page;
unsigned long pfn;
- /* Offset to faulty address in the VMA. */
- page_offset = vmf->pgoff - vma->vm_pgoff;
-
dma_resv_lock(obj->resv, NULL);
- if (page_offset >= num_pages ||
- drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
- shmem->madv < 0) {
- ret = VM_FAULT_SIGBUS;
+ if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
+ shmem->madv < 0)
+ goto out;
+
+ page = pages[page_offset];
+ if (drm_WARN_ON_ONCE(dev, !page))
goto out;
- }
- if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
+ if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, page)) {
ret = VM_FAULT_NOPAGE;
goto out;
}
- pfn = page_to_pfn(pages[page_offset]);
+ pfn = page_to_pfn(page);
ret = vmf_insert_pfn(vma, vmf->address, pfn);
out:
--
2.52.0
* Re: [PATCH v3 2/6] drm/gem-shmem: Test for existence of page in mmap fault handler
2026-02-09 13:27 ` [PATCH v3 2/6] drm/gem-shmem: Test for existence of page in mmap " Thomas Zimmermann
@ 2026-02-09 14:10 ` Boris Brezillon
0 siblings, 0 replies; 13+ messages in thread
From: Boris Brezillon @ 2026-02-09 14:10 UTC (permalink / raw)
To: Thomas Zimmermann
Cc: loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona, dri-devel, linux-mm
On Mon, 9 Feb 2026 14:27:11 +0100
Thomas Zimmermann <tzimmermann@suse.de> wrote:
> Not having a page pointer in the mmap fault handler is an error. Test
> for this situation and return VM_FAULT_SIGBUS if so. Also replace several
> lookups of the page with a local variable.
>
> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 5bced7db0f1f..3ee54c24e535 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -574,31 +574,31 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> {
> struct vm_area_struct *vma = vmf->vma;
> struct drm_gem_object *obj = vma->vm_private_data;
> + struct drm_device *dev = obj->dev;
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> loff_t num_pages = obj->size >> PAGE_SHIFT;
> - vm_fault_t ret;
> + vm_fault_t ret = VM_FAULT_SIGBUS;
> struct page **pages = shmem->pages;
> - pgoff_t page_offset;
> + pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
> + struct page *page;
> unsigned long pfn;
>
> - /* Offset to faulty address in the VMA. */
> - page_offset = vmf->pgoff - vma->vm_pgoff;
> -
> dma_resv_lock(obj->resv, NULL);
>
> - if (page_offset >= num_pages ||
> - drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
> - shmem->madv < 0) {
> - ret = VM_FAULT_SIGBUS;
> + if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
> + shmem->madv < 0)
> + goto out;
> +
> + page = pages[page_offset];
> + if (drm_WARN_ON_ONCE(dev, !page))
> goto out;
> - }
>
> - if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
> + if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, page)) {
> ret = VM_FAULT_NOPAGE;
> goto out;
> }
>
> - pfn = page_to_pfn(pages[page_offset]);
> + pfn = page_to_pfn(page);
> ret = vmf_insert_pfn(vma, vmf->address, pfn);
>
> out:
* [PATCH v3 3/6] drm/gem-shmem: Return vm_fault_t from drm_gem_shmem_try_map_pmd()
2026-02-09 13:27 [PATCH v3 0/6] drm/gem-shmem: Track page accessed/dirty status Thomas Zimmermann
2026-02-09 13:27 ` [PATCH v3 1/6] drm/gem-shmem: Use obj directly where appropriate in fault handler Thomas Zimmermann
2026-02-09 13:27 ` [PATCH v3 2/6] drm/gem-shmem: Test for existence of page in mmap " Thomas Zimmermann
@ 2026-02-09 13:27 ` Thomas Zimmermann
2026-02-09 13:27 ` [PATCH v3 4/6] drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd() Thomas Zimmermann
` (2 subsequent siblings)
5 siblings, 0 replies; 13+ messages in thread
From: Thomas Zimmermann @ 2026-02-09 13:27 UTC (permalink / raw)
To: boris.brezillon, loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona
Cc: dri-devel, linux-mm, Thomas Zimmermann
Return the exact VM_FAULT_ mask from drm_gem_shmem_try_map_pmd(). This gives
the caller better insight into the result. Return 0 if nothing was done.
If the caller sees VM_FAULT_NOPAGE, drm_gem_shmem_try_map_pmd() added a
PMD entry to the page table. As before, return early from the page-fault
handler in that case.
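In caller terms, the contract looks as follows (a sketch; the concrete
error bits come from vmf_insert_pfn_pmd() and are simply passed through):

        ret = drm_gem_shmem_try_map_pmd(vmf, vmf->address, page);
        if (ret == VM_FAULT_NOPAGE)     /* PMD entry installed, done */
                goto out;
        /* ret == 0 (nothing attempted) or an error mask: fall back
         * to inserting a single PTE-sized PFN. */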
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 3ee54c24e535..ab8e331005f9 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -550,8 +550,8 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
-static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
- struct page *page)
+static vm_fault_t drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
+ struct page *page)
{
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
unsigned long pfn = page_to_pfn(page);
@@ -562,12 +562,11 @@ static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
pmd_none(*vmf->pmd) &&
folio_test_pmd_mappable(page_folio(page))) {
pfn &= PMD_MASK >> PAGE_SHIFT;
- if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
- return true;
+ return vmf_insert_pfn_pmd(vmf, pfn, false);
}
#endif
- return false;
+ return 0;
}
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
@@ -593,10 +592,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
if (drm_WARN_ON_ONCE(dev, !page))
goto out;
- if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, page)) {
- ret = VM_FAULT_NOPAGE;
+ ret = drm_gem_shmem_try_map_pmd(vmf, vmf->address, page);
+ if (ret == VM_FAULT_NOPAGE)
goto out;
- }
pfn = page_to_pfn(page);
ret = vmf_insert_pfn(vma, vmf->address, pfn);
--
2.52.0
* [PATCH v3 4/6] drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd()
2026-02-09 13:27 [PATCH v3 0/6] drm/gem-shmem: Track page accessed/dirty status Thomas Zimmermann
` (2 preceding siblings ...)
2026-02-09 13:27 ` [PATCH v3 3/6] drm/gem-shmem: Return vm_fault_t from drm_gem_shmem_try_map_pmd() Thomas Zimmermann
@ 2026-02-09 13:27 ` Thomas Zimmermann
2026-02-09 14:25 ` Boris Brezillon
2026-02-09 13:27 ` [PATCH v3 5/6] drm/gem-shmem: Track folio accessed/dirty status in mmap Thomas Zimmermann
2026-02-09 13:27 ` [PATCH v3 6/6] drm/gem-shmem: Track folio accessed/dirty status in vmap Thomas Zimmermann
5 siblings, 1 reply; 13+ messages in thread
From: Thomas Zimmermann @ 2026-02-09 13:27 UTC (permalink / raw)
To: boris.brezillon, loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona
Cc: dri-devel, linux-mm, Thomas Zimmermann
The current mmap page-fault handler requires some changes before it
can track folio access.
Move the call to folio_test_pmd_mappable() into the mmap page-fault
handler, before calling drm_gem_shmem_try_map_pmd(). The folio will become
useful for tracking the access status.
Also rename drm_gem_shmem_try_map_pmd() to _try_insert_pfn_pmd()
and only pass the page fault and page-frame number. The new name and
parameters make it similar to vmf_insert_pfn_pmd().
No functional changes. If PMD mapping fails or is not supported,
insert a regular PFN as before.
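As an aside, the mask arithmetic in the helper rounds the PFN down to a
PMD boundary. A worked example, assuming 4 KiB pages and 2 MiB PMDs (the
concrete numbers are illustrative, not part of the patch):

        /* PAGE_SHIFT == 12, PMD_SHIFT == 21                  */
        /* PMD_MASK == ~(PMD_SIZE - 1): low 21 bits clear     */
        /* PMD_MASK >> PAGE_SHIFT:      low 9 bits clear      */
        pfn &= PMD_MASK >> PAGE_SHIFT;  /* round down to a 512-page
                                         * (2 MiB) aligned block */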
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 25 ++++++++++++-------------
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index ab8e331005f9..c3a054899ba3 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -550,17 +550,14 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
-static vm_fault_t drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
- struct page *page)
+static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
{
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
- unsigned long pfn = page_to_pfn(page);
unsigned long paddr = pfn << PAGE_SHIFT;
- bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
+ bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
- if (aligned &&
- pmd_none(*vmf->pmd) &&
- folio_test_pmd_mappable(page_folio(page))) {
+ if (aligned && pmd_none(*vmf->pmd)) {
+ /* Read-only mapping; split upon write fault */
pfn &= PMD_MASK >> PAGE_SHIFT;
return vmf_insert_pfn_pmd(vmf, pfn, false);
}
@@ -580,6 +577,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
struct page **pages = shmem->pages;
pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
struct page *page;
+ struct folio *folio;
unsigned long pfn;
dma_resv_lock(obj->resv, NULL);
@@ -591,15 +589,16 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
page = pages[page_offset];
if (drm_WARN_ON_ONCE(dev, !page))
goto out;
-
- ret = drm_gem_shmem_try_map_pmd(vmf, vmf->address, page);
- if (ret == VM_FAULT_NOPAGE)
- goto out;
+ folio = page_folio(page);
pfn = page_to_pfn(page);
- ret = vmf_insert_pfn(vma, vmf->address, pfn);
- out:
+ if (folio_test_pmd_mappable(folio))
+ ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
+ if (ret != VM_FAULT_NOPAGE)
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
+out:
dma_resv_unlock(obj->resv);
return ret;
--
2.52.0
* Re: [PATCH v3 4/6] drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd()
2026-02-09 13:27 ` [PATCH v3 4/6] drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd() Thomas Zimmermann
@ 2026-02-09 14:25 ` Boris Brezillon
0 siblings, 0 replies; 13+ messages in thread
From: Boris Brezillon @ 2026-02-09 14:25 UTC (permalink / raw)
To: Thomas Zimmermann
Cc: loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona, dri-devel, linux-mm
On Mon, 9 Feb 2026 14:27:13 +0100
Thomas Zimmermann <tzimmermann@suse.de> wrote:
> The current mmap page-fault handler requires some changes before it
> can track folio access.
>
> Move the call to folio_test_pmd_mappable() into the mmap page-fault
> handler, before calling drm_gem_shmem_try_map_pmd(). The folio will become
> useful for tracking the access status.
>
> Also rename drm_gem_shmem_try_map_pmd() to _try_insert_pfn_pmd()
> and only pass the page fault and page-frame number. The new name and
> parameters make it similar to vmf_insert_pfn_pmd().
>
> No functional changes. If PMD mapping fails or is not supported,
> insert a regular PFN as before.
>
> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 25 ++++++++++++-------------
> 1 file changed, 12 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index ab8e331005f9..c3a054899ba3 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -550,17 +550,14 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
> }
> EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
>
> -static vm_fault_t drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
> - struct page *page)
> +static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
> {
> #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
> - unsigned long pfn = page_to_pfn(page);
> unsigned long paddr = pfn << PAGE_SHIFT;
> - bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
> + bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
>
> - if (aligned &&
> - pmd_none(*vmf->pmd) &&
> - folio_test_pmd_mappable(page_folio(page))) {
> + if (aligned && pmd_none(*vmf->pmd)) {
> + /* Read-only mapping; split upon write fault */
> pfn &= PMD_MASK >> PAGE_SHIFT;
> return vmf_insert_pfn_pmd(vmf, pfn, false);
> }
> @@ -580,6 +577,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> struct page **pages = shmem->pages;
> pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
> struct page *page;
> + struct folio *folio;
> unsigned long pfn;
>
> dma_resv_lock(obj->resv, NULL);
> @@ -591,15 +589,16 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> page = pages[page_offset];
> if (drm_WARN_ON_ONCE(dev, !page))
> goto out;
> -
> - ret = drm_gem_shmem_try_map_pmd(vmf, vmf->address, page);
> - if (ret == VM_FAULT_NOPAGE)
> - goto out;
> + folio = page_folio(page);
>
> pfn = page_to_pfn(page);
> - ret = vmf_insert_pfn(vma, vmf->address, pfn);
>
> - out:
> + if (folio_test_pmd_mappable(folio))
> + ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
> + if (ret != VM_FAULT_NOPAGE)
> + ret = vmf_insert_pfn(vma, vmf->address, pfn);
> +
> +out:
> dma_resv_unlock(obj->resv);
>
> return ret;
* [PATCH v3 5/6] drm/gem-shmem: Track folio accessed/dirty status in mmap
2026-02-09 13:27 [PATCH v3 0/6] drm/gem-shmem: Track page accessed/dirty status Thomas Zimmermann
` (3 preceding siblings ...)
2026-02-09 13:27 ` [PATCH v3 4/6] drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd() Thomas Zimmermann
@ 2026-02-09 13:27 ` Thomas Zimmermann
2026-02-09 14:23 ` Boris Brezillon
2026-02-09 13:27 ` [PATCH v3 6/6] drm/gem-shmem: Track folio accessed/dirty status in vmap Thomas Zimmermann
5 siblings, 1 reply; 13+ messages in thread
From: Thomas Zimmermann @ 2026-02-09 13:27 UTC (permalink / raw)
To: boris.brezillon, loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona
Cc: dri-devel, linux-mm, Thomas Zimmermann
Invoke folio_mark_accessed() in mmap page faults to add the folio to
the memory manager's LRU list. Userspace invokes mmap to get the memory
for software rendering. Compositors do the same when creating the final
on-screen image, so keeping the pages on the LRU makes sense. This avoids
paging out graphics buffers under memory pressure.
In pfn_mkwrite, further invoke folio_mark_dirty() to mark the folio
for writeback should the underlying file be paged out of system memory.
This rarely happens in practice, yet it would corrupt the buffer content.
This has little effect on a system's hardware-accelerated rendering, which
only mmaps for an initial setup of textures, meshes, shaders, etc.
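The write-fault sequence for such a VM_PFNMAP mapping is roughly as
follows (a sketch of the generic fault path as we understand it, not code
from this patch):

        /* First write to a present, read-only PFN mapping:
         *   hardware write fault
         *     -> fault handling finds a read-only PTE in a VM_PFNMAP VMA
         *     -> vma->vm_ops->pfn_mkwrite(vmf) is called
         *     -> if it returns 0, the PTE is made writable
         * The callback therefore only has to account for the write: */
        file_update_time(vma->vm_file); /* update mtime of the backing file */
        folio_mark_dirty(folio);        /* queue the folio for writeback */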
v3:
- rewrite for VM_PFNMAP
v2:
- adapt to changes in drm_gem_shmem_try_map_pmd()
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index c3a054899ba3..0c86ad40a049 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -598,6 +598,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
if (ret != VM_FAULT_NOPAGE)
ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ if (likely(!(ret & VM_FAULT_ERROR)))
+ folio_mark_accessed(folio);
+
out:
dma_resv_unlock(obj->resv);
@@ -638,10 +641,27 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
drm_gem_vm_close(vma);
}
+static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+ struct page *page = shmem->pages[page_offset];
+ struct folio *folio = page_folio(page);
+
+ file_update_time(vma->vm_file);
+
+ folio_mark_dirty(folio);
+
+ return 0;
+}
+
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
+ .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
--
2.52.0
* Re: [PATCH v3 5/6] drm/gem-shmem: Track folio accessed/dirty status in mmap
2026-02-09 13:27 ` [PATCH v3 5/6] drm/gem-shmem: Track folio accessed/dirty status in mmap Thomas Zimmermann
@ 2026-02-09 14:23 ` Boris Brezillon
2026-02-09 14:46 ` Thomas Zimmermann
0 siblings, 1 reply; 13+ messages in thread
From: Boris Brezillon @ 2026-02-09 14:23 UTC (permalink / raw)
To: Thomas Zimmermann
Cc: loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona, dri-devel, linux-mm
On Mon, 9 Feb 2026 14:27:14 +0100
Thomas Zimmermann <tzimmermann@suse.de> wrote:
> Invoke folio_mark_accessed() in mmap page faults to add the folio to
> the memory manager's LRU list. Userspace invokes mmap to get the memory
> for software rendering. Compositors do the same when creating the final
> on-screen image, so keeping the pages on the LRU makes sense. This avoids
> paging out graphics buffers under memory pressure.
>
> In pfn_mkwrite, further invoke folio_mark_dirty() to mark the folio
> for writeback should the underlying file be paged out of system memory.
> This rarely happens in practice, yet it would corrupt the buffer content.
>
> This has little effect on a system's hardware-accelerated rendering, which
> only mmaps for an initial setup of textures, meshes, shaders, etc.
>
> v3:
> - rewrite for VM_PFNMAP
> v2:
> - adapt to changes in drm_gem_shmem_try_map_pmd()
>
> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 20 ++++++++++++++++++++
> 1 file changed, 20 insertions(+)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index c3a054899ba3..0c86ad40a049 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -598,6 +598,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> if (ret != VM_FAULT_NOPAGE)
> ret = vmf_insert_pfn(vma, vmf->address, pfn);
>
> + if (likely(!(ret & VM_FAULT_ERROR)))
Can't we just go
if (ret == VM_FAULT_NOPAGE)
here?
> + folio_mark_accessed(folio);
> +
> out:
> dma_resv_unlock(obj->resv);
>
> @@ -638,10 +641,27 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> drm_gem_vm_close(vma);
> }
>
> +static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
> +{
> + struct vm_area_struct *vma = vmf->vma;
> + struct drm_gem_object *obj = vma->vm_private_data;
> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> + pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
> + struct page *page = shmem->pages[page_offset];
Should we have a
if (WARN_ON(!shmem->pages ||
page_offset >= (obj->size >> PAGE_SHIFT)))
return VM_FAULT_SIGBUS;
?
> + struct folio *folio = page_folio(page);
> +
> + file_update_time(vma->vm_file);
> +
> + folio_mark_dirty(folio);
> +
> + return 0;
> +}
> +
> const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> .fault = drm_gem_shmem_fault,
> .open = drm_gem_shmem_vm_open,
> .close = drm_gem_shmem_vm_close,
> + .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
> };
> EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>
* Re: [PATCH v3 5/6] drm/gem-shmem: Track folio accessed/dirty status in mmap
2026-02-09 14:23 ` Boris Brezillon
@ 2026-02-09 14:46 ` Thomas Zimmermann
2026-02-09 15:01 ` Boris Brezillon
0 siblings, 1 reply; 13+ messages in thread
From: Thomas Zimmermann @ 2026-02-09 14:46 UTC (permalink / raw)
To: Boris Brezillon
Cc: loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona, dri-devel, linux-mm
Hi Boris,
thanks for reviewing the series.
Am 09.02.26 um 15:23 schrieb Boris Brezillon:
> On Mon, 9 Feb 2026 14:27:14 +0100
> Thomas Zimmermann <tzimmermann@suse.de> wrote:
>
>> Invoke folio_mark_accessed() in mmap page faults to add the folio to
>> the memory manager's LRU list. Userspace invokes mmap to get the memory
>> for software rendering. Compositors do the same when creating the final
>> on-screen image, so keeping the pages on the LRU makes sense. This avoids
>> paging out graphics buffers under memory pressure.
>>
>> In pfn_mkwrite, further invoke folio_mark_dirty() to mark the folio
>> for writeback should the underlying file be paged out of system memory.
>> This rarely happens in practice, yet it would corrupt the buffer content.
>>
>> This has little effect on a system's hardware-accelerated rendering, which
>> only mmaps for an initial setup of textures, meshes, shaders, etc.
>>
>> v3:
>> - rewrite for VM_PFNMAP
>> v2:
>> - adapt to changes in drm_gem_shmem_try_map_pmd()
>>
>> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
>> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
>> ---
>> drivers/gpu/drm/drm_gem_shmem_helper.c | 20 ++++++++++++++++++++
>> 1 file changed, 20 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
>> index c3a054899ba3..0c86ad40a049 100644
>> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
>> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
>> @@ -598,6 +598,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>> if (ret != VM_FAULT_NOPAGE)
>> ret = vmf_insert_pfn(vma, vmf->address, pfn);
>>
>> + if (likely(!(ret & VM_FAULT_ERROR)))
> Can't we just go
>
> if (ret == VM_FAULT_NOPAGE)
>
> here?
After reviewing the code in vmf_insert_pfn, I think so. All we'll see is
_OOM and _SIGBUS; or _NOPAGE on success. I'll change it then.
>
>> + folio_mark_accessed(folio);
>> +
>> out:
>> dma_resv_unlock(obj->resv);
>>
>> @@ -638,10 +641,27 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>> drm_gem_vm_close(vma);
>> }
>>
>> +static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
>> +{
>> + struct vm_area_struct *vma = vmf->vma;
>> + struct drm_gem_object *obj = vma->vm_private_data;
>> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>> + pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
>> + struct page *page = shmem->pages[page_offset];
> Should we have a
>
> if (WARN_ON(!shmem->pages ||
> page_offset >= (obj->size >> PAGE_SHIFT)))
> return VM_FAULT_SIGBUS;
>
>
> ?
I left it out because it doesn't seem necessary. In the fault handler
in drm_gem_shmem_fault(), I can see that we could get an OOB access. But
we only call pfn_mkwrite() after going through _fault() first. I don't
see a way of getting here unless we've already tested for the page in
_fault().
Best regards
Thomas
>
>> + struct folio *folio = page_folio(page);
>> +
>> + file_update_time(vma->vm_file);
>> +
>> + folio_mark_dirty(folio);
>> +
>> + return 0;
>> +}
>> +
>> const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>> .fault = drm_gem_shmem_fault,
>> .open = drm_gem_shmem_vm_open,
>> .close = drm_gem_shmem_vm_close,
>> + .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
>> };
>> EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>>
--
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Frankenstr. 146, 90461 Nürnberg, Germany, www.suse.com
GF: Jochen Jaser, Andrew McDonald, Werner Knoblich, (HRB 36809, AG Nürnberg)
* Re: [PATCH v3 5/6] drm/gem-shmem: Track folio accessed/dirty status in mmap
2026-02-09 14:46 ` Thomas Zimmermann
@ 2026-02-09 15:01 ` Boris Brezillon
0 siblings, 0 replies; 13+ messages in thread
From: Boris Brezillon @ 2026-02-09 15:01 UTC (permalink / raw)
To: Thomas Zimmermann
Cc: loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona, dri-devel, linux-mm
On Mon, 9 Feb 2026 15:46:21 +0100
Thomas Zimmermann <tzimmermann@suse.de> wrote:
> Hi Boris,
>
> thanks for reviewing the series.
>
> Am 09.02.26 um 15:23 schrieb Boris Brezillon:
> > On Mon, 9 Feb 2026 14:27:14 +0100
> > Thomas Zimmermann <tzimmermann@suse.de> wrote:
> >
> >> Invoke folio_mark_accessed() in mmap page faults to add the folio to
> >> the memory manager's LRU list. Userspace invokes mmap to get the memory
> >> for software rendering. Compositors do the same when creating the final
> >> on-screen image, so keeping the pages on the LRU makes sense. This avoids
> >> paging out graphics buffers under memory pressure.
> >>
> >> In pfn_mkwrite, further invoke folio_mark_dirty() to mark the folio
> >> for writeback should the underlying file be paged out of system memory.
> >> This rarely happens in practice, yet it would corrupt the buffer content.
> >>
> >> This has little effect on a system's hardware-accelerated rendering, which
> >> only mmaps for an initial setup of textures, meshes, shaders, etc.
> >>
> >> v3:
> >> - rewrite for VM_PFNMAP
> >> v2:
> >> - adapt to changes in drm_gem_shmem_try_map_pmd()
> >>
> >> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
> >> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> >> ---
> >> drivers/gpu/drm/drm_gem_shmem_helper.c | 20 ++++++++++++++++++++
> >> 1 file changed, 20 insertions(+)
> >>
> >> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> >> index c3a054899ba3..0c86ad40a049 100644
> >> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> >> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> >> @@ -598,6 +598,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> >> if (ret != VM_FAULT_NOPAGE)
> >> ret = vmf_insert_pfn(vma, vmf->address, pfn);
> >>
> >> + if (likely(!(ret & VM_FAULT_ERROR)))
> > Can't we just go
> >
> > if (ret == VM_FAULT_NOPAGE)
> >
> > here?
>
> After reviewing the code in vmf_insert_pfn, I think so. All we'll see is
> _OOM and _SIGBUS; or _NOPAGE on success. I'll change it then.
>
>
> >
> >> + folio_mark_accessed(folio);
> >> +
> >> out:
> >> dma_resv_unlock(obj->resv);
> >>
> >> @@ -638,10 +641,27 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> >> drm_gem_vm_close(vma);
> >> }
> >>
> >> +static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
> >> +{
> >> + struct vm_area_struct *vma = vmf->vma;
> >> + struct drm_gem_object *obj = vma->vm_private_data;
> >> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> >> + pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
> >> + struct page *page = shmem->pages[page_offset];
> > Should we have a
> >
> > if (WARN_ON(!shmem->pages ||
> > page_offset >= (obj->size >> PAGE_SHIFT)))
> > return VM_FAULT_SIGBUS;
> >
> >
> > ?
>
> I left it out because it doesn't seem necessary. In the fault handler
> in drm_gem_shmem_fault(), I can see that we could get an OOB access. But
> we only call pfn_mkwrite() after going through _fault() first. I don't
> see a way of getting here unless we've already tested for the page in
> _fault().
I agree it's not supposed to happen, but isn't it what WARN_ON()s are
for (catching unexpected situations)?
* [PATCH v3 6/6] drm/gem-shmem: Track folio accessed/dirty status in vmap
2026-02-09 13:27 [PATCH v3 0/6] drm/gem-shmem: Track page accessed/dirty status Thomas Zimmermann
` (4 preceding siblings ...)
2026-02-09 13:27 ` [PATCH v3 5/6] drm/gem-shmem: Track folio accessed/dirty status in mmap Thomas Zimmermann
@ 2026-02-09 13:27 ` Thomas Zimmermann
5 siblings, 0 replies; 13+ messages in thread
From: Thomas Zimmermann @ 2026-02-09 13:27 UTC (permalink / raw)
To: boris.brezillon, loic.molinari, willy, frank.binns, matt.coster,
maarten.lankhorst, mripard, airlied, simona
Cc: dri-devel, linux-mm, Thomas Zimmermann
On successful vmap, set the pages_mark_accessed_on_put and _dirty_on_put
flags in the gem-shmem object. This signals that the contained pages require
LRU and dirty tracking when they are released back to SHMEM. Clear
these flags on put, so that the buffer remains quiet until the next call
to vmap. There's no means of tracking dirty status within a vmap, as no
write-protected mapping is available.
Both flags, _accessed_on_put and _dirty_on_put, have always been part of
the gem-shmem object but were rarely used, so most drivers did not track
the page status correctly.
Only the v3d and imagination drivers make limited use of _dirty_on_put. In
the case of imagination, move the flag setting from init to cleanup. This
ensures writeback of modified pages but does not interfere with the
internal vmap/vunmap calls. V3d already implements this behaviour.
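For reference, both flags are consumed when the pages are released; the
call below is existing code, abridged from drm_gem_shmem_put_pages_locked():

        drm_gem_put_pages(obj, shmem->pages,
                          shmem->pages_mark_dirty_on_put,     /* folio_mark_dirty() on each */
                          shmem->pages_mark_accessed_on_put); /* folio_mark_accessed() on each */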
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> # gem-shmem
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 4 ++++
drivers/gpu/drm/imagination/pvr_gem.c | 6 ++++--
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 0c86ad40a049..dda9af9bf5b3 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -265,6 +265,8 @@ void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
shmem->pages = NULL;
+ shmem->pages_mark_accessed_on_put = false;
+ shmem->pages_mark_dirty_on_put = false;
}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
@@ -397,6 +399,8 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
} else {
iosys_map_set_vaddr(map, shmem->vaddr);
refcount_set(&shmem->vmap_use_count, 1);
+ shmem->pages_mark_accessed_on_put = true;
+ shmem->pages_mark_dirty_on_put = true;
}
}
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index c07c9a915190..307b02c916d4 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -25,7 +25,10 @@
static void pvr_gem_object_free(struct drm_gem_object *obj)
{
- drm_gem_shmem_object_free(obj);
+ struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(obj);
+
+ shmem_obj->pages_mark_dirty_on_put = true;
+ drm_gem_shmem_free(shmem_obj);
}
static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
@@ -363,7 +366,6 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
- shmem_obj->pages_mark_dirty_on_put = true;
shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
pvr_obj = shmem_gem_to_pvr_gem(shmem_obj);
pvr_obj->flags = flags;
--
2.52.0