From: Ryosuke Yasuoka <ryasuoka@redhat.com>
To: maarten.lankhorst@linux.intel.com, mripard@kernel.org,
tzimmermann@suse.de, airlied@gmail.com, simona@ffwll.ch,
kraxel@redhat.com, gurchetansingh@chromium.org,
olvaffe@gmail.com, akpm@linux-foundation.org, urezki@gmail.com,
hch@infradead.org, dmitry.osipenko@collabora.com,
jfalempe@redhat.com
Cc: Ryosuke Yasuoka <ryasuoka@redhat.com>,
dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org,
virtualization@lists.linux.dev, linux-mm@kvack.org
Subject: [PATCH drm-next 2/2] drm/virtio: Use atomic_vmap to work drm_panic in GUI
Date: Thu, 6 Mar 2025 00:25:54 +0900 [thread overview]
Message-ID: <20250305152555.318159-3-ryasuoka@redhat.com> (raw)
In-Reply-To: <20250305152555.318159-1-ryasuoka@redhat.com>
The virtio drm_panic handler supports only vmapped shmem BOs because there
was no atomic vmap feature. Now that atomic_vmap is supported, drm_panic
tries to vmap the buffer if it is not already mapped.
Signed-off-by: Ryosuke Yasuoka <ryasuoka@redhat.com>
---
drivers/gpu/drm/drm_gem.c | 51 ++++++++++++++++++++++++++
drivers/gpu/drm/drm_gem_shmem_helper.c | 51 ++++++++++++++++++++++++++
drivers/gpu/drm/virtio/virtgpu_plane.c | 14 +++++--
include/drm/drm_gem.h | 1 +
include/drm/drm_gem_shmem_helper.h | 2 +
5 files changed, 116 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ee811764c3df..eebfaef3a52e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -535,6 +535,57 @@ static void drm_gem_check_release_batch(struct folio_batch *fbatch)
cond_resched();
}
+struct page **drm_gem_atomic_get_pages(struct drm_gem_object *obj)
+{
+ struct address_space *mapping;
+ struct page **pages;
+ struct folio *folio;
+ long i, j, npages;
+
+ if (WARN_ON(!obj->filp))
+ return ERR_PTR(-EINVAL);
+
+ /* This is the shared memory object that backs the GEM resource */
+ mapping = obj->filp->f_mapping;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mapping_set_unevictable(mapping);
+
+ i = 0;
+ while (i < npages) {
+ long nr;
+
+ folio = shmem_read_folio_gfp(mapping, i,
+ GFP_ATOMIC);
+ if (IS_ERR(folio))
+ return ERR_PTR(-ENOMEM);
+ nr = min(npages - i, folio_nr_pages(folio));
+ for (j = 0; j < nr; j++, i++)
+ pages[i] = folio_file_page(folio, i);
+
+ /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
+ * correct region during swapin. Note that this requires
+ * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
+ * so shmem can relocate pages during swapin if required.
+ */
+ BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
+ (folio_pfn(folio) >= 0x00100000UL));
+ }
+
+ return pages;
+}
+
/**
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5ab351409312..789dfd726a36 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -186,6 +186,34 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
+static int drm_gem_shmem_atomic_get_pages(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ struct page **pages;
+
+ pages = drm_gem_atomic_get_pages(obj);
+ if (IS_ERR(pages)) {
+ drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+ PTR_ERR(pages));
+ shmem->pages_use_count = 0;
+ return PTR_ERR(pages);
+ }
+
+ /*
+ * TODO: Allocating WC pages which are correctly flushed is only
+ * supported on x86. Ideal solution would be a GFP_WC flag, which also
+ * ttm_pool.c could use.
+ */
+#ifdef CONFIG_X86
+ if (shmem->map_wc)
+ set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
+#endif
+
+ shmem->pages = pages;
+
+ return 0;
+}
+
static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
@@ -317,6 +345,29 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
+int drm_gem_shmem_atomic_vmap(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ int ret = 0;
+
+ pgprot_t prot = PAGE_KERNEL;
+
+ ret = drm_gem_shmem_atomic_get_pages(shmem);
+ if (ret)
+ return -ENOMEM;
+
+ if (shmem->map_wc)
+ prot = pgprot_writecombine(prot);
+ shmem->vaddr = atomic_vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, prot);
+ if (!shmem->vaddr)
+ return -ENOMEM;
+ iosys_map_set_vaddr(map, shmem->vaddr);
+
+ return 0;
+}
+
/*
* drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a6f5a78f436a..2a977c5cf42a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -500,11 +500,19 @@ static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
- /* Only support mapped shmem bo */
- if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
+ if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach)
return -ENODEV;
- iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ /* try to vmap it if possible */
+ if (!bo->base.vaddr) {
+ int ret;
+
+ ret = drm_gem_shmem_atomic_vmap(&bo->base, &sb->map[0]);
+ if (ret)
+ return ret;
+ } else {
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ }
sb->format = plane->state->fb->format;
sb->height = plane->state->fb->height;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index fdae947682cd..cfed66bc12ef 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -529,6 +529,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+struct page **drm_gem_atomic_get_pages(struct drm_gem_object *obj);
struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index d22e3fb53631..86a357945f42 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -105,6 +105,8 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_atomic_vmap(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
struct iosys_map *map);
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
--
2.48.1
next prev parent reply other threads:[~2025-03-05 19:04 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-03-05 15:25 [PATCH drm-next 0/2] Enhance drm_panic Support for Virtio-GPU Ryosuke Yasuoka
2025-03-05 15:25 ` [PATCH drm-next 1/2] vmalloc: Add atomic_vmap Ryosuke Yasuoka
2025-03-05 17:08 ` Markus Elfring
2025-03-05 17:27 ` Uladzislau Rezki
2025-03-06 4:52 ` Matthew Wilcox
2025-03-06 13:24 ` Jocelyn Falempe
2025-03-06 14:04 ` Uladzislau Rezki
2025-03-06 15:52 ` Simona Vetter
2025-03-07 7:54 ` Jocelyn Falempe
2025-03-09 8:07 ` Ryosuke Yasuoka
2025-03-10 10:23 ` Jocelyn Falempe
2025-03-05 15:25 ` Ryosuke Yasuoka [this message]
2025-03-06 23:56 ` [PATCH drm-next 2/2] drm/virtio: Use atomic_vmap to work drm_panic in GUI kernel test robot
2025-03-07 3:07 ` kernel test robot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250305152555.318159-3-ryasuoka@redhat.com \
--to=ryasuoka@redhat.com \
--cc=airlied@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=dmitry.osipenko@collabora.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=gurchetansingh@chromium.org \
--cc=hch@infradead.org \
--cc=jfalempe@redhat.com \
--cc=kraxel@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=maarten.lankhorst@linux.intel.com \
--cc=mripard@kernel.org \
--cc=olvaffe@gmail.com \
--cc=simona@ffwll.ch \
--cc=tzimmermann@suse.de \
--cc=urezki@gmail.com \
--cc=virtualization@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox