[PATCH v3 1/2] udmabuf: Use vmf_insert_pfn and VM_PFNMAP for handling mmap
From: Vivek Kasireddy
Date: 2023-08-17 6:46 UTC
To: dri-devel, linux-mm
Cc: Vivek Kasireddy, David Hildenbrand, Daniel Vetter, Mike Kravetz,
Hugh Dickins, Peter Xu, Jason Gunthorpe, Gerd Hoffmann,
Dongwon Kim, Junxiao Chang
Add VM_PFNMAP to vm_flags in the mmap handler to ensure that
the mappings are managed without using struct page. And, in the
vm_fault handler, use vmf_insert_pfn to share the page's pfn with
userspace instead of directly sharing the page (via struct page *).
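For reference, a minimal userspace sketch of the path this change
affects (illustrative only, not part of this patch; buf_fd and size
are assumed to come from a prior UDMABUF_CREATE ioctl):

    /* map the dma-buf fd; the first touch of each page now faults
     * through udmabuf_vm_fault(), which inserts the pfn via
     * vmf_insert_pfn() instead of taking a struct page reference */
    void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, buf_fd, 0);
    if (addr != MAP_FAILED) {
            memset(addr, 0, size);  /* triggers the fault handler */
            munmap(addr, size);
    }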
Cc: David Hildenbrand <david@redhat.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Dongwon Kim <dongwon.kim@intel.com>
Cc: Junxiao Chang <junxiao.chang@intel.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
drivers/dma-buf/udmabuf.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index c40645999648..820c993c8659 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -35,12 +35,13 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct udmabuf *ubuf = vma->vm_private_data;
pgoff_t pgoff = vmf->pgoff;
+ unsigned long pfn;
if (pgoff >= ubuf->pagecount)
return VM_FAULT_SIGBUS;
- vmf->page = ubuf->pages[pgoff];
- get_page(vmf->page);
- return 0;
+
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
+ return vmf_insert_pfn(vma, vmf->address, pfn);
}
static const struct vm_operations_struct udmabuf_vm_ops = {
@@ -56,6 +57,7 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
vma->vm_ops = &udmabuf_vm_ops;
vma->vm_private_data = ubuf;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
--
2.39.2
[PATCH v3 2/2] udmabuf: Add back support for mapping hugetlb pages (v3)
From: Vivek Kasireddy
Date: 2023-08-17 6:46 UTC
To: dri-devel, linux-mm
Cc: Vivek Kasireddy, David Hildenbrand, Daniel Vetter, Mike Kravetz,
Hugh Dickins, Peter Xu, Jason Gunthorpe, Gerd Hoffmann,
Dongwon Kim, Junxiao Chang
A user or admin can configure a VMM (Qemu) Guest's memory to be
backed by hugetlb pages for various reasons. However, a Guest OS
would still allocate (and pin) buffers that are backed by regular
4k-sized pages. In order to map these buffers and create dma-bufs
for them on the Host, we first need to find the hugetlb pages where
the buffer allocations are located, then determine the offsets of
the individual chunks within those pages, and use this information
to populate a scatterlist.
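To make the bookkeeping concrete, here is the arithmetic for one
chunk (illustrative numbers only, assuming the 2M huge page size
used in the testcase below):

    /*
     * list[i].offset = 0x203000, huge_page_shift = 21 (2M):
     * mapidx   = 0x203000 >> 21                          = 1
     * chunkoff = (0x203000 & (0x200000 - 1)) >> PAGE_SHIFT = 3
     * i.e. the chunk is the 4th 4k page within the 2nd huge page
     * of the memfd, and subpgoff stores chunkoff << PAGE_SHIFT.
     */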
Testcase: default_hugepagesz=2M hugepagesz=2M hugepages=2500 options
were passed to the Host kernel and Qemu was launched with these
relevant options: qemu-system-x86_64 -m 4096m....
-device virtio-gpu-pci,max_outputs=1,blob=true,xres=1920,yres=1080
-display gtk,gl=on
-object memory-backend-memfd,hugetlb=on,id=mem1,size=4096M
-machine memory-backend=mem1
Replacing -display gtk,gl=on with -display gtk,gl=off above would
exercise the mmap handler.
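For completeness, a self-contained userspace sketch of the creation
path exercised here (error handling omitted; uses only the existing
memfd and udmabuf uAPI):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/udmabuf.h>

    /* create a udmabuf from a hugetlb-backed memfd; size must be
     * a multiple of the huge page size */
    static int create_hugetlb_udmabuf(size_t size)
    {
            int memfd, devfd;
            struct udmabuf_create create = { 0 };

            memfd = memfd_create("test", MFD_ALLOW_SEALING | MFD_HUGETLB);
            ftruncate(memfd, size);
            /* udmabuf requires the memfd to be sealed against shrinking */
            fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

            create.memfd = memfd;
            create.offset = 0;
            create.size = size;

            devfd = open("/dev/udmabuf", O_RDWR);
            return ioctl(devfd, UDMABUF_CREATE, &create); /* dma-buf fd */
    }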
v2: Updated get_sg_table() to manually populate the scatterlist for
both huge page and non-huge-page cases.
v3: s/offsets/subpgoff/g
s/hpoff/mapidx/g
Cc: David Hildenbrand <david@redhat.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Dongwon Kim <dongwon.kim@intel.com>
Cc: Junxiao Chang <junxiao.chang@intel.com>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com> (v2)
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
drivers/dma-buf/udmabuf.c | 85 +++++++++++++++++++++++++++++++++------
1 file changed, 72 insertions(+), 13 deletions(-)
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 820c993c8659..1a41c4a069ea 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -10,6 +10,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
+#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
@@ -28,6 +29,7 @@ struct udmabuf {
struct page **pages;
struct sg_table *sg;
struct miscdevice *device;
+ pgoff_t *subpgoff;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -41,6 +43,10 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
pfn = page_to_pfn(ubuf->pages[pgoff]);
+ if (ubuf->subpgoff) {
+ pfn += ubuf->subpgoff[pgoff] >> PAGE_SHIFT;
+ }
+
return vmf_insert_pfn(vma, vmf->address, pfn);
}
@@ -90,23 +96,31 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
{
struct udmabuf *ubuf = buf->priv;
struct sg_table *sg;
+ struct scatterlist *sgl;
+ pgoff_t offset;
+ unsigned long i = 0;
int ret;
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
if (!sg)
return ERR_PTR(-ENOMEM);
- ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
- 0, ubuf->pagecount << PAGE_SHIFT,
- GFP_KERNEL);
+
+ ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
if (ret < 0)
- goto err;
+ goto err_alloc;
+
+ for_each_sg(sg->sgl, sgl, ubuf->pagecount, i) {
+ offset = ubuf->subpgoff ? ubuf->subpgoff[i] : 0;
+ sg_set_page(sgl, ubuf->pages[i], PAGE_SIZE, offset);
+ }
ret = dma_map_sgtable(dev, sg, direction, 0);
if (ret < 0)
- goto err;
+ goto err_map;
return sg;
-err:
+err_map:
sg_free_table(sg);
+err_alloc:
kfree(sg);
return ERR_PTR(ret);
}
@@ -143,6 +157,7 @@ static void release_udmabuf(struct dma_buf *buf)
for (pg = 0; pg < ubuf->pagecount; pg++)
put_page(ubuf->pages[pg]);
+ kfree(ubuf->subpgoff);
kfree(ubuf->pages);
kfree(ubuf);
}
@@ -206,7 +221,9 @@ static long udmabuf_create(struct miscdevice *device,
struct udmabuf *ubuf;
struct dma_buf *buf;
pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
- struct page *page;
+ struct page *page, *hpage = NULL;
+ pgoff_t mapidx, chunkoff, maxchunks;
+ struct hstate *hpstate;
int seals, ret = -EINVAL;
u32 i, flags;
@@ -242,7 +259,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!memfd)
goto err;
mapping = memfd->f_mapping;
- if (!shmem_mapping(mapping))
+ if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
goto err;
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
if (seals == -EINVAL)
@@ -253,16 +270,57 @@ static long udmabuf_create(struct miscdevice *device,
goto err;
pgoff = list[i].offset >> PAGE_SHIFT;
pgcnt = list[i].size >> PAGE_SHIFT;
+ if (is_file_hugepages(memfd)) {
+ if (!ubuf->subpgoff) {
+ ubuf->subpgoff = kmalloc_array(ubuf->pagecount,
+ sizeof(*ubuf->subpgoff),
+ GFP_KERNEL);
+ if (!ubuf->subpgoff) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+ hpstate = hstate_file(memfd);
+ mapidx = list[i].offset >> huge_page_shift(hpstate);
+ chunkoff = (list[i].offset &
+ ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+ maxchunks = huge_page_size(hpstate) >> PAGE_SHIFT;
+ }
for (pgidx = 0; pgidx < pgcnt; pgidx++) {
- page = shmem_read_mapping_page(mapping, pgoff + pgidx);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto err;
+ if (is_file_hugepages(memfd)) {
+ if (!hpage) {
+ hpage = find_get_page_flags(mapping, mapidx,
+ FGP_ACCESSED);
+ if (!hpage) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ get_page(hpage);
+ ubuf->pages[pgbuf] = hpage;
+ ubuf->subpgoff[pgbuf++] = chunkoff << PAGE_SHIFT;
+ if (++chunkoff == maxchunks) {
+ put_page(hpage);
+ hpage = NULL;
+ chunkoff = 0;
+ mapidx++;
+ }
+ } else {
+ mapidx = pgoff + pgidx;
+ page = shmem_read_mapping_page(mapping, mapidx);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto err;
+ }
+ ubuf->pages[pgbuf++] = page;
}
- ubuf->pages[pgbuf++] = page;
}
fput(memfd);
memfd = NULL;
+ if (hpage) {
+ put_page(hpage);
+ hpage = NULL;
+ }
}
exp_info.ops = &udmabuf_ops;
@@ -287,6 +345,7 @@ static long udmabuf_create(struct miscdevice *device,
put_page(ubuf->pages[--pgbuf]);
if (memfd)
fput(memfd);
+ kfree(ubuf->subpgoff);
kfree(ubuf->pages);
kfree(ubuf);
return ret;
--
2.39.2