From: Leon Romanovsky <leon@kernel.org>
To: Jens Axboe <axboe@kernel.dk>, Jason Gunthorpe <jgg@ziepe.ca>,
Robin Murphy <robin.murphy@arm.com>,
Joerg Roedel <joro@8bytes.org>, Will Deacon <will@kernel.org>,
Keith Busch <kbusch@kernel.org>, Christoph Hellwig <hch@lst.de>,
"Zeng, Oak" <oak.zeng@intel.com>,
Chaitanya Kulkarni <kch@nvidia.com>
Cc: "Leon Romanovsky" <leonro@nvidia.com>,
"Sagi Grimberg" <sagi@grimberg.me>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Logan Gunthorpe" <logang@deltatee.com>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Shameer Kolothum" <shameerali.kolothum.thodi@huawei.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Marek Szyprowski" <m.szyprowski@samsung.com>,
"Jérôme Glisse" <jglisse@redhat.com>,
"Andrew Morton" <akpm@linux-foundation.org>,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-rdma@vger.kernel.org, iommu@lists.linux.dev,
linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org,
kvm@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC PATCH v1 07/18] iommu/dma: Provide an interface to allow IOVA preallocation
Date: Tue, 2 Jul 2024 12:09:37 +0300
Message-ID: <eff6e989367605a1fb31823b503a78ab79cac432.1719909395.git.leon@kernel.org>
In-Reply-To: <cover.1719909395.git.leon@kernel.org>
From: Leon Romanovsky <leonro@nvidia.com>
Split IOVA allocation into a dedicated callback so that the IOVA can be
cached and reused in fast paths by devices that support the ODP
(on-demand paging) mechanism.
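For illustration only (not part of this patch): a consumer that supports
ODP could preallocate an IOVA range once and keep it cached for reuse on
every fault. The dma_alloc_iova()/dma_free_iova() wrappers used below are
assumed from the dma-mapping patches earlier in this series; the helper
names and exact signatures are hypothetical and shown only to sketch the
intended calling pattern:

  /*
   * Hypothetical sketch: allocate the IOVA range once in the slow path
   * and cache it; failure is signalled by a zero IOVA, matching the
   * convention of __iommu_dma_alloc_iova() below.
   */
  static int example_odp_init(struct device *dev, size_t size,
  			      dma_addr_t *cached_iova)
  {
  	/* Assumed wrapper over the new .alloc_iova dma_map_ops callback. */
  	*cached_iova = dma_alloc_iova(dev, size);
  	if (!*cached_iova)
  		return -ENOMEM;
  	return 0;
  }

  static void example_odp_fini(struct device *dev, dma_addr_t cached_iova,
  			       size_t size)
  {
  	/* Assumed wrapper over the new .free_iova dma_map_ops callback. */
  	dma_free_iova(dev, cached_iova, size);
  }

The cached range would then be populated and torn down through the
link/unlink range callbacks introduced in the following patch, keeping
IOVA allocation out of the fault path.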
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/iommu/dma-iommu.c | 50 +++++++++++++++++++++++++++++----------
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 89e34503e0bb..0b5ca6961940 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -357,7 +357,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
atomic_set(&cookie->fq_timer_on, 0);
/*
* Prevent incomplete fq state being observable. Pairs with path from
- * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+ * __iommu_dma_unmap() through __iommu_dma_free_iova() to queue_iova()
*/
smp_wmb();
WRITE_ONCE(cookie->fq_domain, domain);
@@ -745,7 +745,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
}
-static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+static dma_addr_t __iommu_dma_alloc_iova(struct iommu_domain *domain,
size_t size, u64 dma_limit, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -791,7 +791,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
return (dma_addr_t)iova << shift;
}
-static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+static void __iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
struct iova_domain *iovad = &cookie->iovad;
@@ -828,7 +828,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
if (!iotlb_gather.queued)
iommu_iotlb_sync(domain, &iotlb_gather);
- iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
+ __iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -851,12 +851,12 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size = iova_align(iovad, size + iova_off);
- iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+ iova = __iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ __iommu_dma_free_iova(cookie, iova, size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
@@ -960,7 +960,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
return NULL;
size = iova_align(iovad, size);
- iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+ iova = __iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
@@ -994,7 +994,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
out_free_sg:
sg_free_table(sgt);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ __iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
@@ -1429,7 +1429,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (!iova_len)
return __finalise_sg(dev, sg, nents, 0);
- iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
+ iova = __iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova) {
ret = -ENOMEM;
goto out_restore_sg;
@@ -1446,7 +1446,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
return __finalise_sg(dev, sg, nents, iova);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, iova_len, NULL);
+ __iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
__invalidate_sg(sg, nents);
out:
@@ -1707,6 +1707,30 @@ static size_t iommu_dma_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
+static dma_addr_t iommu_dma_alloc_iova(struct device *dev, size_t size)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ dma_addr_t dma_mask = dma_get_mask(dev);
+
+ size = iova_align(iovad, size);
+ return __iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+}
+
+static void iommu_dma_free_iova(struct device *dev, dma_addr_t iova,
+ size_t size)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iommu_iotlb_gather iotlb_gather;
+
+ size = iova_align(iovad, size);
+ iommu_iotlb_gather_init(&iotlb_gather);
+ __iommu_dma_free_iova(cookie, iova, size, &iotlb_gather);
+}
+
static const struct dma_map_ops iommu_dma_ops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED |
DMA_F_CAN_SKIP_SYNC,
@@ -1731,6 +1755,8 @@ static const struct dma_map_ops iommu_dma_ops = {
.get_merge_boundary = iommu_dma_get_merge_boundary,
.opt_mapping_size = iommu_dma_opt_mapping_size,
.max_mapping_size = iommu_dma_max_mapping_size,
+ .alloc_iova = iommu_dma_alloc_iova,
+ .free_iova = iommu_dma_free_iova,
};
void iommu_setup_dma_ops(struct device *dev)
@@ -1773,7 +1799,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ iova = __iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
goto out_free_page;
@@ -1787,7 +1813,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ __iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
kfree(msi_page);
return NULL;
--
2.45.2