From: Leon Romanovsky <leon@kernel.org>
To: Jens Axboe <axboe@kernel.dk>, Jason Gunthorpe <jgg@ziepe.ca>,
Robin Murphy <robin.murphy@arm.com>,
Joerg Roedel <joro@8bytes.org>, Will Deacon <will@kernel.org>,
Keith Busch <kbusch@kernel.org>, Christoph Hellwig <hch@lst.de>,
"Zeng, Oak" <oak.zeng@intel.com>,
Chaitanya Kulkarni <kch@nvidia.com>
Cc: "Leon Romanovsky" <leonro@nvidia.com>,
"Sagi Grimberg" <sagi@grimberg.me>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Logan Gunthorpe" <logang@deltatee.com>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Shameer Kolothum" <shameerali.kolothum.thodi@huawei.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Marek Szyprowski" <m.szyprowski@samsung.com>,
"Jérôme Glisse" <jglisse@redhat.com>,
"Andrew Morton" <akpm@linux-foundation.org>,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-rdma@vger.kernel.org, iommu@lists.linux.dev,
linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org,
kvm@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC PATCH v1 04/18] dma-mapping: implement link range API
Date: Tue, 2 Jul 2024 12:09:34 +0300 [thread overview]
Message-ID: <8944a1211b243fed1234a56bc8004a11dbf85a87.1719909395.git.leon@kernel.org> (raw)
In-Reply-To: <cover.1719909395.git.leon@kernel.org>
From: Leon Romanovsky <leonro@nvidia.com>
Introduce new DMA APIs to perform DMA linkage of buffers
in layers higher than DMA.
In the proposed API, the callers will perform the following steps:
dma_alloc_iova()
if (dma_can_use_iova(...))
dma_start_range(...)
for (page in range)
dma_link_range(...)
dma_end_range(...)
else
/* Fallback to legacy map pages */
dma_map_page(...)
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
include/linux/dma-map-ops.h | 6 +++
include/linux/dma-mapping.h | 22 +++++++++++
kernel/dma/mapping.c | 78 ++++++++++++++++++++++++++++++++++++-
3 files changed, 105 insertions(+), 1 deletion(-)
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index b52e9c8db241..4868586b015e 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -90,6 +90,12 @@ struct dma_map_ops {
dma_addr_t (*alloc_iova)(struct device *dev, size_t size);
void (*free_iova)(struct device *dev, dma_addr_t dma_addr, size_t size);
+ int (*link_range)(struct dma_iova_state *state, phys_addr_t phys,
+ dma_addr_t addr, size_t size);
+ void (*unlink_range)(struct dma_iova_state *state,
+ dma_addr_t dma_handle, size_t size);
+ int (*start_range)(struct dma_iova_state *state);
+ void (*end_range)(struct dma_iova_state *state);
};
#ifdef CONFIG_DMA_OPS
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 9d1e020869a6..c530095ff232 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -11,6 +11,7 @@
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
+#include <linux/iommu.h>
/**
* List of possible attributes associated with a DMA mapping. The semantics
@@ -103,6 +104,8 @@ struct dma_iova_attrs {
struct dma_iova_state {
struct dma_iova_attrs *iova;
struct dma_memory_type *type;
+ struct iommu_domain *domain;
+ size_t range_size;
};
#ifdef CONFIG_DMA_API_DEBUG
@@ -184,6 +187,10 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
void dma_get_memory_type(struct page *page, struct dma_memory_type *type);
bool dma_can_use_iova(struct dma_iova_state *state, size_t size);
+int dma_start_range(struct dma_iova_state *state);
+void dma_end_range(struct dma_iova_state *state);
+int dma_link_range(struct dma_iova_state *state, phys_addr_t phys, size_t size);
+void dma_unlink_range(struct dma_iova_state *state);
#else /* CONFIG_HAS_DMA */
static inline int dma_alloc_iova(struct dma_iova_attrs *iova)
{
@@ -329,6 +336,21 @@ static inline bool dma_can_use_iova(struct dma_iova_state *state, size_t size)
{
return false;
}
+static inline int dma_start_range(struct dma_iova_state *state)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_end_range(struct dma_iova_state *state)
+{
+}
+static inline int dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_unlink_range(struct dma_iova_state *state)
+{
+}
#endif /* CONFIG_HAS_DMA */
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 9044ee525fdb..089b4a977bab 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -989,7 +989,8 @@ bool dma_can_use_iova(struct dma_iova_state *state, size_t size)
dev_use_swiotlb(dev, size, state->iova->dir))
return false;
- if (dma_map_direct(dev, ops) || !ops->alloc_iova)
+ if (dma_map_direct(dev, ops) || !ops->alloc_iova || !ops->link_range ||
+ !ops->start_range)
return false;
if (type->type == DMA_MEMORY_TYPE_P2P) {
@@ -1000,3 +1001,78 @@ bool dma_can_use_iova(struct dma_iova_state *state, size_t size)
return type->type == DMA_MEMORY_TYPE_NORMAL;
}
EXPORT_SYMBOL_GPL(dma_can_use_iova);
+
+/**
+ * dma_start_range - Start a range of IOVA space
+ * @state: IOVA state
+ *
+ * Start a range of IOVA space for the given IOVA state.
+ */
+int dma_start_range(struct dma_iova_state *state)
+{
+ struct device *dev = state->iova->dev;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops->start_range)
+ return 0;
+
+ return ops->start_range(state);
+}
+EXPORT_SYMBOL_GPL(dma_start_range);
+
+/**
+ * dma_end_range - End a range of IOVA space
+ * @state: IOVA state
+ *
+ * End a range of IOVA space for the given IOVA state.
+ */
+void dma_end_range(struct dma_iova_state *state)
+{
+ struct device *dev = state->iova->dev;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops->end_range)
+ return;
+
+ ops->end_range(state);
+}
+EXPORT_SYMBOL_GPL(dma_end_range);
+
+/**
+ * dma_link_range - Link a range of IOVA space
+ * @state: IOVA state
+ * @phys: physical address to link
+ * @size: size of the buffer
+ *
+ * Link a range of IOVA space for the given IOVA state.
+ */
+int dma_link_range(struct dma_iova_state *state, phys_addr_t phys, size_t size)
+{
+ struct device *dev = state->iova->dev;
+ dma_addr_t addr = state->iova->addr + state->range_size;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ int ret;
+
+ ret = ops->link_range(state, phys, addr, size);
+ if (ret)
+ return ret;
+
+ state->range_size += size;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_link_range);
+
+/**
+ * dma_unlink_range - Unlink a range of IOVA space
+ * @state: IOVA state
+ *
+ * Unlink a range of IOVA space for the given IOVA state.
+ */
+void dma_unlink_range(struct dma_iova_state *state)
+{
+ struct device *dev = state->iova->dev;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ops->unlink_range(state, state->iova->addr, state->range_size);
+}
+EXPORT_SYMBOL_GPL(dma_unlink_range);
--
2.45.2
next prev parent reply other threads:[~2024-07-02 9:10 UTC|newest]
Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-07-02 9:09 [RFC PATCH v1 00/18] Provide a new two step DMA API mapping API Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 01/18] dma-mapping: query DMA memory type Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 02/18] dma-mapping: provide an interface to allocate IOVA Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 03/18] dma-mapping: check if IOVA can be used Leon Romanovsky
2024-07-02 9:09 ` Leon Romanovsky [this message]
2024-07-02 9:09 ` [RFC PATCH v1 05/18] mm/hmm: let users to tag specific PFN with DMA mapped bit Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 06/18] dma-mapping: provide callbacks to link/unlink HMM PFNs to specific IOVA Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 07/18] iommu/dma: Provide an interface to allow preallocate IOVA Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 08/18] iommu/dma: Implement link/unlink ranges callbacks Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 09/18] RDMA/umem: Preallocate and cache IOVA for UMEM ODP Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 10/18] RDMA/umem: Store ODP access mask information in PFN Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 11/18] RDMA/core: Separate DMA mapping to caching IOVA and page linkage Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 12/18] RDMA/umem: Prevent UMEM ODP creation with SWIOTLB Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 13/18] vfio/mlx5: Explicitly use number of pages instead of allocated length Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 14/18] vfio/mlx5: Rewrite create mkey flow to allow better code reuse Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 15/18] vfio/mlx5: Explicitly store page list Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 16/18] vfio/mlx5: Convert vfio to use DMA link API Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 17/18] block: export helper to get segment max size Leon Romanovsky
2024-07-02 9:09 ` [RFC PATCH v1 18/18] nvme-pci: use new dma API Leon Romanovsky
2024-07-04 15:23 ` Robin Murphy
2024-07-04 17:16 ` Leon Romanovsky
2024-07-05 5:58 ` Christoph Hellwig
2024-07-05 18:48 ` Leon Romanovsky
2024-07-06 6:08 ` Christoph Hellwig
2024-07-03 5:42 ` [RFC PATCH v1 00/18] Provide a new two step DMA API mapping API Christoph Hellwig
2024-07-03 10:42 ` Zhu Yanjun
2024-07-03 10:52 ` Leon Romanovsky
2024-07-03 14:35 ` Christoph Hellwig
2024-07-03 15:51 ` Leon Romanovsky
2024-07-04 7:48 ` Christoph Hellwig
2024-07-04 13:18 ` Leon Romanovsky
2024-07-05 6:00 ` Christoph Hellwig
2024-07-08 16:52 ` Jason Gunthorpe
2024-07-09 6:17 ` Christoph Hellwig
2024-07-09 18:53 ` Jason Gunthorpe
2024-07-10 6:27 ` Christoph Hellwig
2024-07-11 23:21 ` Jason Gunthorpe
2024-07-05 22:53 ` Chaitanya Kulkarni
2024-07-06 6:26 ` Christoph Hellwig
2024-07-07 9:16 ` Leon Romanovsky
2024-07-07 12:45 ` Leon Romanovsky
2024-07-05 6:39 ` Christoph Hellwig
2024-07-07 9:45 ` Leon Romanovsky
2024-07-08 23:57 ` Jason Gunthorpe
2024-07-09 6:20 ` Christoph Hellwig
2024-07-09 19:03 ` Jason Gunthorpe
2024-07-10 6:22 ` Christoph Hellwig
2024-07-11 23:29 ` Jason Gunthorpe
2024-07-12 4:54 ` Christoph Hellwig
2024-07-12 12:42 ` Jason Gunthorpe
2024-07-13 5:24 ` Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=8944a1211b243fed1234a56bc8004a11dbf85a87.1719909395.git.leon@kernel.org \
--to=leon@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=alex.williamson@redhat.com \
--cc=axboe@kernel.dk \
--cc=bhelgaas@google.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=jglisse@redhat.com \
--cc=joro@8bytes.org \
--cc=kbusch@kernel.org \
--cc=kch@nvidia.com \
--cc=kevin.tian@intel.com \
--cc=kvm@vger.kernel.org \
--cc=leonro@nvidia.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvme@lists.infradead.org \
--cc=linux-pci@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=logang@deltatee.com \
--cc=m.szyprowski@samsung.com \
--cc=oak.zeng@intel.com \
--cc=robin.murphy@arm.com \
--cc=sagi@grimberg.me \
--cc=shameerali.kolothum.thodi@huawei.com \
--cc=will@kernel.org \
--cc=yishaih@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox