From: Leon Romanovsky <leon@kernel.org>
To: Marek Szyprowski <m.szyprowski@samsung.com>,
Jens Axboe <axboe@kernel.dk>, Christoph Hellwig <hch@lst.de>,
Keith Busch <kbusch@kernel.org>
Cc: "Jake Edge" <jake@lwn.net>, "Jonathan Corbet" <corbet@lwn.net>,
"Jason Gunthorpe" <jgg@ziepe.ca>,
"Zhu Yanjun" <zyjzyj2000@gmail.com>,
"Robin Murphy" <robin.murphy@arm.com>,
"Joerg Roedel" <joro@8bytes.org>, "Will Deacon" <will@kernel.org>,
"Sagi Grimberg" <sagi@grimberg.me>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Logan Gunthorpe" <logang@deltatee.com>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Shameer Kolothum" <shameerali.kolothum.thodi@huawei.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Jérôme Glisse" <jglisse@redhat.com>,
"Andrew Morton" <akpm@linux-foundation.org>,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-block@vger.kernel.org, linux-rdma@vger.kernel.org,
iommu@lists.linux.dev, linux-nvme@lists.infradead.org,
linux-pci@vger.kernel.org, kvm@vger.kernel.org,
linux-mm@kvack.org, "Niklas Schnelle" <schnelle@linux.ibm.com>,
"Chuck Lever" <chuck.lever@oracle.com>,
"Luis Chamberlain" <mcgrof@kernel.org>,
"Matthew Wilcox" <willy@infradead.org>,
"Dan Williams" <dan.j.williams@intel.com>,
"Kanchan Joshi" <joshi.k@samsung.com>,
"Chaitanya Kulkarni" <kch@nvidia.com>,
"Leon Romanovsky" <leonro@nvidia.com>
Subject: [PATCH v10 20/24] blk-mq: add scatterlist-less DMA mapping helpers
Date: Mon, 28 Apr 2025 12:22:26 +0300
Message-ID: <0ecf38e2b83f8ed60cdc3b9be56012654f284751.1745831017.git.leon@kernel.org>
In-Reply-To: <cover.1745831017.git.leon@kernel.org>
From: Christoph Hellwig <hch@lst.de>
Add a new blk_rq_dma_map / blk_rq_dma_unmap pair that does away with
the wasteful scatterlist structure. Instead it uses the mapping iterator
to either add segments to the IOVA for IOMMU operations, or map them
one by one for the direct mapping. For the IOMMU case, instead of
a scatterlist with an entry for each segment, only a single [dma_addr,len]
pair needs to be stored for processing a request, and for the direct
mapping the per-segment allocation shrinks from
[page,offset,len,dma_addr,dma_len] to just [dma_addr,len].
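As a minimal usage sketch (illustration only, not part of this patch;
driver_map() and driver_add_segment() are hypothetical placeholders,
while the helpers and iter fields match the code added below), the
consuming loop looks like:

	static blk_status_t driver_map(struct request *req,
			struct device *dma_dev, struct dma_iova_state *state)
	{
		struct blk_dma_iter iter;

		if (!blk_rq_dma_map_iter_start(req, dma_dev, state, &iter))
			return iter.status;	/* error, or nothing to map */

		do {
			/* hand iter.addr / iter.len to the hardware */
			driver_add_segment(req, iter.addr, iter.len);
		} while (blk_rq_dma_map_iter_next(req, dma_dev, state,
				&iter));

		/* BLK_STS_OK unless the iterator stopped on an error */
		return iter.status;
	}

When blk_rq_dma_map_coalesce() reports that the IOVA path collapsed the
request, the first iteration already covers the whole payload and the
loop ends on the next call.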
The major downside of this API is that the IOVA collapsing only works
when the driver sets a virt_boundary that matches the IOMMU granule.
Note that struct blk_dma_vec, struct blk_dma_mapping and blk_rq_dma_unmap
aren't really block specific, but for now they are kept with the only mapping
routine to keep things simple.
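The teardown side is symmetric; again a sketch under the same
assumptions, with the mapped_len bookkeeping left to the driver:

	static void driver_unmap(struct request *req, struct device *dma_dev,
			struct dma_iova_state *state, size_t mapped_len)
	{
		if (!blk_rq_dma_unmap(req, dma_dev, state, mapped_len)) {
			/*
			 * Direct mappings may still need per-segment
			 * teardown, e.g. dma_unmap_page() on each
			 * [dma_addr,len] pair saved while mapping.
			 */
		}
	}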
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
block/blk-merge.c | 163 +++++++++++++++++++++++++++++++++++++
include/linux/blk-mq-dma.h | 63 ++++++++++++++
2 files changed, 226 insertions(+)
create mode 100644 include/linux/blk-mq-dma.h
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d9691e900cc6..5e2e6db60fda 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -7,6 +7,8 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
+#include <linux/blk-mq-dma.h>
+#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>
@@ -535,6 +537,167 @@ static bool blk_map_iter_next(struct request *req,
return true;
}
+/*
+ * The IOVA-based DMA API wants to be able to coalesce at the minimal IOMMU page
+ * size granularity (which is guaranteed to be <= PAGE_SIZE and usually 4k), so
+ * we need to ensure our segments are aligned to this as well.
+ *
+ * Note that there is no point in using the slightly more complicated IOVA-based
+ * path for single segment mappings.
+ */
+static inline bool blk_can_dma_map_iova(struct request *req,
+ struct device *dma_dev)
+{
+ return !((queue_virt_boundary(req->q) + 1) &
+ dma_get_merge_boundary(dma_dev));
+}
+
+static bool blk_dma_map_bus(struct request *req, struct device *dma_dev,
+ struct blk_dma_iter *iter, struct phys_vec *vec)
+{
+ iter->addr = pci_p2pdma_bus_addr_map(&iter->p2pdma, vec->paddr);
+ iter->len = vec->len;
+ return true;
+}
+
+static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
+ struct blk_dma_iter *iter, struct phys_vec *vec)
+{
+ iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
+ offset_in_page(vec->paddr), vec->len, rq_dma_dir(req));
+ if (dma_mapping_error(dma_dev, iter->addr)) {
+ iter->status = BLK_STS_RESOURCE;
+ return false;
+ }
+ iter->len = vec->len;
+ return true;
+}
+
+static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter,
+ struct phys_vec *vec)
+{
+ enum dma_data_direction dir = rq_dma_dir(req);
+ unsigned int mapped = 0;
+ int error = 0;
+
+ iter->addr = state->addr;
+ iter->len = dma_iova_size(state);
+
+ do {
+ error = dma_iova_link(dma_dev, state, vec->paddr, mapped,
+ vec->len, dir, 0);
+ if (error)
+ break;
+ mapped += vec->len;
+ } while (blk_map_iter_next(req, &iter->iter, vec));
+
+ error = dma_iova_sync(dma_dev, state, 0, mapped);
+ if (error) {
+ iter->status = errno_to_blk_status(error);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * blk_rq_dma_map_iter_start - map the first DMA segment for a request
+ * @req: request to map
+ * @dma_dev: device to map to
+ * @state: DMA IOVA state
+ * @iter: block layer DMA iterator
+ *
+ * Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
+ * caller and don't need to be initialized. @state needs to be stored for use
+ * at unmap time; @iter is only needed at map time.
+ *
+ * Returns %false if there is no segment to map, including due to an error, or
+ * %true if it did map a segment.
+ *
+ * If a segment was mapped, the DMA address for it is returned in @iter.addr and
+ * the length in @iter.len. If no segment was mapped the status code is
+ * returned in @iter.status.
+ *
+ * The caller can call blk_rq_dma_map_coalesce() to check if further segments
+ * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
+ * to try to map the following segments.
+ */
+bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter)
+{
+ unsigned int total_len = blk_rq_payload_bytes(req);
+ struct phys_vec vec;
+
+ iter->iter.bio = req->bio;
+ iter->iter.iter = req->bio->bi_iter;
+ memset(&iter->p2pdma, 0, sizeof(iter->p2pdma));
+ iter->status = BLK_STS_OK;
+
+ /*
+ * Grab the first segment ASAP because we'll need it to check for P2P
+ * transfers.
+ */
+ if (!blk_map_iter_next(req, &iter->iter, &vec))
+ return false;
+
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA) && (req->cmd_flags & REQ_P2PDMA)) {
+ switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
+ phys_to_page(vec.paddr))) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ return blk_dma_map_bus(req, dma_dev, iter, &vec);
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * P2P transfers through the host bridge are treated the
+ * same as non-P2P transfers below and during unmap.
+ */
+ req->cmd_flags &= ~REQ_P2PDMA;
+ break;
+ default:
+ iter->status = BLK_STS_INVAL;
+ return false;
+ }
+ }
+
+ if (blk_can_dma_map_iova(req, dma_dev) &&
+ dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len))
+ return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec);
+ return blk_dma_map_direct(req, dma_dev, iter, &vec);
+}
+EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
+
+/**
+ * blk_rq_dma_map_iter_next - map the next DMA segment for a request
+ * @req: request to map
+ * @dma_dev: device to map to
+ * @state: DMA IOVA state
+ * @iter: block layer DMA iterator
+ *
+ * Iterate to the next mapping after a previous call to
+ * blk_rq_dma_map_iter_start(). See there for a detailed description of the
+ * arguments.
+ *
+ * Returns %false if there is no segment to map, including due to an error, or
+ * %true if it did map a segment.
+ *
+ * If a segment was mapped, the DMA address for it is returned in @iter.addr and
+ * the length in @iter.len. If no segment was mapped the status code is
+ * returned in @iter.status.
+ */
+bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter)
+{
+ struct phys_vec vec;
+
+ if (!blk_map_iter_next(req, &iter->iter, &vec))
+ return false;
+
+ if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
+ return blk_dma_map_bus(req, dma_dev, iter, &vec);
+ return blk_dma_map_direct(req, dma_dev, iter, &vec);
+}
+EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_next);
+
static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
struct scatterlist *sglist)
{
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
new file mode 100644
index 000000000000..6d85b4bedcba
--- /dev/null
+++ b/include/linux/blk-mq-dma.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BLK_MQ_DMA_H
+#define BLK_MQ_DMA_H
+
+#include <linux/blk-mq.h>
+#include <linux/pci-p2pdma.h>
+
+struct blk_dma_iter {
+ /* Output address range for this iteration */
+ dma_addr_t addr;
+ u32 len;
+
+ /* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
+ blk_status_t status;
+
+ /* Internal to blk_rq_dma_map_iter_* */
+ struct req_iterator iter;
+ struct pci_p2pdma_map_state p2pdma;
+};
+
+bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter);
+bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter);
+
+/**
+ * blk_rq_dma_map_coalesce - were all segments coalesced?
+ * @state: DMA state to check
+ *
+ * Returns true if blk_rq_dma_map_iter_start coalesced all segments into a
+ * single DMA range.
+ */
+static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
+{
+ return dma_use_iova(state);
+}
+
+/**
+ * blk_rq_dma_unmap - try to DMA unmap a request
+ * @req: request to unmap
+ * @dma_dev: device to unmap from
+ * @state: DMA IOVA state
+ * @mapped_len: number of bytes to unmap
+ *
+ * Returns %false if the caller needs to manually unmap every DMA segment
+ * mapped using @iter, or %true if no work is left to be done.
+ */
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len)
+{
+ if (req->cmd_flags & REQ_P2PDMA)
+ return true;
+
+ if (dma_use_iova(state)) {
+ dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
+ 0);
+ return true;
+ }
+
+ return !dma_need_unmap(dma_dev);
+}
+
+#endif /* BLK_MQ_DMA_H */
--
2.49.0