From: Leon Romanovsky <leon@kernel.org>
To: Jens Axboe <axboe@kernel.dk>, Jason Gunthorpe <jgg@ziepe.ca>,
Robin Murphy <robin.murphy@arm.com>,
Joerg Roedel <joro@8bytes.org>, Will Deacon <will@kernel.org>,
Keith Busch <kbusch@kernel.org>, Christoph Hellwig <hch@lst.de>,
"Zeng, Oak" <oak.zeng@intel.com>,
Chaitanya Kulkarni <kch@nvidia.com>
Cc: "Leon Romanovsky" <leonro@nvidia.com>,
"Sagi Grimberg" <sagi@grimberg.me>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Logan Gunthorpe" <logang@deltatee.com>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Shameer Kolothum" <shameerali.kolothum.thodi@huawei.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Marek Szyprowski" <m.szyprowski@samsung.com>,
"Jérôme Glisse" <jglisse@redhat.com>,
"Andrew Morton" <akpm@linux-foundation.org>,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-rdma@vger.kernel.org, iommu@lists.linux.dev,
linux-nvme@lists.infradead.org, linux-pci@vger.kernel.org,
kvm@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC v2 18/21] nvme-pci: remove optimizations for single DMA entry
Date: Thu, 12 Sep 2024 14:15:53 +0300 [thread overview]
Message-ID: <875d92e2c453649e9d95080f27e631f196270008.1726138681.git.leon@kernel.org> (raw)
In-Reply-To: <cover.1726138681.git.leon@kernel.org>
From: Leon Romanovsky <leonro@nvidia.com>
Future patches will remove SG table allocation from the NVMe PCI
code, which these single-DMA-entry fast paths were written to
optimize. As a preparation, let's remove them to unify the DMA
mapping code.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/nvme/host/pci.c | 69 -----------------------------------------
1 file changed, 69 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6cd9395ba9ec..a9a66f184138 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -233,7 +233,6 @@ struct nvme_iod {
bool aborted;
s8 nr_allocations; /* PRP list pool allocations. 0 means small
pool in use */
- unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t first_dma;
dma_addr_t meta_dma;
struct sg_table sgt;
@@ -541,12 +540,6 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- if (iod->dma_len) {
- dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
- rq_dma_dir(req));
- return;
- }
-
WARN_ON_ONCE(!iod->sgt.nents);
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
@@ -696,11 +689,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
/* setting the transfer type as SGL */
cmd->flags = NVME_CMD_SGL_METABUF;
- if (entries == 1) {
- nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
- return BLK_STS_OK;
- }
-
if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
pool = dev->prp_small_pool;
iod->nr_allocations = 0;
@@ -727,45 +715,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
return BLK_STS_OK;
}
-static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmnd,
- struct bio_vec *bv)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
- unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
-
- iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
- if (dma_mapping_error(dev->dev, iod->first_dma))
- return BLK_STS_RESOURCE;
- iod->dma_len = bv->bv_len;
-
- cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
- if (bv->bv_len > first_prp_len)
- cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
- else
- cmnd->dptr.prp2 = 0;
- return BLK_STS_OK;
-}
-
-static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmnd,
- struct bio_vec *bv)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
- iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
- if (dma_mapping_error(dev->dev, iod->first_dma))
- return BLK_STS_RESOURCE;
- iod->dma_len = bv->bv_len;
-
- cmnd->flags = NVME_CMD_SGL_METABUF;
- cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
- cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
- cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
- return BLK_STS_OK;
-}
-
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
@@ -773,24 +722,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
blk_status_t ret = BLK_STS_RESOURCE;
int rc;
- if (blk_rq_nr_phys_segments(req) == 1) {
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct bio_vec bv = req_bvec(req);
-
- if (!is_pci_p2pdma_page(bv.bv_page)) {
- if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
- bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
- return nvme_setup_prp_simple(dev, req,
- &cmnd->rw, &bv);
-
- if (nvmeq->qid && sgl_threshold &&
- nvme_ctrl_sgl_supported(&dev->ctrl))
- return nvme_setup_sgl_simple(dev, req,
- &cmnd->rw, &bv);
- }
- }
-
- iod->dma_len = 0;
iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
if (!iod->sgt.sgl)
return BLK_STS_RESOURCE;
--
2.46.0
next prev parent reply other threads:[~2024-09-12 11:17 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-12 11:15 [RFC v2 00/21] Provide a new two step DMA API mapping API Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 01/21] iommu/dma: Provide an interface to allow preallocate IOVA Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 02/21] iommu/dma: Implement link/unlink ranges callbacks Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 03/21] iommu/dma: Add check if IOVA can be used Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 04/21] dma-mapping: initialize IOVA state struct Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 05/21] dma-mapping: provide an interface to allocate IOVA Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 06/21] dma-mapping: set and query DMA IOVA state Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 07/21] dma-mapping: implement link range API Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 08/21] mm/hmm: let users to tag specific PFN with DMA mapped bit Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 09/21] dma-mapping: provide callbacks to link/unlink HMM PFNs to specific IOVA Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 10/21] RDMA/umem: Preallocate and cache IOVA for UMEM ODP Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 11/21] RDMA/umem: Store ODP access mask information in PFN Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 12/21] RDMA/core: Separate DMA mapping to caching IOVA and page linkage Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 13/21] RDMA/umem: Prevent UMEM ODP creation with SWIOTLB Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 14/21] vfio/mlx5: Explicitly use number of pages instead of allocated length Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 15/21] vfio/mlx5: Rewrite create mkey flow to allow better code reuse Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 16/21] vfio/mlx5: Explicitly store page list Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 17/21] vfio/mlx5: Convert vfio to use DMA link API Leon Romanovsky
2024-09-12 11:15 ` Leon Romanovsky [this message]
2024-09-12 11:15 ` [RFC v2 19/21] nvme-pci: precalculate number of DMA entries for each command Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 20/21] nvme-pci: use new dma API Leon Romanovsky
2024-09-12 11:15 ` [RFC v2 21/21] nvme-pci: don't allow mapping of bvecs with offset Leon Romanovsky
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=875d92e2c453649e9d95080f27e631f196270008.1726138681.git.leon@kernel.org \
--to=leon@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=alex.williamson@redhat.com \
--cc=axboe@kernel.dk \
--cc=bhelgaas@google.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=jglisse@redhat.com \
--cc=joro@8bytes.org \
--cc=kbusch@kernel.org \
--cc=kch@nvidia.com \
--cc=kevin.tian@intel.com \
--cc=kvm@vger.kernel.org \
--cc=leonro@nvidia.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvme@lists.infradead.org \
--cc=linux-pci@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=logang@deltatee.com \
--cc=m.szyprowski@samsung.com \
--cc=oak.zeng@intel.com \
--cc=robin.murphy@arm.com \
--cc=sagi@grimberg.me \
--cc=shameerali.kolothum.thodi@huawei.com \
--cc=will@kernel.org \
--cc=yishaih@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox