From: Leon Romanovsky <leon@kernel.org>
To: Jens Axboe <axboe@kernel.dk>, Jason Gunthorpe <jgg@ziepe.ca>,
	Robin Murphy <robin.murphy@arm.com>,
	Joerg Roedel <joro@8bytes.org>, Will Deacon <will@kernel.org>,
	Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>
Cc: "Leon Romanovsky" <leonro@nvidia.com>,
	"Keith Busch" <kbusch@kernel.org>,
	"Bjorn Helgaas" <bhelgaas@google.com>,
	"Logan Gunthorpe" <logang@deltatee.com>,
	"Yishai Hadas" <yishaih@nvidia.com>,
	"Shameer Kolothum" <shameerali.kolothum.thodi@huawei.com>,
	"Kevin Tian" <kevin.tian@intel.com>,
	"Alex Williamson" <alex.williamson@redhat.com>,
	"Marek Szyprowski" <m.szyprowski@samsung.com>,
	"Jérôme Glisse" <jglisse@redhat.com>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Jonathan Corbet" <corbet@lwn.net>,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-block@vger.kernel.org, linux-rdma@vger.kernel.org,
	iommu@lists.linux.dev, linux-nvme@lists.infradead.org,
	linux-pci@vger.kernel.org, kvm@vger.kernel.org,
	linux-mm@kvack.org
Subject: [PATCH v1 14/17] RDMA/umem: Separate implicit ODP initialization from explicit ODP
Date: Wed, 30 Oct 2024 17:13:00 +0200
Message-ID: <23253254adaf3a1e6953de0c5852ac4598b456bd.1730298502.git.leon@kernel.org>
In-Reply-To: <cover.1730298502.git.leon@kernel.org>

From: Leon Romanovsky <leonro@nvidia.com>

Implicit ODP initialization differs from explicit ODP initialization,
so split it out into a separate helper function, and likewise move the
explicit-only teardown out of ib_umem_odp_release() into its own helper.
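
The resulting call paths, as a rough sketch (not part of the diff below;
ib_umem_odp_get() is included only for context, on the assumption that it
remains the explicit-ODP caller of ib_init_umem_odp()):

	/* implicit (parent) ODP: no VA range, no notifier, no DMA map */
	ib_umem_odp_alloc_implicit()
		ib_init_umem_implicit_odp(umem_odp);	/* cannot fail */

	/* explicit ODP: range-backed, allocates the hmm_dma map and
	 * inserts the mmu interval notifier */
	ib_umem_odp_get()
		ib_init_umem_odp(umem_odp, ops);

	/* notifier/DMA-map teardown now only runs for explicit ODP */
	ib_umem_odp_release()
		if (!umem_odp->is_implicit_odp)
			ib_umem_odp_free(umem_odp);
		put_pid(umem_odp->tgid);
		kfree(umem_odp);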

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/infiniband/core/umem_odp.c | 91 +++++++++++++++---------------
 1 file changed, 46 insertions(+), 45 deletions(-)

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 30cd8f353476..51d518989914 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -48,41 +48,44 @@
 
 #include "uverbs.h"
 
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
-				   const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
+{
+	umem_odp->is_implicit_odp = 1;
+	umem_odp->umem.is_odp = 1;
+	mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+			    const struct mmu_interval_notifier_ops *ops)
 {
 	struct ib_device *dev = umem_odp->umem.ibdev;
+	size_t page_size = 1UL << umem_odp->page_shift;
+	unsigned long start;
+	unsigned long end;
 	int ret;
 
 	umem_odp->umem.is_odp = 1;
 	mutex_init(&umem_odp->umem_mutex);
 
-	if (!umem_odp->is_implicit_odp) {
-		size_t page_size = 1UL << umem_odp->page_shift;
-		unsigned long start;
-		unsigned long end;
-
-		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
-		if (check_add_overflow(umem_odp->umem.address,
-				       (unsigned long)umem_odp->umem.length,
-				       &end))
-			return -EOVERFLOW;
-		end = ALIGN(end, page_size);
-		if (unlikely(end < page_size))
-			return -EOVERFLOW;
-
-		ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
-					(end - start) >> PAGE_SHIFT,
-					1 << umem_odp->page_shift);
-		if (ret)
-			return ret;
-
-		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
-						   umem_odp->umem.owning_mm,
-						   start, end - start, ops);
-		if (ret)
-			goto out_free_map;
-	}
+	start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+	if (check_add_overflow(umem_odp->umem.address,
+			       (unsigned long)umem_odp->umem.length, &end))
+		return -EOVERFLOW;
+	end = ALIGN(end, page_size);
+	if (unlikely(end < page_size))
+		return -EOVERFLOW;
+
+	ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
+				(end - start) >> PAGE_SHIFT,
+				1 << umem_odp->page_shift);
+	if (ret)
+		return ret;
+
+	ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+					   umem_odp->umem.owning_mm, start,
+					   end - start, ops);
+	if (ret)
+		goto out_free_map;
 
 	return 0;
 
@@ -106,7 +109,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
 {
 	struct ib_umem *umem;
 	struct ib_umem_odp *umem_odp;
-	int ret;
 
 	if (access & IB_ACCESS_HUGETLB)
 		return ERR_PTR(-EINVAL);
@@ -118,16 +120,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
 	umem->ibdev = device;
 	umem->writable = ib_access_writable(access);
 	umem->owning_mm = current->mm;
-	umem_odp->is_implicit_odp = 1;
 	umem_odp->page_shift = PAGE_SHIFT;
 
 	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
-	ret = ib_init_umem_odp(umem_odp, NULL);
-	if (ret) {
-		put_pid(umem_odp->tgid);
-		kfree(umem_odp);
-		return ERR_PTR(ret);
-	}
+	ib_init_umem_implicit_odp(umem_odp);
 	return umem_odp;
 }
 EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -248,7 +244,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_umem_odp_get);
 
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
 {
 	struct ib_device *dev = umem_odp->umem.ibdev;
 
@@ -258,14 +254,19 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
 	 * It is the driver's responsibility to ensure, before calling us,
 	 * that the hardware will not attempt to access the MR any more.
 	 */
-	if (!umem_odp->is_implicit_odp) {
-		mutex_lock(&umem_odp->umem_mutex);
-		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
-					    ib_umem_end(umem_odp));
-		mutex_unlock(&umem_odp->umem_mutex);
-		mmu_interval_notifier_remove(&umem_odp->notifier);
-		hmm_dma_map_free(dev->dma_device, &umem_odp->map);
-	}
+	mutex_lock(&umem_odp->umem_mutex);
+	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+				    ib_umem_end(umem_odp));
+	mutex_unlock(&umem_odp->umem_mutex);
+	mmu_interval_notifier_remove(&umem_odp->notifier);
+	hmm_dma_map_free(dev->dma_device, &umem_odp->map);
+}
+
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+{
+	if (!umem_odp->is_implicit_odp)
+		ib_umem_odp_free(umem_odp);
+
 	put_pid(umem_odp->tgid);
 	kfree(umem_odp);
 }
-- 
2.46.2




Thread overview: 63+ messages
2024-10-30 15:12 [PATCH v1 00/17] Provide a new two step DMA mapping API Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 01/17] PCI/P2PDMA: Refactor the p2pdma mapping helpers Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 02/17] dma-mapping: move the PCI P2PDMA mapping helpers to pci-p2pdma.h Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 03/17] iommu: generalize the batched sync after map interface Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 04/17] dma-mapping: Add check if IOVA can be used Leon Romanovsky
2024-11-10 15:09   ` Zhu Yanjun
2024-11-10 15:19     ` Leon Romanovsky
2024-11-11  6:39     ` Christoph Hellwig
2024-11-11  7:19       ` Greg Sword
2024-10-30 15:12 ` [PATCH v1 05/17] dma: Provide an interface to allow allocate IOVA Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 06/17] iommu/dma: Factor out a iommu_dma_map_swiotlb helper Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 07/17] dma-mapping: Implement link/unlink ranges API Leon Romanovsky
2024-10-31 21:18   ` Robin Murphy
2024-11-04  9:10     ` Christoph Hellwig
2024-11-04 12:19       ` Jason Gunthorpe
2024-11-04 12:53         ` Christoph Hellwig
2024-11-07 14:50           ` Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 08/17] dma-mapping: add a dma_need_unmap helper Leon Romanovsky
2024-10-31 21:18   ` Robin Murphy
2024-11-01 11:06     ` Leon Romanovsky
2024-11-04  9:15     ` Christoph Hellwig
2024-10-30 15:12 ` [PATCH v1 09/17] docs: core-api: document the IOVA-based API Leon Romanovsky
2024-10-31  1:41   ` Randy Dunlap
2024-10-31  7:59     ` Leon Romanovsky
2024-11-08 19:34   ` Jonathan Corbet
2024-11-08 20:03     ` Leon Romanovsky
2024-11-08 20:13       ` Jonathan Corbet
2024-11-08 20:27         ` Leon Romanovsky
2024-11-10 10:41           ` Leon Romanovsky
2024-11-11  6:38             ` Christoph Hellwig
2024-11-11  6:43               ` anish kumar
2024-11-11 14:59                 ` Jonathan Corbet
2024-10-30 15:12 ` [PATCH v1 10/17] mm/hmm: let users to tag specific PFN with DMA mapped bit Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 11/17] mm/hmm: provide generic DMA managing logic Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 12/17] RDMA/umem: Store ODP access mask information in PFN Leon Romanovsky
2024-10-30 15:12 ` [PATCH v1 13/17] RDMA/core: Convert UMEM ODP DMA mapping to caching IOVA and page linkage Leon Romanovsky
2024-10-30 15:13 ` Leon Romanovsky [this message]
2024-10-30 15:13 ` [PATCH v1 15/17] vfio/mlx5: Explicitly use number of pages instead of allocated length Leon Romanovsky
2024-10-30 15:13 ` [PATCH v1 16/17] vfio/mlx5: Rewrite create mkey flow to allow better code reuse Leon Romanovsky
2024-10-30 15:13 ` [PATCH v1 17/17] vfio/mlx5: Convert vfio to use DMA link API Leon Romanovsky
2024-10-31  1:44 ` [PATCH v1 00/17] Provide a new two step DMA mapping API Jens Axboe
2024-10-31  8:34   ` Christoph Hellwig
2024-10-31  9:05     ` Leon Romanovsky
2024-10-31  9:21       ` Christoph Hellwig
2024-10-31  9:37         ` Leon Romanovsky
2024-10-31 17:43           ` Jens Axboe
2024-10-31 20:43             ` Leon Romanovsky
2024-10-31 17:42     ` Jens Axboe
2024-10-31 21:17 ` Robin Murphy
2024-11-04  9:58   ` Christoph Hellwig
2024-11-04 11:39     ` Leon Romanovsky
2024-11-05 19:53     ` Jason Gunthorpe
2024-11-07  8:32       ` Christoph Hellwig
2024-11-07 13:28         ` Jason Gunthorpe
2024-11-07 13:50           ` Christoph Hellwig
2024-11-08 15:02             ` Jason Gunthorpe
2024-11-08 15:05               ` Christoph Hellwig
2024-11-08 15:25                 ` Jason Gunthorpe
2024-11-08 15:29                   ` Christoph Hellwig
2024-11-08 15:38                     ` Jason Gunthorpe
2024-11-12  6:01                       ` Christoph Hellwig
2024-11-13 18:41                         ` Jason Gunthorpe
2024-11-05 18:51 ` Jason Gunthorpe
