From: Leon Romanovsky <leon@kernel.org>
To: Jens Axboe <axboe@kernel.dk>, Jason Gunthorpe <jgg@ziepe.ca>,
	Robin Murphy <robin.murphy@arm.com>,
	Joerg Roedel <joro@8bytes.org>, Will Deacon <will@kernel.org>,
	Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>
Cc: "Leon Romanovsky" <leonro@nvidia.com>,
	"Keith Busch" <kbusch@kernel.org>,
	"Bjorn Helgaas" <bhelgaas@google.com>,
	"Logan Gunthorpe" <logang@deltatee.com>,
	"Yishai Hadas" <yishaih@nvidia.com>,
	"Shameer Kolothum" <shameerali.kolothum.thodi@huawei.com>,
	"Kevin Tian" <kevin.tian@intel.com>,
	"Alex Williamson" <alex.williamson@redhat.com>,
	"Marek Szyprowski" <m.szyprowski@samsung.com>,
	"Jérôme Glisse" <jglisse@redhat.com>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Jonathan Corbet" <corbet@lwn.net>,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-block@vger.kernel.org, linux-rdma@vger.kernel.org,
	iommu@lists.linux.dev, linux-nvme@lists.infradead.org,
	linux-pci@vger.kernel.org, kvm@vger.kernel.org,
	linux-mm@kvack.org
Subject: [PATCH v1 10/17] mm/hmm: let users tag specific PFNs with a DMA-mapped bit
Date: Wed, 30 Oct 2024 17:12:56 +0200
Message-ID: <6ae52934e3f7652d8ae1630f9c16f2c8a15215ff.1730298502.git.leon@kernel.org>
In-Reply-To: <cover.1730298502.git.leon@kernel.org>

From: Leon Romanovsky <leonro@nvidia.com>

Introduce a new sticky flag (HMM_PFN_DMA_MAPPED) that hmm_range_fault()
does not overwrite. The flag lets users tag specific PFNs so they can
tell whether a given PFN has already been DMA mapped.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
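Note for reviewers: the hmm core only preserves the bit, it never
clears it, so the owner of the DMA mapping is responsible for clearing
it on unmap. A hypothetical counterpart to the sketch above (names are
illustrative, not part of this patch):

static void unmap_range(struct device *dev, struct hmm_range *range,
			dma_addr_t *dma_addrs)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		if (!(range->hmm_pfns[i] & HMM_PFN_DMA_MAPPED))
			continue;
		dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		/* Clear the sticky bit so a later pass maps again */
		range->hmm_pfns[i] &= ~HMM_PFN_DMA_MAPPED;
	}
}
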
 include/linux/hmm.h | 14 ++++++++++++++
 mm/hmm.c            | 34 +++++++++++++++++++++-------------
 2 files changed, 35 insertions(+), 13 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 126a36571667..5dd655f6766b 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -23,6 +23,8 @@ struct mmu_interval_notifier;
  * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
  * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
  *                 fail. ie poisoned memory, special pages, no vma, etc
+ * HMM_PFN_DMA_MAPPED - Flag preserved on input-to-output transformation
+ *                      to mark that the page is already DMA mapped
  *
  * On input:
  * 0                 - Return the current state of the page, do not fault it.
@@ -36,6 +38,10 @@ enum hmm_pfn_flags {
 	HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
 	HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
 	HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+
+	/* Sticky flag, carried from Input to Output */
+	HMM_PFN_DMA_MAPPED = 1UL << (BITS_PER_LONG - 7),
+
 	HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),
 
 	/* Input flags */
@@ -57,6 +63,14 @@ static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
 	return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
 }
 
+/*
+ * hmm_pfn_to_phys() - return physical address pointed to by a device entry
+ */
+static inline phys_addr_t hmm_pfn_to_phys(unsigned long hmm_pfn)
+{
+	return __pfn_to_phys(hmm_pfn & ~HMM_PFN_FLAGS);
+}
+
 /*
  * hmm_pfn_to_map_order() - return the CPU mapping size order
  *
diff --git a/mm/hmm.c b/mm/hmm.c
index 7e0229ae4a5a..2a0c34d7cb2b 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -44,8 +44,10 @@ static int hmm_pfns_fill(unsigned long addr, unsigned long end,
 {
 	unsigned long i = (addr - range->start) >> PAGE_SHIFT;
 
-	for (; addr < end; addr += PAGE_SIZE, i++)
-		range->hmm_pfns[i] = cpu_flags;
+	for (; addr < end; addr += PAGE_SIZE, i++) {
+		range->hmm_pfns[i] &= HMM_PFN_DMA_MAPPED;
+		range->hmm_pfns[i] |= cpu_flags;
+	}
 	return 0;
 }
 
@@ -202,8 +204,10 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 		return hmm_vma_fault(addr, end, required_fault, walk);
 
 	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
-		hmm_pfns[i] = pfn | cpu_flags;
+	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
+		hmm_pfns[i] &= HMM_PFN_DMA_MAPPED;
+		hmm_pfns[i] |= pfn | cpu_flags;
+	}
 	return 0;
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -236,7 +240,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
 		if (required_fault)
 			goto fault;
-		*hmm_pfn = 0;
+		*hmm_pfn = *hmm_pfn & HMM_PFN_DMA_MAPPED;
 		return 0;
 	}
 
@@ -253,14 +257,14 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			cpu_flags = HMM_PFN_VALID;
 			if (is_writable_device_private_entry(entry))
 				cpu_flags |= HMM_PFN_WRITE;
-			*hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
+			*hmm_pfn = (*hmm_pfn & HMM_PFN_DMA_MAPPED) | swp_offset_pfn(entry) | cpu_flags;
 			return 0;
 		}
 
 		required_fault =
 			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
 		if (!required_fault) {
-			*hmm_pfn = 0;
+			*hmm_pfn = *hmm_pfn & HMM_PFN_DMA_MAPPED;
 			return 0;
 		}
 
@@ -304,11 +308,11 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			pte_unmap(ptep);
 			return -EFAULT;
 		}
-		*hmm_pfn = HMM_PFN_ERROR;
+		*hmm_pfn = (*hmm_pfn & HMM_PFN_DMA_MAPPED) | HMM_PFN_ERROR;
 		return 0;
 	}
 
-	*hmm_pfn = pte_pfn(pte) | cpu_flags;
+	*hmm_pfn = (*hmm_pfn & HMM_PFN_DMA_MAPPED) | pte_pfn(pte) | cpu_flags;
 	return 0;
 
 fault:
@@ -448,8 +452,10 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 		}
 
 		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-		for (i = 0; i < npages; ++i, ++pfn)
-			hmm_pfns[i] = pfn | cpu_flags;
+		for (i = 0; i < npages; ++i, ++pfn) {
+			hmm_pfns[i] &= HMM_PFN_DMA_MAPPED;
+			hmm_pfns[i] |= pfn | cpu_flags;
+		}
 		goto out_unlock;
 	}
 
@@ -507,8 +513,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	}
 
 	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
-	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
-		range->hmm_pfns[i] = pfn | cpu_flags;
+	for (; addr < end; addr += PAGE_SIZE, i++, pfn++) {
+		range->hmm_pfns[i] &= HMM_PFN_DMA_MAPPED;
+		range->hmm_pfns[i] |= pfn | cpu_flags;
+	}
 
 	spin_unlock(ptl);
 	return 0;
-- 
2.46.2


