From: Jisheng Zhang <jszhang@kernel.org>
To: Catalin Marinas <catalin.marinas@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>,
	Christoph Hellwig <hch@lst.de>,
	Robin Murphy <robin.murphy@arm.com>,
	Arnd Bergmann <arnd@arndb.de>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	Ard Biesheuvel <ardb@kernel.org>,
	Isaac Manjarres <isaacmanjarres@google.com>,
	Saravana Kannan <saravanak@google.com>,
	Alasdair Kergon <agk@redhat.com>, Daniel Vetter <daniel@ffwll.ch>,
	Joerg Roedel <joro@8bytes.org>, Mark Brown <broonie@kernel.org>,
	Mike Snitzer <snitzer@kernel.org>,
	"Rafael J. Wysocki" <rafael@kernel.org>,
	linux-mm@kvack.org, iommu@lists.linux.dev,
	linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH v5 13/15] iommu/dma: Force bouncing if the size is not cacheline-aligned
Date: Sat, 27 May 2023 00:36:30 +0800	[thread overview]
Message-ID: <ZHDgDrQL+WPDsxxh@xhacker> (raw)
In-Reply-To: <20230524171904.3967031-14-catalin.marinas@arm.com>

On Wed, May 24, 2023 at 06:19:02PM +0100, Catalin Marinas wrote:
> Similarly to the direct DMA, bounce small allocations as they may have
> originated from a kmalloc() cache not safe for DMA. Unlike the direct
> DMA, iommu_dma_map_sg() cannot call iommu_dma_map_sg_swiotlb() for all
> non-coherent devices as this would break some cases where the iova is
> expected to be contiguous (dmabuf). Instead, scan the scatterlist for
> any small sizes and only go the swiotlb path if any element of the list
> needs bouncing (note that iommu_dma_map_page() would still only bounce
> those buffers which are not DMA-aligned).
> 
> To avoid scanning the scatterlist on the 'sync' operations, introduce an
> SG_DMA_USE_SWIOTLB flag set by iommu_dma_map_sg_swiotlb(). The
> dev_use_swiotlb() function together with the newly added
> dev_use_sg_swiotlb() now check for both untrusted devices and unaligned
> kmalloc() buffers (suggested by Robin Murphy).
> 
> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Joerg Roedel <joro@8bytes.org>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Robin Murphy <robin.murphy@arm.com>
> ---
>  drivers/iommu/Kconfig       |  1 +
>  drivers/iommu/dma-iommu.c   | 50 ++++++++++++++++++++++++++++++-------
>  include/linux/scatterlist.h | 25 +++++++++++++++++--
>  3 files changed, 65 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index db98c3f86e8c..670eff7a8e11 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -152,6 +152,7 @@ config IOMMU_DMA
>  	select IOMMU_IOVA
>  	select IRQ_MSI_IOMMU
>  	select NEED_SG_DMA_LENGTH
> +	select NEED_SG_DMA_FLAGS if SWIOTLB
>  
>  # Shared Virtual Addressing
>  config IOMMU_SVA
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 7a9f0b0bddbd..24a8b8c2368c 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -520,9 +520,38 @@ static bool dev_is_untrusted(struct device *dev)
>  	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
>  }
>  
> -static bool dev_use_swiotlb(struct device *dev)
> +static bool dev_use_swiotlb(struct device *dev, size_t size,
> +			    enum dma_data_direction dir)
>  {
> -	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
> +	return IS_ENABLED(CONFIG_SWIOTLB) &&
> +		(dev_is_untrusted(dev) ||
> +		 dma_kmalloc_needs_bounce(dev, size, dir));
> +}
> +
> +static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
> +			       int nents, enum dma_data_direction dir)
> +{
> +	struct scatterlist *s;
> +	int i;
> +
> +	if (!IS_ENABLED(CONFIG_SWIOTLB))
> +		return false;
> +
> +	if (dev_is_untrusted(dev))
> +		return true;
> +
> +	/*
> +	 * If kmalloc() buffers are not DMA-safe for this device and
> +	 * direction, check the individual lengths in the sg list. If any
> +	 * element is deemed unsafe, use the swiotlb for bouncing.
> +	 */
> +	if (!dma_kmalloc_safe(dev, dir)) {
> +		for_each_sg(sg, s, nents, i)
> +			if (!dma_kmalloc_size_aligned(s->length))
> +				return true;
> +	}
> +
> +	return false;
>  }
>  
>  /**
> @@ -922,7 +951,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
>  {
>  	phys_addr_t phys;
>  
> -	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
> +	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
>  		return;
>  
>  	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
> @@ -938,7 +967,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
>  {
>  	phys_addr_t phys;
>  
> -	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
> +	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
>  		return;
>  
>  	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
> @@ -956,7 +985,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
>  	struct scatterlist *sg;
>  	int i;
>  
> -	if (dev_use_swiotlb(dev))
> +	if (sg_is_dma_use_swiotlb(sgl))
>  		for_each_sg(sgl, sg, nelems, i)
>  			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
>  						      sg->length, dir);
> @@ -972,7 +1001,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
>  	struct scatterlist *sg;
>  	int i;
>  
> -	if (dev_use_swiotlb(dev))
> +	if (sg_is_dma_use_swiotlb(sgl))
>  		for_each_sg(sgl, sg, nelems, i)
>  			iommu_dma_sync_single_for_device(dev,
>  							 sg_dma_address(sg),
> @@ -998,7 +1027,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
>  	 * If both the physical buffer start address and size are
>  	 * page aligned, we don't need to use a bounce page.
>  	 */
> -	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
> +	if (dev_use_swiotlb(dev, size, dir) &&
> +	    iova_offset(iovad, phys | size)) {
>  		void *padding_start;
>  		size_t padding_size, aligned_size;
>  
> @@ -1166,6 +1196,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
>  	struct scatterlist *s;
>  	int i;
>  
> +	sg_dma_mark_use_swiotlb(sg);
> +
>  	for_each_sg(sg, s, nents, i) {
>  		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
>  				s->offset, s->length, dir, attrs);
> @@ -1210,7 +1242,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>  			goto out;
>  	}
>  
> -	if (dev_use_swiotlb(dev))
> +	if (dev_use_sg_swiotlb(dev, sg, nents, dir))
>  		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
>  
>  	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> @@ -1315,7 +1347,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
>  	struct scatterlist *tmp;
>  	int i;
>  
> -	if (dev_use_swiotlb(dev)) {
> +	if (sg_is_dma_use_swiotlb(sg)) {
>  		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
>  		return;
>  	}
> diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
> index 87aaf8b5cdb4..330a157c5501 100644
> --- a/include/linux/scatterlist.h
> +++ b/include/linux/scatterlist.h
> @@ -248,6 +248,29 @@ static inline void sg_unmark_end(struct scatterlist *sg)
>  	sg->page_link &= ~SG_END;
>  }
>  
> +#define SG_DMA_BUS_ADDRESS	(1 << 0)
> +#define SG_DMA_USE_SWIOTLB	(1 << 1)
> +
> +#ifdef CONFIG_SWIOTLB

s/CONFIG_SWIOTLB/CONFIG_NEED_SG_DMA_FLAGS/ ?
Otherwise there's a compile error if SWIOTLB=y but IOMMU=n, since
sg->dma_flags only exists when NEED_SG_DMA_FLAGS is selected.
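
Maybe something along these lines (an untested sketch on top of this
patch, only changing which config symbol guards the helpers so that it
matches the field they touch):

#ifdef CONFIG_NEED_SG_DMA_FLAGS
static inline bool sg_is_dma_use_swiotlb(struct scatterlist *sg)
{
	/* dma_flags is only present when NEED_SG_DMA_FLAGS is selected */
	return sg->dma_flags & SG_DMA_USE_SWIOTLB;
}

static inline void sg_dma_mark_use_swiotlb(struct scatterlist *sg)
{
	sg->dma_flags |= SG_DMA_USE_SWIOTLB;
}
#else
/* No dma_flags field in struct scatterlist, so never report bouncing */
static inline bool sg_is_dma_use_swiotlb(struct scatterlist *sg)
{
	return false;
}
static inline void sg_dma_mark_use_swiotlb(struct scatterlist *sg)
{
}
#endif

That way the stubs cover any configuration where dma_flags is not in
struct scatterlist, instead of being tied to SWIOTLB itself.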

Thanks

> +static inline bool sg_is_dma_use_swiotlb(struct scatterlist *sg)
> +{
> +	return sg->dma_flags & SG_DMA_USE_SWIOTLB;
> +}
> +
> +static inline void sg_dma_mark_use_swiotlb(struct scatterlist *sg)
> +{
> +	sg->dma_flags |= SG_DMA_USE_SWIOTLB;
> +}
> +#else
> +static inline bool sg_is_dma_use_swiotlb(struct scatterlist *sg)
> +{
> +	return false;
> +}
> +static inline void sg_dma_mark_use_swiotlb(struct scatterlist *sg)
> +{
> +}
> +#endif
> +
>  /*
>   * CONFIG_PCI_P2PDMA depends on CONFIG_64BIT which means there is 4 bytes
>   * in struct scatterlist (assuming also CONFIG_NEED_SG_DMA_LENGTH is set).
> @@ -256,8 +279,6 @@ static inline void sg_unmark_end(struct scatterlist *sg)
>   */
>  #ifdef CONFIG_PCI_P2PDMA
>  
> -#define SG_DMA_BUS_ADDRESS (1 << 0)
> -
>  /**
>   * sg_dma_is_bus address - Return whether a given segment was marked
>   *			   as a bus address
> 


Thread overview: 31+ messages
2023-05-24 17:18 [PATCH v5 00/15] mm, dma, arm64: Reduce ARCH_KMALLOC_MINALIGN to 8 Catalin Marinas
2023-05-24 17:18 ` [PATCH v5 01/15] mm/slab: Decouple ARCH_KMALLOC_MINALIGN from ARCH_DMA_MINALIGN Catalin Marinas
2023-05-24 17:18 ` [PATCH v5 02/15] dma: Allow dma_get_cache_alignment() to be overridden by the arch code Catalin Marinas
2023-05-25 13:59   ` Christoph Hellwig
2023-05-24 17:18 ` [PATCH v5 03/15] mm/slab: Simplify create_kmalloc_cache() args and make it static Catalin Marinas
2023-05-24 17:18 ` [PATCH v5 04/15] mm/slab: Limit kmalloc() minimum alignment to dma_get_cache_alignment() Catalin Marinas
2023-05-24 17:18 ` [PATCH v5 05/15] drivers/base: Use ARCH_DMA_MINALIGN instead of ARCH_KMALLOC_MINALIGN Catalin Marinas
2023-05-24 17:18 ` [PATCH v5 06/15] drivers/gpu: " Catalin Marinas
2023-05-24 17:18 ` [PATCH v5 07/15] drivers/usb: " Catalin Marinas
2023-05-24 17:18 ` [PATCH v5 08/15] drivers/spi: " Catalin Marinas
2023-05-24 17:18 ` [PATCH v5 09/15] drivers/md: " Catalin Marinas
2023-05-25 14:00   ` Christoph Hellwig
2023-05-24 17:18 ` [PATCH v5 10/15] arm64: Allow kmalloc() caches aligned to the smaller cache_line_size() Catalin Marinas
2023-05-24 17:19 ` [PATCH v5 11/15] scatterlist: Add dedicated config for DMA flags Catalin Marinas
2023-05-24 17:19 ` [PATCH v5 12/15] dma-mapping: Force bouncing if the kmalloc() size is not cache-line-aligned Catalin Marinas
2023-05-25 15:53   ` Robin Murphy
2023-05-24 17:19 ` [PATCH v5 13/15] iommu/dma: Force bouncing if the size is not cacheline-aligned Catalin Marinas
2023-05-25 15:57   ` Robin Murphy
2023-05-26 16:36   ` Jisheng Zhang [this message]
2023-05-26 19:22     ` Catalin Marinas
2023-05-30 13:01       ` Robin Murphy
2023-05-24 17:19 ` [PATCH v5 14/15] mm: slab: Reduce the kmalloc() minimum alignment if DMA bouncing possible Catalin Marinas
2023-05-24 17:19 ` [PATCH v5 15/15] arm64: Enable ARCH_WANT_KMALLOC_DMA_BOUNCE for arm64 Catalin Marinas
2023-05-25 16:12   ` Robin Murphy
2023-05-25 17:08     ` Catalin Marinas
2023-05-25 12:31 ` [PATCH v5 00/15] mm, dma, arm64: Reduce ARCH_KMALLOC_MINALIGN to 8 Jonathan Cameron
2023-05-25 14:31   ` Catalin Marinas
2023-05-26 16:07     ` Jonathan Cameron
2023-05-26 16:29       ` Jonathan Cameron
2023-05-30 13:38         ` Catalin Marinas
2023-05-30 16:31           ` Jonathan Cameron
