From: Marek Szyprowski <m.szyprowski@samsung.com>
To: linux-arm-kernel@lists.infradead.org,
linaro-mm-sig@lists.linaro.org, linux-mm@kvack.org,
linux-arch@vger.kernel.org, linux-samsung-soc@vger.kernel.org,
iommu@lists.linux-foundation.org
Cc: Marek Szyprowski <m.szyprowski@samsung.com>,
Kyungmin Park <kyungmin.park@samsung.com>,
Arnd Bergmann <arnd@arndb.de>, Joerg Roedel <joro@8bytes.org>,
Russell King - ARM Linux <linux@arm.linux.org.uk>,
Shariq Hasnain <shariq.hasnain@linaro.org>,
Chunsang Jeong <chunsang.jeong@linaro.org>,
Krishna Reddy <vdumpa@nvidia.com>,
KyongHo Cho <pullip.cho@samsung.com>,
Andrzej Pietrasiewicz <andrzej.p@samsung.com>,
Benjamin Herrenschmidt <benh@kernel.crashing.org>
Subject: [PATCH 5/8] ARM: dma-mapping: remove redundant code and cleanup
Date: Fri, 09 Dec 2011 17:39:55 +0100 [thread overview]
Message-ID: <1323448798-18184-6-git-send-email-m.szyprowski@samsung.com> (raw)
In-Reply-To: <1323448798-18184-1-git-send-email-m.szyprowski@samsung.com>
This patch performs a global cleanup of the DMA mapping implementation
for the ARM architecture. Some of the tiny helper functions have been
moved into their callers, and others have been merged together.
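The shape of the change, condensed from the hunks below into a simplified
before/after sketch (not the literal source), is that the one-line wrappers
disappear and the arch_is_coherent() check moves into the dma_map_ops
callbacks themselves:

	/* before: two levels of trivial indirection */
	static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size, enum dma_data_direction dir)
	{
		__dma_page_cpu_to_dev(page, offset, size, dir);	/* checked arch_is_coherent() inside */
		return pfn_to_dma(dev, page_to_pfn(page)) + offset;
	}

	/* after: the dma_map_ops callback does the check and the cache maintenance directly */
	static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
	{
		if (!arch_is_coherent())
			__dma_page_cpu_to_dev(page, offset, size, dir);
		return pfn_to_dma(dev, page_to_pfn(page)) + offset;
	}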
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
---
arch/arm/mm/dma-mapping.c | 88 ++++++++++++--------------------------------
1 files changed, 24 insertions(+), 64 deletions(-)
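For context, the functions touched here sit behind the generic streaming DMA
API (dma_map_page()/dma_unmap_page()/dma_sync_single_*()). A minimal,
illustrative driver-side sketch of that API follows; everything named
"mydev_*" is hypothetical and not part of this patch:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* hypothetical device-specific helper, defined elsewhere in the driver */
	void mydev_start_tx(struct device *dev, dma_addr_t addr, size_t len);

	static int mydev_send_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t len)
	{
		dma_addr_t handle;

		/* hand the buffer to the device; on non-coherent ARM this ends
		 * up in arm_dma_map_page() -> __dma_page_cpu_to_dev() */
		handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		mydev_start_tx(dev, handle, len);

		/* give the buffer back to the CPU; on non-coherent ARM this is
		 * arm_dma_unmap_page().  For long-lived mappings that are
		 * reused, dma_sync_single_for_cpu()/_for_device() transfer
		 * ownership without tearing the mapping down. */
		dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}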
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5715e2e..7c0e68b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -40,64 +40,12 @@
* the CPU does do speculative prefetches, which means we clean caches
* before transfers and delay cache invalidation until transfer completion.
*
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
*/
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_cpu_to_dev(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_dev_to_cpu(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
size_t, enum dma_data_direction);
- if (!arch_is_coherent())
- ___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- __dma_page_cpu_to_dev(page, offset, size, dir);
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
- handle & ~PAGE_MASK, size, dir);
-}
-
/**
* dma_map_page - map a portion of a page for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -112,11 +60,13 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
* The device owns this memory once this call has completed. The CPU
* can regain ownership by calling dma_unmap_page().
*/
-static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- return __dma_map_page(dev, page, offset, size, dir);
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
@@ -134,27 +84,31 @@ static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
* whatever the device wrote there.
*/
-static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- __dma_unmap_page(dev, handle, size, dir);
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+ handle & ~PAGE_MASK, size, dir);
}
-static inline void arm_dma_sync_single_for_cpu(struct device *dev,
+static void arm_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
unsigned int offset = handle & (PAGE_SIZE - 1);
struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
- __dma_page_dev_to_cpu(page, offset, size, dir);
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
}
-static inline void arm_dma_sync_single_for_device(struct device *dev,
+static void arm_dma_sync_single_for_device(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
unsigned int offset = handle & (PAGE_SIZE - 1);
struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
- __dma_page_cpu_to_dev(page, offset, size, dir);
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
}
static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
@@ -642,7 +596,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
} while (left);
}
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
@@ -658,7 +618,7 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr = page_to_phys(page) + off;
--
1.7.1.569.g6f426
Thread overview: 18+ messages
2011-12-09 16:39 [PATCH 0/8 v4] ARM: DMA-mapping framework redesign Marek Szyprowski
2011-12-09 16:39 ` [PATCH 1/8] ARM: dma-mapping: remove offset parameter to prepare for generic dma_ops Marek Szyprowski
2011-12-09 16:39 ` [PATCH 2/8] ARM: dma-mapping: use asm-generic/dma-mapping-common.h Marek Szyprowski
2011-12-09 16:39 ` [PATCH 3/8] ARM: dma-mapping: implement dma sg methods on top of any generic dma ops Marek Szyprowski
2011-12-09 16:39 ` [PATCH 4/8] ARM: dma-mapping: move all dma bounce code to separate dma ops structure Marek Szyprowski
2011-12-09 16:39 ` Marek Szyprowski [this message]
2011-12-09 16:39 ` [PATCH 6/8] common: dma-mapping: change alloc/free_coherent method to more generic alloc/free_attrs Marek Szyprowski
2011-12-11 22:45 ` Stephen Rothwell
2011-12-14 12:37 ` Marek Szyprowski
2011-12-09 16:39 ` [PATCH 7/8] ARM: dma-mapping: use alloc, mmap, free from dma_ops Marek Szyprowski
2011-12-09 16:39 ` [PATCH 8/8] ARM: dma-mapping: add support for IOMMU mapper Marek Szyprowski
2012-01-09 15:49 ` [PATCH 8/8 RESEND] " Marek Szyprowski
2012-01-25 12:59 ` Russell King - ARM Linux
2012-01-26 8:09 ` Marek Szyprowski
2012-01-25 12:47 ` [PATCH 8/8] " Hiroshi Doyu
2012-01-26 7:46 ` Marek Szyprowski
2012-01-10 8:42 ` [PATCH 0/8 v4] ARM: DMA-mapping framework redesign Marek Szyprowski
-- strict thread matches above, loose matches on Subject: below --
2011-10-18 17:19 [PATCH 0/8 v3] " Marek Szyprowski
2011-10-18 17:19 ` [PATCH 5/8] ARM: dma-mapping: remove redundant code and cleanup Marek Szyprowski