From: Abhinav Kochhar <kochhar.abhinav@gmail.com>
To: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: linux-arm-kernel@lists.infradead.org,
	linaro-mm-sig@lists.linaro.org, linux-mm@kvack.org,
	Kyungmin Park <kyungmin.park@samsung.com>
Subject: [Linaro-mm-sig] [PATCH 3/3] [RFC] Kernel Virtual Memory allocation issue in dma-mapping framework
Date: Fri, 11 May 2012 11:02:02 +0900
Message-ID: <CALYq+qR15JYChs8LLuc4sFf1neXD3-1B949aefg=JXxtjGVuYQ@mail.gmail.com>
In-Reply-To: <CALYq+qQA9HAME8Fg5cLopCzkLLMB5G3V_UOni-eWp2UGrcMqNQ@mail.gmail.com>

With this change we can check at run time whether an allocation is meant for
kernel or user space, based on the DMA attribute passed to the dma-mapping
IOMMU ops.
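
For context, a caller would request a user-space-only allocation roughly as
follows. This is a hypothetical sketch: DMA_ATTR_USER_SPACE is the attribute
proposed by this series (not in mainline), and dev/size are placeholders.

	/*
	 * Hypothetical caller sketch: ask for a buffer that will only be
	 * mapped into user space, so no kernel virtual mapping is created.
	 * Assumes DMA_ATTR_USER_SPACE is defined earlier in this series.
	 */
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t dma_handle;
	void *ret;

	dma_set_attr(DMA_ATTR_USER_SPACE, &attrs);
	ret = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
	/*
	 * With the attribute set, ret is a struct page_infodma pointer,
	 * not a kernel virtual address usable for CPU access.
	 */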

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 2d11aa0..1f454cc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -428,6 +428,7 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
        arm_vmregion_free(&consistent_head, c);
 }
+
 #else  /* !CONFIG_MMU */
 #define __dma_alloc_remap(page, size, gfp, prot, c)    page_address(page)
@@ -894,6 +895,35 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
        size_t align;
        size_t count = size >> PAGE_SHIFT;
        int bit;
+       unsigned long mem_type = (unsigned long)gfp;
+
+
+       if (mem_type) {
+
+               struct page_infodma *pages_in;
+
+               pages_in = kzalloc(sizeof(struct page_infodma), GFP_KERNEL);
+               if (!pages_in)
+                       return NULL;
+
+               pages_in->nr_pages = count;
+
+               return (void *)pages_in;
+
+       }
+
+       /*
+        * Align the virtual region allocation - maximum alignment is
+        * a section size, minimum is a page size.  This helps reduce
+        * fragmentation of the DMA space, and also prevents allocations
+        * smaller than a section from crossing a section boundary.
+        */
+       bit = fls(size - 1);
+       if (bit > SECTION_SHIFT)
+               bit = SECTION_SHIFT;
+       align = 1 << bit;
+
        if (!consistent_pte[0]) {
                pr_err("%s: not initialised\n", __func__);
@@ -901,16 +931,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
                return NULL;
        }
-       /*
-        * Align the virtual region allocation - maximum alignment is
-        * a section size, minimum is a page size.  This helps reduce
-        * fragmentation of the DMA space, and also prevents allocations
-        * smaller than a section from crossing a section boundary.
-        */
-       bit = fls(size - 1);
-       if (bit > SECTION_SHIFT)
-               bit = SECTION_SHIFT;
-       align = 1 << bit;
        /*
         * Allocate a virtual address in the consistent mapping region.
@@ -946,6 +966,7 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
        return NULL;
 }
+
 /*
  * Create a mapping in device IO address space for specified pages
  */
@@ -973,13 +994,16 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
                len = (j - i) << PAGE_SHIFT;
                ret = iommu_map(mapping->domain, iova, phys, len, 0);
+
                if (ret < 0)
                        goto fail;
+
                iova += len;
                i = j;
        }
        return dma_addr;
 fail:
+
        iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
        __free_iova(mapping, dma_addr, size);
        return DMA_ERROR_CODE;
@@ -1007,6 +1031,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
        pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
        struct page **pages;
        void *addr = NULL;
+       struct page_infodma *page_ret;
+       unsigned long mem_type;
        *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
@@ -1019,11 +1045,19 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
        if (*handle == DMA_ERROR_CODE)
                goto err_buffer;
-       addr = __iommu_alloc_remap(pages, size, gfp, prot);
+       mem_type = dma_get_attr(DMA_ATTR_USER_SPACE, attrs);
+
+       addr = __iommu_alloc_remap(pages, size, mem_type, prot);
        if (!addr)
                goto err_mapping;
-       return addr;
+       if (mem_type) {
+               page_ret = (struct page_infodma *)addr;
+               page_ret->pages = pages;
+               return page_ret;
+       }
+       else
+               return addr;
 err_mapping:
        __iommu_remove_mapping(dev, *handle, size);
@@ -1071,18 +1105,34 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                          dma_addr_t handle, struct dma_attrs *attrs)
 {
-       struct arm_vmregion *c;
+
+       unsigned long mem_type = dma_get_attr(DMA_ATTR_USER_SPACE, attrs);
+
        size = PAGE_ALIGN(size);
-       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-       if (c) {
-               struct page **pages = c->priv;
-               __dma_free_remap(cpu_addr, size);
-               __iommu_remove_mapping(dev, handle, size);
-               __iommu_free_buffer(dev, pages, size);
+
+       if (mem_type) {
+
+               struct page_infodma *pagesin = cpu_addr;
+               if (pagesin) {
+                       struct page **pages = pagesin->pages;
+                       __iommu_remove_mapping(dev, handle, size);
+                       __iommu_free_buffer(dev, pages, size);
+               }
+       }
+       else {
+               struct arm_vmregion *c;
+               c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+               if (c) {
+                       struct page **pages = c->priv;
+                       __dma_free_remap(cpu_addr, size);
+                       __iommu_remove_mapping(dev, handle, size);
+                       __iommu_free_buffer(dev, pages, size);
+               }
        }
 }
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
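
The patch uses struct page_infodma without defining it here; presumably it is
introduced in an earlier patch of this series. Inferring from its usage above
(nr_pages set in __iommu_alloc_remap(), pages filled in by
arm_iommu_alloc_attrs()), its shape would be roughly:

	/*
	 * Inferred sketch, not the actual definition from the series;
	 * field names are taken from the usage in this patch.
	 */
	struct page_infodma {
		struct page	**pages;	/* backing pages of the buffer */
		unsigned int	nr_pages;	/* buffer size in PAGE_SIZE units */
	};

In the user-space case this struct stands in for the kernel virtual address as
the dma-mapping cookie: arm_iommu_alloc_attrs() returns it instead of a
remapped address, and arm_iommu_free_attrs() reads the page array back out of
it when tearing the buffer down.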
