linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
To: Minchan Kim <minchan@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Michal Nazarewicz <mina86@mina86.com>,
	Russell King - ARM Linux <linux@arm.linux.org.uk>,
	kvm@vger.kernel.org, linux-mm@kvack.org,
	Gleb Natapov <gleb@kernel.org>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Alexander Graf <agraf@suse.de>,
	kvm-ppc@vger.kernel.org, linux-kernel@vger.kernel.org,
	Paul Mackerras <paulus@samba.org>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	linuxppc-dev@lists.ozlabs.org,
	linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH v2 05/10] DMA, CMA: support arbitrary bitmap granularity
Date: Thu, 12 Jun 2014 15:25:25 +0800	[thread overview]
Message-ID: <539955E5.9070504@cn.fujitsu.com> (raw)
In-Reply-To: <20140612070811.GI12415@bbox>

On 06/12/2014 03:08 PM, Minchan Kim wrote:
> On Thu, Jun 12, 2014 at 12:21:42PM +0900, Joonsoo Kim wrote:
>> ppc kvm's cma region management requires arbitrary bitmap granularity,
>> since they want to reserve very large memory and manage this region
>> with bitmap that one bit for several pages to reduce management overheads.
>> So support arbitrary bitmap granularity for following generalization.
>>
>> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> Acked-by: Minchan Kim <minchan@kernel.org>
> 
> Just a nit below.
> 
>>
>> diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
>> index bc4c171..9bc9340 100644
>> --- a/drivers/base/dma-contiguous.c
>> +++ b/drivers/base/dma-contiguous.c
>> @@ -38,6 +38,7 @@ struct cma {
>>  	unsigned long	base_pfn;
>>  	unsigned long	count;
>>  	unsigned long	*bitmap;
>> +	int order_per_bit; /* Order of pages represented by one bit */
>>  	struct mutex	lock;
>>  };
>>  
>> @@ -157,9 +158,38 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
>>  
>>  static DEFINE_MUTEX(cma_mutex);
>>  
>> +static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
>> +{
>> +	return (1 << (align_order >> cma->order_per_bit)) - 1;
>> +}
>> +
>> +static unsigned long cma_bitmap_maxno(struct cma *cma)
>> +{
>> +	return cma->count >> cma->order_per_bit;
>> +}
>> +
>> +static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
>> +						unsigned long pages)
>> +{
>> +	return ALIGN(pages, 1 << cma->order_per_bit) >> cma->order_per_bit;
>> +}
>> +
>> +static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
>> +{
>> +	unsigned long bitmapno, nr_bits;
>> +
>> +	bitmapno = (pfn - cma->base_pfn) >> cma->order_per_bit;
>> +	nr_bits = cma_bitmap_pages_to_bits(cma, count);
>> +
>> +	mutex_lock(&cma->lock);
>> +	bitmap_clear(cma->bitmap, bitmapno, nr_bits);
>> +	mutex_unlock(&cma->lock);
>> +}
>> +
>>  static int __init cma_activate_area(struct cma *cma)
>>  {
>> -	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
>> +	int bitmap_maxno = cma_bitmap_maxno(cma);
>> +	int bitmap_size = BITS_TO_LONGS(bitmap_maxno) * sizeof(long);
>>  	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
>>  	unsigned i = cma->count >> pageblock_order;
>>  	struct zone *zone;
>> @@ -221,6 +251,7 @@ core_initcall(cma_init_reserved_areas);
>>   * @base: Base address of the reserved area optional, use 0 for any
>>   * @limit: End address of the reserved memory (optional, 0 for any).
>>   * @alignment: Alignment for the contiguous memory area, should be power of 2
>> + * @order_per_bit: Order of pages represented by one bit on bitmap.
>>   * @res_cma: Pointer to store the created cma region.
>>   * @fixed: hint about where to place the reserved area
>>   *
>> @@ -235,7 +266,7 @@ core_initcall(cma_init_reserved_areas);
>>   */
>>  static int __init __dma_contiguous_reserve_area(phys_addr_t size,
>>  				phys_addr_t base, phys_addr_t limit,
>> -				phys_addr_t alignment,
>> +				phys_addr_t alignment, int order_per_bit,
>>  				struct cma **res_cma, bool fixed)
>>  {
>>  	struct cma *cma = &cma_areas[cma_area_count];
>> @@ -269,6 +300,8 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
>>  	base = ALIGN(base, alignment);
>>  	size = ALIGN(size, alignment);
>>  	limit &= ~(alignment - 1);
>> +	/* size should be aligned with order_per_bit */
>> +	BUG_ON(!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit));
>>  
>>  	/* Reserve memory */
>>  	if (base && fixed) {
>> @@ -294,6 +327,7 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
>>  	 */
>>  	cma->base_pfn = PFN_DOWN(base);
>>  	cma->count = size >> PAGE_SHIFT;
>> +	cma->order_per_bit = order_per_bit;
>>  	*res_cma = cma;
>>  	cma_area_count++;
>>  
>> @@ -313,7 +347,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
>>  {
>>  	int ret;
>>  
>> -	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
>> +	ret = __dma_contiguous_reserve_area(size, base, limit, 0, 0,
>>  						res_cma, fixed);
>>  	if (ret)
>>  		return ret;
>> @@ -324,13 +358,6 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
>>  	return 0;
>>  }
>>  
>> -static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
>> -{
>> -	mutex_lock(&cma->lock);
>> -	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
>> -	mutex_unlock(&cma->lock);
>> -}
>> -
>>  /**
>>   * dma_alloc_from_contiguous() - allocate pages from contiguous area
>>   * @dev:   Pointer to device for which the allocation is performed.
>> @@ -345,7 +372,8 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
>>  static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
>>  				       unsigned int align)
>>  {
>> -	unsigned long mask, pfn, pageno, start = 0;
>> +	unsigned long mask, pfn, start = 0;
>> +	unsigned long bitmap_maxno, bitmapno, nr_bits;
> 
> Just Nit: bitmap_maxno, bitmap_no or something consistent.
> I know you love consistent when I read description in first patch
> in this patchset. ;-)
> 

Yeah, not only in this patchset, I saw Joonsoo trying to unify all
kinds of things in the MM. This is great for newbies, IMO.

-- 
Thanks.
Zhang Yanfei

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

  reply	other threads:[~2014-06-12  7:25 UTC|newest]

Thread overview: 69+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-06-12  3:21 [PATCH v2 00/10] CMA: generalize CMA reserved area management code Joonsoo Kim
2014-06-12  3:21 ` [PATCH v2 01/10] DMA, CMA: clean-up log message Joonsoo Kim
2014-06-12  4:41   ` Aneesh Kumar K.V
2014-06-12  5:53     ` Joonsoo Kim
2014-06-12  8:55       ` Michal Nazarewicz
2014-06-12  9:53         ` Michal Nazarewicz
2014-06-16  5:18           ` Joonsoo Kim
2014-06-12  5:18   ` Minchan Kim
2014-06-12  5:55     ` Joonsoo Kim
2014-06-12  8:15   ` Zhang Yanfei
2014-06-12  8:56   ` Michal Nazarewicz
2014-06-12  3:21 ` [PATCH v2 02/10] DMA, CMA: fix possible memory leak Joonsoo Kim
2014-06-12  4:43   ` Aneesh Kumar K.V
2014-06-12  5:25   ` Minchan Kim
2014-06-12  6:02     ` Joonsoo Kim
2014-06-12  8:19       ` Zhang Yanfei
2014-06-12  9:47   ` Michal Nazarewicz
2014-06-12  3:21 ` [PATCH v2 03/10] DMA, CMA: separate core cma management codes from DMA APIs Joonsoo Kim
2014-06-12  4:44   ` Aneesh Kumar K.V
2014-06-12  5:37   ` Minchan Kim
2014-06-16  5:24     ` Joonsoo Kim
2014-06-12  9:55   ` Michal Nazarewicz
2014-06-12  3:21 ` [PATCH v2 04/10] DMA, CMA: support alignment constraint on cma region Joonsoo Kim
2014-06-12  4:50   ` Aneesh Kumar K.V
2014-06-12  5:52   ` Minchan Kim
2014-06-12  6:07     ` Joonsoo Kim
2014-06-12 10:02   ` Michal Nazarewicz
2014-06-16  5:19     ` Joonsoo Kim
2014-06-12  3:21 ` [PATCH v2 05/10] DMA, CMA: support arbitrary bitmap granularity Joonsoo Kim
2014-06-12  6:06   ` Minchan Kim
2014-06-12  6:43     ` Joonsoo Kim
2014-06-12  6:42       ` Minchan Kim
2014-06-12  7:08   ` Minchan Kim
2014-06-12  7:25     ` Zhang Yanfei [this message]
2014-06-12  7:41     ` Joonsoo Kim
2014-06-12  8:28   ` Zhang Yanfei
2014-06-12 10:19   ` Michal Nazarewicz
2014-06-16  5:23     ` Joonsoo Kim
2014-06-14 10:09   ` Aneesh Kumar K.V
2014-06-12  3:21 ` [PATCH v2 06/10] CMA: generalize CMA reserved area management functionality Joonsoo Kim
2014-06-12  7:13   ` Minchan Kim
2014-06-12  7:42     ` Joonsoo Kim
2014-06-12  8:29   ` Zhang Yanfei
2014-06-14 10:06   ` Aneesh Kumar K.V
2014-06-14 10:08   ` Aneesh Kumar K.V
2014-06-14 10:16   ` Aneesh Kumar K.V
2014-06-16  5:27     ` Joonsoo Kim
2014-06-12  3:21 ` [PATCH v2 07/10] PPC, KVM, CMA: use general CMA reserved area management framework Joonsoo Kim
2014-06-14  8:53   ` Aneesh Kumar K.V
2014-06-16  5:34     ` Joonsoo Kim
2014-06-16  7:02       ` Aneesh Kumar K.V
2014-06-14 10:05   ` Aneesh Kumar K.V
2014-06-16  5:29     ` Joonsoo Kim
2014-06-12  3:21 ` [PATCH v2 08/10] mm, cma: clean-up cma allocation error path Joonsoo Kim
2014-06-12  7:16   ` Minchan Kim
2014-06-12  8:31   ` Zhang Yanfei
2014-06-12 11:34   ` Michal Nazarewicz
2014-06-14  7:18   ` Aneesh Kumar K.V
2014-06-12  3:21 ` [PATCH v2 09/10] mm, cma: move output param to the end of param list Joonsoo Kim
2014-06-12  7:19   ` Minchan Kim
2014-06-12  7:43     ` Joonsoo Kim
2014-06-12 11:38   ` Michal Nazarewicz
2014-06-14  7:20   ` Aneesh Kumar K.V
2014-06-12  3:21 ` [PATCH v2 10/10] mm, cma: use spinlock instead of mutex Joonsoo Kim
2014-06-12  7:40   ` Minchan Kim
2014-06-12  7:56     ` Joonsoo Kim
2014-06-14  7:25 ` [PATCH v2 00/10] CMA: generalize CMA reserved area management code Aneesh Kumar K.V
2014-06-16  5:32   ` Joonsoo Kim
2014-06-16  7:04     ` Aneesh Kumar K.V

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=539955E5.9070504@cn.fujitsu.com \
    --to=zhangyanfei@cn.fujitsu.com \
    --cc=agraf@suse.de \
    --cc=akpm@linux-foundation.org \
    --cc=aneesh.kumar@linux.vnet.ibm.com \
    --cc=benh@kernel.crashing.org \
    --cc=gleb@kernel.org \
    --cc=gregkh@linuxfoundation.org \
    --cc=iamjoonsoo.kim@lge.com \
    --cc=kvm-ppc@vger.kernel.org \
    --cc=kvm@vger.kernel.org \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux@arm.linux.org.uk \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=m.szyprowski@samsung.com \
    --cc=mina86@mina86.com \
    --cc=minchan@kernel.org \
    --cc=paulus@samba.org \
    --cc=pbonzini@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox