From mboxrd@z Thu Jan 1 00:00:00 1970
From: Christoph Hellwig
Subject: [PATCH 10/17] mm: merge vmem_altmap_alloc into altmap_alloc_block_buf
Date: Fri, 15 Dec 2017 15:09:40 +0100
Message-Id: <20171215140947.26075-11-hch@lst.de>
In-Reply-To: <20171215140947.26075-1-hch@lst.de>
References: <20171215140947.26075-1-hch@lst.de>
Sender: owner-linux-mm@kvack.org
To: Dan Williams
Cc: Jérôme Glisse, Logan Gunthorpe, linux-nvdimm@lists.01.org,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org

There is no clear separation between the two, so merge them.

Signed-off-by: Christoph Hellwig
Reviewed-by: Logan Gunthorpe
---
 mm/sparse-vmemmap.c | 45 ++++++++++++++++-----------------------------
 1 file changed, 16 insertions(+), 29 deletions(-)

diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d012c9e2811b..bd0276d5f66b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
 }
 
 /**
- * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
- * @altmap - reserved page pool for the allocation
- * @nr_pfns - size (in pages) of the allocation
+ * altmap_alloc_block_buf - allocate pages from the device page map
+ * @altmap: device page map
+ * @size: size (in bytes) of the allocation
  *
- * Allocations are aligned to the size of the request
+ * Allocations are aligned to the size of the request.
  */
-static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
-		unsigned long nr_pfns)
-{
-	unsigned long pfn = vmem_altmap_next_pfn(altmap);
-	unsigned long nr_align;
-
-	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
-	nr_align = ALIGN(pfn, nr_align) - pfn;
-
-	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
-		return ULONG_MAX;
-	altmap->alloc += nr_pfns;
-	altmap->align += nr_align;
-	return pfn + nr_align;
-}
-
 void * __meminit altmap_alloc_block_buf(unsigned long size,
 		struct vmem_altmap *altmap)
 {
-	unsigned long pfn, nr_pfns;
-	void *ptr;
+	unsigned long pfn, nr_pfns, nr_align;
 
 	if (size & ~PAGE_MASK) {
 		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
@@ -141,16 +124,20 @@ void * __meminit altmap_alloc_block_buf(unsigned long size,
 		return NULL;
 	}
 
+	pfn = vmem_altmap_next_pfn(altmap);
 	nr_pfns = size >> PAGE_SHIFT;
-	pfn = vmem_altmap_alloc(altmap, nr_pfns);
-	if (pfn < ULONG_MAX)
-		ptr = __va(__pfn_to_phys(pfn));
-	else
-		ptr = NULL;
+	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
+	nr_align = ALIGN(pfn, nr_align) - pfn;
+	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
+		return NULL;
+
+	altmap->alloc += nr_pfns;
+	altmap->align += nr_align;
+	pfn += nr_align;
+
 	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
 			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
-
-	return ptr;
+	return __va(__pfn_to_phys(pfn));
 }
 
 void __meminit vmemmap_verify(pte_t *pte, int node,
-- 
2.14.2
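
[Editorial note: the logic folded into altmap_alloc_block_buf() rounds the start of each
allocation up so that it is aligned to the largest power of two that divides the request
size, then charges the padding to altmap->align and the payload to altmap->alloc. Below
is a minimal userspace sketch of that computation, assuming 4 KiB pages and a non-zero
request; the sample values, the round_up_pow2() helper, and the use of __builtin_ctzl()
in place of the kernel's find_first_bit()/ALIGN() are illustrative stand-ins, not part of
the patch.]

/*
 * Standalone sketch of the alignment math in altmap_alloc_block_buf().
 * find_first_bit() is modeled with __builtin_ctzl(), ALIGN() with
 * round_up_pow2(); next_pfn and nr_free are hypothetical inputs.
 */
#include <stdio.h>

/* Round x up to the next multiple of the power-of-two a (like the kernel's ALIGN()). */
static unsigned long round_up_pow2(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long next_pfn = 0x1003;	/* next free pfn in the reservation */
	unsigned long nr_free  = 0x200;		/* pages still available */
	unsigned long nr_pfns  = 24;		/* request: 24 pages (96 KiB with 4 KiB pages) */

	/* Largest power of two dividing nr_pfns: 24 = 8 * 3, so align to 8 pages. */
	unsigned long align = 1UL << __builtin_ctzl(nr_pfns);
	/* Padding needed to bring next_pfn up to that alignment. */
	unsigned long nr_align = round_up_pow2(next_pfn, align) - next_pfn;

	if (nr_pfns + nr_align > nr_free) {
		puts("allocation would not fit");
		return 1;
	}

	printf("start pfn %#lx, padded by %lu pages, %lu pages consumed\n",
	       next_pfn + nr_align, nr_align, nr_pfns + nr_align);
	return 0;
}

With these sample numbers the sketch pads the start pfn from 0x1003 up to 0x1008
(8-page alignment for a 24-page request) and counts the 5 padding pages against the
reservation, mirroring how the merged kernel function accounts padding in altmap->align.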