From: Andrew Morton <akpm@linux-foundation.org>
To: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>,
Rik van Riel <riel@redhat.com>,
Fengguang Wu <fengguang.wu@intel.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Johannes Weiner <hannes@cmpxchg.org>, Tejun Heo <tj@kernel.org>,
Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>,
David Rientjes <rientjes@google.com>,
KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
Jiri Kosina <jkosina@suse.cz>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Yinghai Lu <yinghai@kernel.org>
Subject: Re: [PATCH v2 2/4] mm/sparse: introduce alloc_usemap_and_memmap
Date: Tue, 20 Aug 2013 16:07:35 -0700
Message-ID: <20130820160735.b12fe1b3dd64b4dc146d2fa0@linux-foundation.org>
In-Reply-To: <1376981696-4312-2-git-send-email-liwanp@linux.vnet.ibm.com>
On Tue, 20 Aug 2013 14:54:54 +0800 Wanpeng Li <liwanp@linux.vnet.ibm.com> wrote:
> v1 -> v2:
> * add comments to describe alloc_usemap_and_memmap
>
> After commit 9bdac91424075 ("sparsemem: Put mem map for one node together."),
> the vmemmap for one node is allocated together; the logic is similar to the
> memory allocation for pageblock flags. This patch introduces
> alloc_usemap_and_memmap to extract the logic shared by the pageblock-flags
> and vmemmap allocations.
>
9bdac91424075 was written by Yinghai. He is an excellent reviewer, as
long as people remember to cc him!
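
The pattern being factored out is easy to lose in a diff of this size, so
here is a minimal, standalone sketch of it (not kernel code: section_node[],
present(), node_of() and alloc_for_node() are hypothetical stand-ins for the
mem_section machinery). The idea is to walk all sections, skip the absent
ones, and batch each run of present sections that belong to the same node
into a single allocation call:

	#include <stdio.h>

	#define NR_SECTIONS 16

	/* -1 marks an absent section, otherwise the owning node id */
	static const int section_node[NR_SECTIONS] = {
		-1, 0, 0, 0, -1, -1, 1, 1, 1, -1, 1, 2, 2, -1, -1, 2
	};

	static int present(unsigned long pnum) { return section_node[pnum] >= 0; }
	static int node_of(unsigned long pnum) { return section_node[pnum]; }

	/* stand-in for sparse_early_{usemaps,mem_maps}_alloc_node() */
	static void alloc_for_node(unsigned long begin, unsigned long end,
				   unsigned long count, int nid)
	{
		printf("alloc: sections [%lu, %lu) count=%lu node=%d\n",
		       begin, end, count, nid);
	}

	int main(void)
	{
		unsigned long pnum, pnum_begin = 0, count;
		int nid_begin = 0;

		/* phase 1: find the first present section */
		for (pnum = 0; pnum < NR_SECTIONS; pnum++) {
			if (!present(pnum))
				continue;
			nid_begin = node_of(pnum);
			pnum_begin = pnum;
			break;
		}

		/* phase 2: batch consecutive present sections per node */
		count = 1;
		for (pnum = pnum_begin + 1; pnum < NR_SECTIONS; pnum++) {
			if (!present(pnum))
				continue;
			if (node_of(pnum) == nid_begin) {
				count++;
				continue;
			}
			/* node changed: flush the batch [pnum_begin, pnum) */
			alloc_for_node(pnum_begin, pnum, count, nid_begin);
			nid_begin = node_of(pnum);
			pnum_begin = pnum;
			count = 1;
		}
		/* flush the final batch */
		alloc_for_node(pnum_begin, NR_SECTIONS, count, nid_begin);
		return 0;
	}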
> ---
> mm/sparse.c | 140 ++++++++++++++++++++++++++++--------------------------------
> 1 file changed, 66 insertions(+), 74 deletions(-)
>
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 308d503..d27db9b 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -439,6 +439,14 @@ static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
> map_count, nodeid);
> }
> #else
> +
> +static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
> + unsigned long pnum_begin,
> + unsigned long pnum_end,
> + unsigned long map_count, int nodeid)
> +{
> +}
> +
> static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
> {
> struct page *map;
> @@ -460,6 +468,62 @@ void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
> {
> }
>
> +/**
> + * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
> + * @map: usemap_map for pageblock flags or map_map for vmemmap
> + * @use_map: true if memory is allocated for pageblock flags, otherwise false
> + */
> +static void alloc_usemap_and_memmap(unsigned long **map, bool use_map)
> +{
> + unsigned long pnum;
> + unsigned long map_count;
> + int nodeid_begin = 0;
> + unsigned long pnum_begin = 0;
> +
> + for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
> + struct mem_section *ms;
> +
> + if (!present_section_nr(pnum))
> + continue;
> + ms = __nr_to_section(pnum);
> + nodeid_begin = sparse_early_nid(ms);
> + pnum_begin = pnum;
> + break;
> + }
> + map_count = 1;
> + for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
> + struct mem_section *ms;
> + int nodeid;
> +
> + if (!present_section_nr(pnum))
> + continue;
> + ms = __nr_to_section(pnum);
> + nodeid = sparse_early_nid(ms);
> + if (nodeid == nodeid_begin) {
> + map_count++;
> + continue;
> + }
> + /* ok, we need to take care of sections from pnum_begin to pnum - 1 */
> + if (use_map)
> + sparse_early_usemaps_alloc_node(map, pnum_begin, pnum,
> + map_count, nodeid_begin);
> + else
> + sparse_early_mem_maps_alloc_node((struct page **)map,
> + pnum_begin, pnum, map_count, nodeid_begin);
> + /* new start, update count etc. */
> + nodeid_begin = nodeid;
> + pnum_begin = pnum;
> + map_count = 1;
> + }
> + /* ok, last chunk */
> + if (use_map)
> + sparse_early_usemaps_alloc_node(map, pnum_begin,
> + NR_MEM_SECTIONS, map_count, nodeid_begin);
> + else
> + sparse_early_mem_maps_alloc_node((struct page **)map,
> + pnum_begin, NR_MEM_SECTIONS, map_count, nodeid_begin);
> +}
> +
> /*
> * Allocate the accumulated non-linear sections, allocate a mem_map
> * for each and record the physical to section mapping.
> @@ -471,11 +535,7 @@ void __init sparse_init(void)
> unsigned long *usemap;
> unsigned long **usemap_map;
> int size;
> - int nodeid_begin = 0;
> - unsigned long pnum_begin = 0;
> - unsigned long usemap_count;
> #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
> - unsigned long map_count;
> int size2;
> struct page **map_map;
> #endif
> @@ -501,82 +561,14 @@ void __init sparse_init(void)
> usemap_map = alloc_bootmem(size);
> if (!usemap_map)
> panic("can not allocate usemap_map\n");
> -
> - for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
> - struct mem_section *ms;
> -
> - if (!present_section_nr(pnum))
> - continue;
> - ms = __nr_to_section(pnum);
> - nodeid_begin = sparse_early_nid(ms);
> - pnum_begin = pnum;
> - break;
> - }
> - usemap_count = 1;
> - for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
> - struct mem_section *ms;
> - int nodeid;
> -
> - if (!present_section_nr(pnum))
> - continue;
> - ms = __nr_to_section(pnum);
> - nodeid = sparse_early_nid(ms);
> - if (nodeid == nodeid_begin) {
> - usemap_count++;
> - continue;
> - }
> - /* ok, we need to take cake of from pnum_begin to pnum - 1*/
> - sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
> - usemap_count, nodeid_begin);
> - /* new start, update count etc*/
> - nodeid_begin = nodeid;
> - pnum_begin = pnum;
> - usemap_count = 1;
> - }
> - /* ok, last chunk */
> - sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
> - usemap_count, nodeid_begin);
> + alloc_usemap_and_memmap(usemap_map, true);
>
> #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
> size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
> map_map = alloc_bootmem(size2);
> if (!map_map)
> panic("can not allocate map_map\n");
> -
> - for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
> - struct mem_section *ms;
> -
> - if (!present_section_nr(pnum))
> - continue;
> - ms = __nr_to_section(pnum);
> - nodeid_begin = sparse_early_nid(ms);
> - pnum_begin = pnum;
> - break;
> - }
> - map_count = 1;
> - for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
> - struct mem_section *ms;
> - int nodeid;
> -
> - if (!present_section_nr(pnum))
> - continue;
> - ms = __nr_to_section(pnum);
> - nodeid = sparse_early_nid(ms);
> - if (nodeid == nodeid_begin) {
> - map_count++;
> - continue;
> - }
> - /* ok, we need to take cake of from pnum_begin to pnum - 1*/
> - sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
> - map_count, nodeid_begin);
> - /* new start, update count etc*/
> - nodeid_begin = nodeid;
> - pnum_begin = pnum;
> - map_count = 1;
> - }
> - /* ok, last chunk */
> - sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
> - map_count, nodeid_begin);
> + alloc_usemap_and_memmap((unsigned long **)map_map, false);
> #endif
>
> for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
> --
> 1.8.1.2
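
One design note, offered as a suggestion rather than a request: the
bool-plus-cast interface could instead take the per-batch allocator as a
callback, which would let both call sites stay type-clean. A sketch (the
names and signature here are illustrative, not from the patch):

	/* hypothetical callback type for the per-batch allocation step */
	typedef void (*alloc_func_t)(void *data, unsigned long pnum_begin,
				     unsigned long pnum_end,
				     unsigned long map_count, int nodeid);

	/*
	 * alloc_usemap_and_memmap(alloc_func_t alloc_func, void *data)
	 * would run the same two-phase walk and dispatch each batch as:
	 *
	 *	alloc_func(data, pnum_begin, pnum, map_count, nodeid_begin);
	 */

usemap_map and map_map would then be passed through the void *data
argument, and the (unsigned long **)map_map cast in sparse_init() would
go away.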