From: Tang Chen <tangchen@cn.fujitsu.com>
To: hpa@zytor.com, akpm@linux-foundation.org, rob@landley.net,
isimatu.yasuaki@jp.fujitsu.com, tangchen@cn.fujitsu.com,
laijs@cn.fujitsu.com, wency@cn.fujitsu.com,
linfeng@cn.fujitsu.com, jiang.liu@huawei.com, yinghai@kernel.org,
kosaki.motohiro@jp.fujitsu.com, minchan.kim@gmail.com,
mgorman@suse.de, rientjes@google.com, rusty@rustcorp.com.au
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-doc@vger.kernel.org
Subject: [PATCH v2 2/5] page_alloc: add movable_memmap kernel parameter
Date: Fri, 23 Nov 2012 18:44:02 +0800 [thread overview]
Message-ID: <1353667445-7593-3-git-send-email-tangchen@cn.fujitsu.com> (raw)
In-Reply-To: <1353667445-7593-1-git-send-email-tangchen@cn.fujitsu.com>
This patch adds functions to parse the movablecore_map boot option. Since the
option could be specified more than once, all the maps will be stored in the
global variable movablecore_map.map array.
And also, we keep the array in monotonic increasing order by start_pfn.
And merge all overlapped ranges.
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Reviewed-by: Wen Congyang <wency@cn.fujitsu.com>
Tested-by: Lin Feng <linfeng@cn.fujitsu.com>
---
Documentation/kernel-parameters.txt | 17 +++++
include/linux/mm.h | 11 +++
mm/page_alloc.c | 126 +++++++++++++++++++++++++++++++++++
3 files changed, 154 insertions(+), 0 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9776f06..785f878 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1620,6 +1620,23 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
that the amount of memory usable for all allocations
is not too small.
+ movablecore_map=nn[KMG]@ss[KMG]
+ [KNL,X86,IA-64,PPC] This parameter is similar to
+ memmap except it specifies the memory map of
+ ZONE_MOVABLE.
+ If several areas all fall within one node, then memory
+ from the lowest ss to the end of that node will be
+ ZONE_MOVABLE. If an area covers two or more nodes, the
+ area from ss to the end of the 1st node will be
+ ZONE_MOVABLE, and all of the remaining nodes will only
+ have ZONE_MOVABLE.
+ If memmap is specified at the same time, the
+ movablecore_map will be limited within the memmap
+ areas. If kernelcore or movablecore is also specified,
+ movablecore_map will have higher priority to be
+ satisfied. So the administrator should be careful that
+ the total amount of movablecore_map memory is not too
+ large. Otherwise the kernel won't have enough memory to
+ boot.
+
MTD_Partition= [MTD]
Format: <name>,<region-number>,<size>,<offset>
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fa06804..647c980 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1328,6 +1328,17 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);
+#define MOVABLECORE_MAP_MAX MAX_NUMNODES
+struct movablecore_entry {
+ unsigned long start; /* start pfn of memory segment */
+ unsigned long end; /* end pfn of memory segment */
+};
+
+struct movablecore_map {
+ int nr_map; /* number of valid entries in map[] */
+ struct movablecore_entry map[MOVABLECORE_MAP_MAX];
+};
+
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b74de6..fb5cf12 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -198,6 +198,9 @@ static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+/* Movable memory ranges, will also be used by memblock subsystem. */
+struct movablecore_map movablecore_map;
+
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
@@ -4986,6 +4989,129 @@ static int __init cmdline_parse_movablecore(char *p)
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
+/**
+ * insert_movablecore_map - Insert a memory range into movablecore_map.map.
+ * @start_pfn: start pfn of the range
+ * @end_pfn: end pfn of the range
+ *
+ * This function will also merge the overlapped ranges, and sort the array
+ * by start_pfn in monotonic increasing order. A new range that merely
+ * touches an existing entry (start_pfn == entry.end) is merged with it too.
+ */
+static void __init insert_movablecore_map(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ int pos, overlap;
+
+ /*
+ * pos will be at the 1st overlapped range, or the position
+ * where the element should be inserted. The array is kept
+ * sorted by start_pfn, so the first entry whose end reaches
+ * start_pfn is the first overlap candidate.
+ */
+ for (pos = 0; pos < movablecore_map.nr_map; pos++)
+ if (start_pfn <= movablecore_map.map[pos].end)
+ break;
+
+ /* If there is no overlapped range, just insert the element. */
+ if (pos == movablecore_map.nr_map ||
+ end_pfn < movablecore_map.map[pos].start) {
+ /*
+ * If pos is not the end of array, we need to move all
+ * the rest elements backward (one slot toward the end)
+ * to open a hole at index pos.
+ */
+ if (pos < movablecore_map.nr_map)
+ memmove(&movablecore_map.map[pos+1],
+ &movablecore_map.map[pos],
+ sizeof(struct movablecore_entry) *
+ (movablecore_map.nr_map - pos));
+ movablecore_map.map[pos].start = start_pfn;
+ movablecore_map.map[pos].end = end_pfn;
+ movablecore_map.nr_map++;
+ return;
+ }
+
+ /* overlap will be at the last overlapped range */
+ for (overlap = pos + 1; overlap < movablecore_map.nr_map; overlap++)
+ if (end_pfn < movablecore_map.map[overlap].start)
+ break;
+
+ /*
+ * If there are more ranges overlapped, we need to merge them,
+ * and move the rest elements forward. Entries [pos..overlap]
+ * collapse into map[pos], which becomes the union of the old
+ * entries and the new range.
+ */
+ overlap--;
+ movablecore_map.map[pos].start = min(start_pfn,
+ movablecore_map.map[pos].start);
+ movablecore_map.map[pos].end = max(end_pfn,
+ movablecore_map.map[overlap].end);
+
+ /* No move needed if overlap == pos or overlap is the last entry. */
+ if (pos != overlap && overlap + 1 != movablecore_map.nr_map)
+ memmove(&movablecore_map.map[pos+1],
+ &movablecore_map.map[overlap+1],
+ sizeof(struct movablecore_entry) *
+ (movablecore_map.nr_map - overlap - 1));
+
+ movablecore_map.nr_map -= overlap - pos;
+}
+
+/**
+ * movablecore_map_add_region - Add a memory range into movablecore_map.
+ * @start: physical start address of the range
+ * @size: size of the range in bytes
+ *
+ * This function transforms the physical address range into pfns, and then
+ * adds the range into movablecore_map by calling insert_movablecore_map().
+ * Ranges that are empty, overflow, or exceed the map capacity are dropped
+ * (with an error message in the capacity case).
+ */
+static void __init movablecore_map_add_region(u64 start, u64 size)
+{
+ unsigned long start_pfn, end_pfn;
+
+ /* In case size == 0 or start + size overflows */
+ if (start + size <= start)
+ return;
+
+ if (movablecore_map.nr_map >= ARRAY_SIZE(movablecore_map.map)) {
+ pr_err("movablecore_map: too many entries;"
+ " ignoring [mem %#010llx-%#010llx]\n",
+ (unsigned long long) start,
+ (unsigned long long) (start + size - 1));
+ return;
+ }
+
+ start_pfn = PFN_DOWN(start);
+ end_pfn = PFN_UP(start + size);
+ insert_movablecore_map(start_pfn, end_pfn);
+}
+
+/*
+ * movablecore_map=nn[KMG]@ss[KMG] sets the region of memory to be used as
+ * movable memory. Returns 0 on success, or -EINVAL if the option string is
+ * malformed — the "@ss" start address is mandatory, so a bare size (with
+ * no '@') is rejected.
+ */
+static int __init cmdline_parse_movablecore_map(char *p)
+{
+ char *oldp;
+ u64 start_at, mem_size;
+
+ if (!p)
+ goto err;
+
+ /* Parse the "nn[KMG]" size; memparse must consume at least one char. */
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ goto err;
+
+ if (*p == '@') {
+ /* Parse the "ss[KMG]" start address; no trailing junk allowed. */
+ oldp = ++p;
+ start_at = memparse(p, &p);
+ if (p == oldp || *p != '\0')
+ goto err;
+
+ movablecore_map_add_region(start_at, mem_size);
+ return 0;
+ }
+err:
+ return -EINVAL;
+}
+early_param("movablecore_map", cmdline_parse_movablecore_map);
+
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
--
1.7.1
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2012-11-23 10:45 UTC|newest]
Thread overview: 86+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-11-23 10:44 [PATCH v2 0/5] Add movablecore_map boot option Tang Chen
2012-11-23 10:44 ` [PATCH v2 1/5] x86: get pg_data_t's memory from other node Tang Chen
2012-11-24 1:19 ` Jiang Liu
2012-11-26 1:19 ` Tang Chen
2012-12-02 15:11 ` Jiang Liu
2012-11-23 10:44 ` Tang Chen [this message]
2012-11-23 10:44 ` [PATCH v2 3/5] page_alloc: Introduce zone_movable_limit[] to keep movable limit for nodes Tang Chen
2012-12-05 15:46 ` Jiang Liu
2012-12-06 1:20 ` Tang Chen
2012-11-23 10:44 ` [PATCH v2 4/5] page_alloc: Make movablecore_map has higher priority Tang Chen
2012-12-05 15:43 ` Jiang Liu
2012-12-06 1:26 ` Tang Chen
2012-12-06 2:26 ` Jiang Liu
2012-12-06 2:51 ` Jianguo Wu
2012-12-06 2:57 ` Tang Chen
2012-12-09 8:10 ` Tang Chen
2012-12-10 2:15 ` Jiang Liu
2012-11-23 10:44 ` [PATCH v2 5/5] page_alloc: Bootmem limit with movablecore_map Tang Chen
2012-11-26 12:22 ` wujianguo
2012-11-26 12:53 ` Tang Chen
2012-11-26 12:40 ` wujianguo
2012-11-26 13:15 ` Tang Chen
2012-11-26 15:48 ` H. Peter Anvin
2012-11-27 0:58 ` Jianguo Wu
2012-11-27 3:19 ` Wen Congyang
2012-11-27 3:22 ` Jianguo Wu
2012-11-27 3:34 ` Wen Congyang
2012-11-27 1:12 ` Jiang Liu
2012-11-27 1:20 ` H. Peter Anvin
2012-11-27 3:15 ` Wen Congyang
2012-11-27 5:31 ` H. Peter Anvin
2012-12-06 17:28 ` Jiang Liu
2012-12-06 17:41 ` H. Peter Anvin
2012-12-07 0:18 ` Jiang Liu
2012-12-19 9:17 ` Tang Chen
2012-11-27 3:10 ` [PATCH v2 0/5] Add movablecore_map boot option wujianguo
2012-11-27 5:43 ` Tang Chen
2012-11-27 6:20 ` H. Peter Anvin
2012-11-27 6:47 ` Jianguo Wu
2012-11-28 3:47 ` Tang Chen
2012-11-28 4:01 ` Jiang Liu
2012-11-28 5:21 ` Wen Congyang
2012-11-28 5:17 ` Jiang Liu
2012-11-28 4:53 ` Jianguo Wu
2012-11-27 8:00 ` Bob Liu
2012-11-27 8:29 ` Tang Chen
2012-11-27 8:49 ` H. Peter Anvin
2012-11-27 9:47 ` Wen Congyang
2012-11-27 9:53 ` H. Peter Anvin
2012-11-27 9:59 ` Yasuaki Ishimatsu
2012-11-27 12:09 ` Bob Liu
2012-11-27 12:49 ` Tang Chen
2012-11-28 3:24 ` Bob Liu
2012-11-28 4:08 ` Jiang Liu
2012-11-28 6:16 ` Tang Chen
2012-11-28 7:03 ` Jiang Liu
2012-11-28 8:29 ` Wen Congyang
2012-11-28 8:28 ` Jiang Liu
2012-11-28 8:38 ` Wen Congyang
2012-11-29 0:43 ` Jaegeuk Hanse
2012-11-29 1:24 ` Tang Chen
2012-11-30 9:20 ` Lai Jiangshan
2012-11-28 8:47 ` Jiang Liu
2012-11-28 21:34 ` Luck, Tony
2012-11-28 21:38 ` H. Peter Anvin
2012-11-29 11:00 ` Mel Gorman
2012-11-29 16:07 ` H. Peter Anvin
2012-11-29 22:41 ` Luck, Tony
2012-11-29 22:45 ` H. Peter Anvin
2012-11-30 2:56 ` Jiang Liu
2012-11-30 3:15 ` Yasuaki Ishimatsu
2012-11-30 15:36 ` Jiang Liu
2012-11-30 2:58 ` Luck, Tony
2012-11-30 3:28 ` H. Peter Anvin
2012-11-30 10:19 ` Glauber Costa
2012-11-30 10:52 ` Mel Gorman
2012-11-29 10:38 ` Yasuaki Ishimatsu
2012-11-29 11:05 ` Mel Gorman
2012-11-29 15:47 ` Jiang Liu
2012-11-29 15:53 ` Jiang Liu
2012-11-29 1:42 ` Jaegeuk Hanse
2012-11-29 2:25 ` Jiang Liu
2012-11-29 2:49 ` Wanpeng Li
2012-11-29 2:59 ` Jiang Liu
2012-11-29 2:49 ` Wanpeng Li
2012-11-30 22:27 ` Toshi Kani
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1353667445-7593-3-git-send-email-tangchen@cn.fujitsu.com \
--to=tangchen@cn.fujitsu.com \
--cc=akpm@linux-foundation.org \
--cc=hpa@zytor.com \
--cc=isimatu.yasuaki@jp.fujitsu.com \
--cc=jiang.liu@huawei.com \
--cc=kosaki.motohiro@jp.fujitsu.com \
--cc=laijs@cn.fujitsu.com \
--cc=linfeng@cn.fujitsu.com \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mgorman@suse.de \
--cc=minchan.kim@gmail.com \
--cc=rientjes@google.com \
--cc=rob@landley.net \
--cc=rusty@rustcorp.com.au \
--cc=wency@cn.fujitsu.com \
--cc=yinghai@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox