* [RFC PATCH 1/2] bootmem: refactor free_all_bootmem_core
2009-10-23 17:10 [RFC PATCH 0/2] allow bootmem to be freed to allocator late Chris Wright
@ 2009-10-23 17:10 ` Chris Wright
2009-10-23 17:10 ` [RFC PATCH 2/2] bootmem: add free_bootmem_late Chris Wright
1 sibling, 0 replies; 3+ messages in thread
From: Chris Wright @ 2009-10-23 17:10 UTC (permalink / raw)
To: linux-mm; +Cc: David Woodhouse, FUJITA Tomonori, iommu, linux-kernel
[-- Attachment #1: bootmem-break-out-free_pages_bootmem-loop.patch --]
[-- Type: text/plain, Size: 3584 bytes --]
Move the loop that frees all bootmem pages back to page allocator into
its own function. This should have no functional effect and allows the
function to be reused later.
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
---
mm/bootmem.c | 61 +++++++++++++++++++++++++++++++++++++++-------------------
1 files changed, 41 insertions(+), 20 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 555d5d2..94ef2e7 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -143,17 +143,22 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
-static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
+/**
+ * free_bootmem_pages - frees bootmem pages to page allocator
+ * @start: start pfn
+ * @end: end pfn
+ * @map: bootmem bitmap of reserved pages
+ *
+ * This will free the pages in the range @start to @end, making them
+ * available to the page allocator. The @map will be used to skip
+ * reserved pages. Returns the count of pages freed.
+ */
+static unsigned long __init free_bootmem_pages(unsigned long start,
+ unsigned long end,
+ unsigned long *map)
{
+ unsigned long cursor, count = 0;
int aligned;
- struct page *page;
- unsigned long start, end, pages, count = 0;
-
- if (!bdata->node_bootmem_map)
- return 0;
-
- start = bdata->node_min_pfn;
- end = bdata->node_low_pfn;
/*
* If the start is aligned to the machines wordsize, we might
@@ -161,27 +166,25 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
*/
aligned = !(start & (BITS_PER_LONG - 1));
- bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
- bdata - bootmem_node_data, start, end, aligned);
+ for (cursor = start; cursor < end; cursor += BITS_PER_LONG) {
+ unsigned long idx, vec;
- while (start < end) {
- unsigned long *map, idx, vec;
-
- map = bdata->node_bootmem_map;
- idx = start - bdata->node_min_pfn;
+ idx = cursor - start;
vec = ~map[idx / BITS_PER_LONG];
- if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
+ if (aligned && vec == ~0UL && cursor + BITS_PER_LONG < end) {
int order = ilog2(BITS_PER_LONG);
- __free_pages_bootmem(pfn_to_page(start), order);
+ __free_pages_bootmem(pfn_to_page(cursor), order);
count += BITS_PER_LONG;
} else {
unsigned long off = 0;
while (vec && off < BITS_PER_LONG) {
if (vec & 1) {
- page = pfn_to_page(start + off);
+ struct page *page;
+
+ page = pfn_to_page(cursor + off);
__free_pages_bootmem(page, 0);
count++;
}
@@ -189,8 +192,26 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
off++;
}
}
- start += BITS_PER_LONG;
}
+ return count;
+}
+
+static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
+{
+ struct page *page;
+ unsigned long start, end, *map, pages, count = 0;
+
+ if (!bdata->node_bootmem_map)
+ return 0;
+
+ start = bdata->node_min_pfn;
+ end = bdata->node_low_pfn;
+ map = bdata->node_bootmem_map;
+
+ bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
+ start, end);
+
+ count = free_bootmem_pages(start, end, map);
page = virt_to_page(bdata->node_bootmem_map);
pages = bdata->node_low_pfn - bdata->node_min_pfn;
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply [flat|nested] 3+ messages in thread

* [RFC PATCH 2/2] bootmem: add free_bootmem_late
2009-10-23 17:10 [RFC PATCH 0/2] allow bootmem to be freed to allocator late Chris Wright
2009-10-23 17:10 ` [RFC PATCH 1/2] bootmem: refactor free_all_bootmem_core Chris Wright
@ 2009-10-23 17:10 ` Chris Wright
1 sibling, 0 replies; 3+ messages in thread
From: Chris Wright @ 2009-10-23 17:10 UTC (permalink / raw)
To: linux-mm; +Cc: David Woodhouse, FUJITA Tomonori, iommu, linux-kernel
[-- Attachment #1: bootmem-make-free_pages_bootmem-generic.patch --]
[-- Type: text/plain, Size: 3360 bytes --]
Add a new function for freeing bootmem after the bootmem allocator has
been released and the unreserved pages given to the page allocator.
This allows us to reserve bootmem and then release it if we later
discover it was not needed.
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
---
include/linux/bootmem.h | 1 +
mm/bootmem.c | 43 ++++++++++++++++++++++++++++++++++++++-----
2 files changed, 39 insertions(+), 5 deletions(-)
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t
unsigned long addr,
unsigned long size);
extern void free_bootmem(unsigned long addr, unsigned long size);
+extern void free_bootmem_late(unsigned long addr, unsigned long size);
/*
* Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -151,7 +151,9 @@ unsigned long __init init_bootmem(unsign
*
* This will free the pages in the range @start to @end, making them
* available to the page allocator. The @map will be used to skip
- * reserved pages. Returns the count of pages freed.
+ * reserved pages. In the case that @map is NULL, the bootmem allocator
+ * is already free and the range is contiguous. Returns the count of
+ * pages freed.
*/
static unsigned long __init free_bootmem_pages(unsigned long start,
unsigned long end,
@@ -164,13 +166,23 @@ static unsigned long __init free_bootmem
* If the start is aligned to the machines wordsize, we might
* be able to free pages in bulks of that order.
*/
- aligned = !(start & (BITS_PER_LONG - 1));
+ if (map)
+ aligned = !(start & (BITS_PER_LONG - 1));
+ else
+ aligned = 1;
for (cursor = start; cursor < end; cursor += BITS_PER_LONG) {
- unsigned long idx, vec;
+ unsigned long vec;
- idx = cursor - start;
- vec = ~map[idx / BITS_PER_LONG];
+ if (map) {
+ unsigned long idx = cursor - start;
+ vec = ~map[idx / BITS_PER_LONG];
+ } else {
+ if (end - cursor >= BITS_PER_LONG)
+ vec = ~0UL;
+ else
+ vec = (1UL << (end - cursor)) - 1;
+ }
if (aligned && vec == ~0UL && cursor + BITS_PER_LONG < end) {
int order = ilog2(BITS_PER_LONG);
@@ -387,6 +399,27 @@ void __init free_bootmem(unsigned long a
}
/**
+ * free_bootmem_late - free bootmem pages directly to page allocator
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system. Pages are given directly
+ * to the page allocator, no bootmem metadata is updated because it is gone.
+ */
+void __init free_bootmem_late(unsigned long addr, unsigned long size)
+{
+ unsigned long start, end;
+
+ kmemleak_free_part(__va(addr), size);
+
+ start = PFN_UP(addr);
+ end = PFN_DOWN(addr + size);
+
+ totalram_pages += free_bootmem_pages(start, end, NULL);
+}
+
+/**
* reserve_bootmem_node - mark a page range as reserved
* @pgdat: node the range resides on
* @physaddr: starting address of the range
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply [flat|nested] 3+ messages in thread