* [PATCH -mm 00/14] bootmem rewrite v2
@ 2008-05-30 19:42 Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 01/14] bootmem: reorder code to match new bootmem structure Johannes Weiner
` (13 more replies)
0 siblings, 14 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
Hi Andrew,
As you requested, I broke up this big diff into smaller, more reviewable
chunks. They apply to -mmotm (modulo the conflicts I ran into, but
those seemed unrelated).
So, here is another version of my attempt to cleanly rewrite the
bootmem allocator. More details in the respective patch changelogs.
Compile- and runtime tested on x86 32bit UMA.
Hannes
arch/alpha/mm/numa.c | 2 +-
arch/arm/plat-omap/fb.c | 4 +-
arch/avr32/mm/init.c | 3 +-
arch/ia64/mm/discontig.c | 19 +-
arch/m32r/mm/discontig.c | 3 +-
arch/m32r/mm/init.c | 4 +-
arch/mn10300/mm/init.c | 6 +-
arch/sh/mm/init.c | 2 +-
include/linux/bootmem.h | 82 ++--
mm/bootmem.c | 918 +++++++++++++++++++++++++---------------------
10 files changed, 552 insertions(+), 491 deletions(-)
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 01/14] bootmem: reorder code to match new bootmem structure
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 02/14] bootmem: clean up bootmem.c file header Johannes Weiner
` (12 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-reorder-code.patch --]
[-- Type: text/plain, Size: 17318 bytes --]
This only reorders functions so that further patches will be easier to
read. No code changed.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
---
include/linux/bootmem.h | 79 ++++-----
mm/bootmem.c | 413 +++++++++++++++++++++++-------------------------
2 files changed, 246 insertions(+), 246 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -38,6 +38,19 @@ unsigned long saved_max_pfn;
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
+/*
+ * Given an initialised bdata, it returns the size of the boot bitmap
+ */
+static unsigned long __init get_mapsize(bootmem_data_t *bdata)
+{
+ unsigned long mapsize;
+ unsigned long start = PFN_DOWN(bdata->node_boot_start);
+ unsigned long end = bdata->node_low_pfn;
+
+ mapsize = ((end - start) + 7) / 8;
+ return ALIGN(mapsize, sizeof(long));
+}
+
/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
@@ -72,19 +85,6 @@ static void __init link_bootmem(bootmem_
}
/*
- * Given an initialised bdata, it returns the size of the boot bitmap
- */
-static unsigned long __init get_mapsize(bootmem_data_t *bdata)
-{
- unsigned long mapsize;
- unsigned long start = PFN_DOWN(bdata->node_boot_start);
- unsigned long end = bdata->node_low_pfn;
-
- mapsize = ((end - start) + 7) / 8;
- return ALIGN(mapsize, sizeof(long));
-}
-
-/*
* Called once to set up the allocator itself.
*/
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
@@ -108,6 +108,146 @@ static unsigned long __init init_bootmem
return mapsize;
}
+unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
+ unsigned long startpfn, unsigned long endpfn)
+{
+ return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
+}
+
+unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
+{
+ max_low_pfn = pages;
+ min_low_pfn = start;
+ return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
+}
+
+static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
+{
+ struct page *page;
+ unsigned long pfn;
+ unsigned long i, count;
+ unsigned long idx;
+ unsigned long *map;
+ int gofast = 0;
+
+ BUG_ON(!bdata->node_bootmem_map);
+
+ count = 0;
+ /* first extant page of the node */
+ pfn = PFN_DOWN(bdata->node_boot_start);
+ idx = bdata->node_low_pfn - pfn;
+ map = bdata->node_bootmem_map;
+ /*
+ * Check if we are aligned to BITS_PER_LONG pages. If so, we might
+ * be able to free page orders of that size at once.
+ */
+ if (!(pfn & (BITS_PER_LONG-1)))
+ gofast = 1;
+
+ for (i = 0; i < idx; ) {
+ unsigned long v = ~map[i / BITS_PER_LONG];
+
+ if (gofast && v == ~0UL) {
+ int order;
+
+ page = pfn_to_page(pfn);
+ count += BITS_PER_LONG;
+ order = ffs(BITS_PER_LONG) - 1;
+ __free_pages_bootmem(page, order);
+ i += BITS_PER_LONG;
+ page += BITS_PER_LONG;
+ } else if (v) {
+ unsigned long m;
+
+ page = pfn_to_page(pfn);
+ for (m = 1; m && i < idx; m<<=1, page++, i++) {
+ if (v & m) {
+ count++;
+ __free_pages_bootmem(page, 0);
+ }
+ }
+ } else {
+ i += BITS_PER_LONG;
+ }
+ pfn += BITS_PER_LONG;
+ }
+
+ /*
+ * Now free the allocator bitmap itself, it's not
+ * needed anymore:
+ */
+ page = virt_to_page(bdata->node_bootmem_map);
+ idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
+ for (i = 0; i < idx; i++, page++)
+ __free_pages_bootmem(page, 0);
+ count += i;
+ bdata->node_bootmem_map = NULL;
+
+ return count;
+}
+
+unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
+{
+ register_page_bootmem_info_node(pgdat);
+ return free_all_bootmem_core(pgdat->bdata);
+}
+
+unsigned long __init free_all_bootmem(void)
+{
+ return free_all_bootmem_core(NODE_DATA(0)->bdata);
+}
+
+static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
+ unsigned long size)
+{
+ unsigned long sidx, eidx;
+ unsigned long i;
+
+ BUG_ON(!size);
+
+ /* out range */
+ if (addr + size < bdata->node_boot_start ||
+ PFN_DOWN(addr) > bdata->node_low_pfn)
+ return;
+ /*
+ * round down end of usable mem, partially free pages are
+ * considered reserved.
+ */
+
+ if (addr >= bdata->node_boot_start && addr < bdata->last_success)
+ bdata->last_success = addr;
+
+ /*
+ * Round up to index to the range.
+ */
+ if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
+ sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+ else
+ sidx = 0;
+
+ eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
+ if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+ eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+
+ for (i = sidx; i < eidx; i++) {
+ if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
+ BUG();
+ }
+}
+
+void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
+ unsigned long size)
+{
+ free_bootmem_core(pgdat->bdata, physaddr, size);
+}
+
+void __init free_bootmem(unsigned long addr, unsigned long size)
+{
+ bootmem_data_t *bdata;
+ list_for_each_entry(bdata, &bdata_list, list)
+ free_bootmem_core(bdata, addr, size);
+}
+
/*
* Marks a particular physical memory range as unallocatable. Usable RAM
* might be used for boot-time allocations - or it might get added
@@ -183,43 +323,35 @@ static void __init reserve_bootmem_core(
}
}
-static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
- unsigned long size)
+void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
+ unsigned long size, int flags)
{
- unsigned long sidx, eidx;
- unsigned long i;
-
- BUG_ON(!size);
+ int ret;
- /* out range */
- if (addr + size < bdata->node_boot_start ||
- PFN_DOWN(addr) > bdata->node_low_pfn)
+ ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
+ if (ret < 0)
return;
- /*
- * round down end of usable mem, partially free pages are
- * considered reserved.
- */
-
- if (addr >= bdata->node_boot_start && addr < bdata->last_success)
- bdata->last_success = addr;
-
- /*
- * Round up to index to the range.
- */
- if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
- sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
- else
- sidx = 0;
+ reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
+}
- eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
- if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
- eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+int __init reserve_bootmem(unsigned long addr, unsigned long size,
+ int flags)
+{
+ bootmem_data_t *bdata;
+ int ret;
- for (i = sidx; i < eidx; i++) {
- if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
- BUG();
+ list_for_each_entry(bdata, &bdata_list, list) {
+ ret = can_reserve_bootmem_core(bdata, addr, size, flags);
+ if (ret < 0)
+ return ret;
}
+ list_for_each_entry(bdata, &bdata_list, list)
+ reserve_bootmem_core(bdata, addr, size, flags);
+
+ return 0;
}
+#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
/*
* We 'merge' subsequent allocations to save space. We might 'lose'
@@ -371,138 +503,6 @@ found:
return ret;
}
-static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
-{
- struct page *page;
- unsigned long pfn;
- unsigned long i, count;
- unsigned long idx;
- unsigned long *map;
- int gofast = 0;
-
- BUG_ON(!bdata->node_bootmem_map);
-
- count = 0;
- /* first extant page of the node */
- pfn = PFN_DOWN(bdata->node_boot_start);
- idx = bdata->node_low_pfn - pfn;
- map = bdata->node_bootmem_map;
- /*
- * Check if we are aligned to BITS_PER_LONG pages. If so, we might
- * be able to free page orders of that size at once.
- */
- if (!(pfn & (BITS_PER_LONG-1)))
- gofast = 1;
-
- for (i = 0; i < idx; ) {
- unsigned long v = ~map[i / BITS_PER_LONG];
-
- if (gofast && v == ~0UL) {
- int order;
-
- page = pfn_to_page(pfn);
- count += BITS_PER_LONG;
- order = ffs(BITS_PER_LONG) - 1;
- __free_pages_bootmem(page, order);
- i += BITS_PER_LONG;
- page += BITS_PER_LONG;
- } else if (v) {
- unsigned long m;
-
- page = pfn_to_page(pfn);
- for (m = 1; m && i < idx; m<<=1, page++, i++) {
- if (v & m) {
- count++;
- __free_pages_bootmem(page, 0);
- }
- }
- } else {
- i += BITS_PER_LONG;
- }
- pfn += BITS_PER_LONG;
- }
-
- /*
- * Now free the allocator bitmap itself, it's not
- * needed anymore:
- */
- page = virt_to_page(bdata->node_bootmem_map);
- idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
- for (i = 0; i < idx; i++, page++)
- __free_pages_bootmem(page, 0);
- count += i;
- bdata->node_bootmem_map = NULL;
-
- return count;
-}
-
-unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
- unsigned long startpfn, unsigned long endpfn)
-{
- return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
-}
-
-void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size, int flags)
-{
- int ret;
-
- ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
- if (ret < 0)
- return;
- reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
-}
-
-void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size)
-{
- free_bootmem_core(pgdat->bdata, physaddr, size);
-}
-
-unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
-{
- register_page_bootmem_info_node(pgdat);
- return free_all_bootmem_core(pgdat->bdata);
-}
-
-unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
-{
- max_low_pfn = pages;
- min_low_pfn = start;
- return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
-}
-
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-int __init reserve_bootmem(unsigned long addr, unsigned long size,
- int flags)
-{
- bootmem_data_t *bdata;
- int ret;
-
- list_for_each_entry(bdata, &bdata_list, list) {
- ret = can_reserve_bootmem_core(bdata, addr, size, flags);
- if (ret < 0)
- return ret;
- }
- list_for_each_entry(bdata, &bdata_list, list)
- reserve_bootmem_core(bdata, addr, size, flags);
-
- return 0;
-}
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
-
-void __init free_bootmem(unsigned long addr, unsigned long size)
-{
- bootmem_data_t *bdata;
- list_for_each_entry(bdata, &bdata_list, list)
- free_bootmem_core(bdata, addr, size);
-}
-
-unsigned long __init free_all_bootmem(void)
-{
- return free_all_bootmem_core(NODE_DATA(0)->bdata);
-}
-
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
unsigned long goal)
{
@@ -532,6 +532,30 @@ void * __init __alloc_bootmem(unsigned l
return NULL;
}
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ bootmem_data_t *bdata;
+ void *ptr;
+
+ list_for_each_entry(bdata, &bdata_list, list) {
+ ptr = alloc_bootmem_core(bdata, size, align, goal,
+ ARCH_LOW_ADDRESS_LIMIT);
+ if (ptr)
+ return ptr;
+ }
+
+ /*
+ * Whoops, we cannot satisfy the allocation request.
+ */
+ printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of low memory");
+ return NULL;
+}
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
@@ -545,6 +569,13 @@ void * __init __alloc_bootmem_node(pg_da
return __alloc_bootmem(size, align, goal);
}
+void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ return alloc_bootmem_core(pgdat->bdata, size, align, goal,
+ ARCH_LOW_ADDRESS_LIMIT);
+}
+
#ifdef CONFIG_SPARSEMEM
void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
@@ -575,35 +606,3 @@ void * __init alloc_bootmem_section(unsi
return ptr;
}
#endif
-
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
-void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- bootmem_data_t *bdata;
- void *ptr;
-
- list_for_each_entry(bdata, &bdata_list, list) {
- ptr = alloc_bootmem_core(bdata, size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
- if (ptr)
- return ptr;
- }
-
- /*
- * Whoops, we cannot satisfy the allocation request.
- */
- printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of low memory");
- return NULL;
-}
-
-void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- return alloc_bootmem_core(pgdat->bdata, size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
-}
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -41,36 +41,58 @@ typedef struct bootmem_data {
extern bootmem_data_t bootmem_node_data[];
extern unsigned long bootmem_bootmap_pages(unsigned long);
+
+extern unsigned long init_bootmem_node(pg_data_t *pgdat,
+ unsigned long freepfn,
+ unsigned long startpfn,
+ unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
+
+extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
+extern unsigned long free_all_bootmem(void);
+
+extern void free_bootmem_node(pg_data_t *pgdat,
+ unsigned long addr,
+ unsigned long size);
extern void free_bootmem(unsigned long addr, unsigned long size);
-extern void *__alloc_bootmem(unsigned long size,
+
+/*
+ * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
+ * the architecture-specific code should honor this).
+ *
+ * If flags is 0, then the return value is always 0 (success). If
+ * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the
+ * memory already was reserved.
+ */
+#define BOOTMEM_DEFAULT 0
+#define BOOTMEM_EXCLUSIVE (1<<0)
+
+extern void reserve_bootmem_node(pg_data_t *pgdat,
+ unsigned long physaddr,
+ unsigned long size,
+ int flags);
+#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
+#endif
+
+extern void *__alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
unsigned long align,
unsigned long goal);
extern void *__alloc_bootmem_low(unsigned long size,
unsigned long align,
unsigned long goal);
+extern void *__alloc_bootmem_node(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal);
-
-/*
- * flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this)
- */
-#define BOOTMEM_DEFAULT 0
-#define BOOTMEM_EXCLUSIVE (1<<0)
-
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-/*
- * If flags is 0, then the return value is always 0 (success). If
- * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the
- * memory already was reserved.
- */
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
@@ -79,29 +101,6 @@ extern int reserve_bootmem(unsigned long
__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_low(x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
-
-extern unsigned long free_all_bootmem(void);
-extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
-extern void *__alloc_bootmem_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal);
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
- unsigned long freepfn,
- unsigned long startpfn,
- unsigned long endpfn);
-extern void reserve_bootmem_node(pg_data_t *pgdat,
- unsigned long physaddr,
- unsigned long size,
- int flags);
-extern void free_bootmem_node(pg_data_t *pgdat,
- unsigned long addr,
- unsigned long size);
-extern void *alloc_bootmem_section(unsigned long size,
- unsigned long section_nr);
-
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(pgdat, x) \
@@ -109,6 +108,8 @@ extern void *alloc_bootmem_section(unsig
#define alloc_bootmem_low_pages_node(pgdat, x) \
__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
+extern void *alloc_bootmem_section(unsigned long size,
+ unsigned long section_nr);
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
extern void *alloc_remap(int nid, unsigned long size);
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 02/14] bootmem: clean up bootmem.c file header
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 01/14] bootmem: reorder code to match new bootmem structure Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 03/14] bootmem: add documentation to API functions Johannes Weiner
` (11 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-adjust-file-header.patch --]
[-- Type: text/plain, Size: 1216 bytes --]
Change the description, move a misplaced comment about the allocator
itself and add me to the list of copyright holders.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
CC: Ingo Molnar <mingo@elte.hu>
---
mm/bootmem.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -1,12 +1,12 @@
/*
- * linux/mm/bootmem.c
+ * bootmem - A boot-time physical memory allocator and configurator
*
* Copyright (C) 1999 Ingo Molnar
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ * 1999 Kanoj Sarcar, SGI
+ * 2008 Johannes Weiner
*
- * simple boot-time physical memory area allocator and
- * free memory collector. It's used to deal with reserved
- * system memory and memory holes as well.
+ * Access to this subsystem has to be serialized externally (which is true
+ * for the boot process anyway).
*/
#include <linux/init.h>
#include <linux/pfn.h>
@@ -19,10 +19,6 @@
#include "internal.h"
-/*
- * Access to this subsystem has to be serialized externally. (this is
- * true for the boot process anyway)
- */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
--
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 03/14] bootmem: add documentation to API functions
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 01/14] bootmem: reorder code to match new bootmem structure Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 02/14] bootmem: clean up bootmem.c file header Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-06-02 12:18 ` Chris Malley
2008-05-30 19:42 ` [PATCH -mm 04/14] bootmem: add debugging framework Johannes Weiner
` (10 subsequent siblings)
13 siblings, 1 reply; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-document-api.patch --]
[-- Type: text/plain, Size: 8018 bytes --]
---
mm/bootmem.c | 147 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 146 insertions(+), 1 deletion(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -47,7 +47,10 @@ static unsigned long __init get_mapsize(
return ALIGN(mapsize, sizeof(long));
}
-/* return the number of _pages_ that will be allocated for the boot bitmap */
+/**
+ * bootmem_bootmap_pages - calculate bitmap size in pages
+ * @pages: number of pages the bitmap has to represent
+ */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
unsigned long mapsize;
@@ -104,12 +107,28 @@ static unsigned long __init init_bootmem
return mapsize;
}
+/**
+ * init_bootmem_node - register a node as boot memory
+ * @pgdat: node to register
+ * @freepfn: pfn where the bitmap for this node is to be placed
+ * @startpfn: first pfn on the node
+ * @endpfn: first pfn after the node
+ *
+ * Returns the number of bytes needed to hold the bitmap for this node.
+ */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
unsigned long startpfn, unsigned long endpfn)
{
return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
+/**
+ * init_bootmem - register boot memory
+ * @start: pfn where the bitmap is to be placed
+ * @pages: number of available physical pages
+ *
+ * Returns the number of bytes needed to hold the bitmap.
+ */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
max_low_pfn = pages;
@@ -182,12 +201,23 @@ static unsigned long __init free_all_boo
return count;
}
+/**
+ * free_all_bootmem_node - release a node's free pages to the buddy allocator
+ * @pgdat: node to be released
+ *
+ * Returns the number of pages actually released.
+ */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
return free_all_bootmem_core(pgdat->bdata);
}
+/**
+ * free_all_bootmem - release free pages to the buddy allocator
+ *
+ * Returns the number of pages actually released.
+ */
unsigned long __init free_all_bootmem(void)
{
return free_all_bootmem_core(NODE_DATA(0)->bdata);
@@ -231,12 +261,32 @@ static void __init free_bootmem_core(boo
}
}
+/**
+ * free_bootmem_node - mark a page range as usable
+ * @pgdat: node the range resides on
+ * @physaddr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * Partial pages will be considered reserved and left as they are.
+ *
+ * Only physical pages that actually reside on @pgdat are marked.
+ */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
free_bootmem_core(pgdat->bdata, physaddr, size);
}
+/**
+ * free_bootmem - mark a page range as usable
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * Partial pages will be considered reserved and left as they are.
+ *
+ * All physical pages within the range are marked, no matter what
+ * node they reside on.
+ */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
bootmem_data_t *bdata;
@@ -319,6 +369,15 @@ static void __init reserve_bootmem_core(
}
}
+/**
+ * reserve_bootmem_node - mark a page range as reserved
+ * @pgdat: node the range resides on
+ * @physaddr: starting address of the range
+ * @size: size of the range in bytes
+ * @flags: reservation flags (see BOOTMEM_DEFAULT and BOOTMEM_EXCLUSIVE)
+ *
+ * Partial pages will be reserved.
+ *
+ * Only physical pages that actually reside on @pgdat are marked.
+ */
void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
{
@@ -331,6 +390,16 @@ void __init reserve_bootmem_node(pg_data
}
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+/**
+ * reserve_bootmem - mark a page range as reserved
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ * @flags: reservation flags (see BOOTMEM_DEFAULT and BOOTMEM_EXCLUSIVE)
+ *
+ * Partial pages will be reserved.
+ *
+ * All physical pages within the range are marked, no matter what
+ * node they reside on.
+ */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
{
@@ -499,6 +568,19 @@ found:
return ret;
}
+/**
+ * __alloc_bootmem_nopanic - allocate boot memory without panicking
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * Returns NULL on failure.
+ */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
unsigned long goal)
{
@@ -513,6 +595,19 @@ void * __init __alloc_bootmem_nopanic(un
return NULL;
}
+/**
+ * __alloc_bootmem - allocate boot memory
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * The function panics if the request can not be satisfied.
+ */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal)
{
@@ -532,6 +627,19 @@ void * __init __alloc_bootmem(unsigned l
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
+/**
+ * __alloc_bootmem_low - allocate low boot memory
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * The function panics if the request can not be satisfied.
+ */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
unsigned long goal)
{
@@ -553,6 +661,21 @@ void * __init __alloc_bootmem_low(unsign
return NULL;
}
+/**
+ * __alloc_bootmem_node - allocate boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
@@ -565,6 +688,21 @@ void * __init __alloc_bootmem_node(pg_da
return __alloc_bootmem(size, align, goal);
}
+/**
+ * __alloc_bootmem_low_node - allocate low boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
@@ -573,6 +711,13 @@ void * __init __alloc_bootmem_low_node(p
}
#ifdef CONFIG_SPARSEMEM
+/**
+ * alloc_bootmem_section - allocate boot memory from a specific section
+ * @size: size of the request in bytes
+ * @section_nr: sparse map section to allocate from
+ *
+ * Returns NULL on failure.
+ */
void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 04/14] bootmem: add debugging framework
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (2 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 03/14] bootmem: add documentation to API functions Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 05/14] bootmem: revisit bitmap size calculations Johannes Weiner
` (9 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-debugging.patch --]
[-- Type: text/plain, Size: 3628 bytes --]
Introduce the bootmem_debug kernel parameter that enables very verbose
diagnostics regarding all range operations of bootmem as well as the
initialization and release of nodes.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
---
mm/bootmem.c | 51 ++++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 44 insertions(+), 7 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -34,6 +34,22 @@ unsigned long saved_max_pfn;
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
+static int bootmem_debug;
+
+static int __init bootmem_debug_setup(char *buf)
+{
+ bootmem_debug = 1;
+ return 0;
+}
+early_param("bootmem_debug", bootmem_debug_setup);
+
+#define bdebug(fmt, args...) ({ \
+ if (unlikely(bootmem_debug)) \
+ printk(KERN_INFO \
+ "bootmem::%s " fmt, \
+ __FUNCTION__, ## args); \
+})
+
/*
* Given an initialised bdata, it returns the size of the boot bitmap
*/
@@ -104,6 +120,9 @@ static unsigned long __init init_bootmem
mapsize = get_mapsize(bdata);
memset(bdata->node_bootmem_map, 0xff, mapsize);
+ bdebug("nid=%d start=%lx map=%lx end=%lx mapsize=%ld\n",
+ bdata - bootmem_node_data, start, mapstart, end, mapsize);
+
return mapsize;
}
@@ -198,6 +217,8 @@ static unsigned long __init free_all_boo
count += i;
bdata->node_bootmem_map = NULL;
+ bdebug("nid=%d released=%ld\n", bdata - bootmem_node_data, count);
+
return count;
}
@@ -255,6 +276,10 @@ static void __init free_bootmem_core(boo
if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+ bdebug("nid=%d start=%lx end=%lx\n", bdata - bootmem_node_data,
+ sidx + PFN_DOWN(bdata->node_boot_start),
+ eidx + PFN_DOWN(bdata->node_boot_start));
+
for (i = sidx; i < eidx; i++) {
if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
BUG();
@@ -360,13 +385,16 @@ static void __init reserve_bootmem_core(
if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
- for (i = sidx; i < eidx; i++) {
- if (test_and_set_bit(i, bdata->node_bootmem_map)) {
-#ifdef CONFIG_DEBUG_BOOTMEM
- printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
-#endif
- }
- }
+ bdebug("nid=%d start=%lx end=%lx flags=%x\n",
+ bdata - bootmem_node_data,
+ sidx + PFN_DOWN(bdata->node_boot_start),
+ eidx + PFN_DOWN(bdata->node_boot_start),
+ flags);
+
+ for (i = sidx; i < eidx; i++)
+ if (test_and_set_bit(i, bdata->node_bootmem_map))
+ bdebug("hm, page %lx reserved twice.\n",
+ PFN_DOWN(bdata->node_boot_start) + i);
}
/**
@@ -451,6 +479,10 @@ alloc_bootmem_core(struct bootmem_data *
if (!bdata->node_bootmem_map)
return NULL;
+ bdebug("nid=%d size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
+ bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
+ align, goal, limit);
+
/* bdata->node_boot_start is supposed to be (12+6)bits alignment on x86_64 ? */
node_boot_start = bdata->node_boot_start;
node_bootmem_map = bdata->node_bootmem_map;
@@ -558,6 +590,11 @@ found:
ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
}
+ bdebug("nid=%d start=%lx end=%lx\n",
+ bdata - bootmem_node_data,
+ start + PFN_DOWN(bdata->node_boot_start),
+ start + areasize + PFN_DOWN(bdata->node_boot_start));
+
/*
* Reserve the area now:
*/
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 05/14] bootmem: revisit bitmap size calculations
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (3 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 04/14] bootmem: add debugging framework Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 06/14] bootmem: revisit bootmem descriptor list handling Johannes Weiner
` (8 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-revisit-bitmap-size-calculation.patch --]
[-- Type: text/plain, Size: 2564 bytes --]
Reincarnate get_mapsize as bootmap_bytes and implement
bootmem_bootmap_pages on top of it.
Adjust users of these helpers and make free_all_bootmem_core use
bootmem_bootmap_pages instead of open-coding it.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
---
mm/bootmem.c | 27 +++++++++------------------
1 file changed, 9 insertions(+), 18 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -50,17 +50,11 @@ early_param("bootmem_debug", bootmem_deb
__FUNCTION__, ## args); \
})
-/*
- * Given an initialised bdata, it returns the size of the boot bitmap
- */
-static unsigned long __init get_mapsize(bootmem_data_t *bdata)
+static unsigned long __init bootmap_bytes(unsigned long pages)
{
- unsigned long mapsize;
- unsigned long start = PFN_DOWN(bdata->node_boot_start);
- unsigned long end = bdata->node_low_pfn;
+ unsigned long bytes = (pages + 7) / 8;
- mapsize = ((end - start) + 7) / 8;
- return ALIGN(mapsize, sizeof(long));
+ return ALIGN(bytes, sizeof(long));
}
/**
@@ -69,13 +63,9 @@ static unsigned long __init get_mapsize(
*/
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
- unsigned long mapsize;
-
- mapsize = (pages+7)/8;
- mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
- mapsize >>= PAGE_SHIFT;
+ unsigned long bytes = bootmap_bytes(pages);
- return mapsize;
+ return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
/*
@@ -117,7 +107,7 @@ static unsigned long __init init_bootmem
* Initially all pages are reserved - setup_arch() has to
* register free RAM areas explicitly.
*/
- mapsize = get_mapsize(bdata);
+ mapsize = bootmap_bytes(end - start);
memset(bdata->node_bootmem_map, 0xff, mapsize);
bdebug("nid=%d start=%lx map=%lx end=%lx mapsize=%ld\n",
@@ -160,7 +150,7 @@ static unsigned long __init free_all_boo
struct page *page;
unsigned long pfn;
unsigned long i, count;
- unsigned long idx;
+ unsigned long idx, pages;
unsigned long *map;
int gofast = 0;
@@ -211,7 +201,8 @@ static unsigned long __init free_all_boo
* needed anymore:
*/
page = virt_to_page(bdata->node_bootmem_map);
- idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
+ pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+ idx = bootmem_bootmap_pages(pages);
for (i = 0; i < idx; i++, page++)
__free_pages_bootmem(page, 0);
count += i;
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 06/14] bootmem: revisit bootmem descriptor list handling
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (4 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 05/14] bootmem: revisit bitmap size calculations Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 07/14] bootmem: clean up free_all_bootmem_core Johannes Weiner
` (7 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-revisit-bootmem-descriptor-list.patch --]
[-- Type: text/plain, Size: 1942 bytes --]
link_bootmem handles the insertion of a new descriptor into the sorted
list in more or less three explicit branches: empty list, insert in
between, and append. These cases can be expressed implicitly.
Also mark the sorted list as initdata as it can be thrown away after
boot as well.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
---
mm/bootmem.c | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -23,7 +23,6 @@ unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
-static LIST_HEAD(bdata_list);
#ifdef CONFIG_CRASH_DUMP
/*
* If we have booted due to a crash, max_pfn will be a very low value. We need
@@ -34,6 +33,8 @@ unsigned long saved_max_pfn;
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
+static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
+
static int bootmem_debug;
static int __init bootmem_debug_setup(char *buf)
@@ -73,20 +74,16 @@ unsigned long __init bootmem_bootmap_pag
*/
static void __init link_bootmem(bootmem_data_t *bdata)
{
- bootmem_data_t *ent;
+ struct list_head *iter;
- if (list_empty(&bdata_list)) {
- list_add(&bdata->list, &bdata_list);
- return;
- }
- /* insert in order */
- list_for_each_entry(ent, &bdata_list, list) {
- if (bdata->node_boot_start < ent->node_boot_start) {
- list_add_tail(&bdata->list, &ent->list);
- return;
- }
+ list_for_each(iter, &bdata_list) {
+ bootmem_data_t *ent;
+
+ ent = list_entry(iter, bootmem_data_t, list);
+ if (bdata->node_boot_start < ent->node_boot_start)
+ break;
}
- list_add_tail(&bdata->list, &bdata_list);
+ list_add_tail(&bdata->list, iter);
}
/*
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 07/14] bootmem: clean up free_all_bootmem_core
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (5 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 06/14] bootmem: revisit bootmem descriptor list handling Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core Johannes Weiner
` (6 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-cleanup-free_all_bootmem.patch --]
[-- Type: text/plain, Size: 3412 bytes --]
Rewrite the code in a more concise way using less variables.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
CC: Ingo Molnar <mingo@elte.hu>
CC: Yinghai Lu <yhlu.kernel@gmail.com>
CC: Andi Kleen <andi@firstfloor.org>
---
mm/bootmem.c | 83 +++++++++++++++++++++++++++--------------------------------
1 file changed, 38 insertions(+), 45 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -144,66 +144,59 @@ unsigned long __init init_bootmem(unsign
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
+ int aligned;
struct page *page;
- unsigned long pfn;
- unsigned long i, count;
- unsigned long idx, pages;
- unsigned long *map;
- int gofast = 0;
-
- BUG_ON(!bdata->node_bootmem_map);
-
- count = 0;
- /* first extant page of the node */
- pfn = PFN_DOWN(bdata->node_boot_start);
- idx = bdata->node_low_pfn - pfn;
- map = bdata->node_bootmem_map;
+ unsigned long start, end, pages, count = 0;
+
+ if (!bdata->node_bootmem_map)
+ return 0;
+
+ start = PFN_DOWN(bdata->node_boot_start);
+ end = bdata->node_low_pfn;
+
/*
- * Check if we are aligned to BITS_PER_LONG pages. If so, we might
- * be able to free page orders of that size at once.
+ * If the start is aligned to the machines wordsize, we might
+ * be able to free pages in bulks of that order.
*/
- if (!(pfn & (BITS_PER_LONG-1)))
- gofast = 1;
+ aligned = !(start & (BITS_PER_LONG - 1));
+
+ bdebug("nid=%d start=%lx end=%lx aligned=%d\n",
+ bdata - bootmem_node_data, start, end, aligned);
+
+ while (start < end) {
+ unsigned long *map, idx, vec;
- for (i = 0; i < idx; ) {
- unsigned long v = ~map[i / BITS_PER_LONG];
+ map = bdata->node_bootmem_map;
+ idx = start - PFN_DOWN(bdata->node_boot_start);
+ vec = ~map[idx / BITS_PER_LONG];
- if (gofast && v == ~0UL) {
- int order;
+ if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
+ int order = ilog2(BITS_PER_LONG);
- page = pfn_to_page(pfn);
+ __free_pages_bootmem(pfn_to_page(start), order);
count += BITS_PER_LONG;
- order = ffs(BITS_PER_LONG) - 1;
- __free_pages_bootmem(page, order);
- i += BITS_PER_LONG;
- page += BITS_PER_LONG;
- } else if (v) {
- unsigned long m;
-
- page = pfn_to_page(pfn);
- for (m = 1; m && i < idx; m<<=1, page++, i++) {
- if (v & m) {
- count++;
+ } else {
+ unsigned long off = 0;
+
+ while (vec && off < BITS_PER_LONG) {
+ if (vec & 1) {
+ page = pfn_to_page(start + off);
__free_pages_bootmem(page, 0);
+ count++;
}
+ vec >>= 1;
+ off++;
}
- } else {
- i += BITS_PER_LONG;
}
- pfn += BITS_PER_LONG;
+ start += BITS_PER_LONG;
}
- /*
- * Now free the allocator bitmap itself, it's not
- * needed anymore:
- */
page = virt_to_page(bdata->node_bootmem_map);
pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
- idx = bootmem_bootmap_pages(pages);
- for (i = 0; i < idx; i++, page++)
- __free_pages_bootmem(page, 0);
- count += i;
- bdata->node_bootmem_map = NULL;
+ pages = bootmem_bootmap_pages(pages);
+ count += pages;
+ while (pages--)
+ __free_pages_bootmem(page++, 0);
bdebug("nid=%d released=%ld\n", bdata - bootmem_node_data, count);
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (6 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 07/14] bootmem: clean up free_all_bootmem_core Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 22:11 ` Johannes Weiner
2008-06-02 12:34 ` Yasunori Goto
2008-05-30 19:42 ` [PATCH -mm 09/14] bootmem: free/reserve helpers Johannes Weiner
` (5 subsequent siblings)
13 siblings, 2 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-cleanup-alloc_bootmem_core.patch --]
[-- Type: text/plain, Size: 7626 bytes --]
alloc_bootmem_core has become quite nasty to read over time. This is
a clean rewrite that keeps the semantics.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
---
include/linux/bootmem.h | 1
mm/bootmem.c | 208 ++++++++++++++++--------------------------------
2 files changed, 72 insertions(+), 137 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -427,36 +427,16 @@ int __init reserve_bootmem(unsigned long
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
-/*
- * We 'merge' subsequent allocations to save space. We might 'lose'
- * some fraction of a page if allocations cannot be satisfied due to
- * size constraints on boxes where there is physical RAM space
- * fragmentation - in these cases (mostly large memory boxes) this
- * is not a problem.
- *
- * On low memory boxes we get it right in 100% of the cases.
- *
- * alignment has to be a power of 2 value.
- *
- * NOTE: This function is _not_ reentrant.
- */
-static void * __init
-alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
- unsigned long align, unsigned long goal, unsigned long limit)
+static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
+ unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
{
- unsigned long areasize, preferred;
- unsigned long i, start = 0, incr, eidx, end_pfn;
- void *ret;
- unsigned long node_boot_start;
- void *node_bootmem_map;
-
- if (!size) {
- printk("alloc_bootmem_core(): zero-sized request\n");
- BUG();
- }
- BUG_ON(align & (align-1));
+ unsigned long min, max, start, step;
+
+ BUG_ON(!size);
+ BUG_ON(align & (align - 1));
+ BUG_ON(limit && goal + size > limit);
- /* on nodes without memory - bootmem_map is NULL */
if (!bdata->node_bootmem_map)
return NULL;
@@ -464,126 +444,82 @@ alloc_bootmem_core(struct bootmem_data *
bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
align, goal, limit);
- /* bdata->node_boot_start is supposed to be (12+6)bits alignment on x86_64 ? */
- node_boot_start = bdata->node_boot_start;
- node_bootmem_map = bdata->node_bootmem_map;
- if (align) {
- node_boot_start = ALIGN(bdata->node_boot_start, align);
- if (node_boot_start > bdata->node_boot_start)
- node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
- PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
- }
+ min = PFN_DOWN(bdata->node_boot_start);
+ max = bdata->node_low_pfn;
- if (limit && node_boot_start >= limit)
- return NULL;
+ goal >>= PAGE_SHIFT;
+ limit >>= PAGE_SHIFT;
- end_pfn = bdata->node_low_pfn;
- limit = PFN_DOWN(limit);
- if (limit && end_pfn > limit)
- end_pfn = limit;
+ if (limit && max > limit)
+ max = limit;
+ if (max <= min)
+ return NULL;
- eidx = end_pfn - PFN_DOWN(node_boot_start);
+ step = max(align >> PAGE_SHIFT, 1UL);
- /*
- * We try to allocate bootmem pages above 'goal'
- * first, then we try to allocate lower pages.
- */
- preferred = 0;
- if (goal && PFN_DOWN(goal) < end_pfn) {
- if (goal > node_boot_start)
- preferred = goal - node_boot_start;
-
- if (bdata->last_success > node_boot_start &&
- bdata->last_success - node_boot_start >= preferred)
- if (!limit || (limit && limit > bdata->last_success))
- preferred = bdata->last_success - node_boot_start;
+ if (goal && goal < max)
+ start = ALIGN(goal, step);
+ else
+ start = ALIGN(min, step);
+
+ if (bdata->last_success > start) {
+ /* Set goal here to trigger a retry on failure */
+ start = goal = ALIGN(bdata->last_success, step);
}
- preferred = PFN_DOWN(ALIGN(preferred, align));
- areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
- incr = align >> PAGE_SHIFT ? : 1;
-
-restart_scan:
- for (i = preferred; i < eidx;) {
- unsigned long j;
-
- i = find_next_zero_bit(node_bootmem_map, eidx, i);
- i = ALIGN(i, incr);
- if (i >= eidx)
- break;
- if (test_bit(i, node_bootmem_map)) {
- i += incr;
- continue;
- }
- for (j = i + 1; j < i + areasize; ++j) {
- if (j >= eidx)
- goto fail_block;
- if (test_bit(j, node_bootmem_map))
- goto fail_block;
- }
- start = i;
- goto found;
- fail_block:
- i = ALIGN(j, incr);
- if (i == j)
- i += incr;
- }
+ max -= PFN_DOWN(bdata->node_boot_start);
+ start -= PFN_DOWN(bdata->node_boot_start);
- if (preferred > 0) {
- preferred = 0;
- goto restart_scan;
- }
- return NULL;
+ while (1) {
+ int merge;
+ void *region;
+ unsigned long end, i, new_start, new_end;
+find_block:
+ start = find_next_zero_bit(bdata->node_bootmem_map, max, start);
+ start = ALIGN(start, step);
+ end = start + PFN_UP(size);
-found:
- bdata->last_success = PFN_PHYS(start) + node_boot_start;
- BUG_ON(start >= eidx);
+ if (start >= max || end > max)
+ break;
- /*
- * Is the next page of the previous allocation-end the start
- * of this allocation's buffer? If yes then we can 'merge'
- * the previous partial page with this allocation.
- */
- if (align < PAGE_SIZE &&
- bdata->last_offset && bdata->last_pos+1 == start) {
- unsigned long offset, remaining_size;
- offset = ALIGN(bdata->last_offset, align);
- BUG_ON(offset > PAGE_SIZE);
- remaining_size = PAGE_SIZE - offset;
- if (size < remaining_size) {
- areasize = 0;
- /* last_pos unchanged */
- bdata->last_offset = offset + size;
- ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
- offset + node_boot_start);
- } else {
- remaining_size = size - remaining_size;
- areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
- ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
- offset + node_boot_start);
- bdata->last_pos = start + areasize - 1;
- bdata->last_offset = remaining_size;
- }
- bdata->last_offset &= ~PAGE_MASK;
- } else {
- bdata->last_pos = start + areasize - 1;
- bdata->last_offset = size & ~PAGE_MASK;
- ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
+ for (i = start; i < end; i++)
+ if (test_bit(i, bdata->node_bootmem_map)) {
+ start = ALIGN(i, step);
+ if (start == i)
+ start += step;
+ goto find_block;
+ }
+
+ if (bdata->last_offset &&
+ PFN_DOWN(bdata->last_offset) + 1 == start)
+ new_start = ALIGN(bdata->last_offset, align);
+ else
+ new_start = PFN_PHYS(start);
+
+ merge = PFN_DOWN(new_start) < start;
+ new_end = new_start + size;
+
+ bdata->last_offset = new_end;
+
+ /*
+ * Reserve the area now:
+ */
+ for (i = PFN_DOWN(new_start) + merge; i < PFN_UP(new_end); i++)
+ if (test_and_set_bit(i, bdata->node_bootmem_map))
+ BUG();
+
+ region = phys_to_virt(bdata->node_boot_start + new_start);
+ memset(region, 0, size);
+ return region;
}
- bdebug("nid=%d start=%lx end=%lx\n",
- bdata - bootmem_node_data,
- start + PFN_DOWN(bdata->node_boot_start),
- start + areasize + PFN_DOWN(bdata->node_boot_start));
+ if (goal) {
+ goal = 0;
+ start = 0;
+ goto find_block;
+ }
- /*
- * Reserve the area now:
- */
- for (i = start; i < start + areasize; i++)
- if (unlikely(test_and_set_bit(i, node_bootmem_map)))
- BUG();
- memset(ret, 0, size);
- return ret;
+ return NULL;
}
/**
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -32,7 +32,6 @@ typedef struct bootmem_data {
unsigned long node_low_pfn;
void *node_bootmem_map;
unsigned long last_offset;
- unsigned long last_pos;
unsigned long last_success; /* Previous allocation point. To speed
* up searching */
struct list_head list;
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 09/14] bootmem: free/reserve helpers
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (7 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 10/14] bootmem: factor out the marking of a PFN range Johannes Weiner
` (4 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-free-reserve-helpers.patch --]
[-- Type: text/plain, Size: 3427 bytes --]
Factor out the common operation of marking a range on the bitmap.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
CC: Ingo Molnar <mingo@elte.hu>
CC: Yinghai Lu <yhlu.kernel@gmail.com>
CC: Andi Kleen <andi@firstfloor.org>
---
mm/bootmem.c | 64 +++++++++++++++++++++++++++++++++++++++--------------------
1 file changed, 43 insertions(+), 21 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -225,6 +225,44 @@ unsigned long __init free_all_bootmem(vo
return free_all_bootmem_core(NODE_DATA(0)->bdata);
}
+static void __init __free(bootmem_data_t *bdata,
+ unsigned long sidx, unsigned long eidx)
+{
+ unsigned long idx;
+
+ bdebug("nid=%d start=%lx end=%lx\n", bdata - bootmem_node_data,
+ sidx + PFN_DOWN(bdata->node_boot_start),
+ eidx + PFN_DOWN(bdata->node_boot_start));
+
+ for (idx = sidx; idx < eidx; idx++)
+ if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
+ BUG();
+}
+
+static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
+ unsigned long eidx, int flags)
+{
+ unsigned long idx;
+ int exclusive = flags & BOOTMEM_EXCLUSIVE;
+
+ bdebug("nid=%d start=%lx end=%lx flags=%x\n",
+ bdata - bootmem_node_data,
+ sidx + PFN_DOWN(bdata->node_boot_start),
+ eidx + PFN_DOWN(bdata->node_boot_start),
+ flags);
+
+ for (idx = sidx; idx < eidx; idx++)
+ if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
+ if (exclusive) {
+ __free(bdata, sidx, idx);
+ return -EBUSY;
+ }
+ bdebug("silent double reserve of PFN %lx\n",
+ idx + PFN_DOWN(bdata->node_boot_start));
+ }
+ return 0;
+}
+
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
unsigned long size)
{
@@ -257,14 +295,7 @@ static void __init free_bootmem_core(boo
if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
- bdebug("nid=%d start=%lx end=%lx\n", bdata - bootmem_node_data,
- sidx + PFN_DOWN(bdata->node_boot_start),
- eidx + PFN_DOWN(bdata->node_boot_start));
-
- for (i = sidx; i < eidx; i++) {
- if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
- BUG();
- }
+ __free(bdata, sidx, eidx);
}
/**
@@ -366,16 +397,7 @@ static void __init reserve_bootmem_core(
if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
- bdebug("nid=%d start=%lx end=%lx flags=%x\n",
- bdata - bootmem_node_data,
- sidx + PFN_DOWN(bdata->node_boot_start),
- eidx + PFN_DOWN(bdata->node_boot_start),
- flags);
-
- for (i = sidx; i < eidx; i++)
- if (test_and_set_bit(i, bdata->node_bootmem_map))
- bdebug("hm, page %lx reserved twice.\n",
- PFN_DOWN(bdata->node_boot_start) + i);
+ return __reserve(bdata, sidx, eidx, flags);
}
/**
@@ -504,9 +526,9 @@ find_block:
/*
* Reserve the area now:
*/
- for (i = PFN_DOWN(new_start) + merge; i < PFN_UP(new_end); i++)
- if (test_and_set_bit(i, bdata->node_bootmem_map))
- BUG();
+ if (__reserve(bdata, PFN_DOWN(new_start) + merge,
+ PFN_UP(new_end), BOOTMEM_EXCLUSIVE))
+ BUG();
region = phys_to_virt(bdata->node_boot_start + new_start);
memset(region, 0, size);
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 10/14] bootmem: factor out the marking of a PFN range
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (8 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 09/14] bootmem: free/reserve helpers Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 11/14] bootmem: respect goal more likely Johannes Weiner
` (3 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-refactor-range-marking.patch --]
[-- Type: text/plain, Size: 9316 bytes --]
Introduce new helpers that mark a range that resides completely on a
node or node-agnostic ranges that might also span node boundaries.
The free/reserve API functions will then directly use these helpers.
Note that the free/reserve semantics become more strict: while the
prior code took basically arbitrary range arguments and marked the
PFNs that happen to fall into that range, the new code requires
node-specific ranges to be completely on the node. The node-agnostic
requests might span node boundaries as long as the nodes are
contiguous.
Passing ranges that do not satisfy these criteria is a bug.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
CC: Ingo Molnar <mingo@elte.hu>
CC: Yinghai Lu <yhlu.kernel@gmail.com>
CC: Andi Kleen <andi@firstfloor.org>
---
Sorry, this diff is still not really readable. Any ideas how to split
that stuff properly?
include/linux/bootmem.h | 2
mm/bootmem.c | 191 ++++++++++++++++++------------------------------
2 files changed, 73 insertions(+), 120 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -228,12 +228,16 @@ unsigned long __init free_all_bootmem(vo
static void __init __free(bootmem_data_t *bdata,
unsigned long sidx, unsigned long eidx)
{
- unsigned long idx;
+ unsigned long idx, start;
bdebug("nid=%d start=%lx end=%lx\n", bdata - bootmem_node_data,
sidx + PFN_DOWN(bdata->node_boot_start),
eidx + PFN_DOWN(bdata->node_boot_start));
+ start = bdata->node_boot_start + PFN_PHYS(sidx);
+ if (bdata->last_success > start)
+ bdata->last_success = start;
+
for (idx = sidx; idx < eidx; idx++)
if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
BUG();
@@ -263,39 +267,57 @@ static int __init __reserve(bootmem_data
return 0;
}
-static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
- unsigned long size)
+static int __init mark_bootmem_node(bootmem_data_t *bdata,
+ unsigned long start, unsigned long end,
+ int reserve, int flags)
{
unsigned long sidx, eidx;
- unsigned long i;
- BUG_ON(!size);
+ bdebug("nid=%d start=%lx end=%lx reserve=%d flags=%x\n",
+ bdata - bootmem_node_data, start, end, reserve, flags);
- /* out range */
- if (addr + size < bdata->node_boot_start ||
- PFN_DOWN(addr) > bdata->node_low_pfn)
- return;
- /*
- * round down end of usable mem, partially free pages are
- * considered reserved.
- */
+ BUG_ON(start < PFN_DOWN(bdata->node_boot_start));
+ BUG_ON(end > bdata->node_low_pfn);
- if (addr >= bdata->node_boot_start && addr < bdata->last_success)
- bdata->last_success = addr;
+ sidx = start - PFN_DOWN(bdata->node_boot_start);
+ eidx = end - PFN_DOWN(bdata->node_boot_start);
- /*
- * Round up to index to the range.
- */
- if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
- sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+ if (reserve)
+ return __reserve(bdata, sidx, eidx, flags);
else
- sidx = 0;
+ __free(bdata, sidx, eidx);
+ return 0;
+}
+
+static int __init mark_bootmem(unsigned long start, unsigned long end,
+ int reserve, int flags)
+{
+ unsigned long pos;
+ bootmem_data_t *bdata;
- eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
- if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
- eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+ pos = start;
+ list_for_each_entry(bdata, &bdata_list, list) {
+ int err;
+ unsigned long max;
+
+ if (pos < PFN_DOWN(bdata->node_boot_start)) {
+ BUG_ON(pos != start);
+ continue;
+ }
- __free(bdata, sidx, eidx);
+ max = min(bdata->node_low_pfn, end);
+
+ err = mark_bootmem_node(bdata, pos, max, reserve, flags);
+ if (reserve && err) {
+ mark_bootmem(start, pos, 0, 0);
+ return err;
+ }
+
+ if (max == end)
+ return 0;
+ pos = bdata->node_low_pfn;
+ }
+ BUG();
}
/**
@@ -306,12 +328,17 @@ static void __init free_bootmem_core(boo
*
* Partial pages will be considered reserved and left as they are.
*
- * Only physical pages that actually reside on @pgdat are marked.
+ * The range must reside completely on the specified node.
*/
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
- free_bootmem_core(pgdat->bdata, physaddr, size);
+ unsigned long start, end;
+
+ start = PFN_UP(physaddr);
+ end = PFN_DOWN(physaddr + size);
+
+ mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}
/**
@@ -321,83 +348,16 @@ void __init free_bootmem_node(pg_data_t
*
* Partial pages will be considered reserved and left as they are.
*
- * All physical pages within the range are marked, no matter what
- * node they reside on.
+ * The range must be contiguous but may span node boundaries.
*/
void __init free_bootmem(unsigned long addr, unsigned long size)
{
- bootmem_data_t *bdata;
- list_for_each_entry(bdata, &bdata_list, list)
- free_bootmem_core(bdata, addr, size);
-}
-
-/*
- * Marks a particular physical memory range as unallocatable. Usable RAM
- * might be used for boot-time allocations - or it might get added
- * to the free page pool later on.
- */
-static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
- unsigned long addr, unsigned long size, int flags)
-{
- unsigned long sidx, eidx;
- unsigned long i;
-
- BUG_ON(!size);
+ unsigned long start, end;
- /* out of range, don't hold other */
- if (addr + size < bdata->node_boot_start ||
- PFN_DOWN(addr) > bdata->node_low_pfn)
- return 0;
+ start = PFN_UP(addr);
+ end = PFN_DOWN(addr + size);
- /*
- * Round up to index to the range.
- */
- if (addr > bdata->node_boot_start)
- sidx= PFN_DOWN(addr - bdata->node_boot_start);
- else
- sidx = 0;
-
- eidx = PFN_UP(addr + size - bdata->node_boot_start);
- if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
- eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
-
- for (i = sidx; i < eidx; i++) {
- if (test_bit(i, bdata->node_bootmem_map)) {
- if (flags & BOOTMEM_EXCLUSIVE)
- return -EBUSY;
- }
- }
-
- return 0;
-
-}
-
-static void __init reserve_bootmem_core(bootmem_data_t *bdata,
- unsigned long addr, unsigned long size, int flags)
-{
- unsigned long sidx, eidx;
- unsigned long i;
-
- BUG_ON(!size);
-
- /* out of range */
- if (addr + size < bdata->node_boot_start ||
- PFN_DOWN(addr) > bdata->node_low_pfn)
- return;
-
- /*
- * Round up to index to the range.
- */
- if (addr > bdata->node_boot_start)
- sidx= PFN_DOWN(addr - bdata->node_boot_start);
- else
- sidx = 0;
-
- eidx = PFN_UP(addr + size - bdata->node_boot_start);
- if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
- eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
-
- return __reserve(bdata, sidx, eidx, flags);
+ mark_bootmem(start, end, 0, 0);
}
/**
@@ -407,17 +367,17 @@ static void __init reserve_bootmem_core(
*
* Partial pages will be reserved.
*
- * Only physical pages that actually reside on @pgdat are marked.
+ * The range must reside completely on the specified node.
*/
-void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
+int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
{
- int ret;
+ unsigned long start, end;
- ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
- if (ret < 0)
- return;
- reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
+ start = PFN_DOWN(physaddr);
+ end = PFN_UP(physaddr + size);
+
+ return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
@@ -428,24 +388,17 @@ void __init reserve_bootmem_node(pg_data
*
* Partial pages will be reserved.
*
- * All physical pages within the range are marked, no matter what
- * node they reside on.
+ * The range must be contiguous but may span node boundaries.
*/
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
{
- bootmem_data_t *bdata;
- int ret;
+ unsigned long start, end;
- list_for_each_entry(bdata, &bdata_list, list) {
- ret = can_reserve_bootmem_core(bdata, addr, size, flags);
- if (ret < 0)
- return ret;
- }
- list_for_each_entry(bdata, &bdata_list, list)
- reserve_bootmem_core(bdata, addr, size, flags);
+ start = PFN_DOWN(addr);
+ end = PFN_UP(addr + size);
- return 0;
+ return mark_bootmem(start, end, 1, flags);
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
@@ -716,7 +669,7 @@ void * __init alloc_bootmem_section(unsi
if (start_nr != section_nr || end_nr != section_nr) {
printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
section_nr);
- free_bootmem_core(pgdat->bdata, __pa(ptr), size);
+ free_bootmem_node(pgdat, __pa(ptr), size);
ptr = NULL;
}
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -66,7 +66,7 @@ extern void free_bootmem(unsigned long a
#define BOOTMEM_DEFAULT 0
#define BOOTMEM_EXCLUSIVE (1<<0)
-extern void reserve_bootmem_node(pg_data_t *pgdat,
+extern int reserve_bootmem_node(pg_data_t *pgdat,
unsigned long physaddr,
unsigned long size,
int flags);
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 11/14] bootmem: respect goal more likely
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (9 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 10/14] bootmem: factor out the marking of a PFN range Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 20:16 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 12/14] bootmem: Make __alloc_bootmem_low_node fall back to other nodes Johannes Weiner
` (2 subsequent siblings)
13 siblings, 1 reply; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-respect-goal-more-likely.patch --]
[-- Type: text/plain, Size: 3829 bytes --]
The old node-agnostic code tried allocating on all nodes starting from
the one with the lowest range. alloc_bootmem_core retried without the
goal if it could not satisfy it and so the goal was only respected at
all when it happened to be on the first (lowest page numbers) node (or
theoretically if allocations failed on all nodes before the one
holding the goal).
Introduce a non-panicking helper that starts allocating from the node
holding the goal and falls back only after all these tries failed.
Make all other allocation helpers benefit from this new helper.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
CC: Ingo Molnar <mingo@elte.hu>
CC: Yinghai Lu <yhlu.kernel@gmail.com>
CC: Andi Kleen <andi@firstfloor.org>
---
mm/bootmem.c | 77 +++++++++++++++++++++++++++++++----------------------------
1 file changed, 41 insertions(+), 36 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -487,11 +487,33 @@ find_block:
memset(region, 0, size);
return region;
}
+ return NULL;
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit)
+{
+ bootmem_data_t *bdata;
+
+restart:
+ list_for_each_entry(bdata, &bdata_list, list) {
+ void *region;
+
+ if (goal && goal < bdata->node_boot_start)
+ continue;
+ if (limit && limit < bdata->node_boot_start)
+ continue;
+
+ region = alloc_bootmem_core(bdata, size, align, goal, limit);
+ if (region)
+ return region;
+ }
if (goal) {
goal = 0;
- start = 0;
- goto find_block;
+ goto restart;
}
return NULL;
@@ -511,16 +533,23 @@ find_block:
* Returns NULL on failure.
*/
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
- unsigned long goal)
+ unsigned long goal)
{
- bootmem_data_t *bdata;
- void *ptr;
+ return ___alloc_bootmem_nopanic(size, align, goal, 0);
+}
- list_for_each_entry(bdata, &bdata_list, list) {
- ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
- if (ptr)
- return ptr;
- }
+static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
+{
+ void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
+
+ if (mem)
+ return mem;
+ /*
+ * Whoops, we cannot satisfy the allocation request.
+ */
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
return NULL;
}
@@ -540,16 +569,7 @@ void * __init __alloc_bootmem_nopanic(un
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal)
{
- void *mem = __alloc_bootmem_nopanic(size,align,goal);
-
- if (mem)
- return mem;
- /*
- * Whoops, we cannot satisfy the allocation request.
- */
- printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
+ return ___alloc_bootmem(size, align, goal, 0);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
@@ -572,22 +592,7 @@ void * __init __alloc_bootmem(unsigned l
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
unsigned long goal)
{
- bootmem_data_t *bdata;
- void *ptr;
-
- list_for_each_entry(bdata, &bdata_list, list) {
- ptr = alloc_bootmem_core(bdata, size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
- if (ptr)
- return ptr;
- }
-
- /*
- * Whoops, we cannot satisfy the allocation request.
- */
- printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of low memory");
- return NULL;
+ return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
/**
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 12/14] bootmem: Make __alloc_bootmem_low_node fall back to other nodes
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (10 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 11/14] bootmem: respect goal more likely Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 13/14] bootmem: revisit alloc_bootmem_section Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 14/14] bootmem: replace node_boot_start in struct bootmem_data Johannes Weiner
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
[-- Attachment #1: bootmem-make__alloc_bootmem_low_node-fall-back-to-other-nodes.patch --]
[-- Type: text/plain, Size: 2002 bytes --]
__alloc_bootmem_node already does this, make the interface consistent.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
CC: Ingo Molnar <mingo@elte.hu>
CC: Yinghai Lu <yhlu.kernel@gmail.com>
CC: Andi Kleen <andi@firstfloor.org>
---
mm/bootmem.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -595,6 +595,19 @@ void * __init __alloc_bootmem_low(unsign
return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
+static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
+ unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
+{
+ void *ptr;
+
+ ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
+ if (ptr)
+ return ptr;
+
+ return ___alloc_bootmem(size, align, goal, limit);
+}
+
/**
* __alloc_bootmem_node - allocate boot memory from a specific node
* @pgdat: node to allocate from
@@ -613,13 +626,7 @@ void * __init __alloc_bootmem_low(unsign
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- void *ptr;
-
- ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
- if (ptr)
- return ptr;
-
- return __alloc_bootmem(size, align, goal);
+ return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
}
/**
@@ -640,8 +647,8 @@ void * __init __alloc_bootmem_node(pg_da
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- return alloc_bootmem_core(pgdat->bdata, size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
+ return ___alloc_bootmem_node(pgdat->bdata, size, align,
+ goal, ARCH_LOW_ADDRESS_LIMIT);
}
#ifdef CONFIG_SPARSEMEM
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 13/14] bootmem: revisit alloc_bootmem_section
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (11 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 12/14] bootmem: Make __alloc_bootmem_low_node fall back to other nodes Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 14/14] bootmem: replace node_boot_start in struct bootmem_data Johannes Weiner
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton
Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm,
Yasunori Goto
[-- Attachment #1: bootmem-revisit-alloc_bootmem_section.patch --]
[-- Type: text/plain, Size: 1963 bytes --]
Since alloc_bootmem_core does no goal-fallback anymore and just
returns NULL if the allocation fails, we might now use it in
alloc_bootmem_section without all the fixup code for a misplaced
allocation.
Also, the limit can be the first PFN of the next section as the
semantics is that the limit is _above_ the allocated region, not
within.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
CC: Yasunori Goto <y-goto@jp.fujitsu.com>
---
mm/bootmem.c | 27 ++++++---------------------
1 file changed, 6 insertions(+), 21 deletions(-)
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -662,29 +662,14 @@ void * __init __alloc_bootmem_low_node(p
void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
- void *ptr;
- unsigned long limit, goal, start_nr, end_nr, pfn;
- struct pglist_data *pgdat;
+ bootmem_data_t *bdata;
+ unsigned long pfn, goal, limit;
pfn = section_nr_to_pfn(section_nr);
- goal = PFN_PHYS(pfn);
- limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
- pgdat = NODE_DATA(early_pfn_to_nid(pfn));
- ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
- limit);
+ goal = pfn << PAGE_SHIFT;
+ limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+ bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
- if (!ptr)
- return NULL;
-
- start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
- end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
- if (start_nr != section_nr || end_nr != section_nr) {
- printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
- section_nr);
- free_bootmem_node(pgdat, __pa(ptr), size);
- ptr = NULL;
- }
-
- return ptr;
+ return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
}
#endif
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH -mm 14/14] bootmem: replace node_boot_start in struct bootmem_data
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
` (12 preceding siblings ...)
2008-05-30 19:42 ` [PATCH -mm 13/14] bootmem: revisit alloc_bootmem_section Johannes Weiner
@ 2008-05-30 19:42 ` Johannes Weiner
13 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 19:42 UTC (permalink / raw)
To: Andrew Morton
Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm, linux-arch
[-- Attachment #1: bootmem-replace-node_boot_start.patch --]
[-- Type: text/plain, Size: 12106 bytes --]
Almost all users of this field need a PFN instead of a physical
address, so replace node_boot_start with node_min_pfn.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
CC: linux-arch@vger.kernel.org
---
arch/alpha/mm/numa.c | 2 +-
arch/arm/plat-omap/fb.c | 4 +---
arch/avr32/mm/init.c | 3 +--
arch/ia64/mm/discontig.c | 19 ++++++++++---------
arch/m32r/mm/discontig.c | 3 +--
arch/m32r/mm/init.c | 4 +---
arch/mn10300/mm/init.c | 6 +++---
arch/sh/mm/init.c | 2 +-
arch/sh64/mm/init.c | 3 +--
include/linux/bootmem.h | 2 +-
mm/bootmem.c | 42 +++++++++++++++++++++---------------------
11 files changed, 42 insertions(+), 48 deletions(-)
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -304,7 +304,7 @@ void __init paging_init(void)
for_each_online_node(nid) {
bootmem_data_t *bdata = &bootmem_node_data[nid];
- unsigned long start_pfn = bdata->node_boot_start >> PAGE_SHIFT;
+ unsigned long start_pfn = bdata->node_min_pfn;
unsigned long end_pfn = bdata->node_low_pfn;
if (dma_local_pfn >= end_pfn - start_pfn)
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -182,7 +182,7 @@ void __init omapfb_reserve_sdram(void)
return;
bdata = NODE_DATA(0)->bdata;
- sdram_start = bdata->node_boot_start;
+ sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
reserved = 0;
for (i = 0; ; i++) {
@@ -340,5 +340,3 @@ unsigned long omapfb_reserve_sram(unsign
#endif
-
-
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -125,8 +125,7 @@ void __init paging_init(void)
unsigned long zones_size[MAX_NR_ZONES];
unsigned long low, start_pfn;
- start_pfn = pgdat->bdata->node_boot_start;
- start_pfn >>= PAGE_SHIFT;
+ start_pfn = pgdat->bdata->node_min_pfn;
low = pgdat->bdata->node_low_pfn;
memset(zones_size, 0, sizeof(zones_size));
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -74,17 +74,17 @@ pg_data_t *pgdat_list[MAX_NUMNODES];
static int __init build_node_maps(unsigned long start, unsigned long len,
int node)
{
- unsigned long cstart, epfn, end = start + len;
+ unsigned long spfn, epfn, end = start + len;
struct bootmem_data *bdp = &bootmem_node_data[node];
epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
- cstart = GRANULEROUNDDOWN(start);
+ spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
if (!bdp->node_low_pfn) {
- bdp->node_boot_start = cstart;
+ bdp->node_min_pfn = spfn;
bdp->node_low_pfn = epfn;
} else {
- bdp->node_boot_start = min(cstart, bdp->node_boot_start);
+ bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
}
@@ -221,20 +221,21 @@ static void __init fill_pernode(int node
static int __init find_pernode_space(unsigned long start, unsigned long len,
int node)
{
- unsigned long epfn;
+ unsigned long spfn, epfn;
unsigned long pernodesize = 0, pernode, pages, mapsize;
struct bootmem_data *bdp = &bootmem_node_data[node];
+ spfn = start >> PAGE_SHIFT;
epfn = (start + len) >> PAGE_SHIFT;
- pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
+ pages = bdp->node_low_pfn - bdp->node_min_pfn;
mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
/*
* Make sure this memory falls within this node's usable memory
* since we may have thrown some away in build_maps().
*/
- if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
+ if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
return 0;
/* Don't setup this node's local space twice... */
@@ -296,7 +297,7 @@ static void __init reserve_pernode_space
bdp = pdp->bdata;
/* First the bootmem_map itself */
- pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
+ pages = bdp->node_low_pfn - bdp->node_min_pfn;
size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
base = __pa(bdp->node_bootmem_map);
reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
@@ -466,7 +467,7 @@ void __init find_memory(void)
init_bootmem_node(pgdat_list[node],
map>>PAGE_SHIFT,
- bdp->node_boot_start>>PAGE_SHIFT,
+ bdp->node_min_pfn,
bdp->node_low_pfn);
}
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -123,8 +123,7 @@ unsigned long __init setup_memory(void)
return max_low_pfn;
}
-#define START_PFN(nid) \
- (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
+#define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn)
#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
unsigned long __init zone_sizes_init(void)
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -93,8 +93,7 @@ void free_initrd_mem(unsigned long, unsi
#endif
/* It'd be good if these lines were in the standard header file. */
-#define START_PFN(nid) \
- (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
+#define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn)
#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
#ifndef CONFIG_DISCONTIGMEM
@@ -252,4 +251,3 @@ void free_initrd_mem(unsigned long start
printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
-
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -67,8 +67,8 @@ void __init paging_init(void)
/* declare the sizes of the RAM zones (only use the normal zone) */
zones_size[ZONE_NORMAL] =
- (contig_page_data.bdata->node_low_pfn) -
- (contig_page_data.bdata->node_boot_start >> PAGE_SHIFT);
+ contig_page_data.bdata->node_low_pfn -
+ contig_page_data.bdata->node_min_pfn;
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
@@ -87,7 +87,7 @@ void __init mem_init(void)
if (!mem_map)
BUG();
-#define START_PFN (contig_page_data.bdata->node_boot_start >> PAGE_SHIFT)
+#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -191,7 +191,7 @@ void __init paging_init(void)
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long low, start_pfn;
- start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
+ start_pfn = pgdat->bdata->node_min_pfn;
low = pgdat->bdata->node_low_pfn;
if (max_zone_pfns[ZONE_NORMAL] < low)
--- a/arch/sh64/mm/init.c
+++ b/arch/sh64/mm/init.c
@@ -58,7 +58,7 @@ extern char _text, _etext, _edata, __bss
extern char __init_begin, __init_end;
/* It'd be good if these lines were in the standard header file. */
-#define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
+#define START_PFN (NODE_DATA(0)->bdata->node_min_pfn)
#define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn)
@@ -190,4 +190,3 @@ void free_initrd_mem(unsigned long start
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
-
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -28,7 +28,7 @@ extern unsigned long saved_max_pfn;
* memory pages (including holes) on the node.
*/
typedef struct bootmem_data {
- unsigned long node_boot_start;
+ unsigned long node_min_pfn;
unsigned long node_low_pfn;
void *node_bootmem_map;
unsigned long last_offset;
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -80,7 +80,7 @@ static void __init link_bootmem(bootmem_
bootmem_data_t *ent;
ent = list_entry(iter, bootmem_data_t, list);
- if (bdata->node_boot_start < ent->node_boot_start)
+ if (bdata->node_min_pfn < ent->node_min_pfn)
break;
}
list_add_tail(&bdata->list, iter);
@@ -96,7 +96,7 @@ static unsigned long __init init_bootmem
mminit_validate_memmodel_limits(&start, &end);
bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
- bdata->node_boot_start = PFN_PHYS(start);
+ bdata->node_min_pfn = start;
bdata->node_low_pfn = end;
link_bootmem(bdata);
@@ -151,7 +151,7 @@ static unsigned long __init free_all_boo
if (!bdata->node_bootmem_map)
return 0;
- start = PFN_DOWN(bdata->node_boot_start);
+ start = bdata->node_min_pfn;
end = bdata->node_low_pfn;
/*
@@ -167,7 +167,7 @@ static unsigned long __init free_all_boo
unsigned long *map, idx, vec;
map = bdata->node_bootmem_map;
- idx = start - PFN_DOWN(bdata->node_boot_start);
+ idx = start - bdata->node_min_pfn;
vec = ~map[idx / BITS_PER_LONG];
if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
@@ -192,7 +192,7 @@ static unsigned long __init free_all_boo
}
page = virt_to_page(bdata->node_bootmem_map);
- pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+ pages = bdata->node_low_pfn - bdata->node_min_pfn;
pages = bootmem_bootmap_pages(pages);
count += pages;
while (pages--)
@@ -231,10 +231,10 @@ static void __init __free(bootmem_data_t
unsigned long idx, start;
bdebug("nid=%d start=%lx end=%lx\n", bdata - bootmem_node_data,
- sidx + PFN_DOWN(bdata->node_boot_start),
- eidx + PFN_DOWN(bdata->node_boot_start));
+ sidx + bdata->node_min_pfn,
+ eidx + bdata->node_min_pfn);
- start = bdata->node_boot_start + PFN_PHYS(sidx);
+ start = PFN_PHYS(bdata->node_min_pfn + sidx);
if (bdata->last_success > start)
bdata->last_success = start;
@@ -251,8 +251,8 @@ static int __init __reserve(bootmem_data
bdebug("nid=%d start=%lx end=%lx flags=%x\n",
bdata - bootmem_node_data,
- sidx + PFN_DOWN(bdata->node_boot_start),
- eidx + PFN_DOWN(bdata->node_boot_start),
+ sidx + bdata->node_min_pfn,
+ eidx + bdata->node_min_pfn,
flags);
for (idx = sidx; idx < eidx; idx++)
@@ -262,7 +262,7 @@ static int __init __reserve(bootmem_data
return -EBUSY;
}
bdebug("silent double reserve of PFN %lx\n",
- idx + PFN_DOWN(bdata->node_boot_start));
+ idx + bdata->node_min_pfn);
}
return 0;
}
@@ -276,11 +276,11 @@ static int __init mark_bootmem_node(boot
bdebug("nid=%d start=%lx end=%lx reserve=%d flags=%x\n",
bdata - bootmem_node_data, start, end, reserve, flags);
- BUG_ON(start < PFN_DOWN(bdata->node_boot_start));
+ BUG_ON(start < bdata->node_min_pfn);
BUG_ON(end > bdata->node_low_pfn);
- sidx = start - PFN_DOWN(bdata->node_boot_start);
- eidx = end - PFN_DOWN(bdata->node_boot_start);
+ sidx = start - bdata->node_min_pfn;
+ eidx = end - bdata->node_min_pfn;
if (reserve)
return __reserve(bdata, sidx, eidx, flags);
@@ -300,7 +300,7 @@ static int __init mark_bootmem(unsigned
int err;
unsigned long max;
- if (pos < PFN_DOWN(bdata->node_boot_start)) {
+ if (pos < bdata->node_min_pfn) {
BUG_ON(pos != start);
continue;
}
@@ -419,7 +419,7 @@ static void * __init alloc_bootmem_core(
bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
align, goal, limit);
- min = PFN_DOWN(bdata->node_boot_start);
+ min = bdata->node_min_pfn;
max = bdata->node_low_pfn;
goal >>= PAGE_SHIFT;
@@ -442,8 +442,8 @@ static void * __init alloc_bootmem_core(
start = goal = ALIGN(bdata->last_success, step);
}
- max -= PFN_DOWN(bdata->node_boot_start);
- start -= PFN_DOWN(bdata->node_boot_start);
+ max -= bdata->node_min_pfn;
+ start -= bdata->node_min_pfn;
while (1) {
int merge;
@@ -483,7 +483,7 @@ find_block:
PFN_UP(new_end), BOOTMEM_EXCLUSIVE))
BUG();
- region = phys_to_virt(bdata->node_boot_start + new_start);
+ region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) + new_start);
memset(region, 0, size);
return region;
}
@@ -501,9 +501,9 @@ restart:
list_for_each_entry(bdata, &bdata_list, list) {
void *region;
- if (goal && goal < bdata->node_boot_start)
+ if (goal && goal < PFN_PHYS(bdata->node_min_pfn))
continue;
- if (limit && limit < bdata->node_boot_start)
+ if (limit && limit < PFN_PHYS(bdata->node_min_pfn))
continue;
region = alloc_bootmem_core(bdata, size, align, goal, limit);
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH -mm 11/14] bootmem: respect goal more likely
2008-05-30 19:42 ` [PATCH -mm 11/14] bootmem: respect goal more likely Johannes Weiner
@ 2008-05-30 20:16 ` Johannes Weiner
0 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 20:16 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
Hi,
Johannes Weiner <hannes@saeurebad.de> writes:
> The old node-agnostic code tried allocating on all nodes starting from
> the one with the lowest range. alloc_bootmem_core retried without the
> goal if it could not satisfy it and so the goal was only respected at
> all when it happened to be on the first (lowest page numbers) node (or
> theoretically if allocations failed on all nodes before to the one
> holding the goal).
>
> Introduce a non-panicking helper that starts allocating from the node
> holding the goal and falls back only after all thes tries failed.
>
> Make all other allocation helpers benefit from this new helper.
>
> Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
> CC: Ingo Molnar <mingo@elte.hu>
> CC: Yinghai Lu <yhlu.kernel@gmail.com>
> CC: Andi Kleen <andi@firstfloor.org>
> ---
>
> mm/bootmem.c | 77 +++++++++++++++++++++++++++++++----------------------------
> 1 file changed, 41 insertions(+), 36 deletions(-)
>
> --- a/mm/bootmem.c
> +++ b/mm/bootmem.c
> @@ -487,11 +487,33 @@ find_block:
> memset(region, 0, size);
> return region;
> }
> + return NULL;
> +}
Sorry, forgot to update ->last_success handling here. Update coming soon.
Hannes
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core
2008-05-30 19:42 ` [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core Johannes Weiner
@ 2008-05-30 22:11 ` Johannes Weiner
2008-06-02 12:34 ` Yasunori Goto
1 sibling, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-05-30 22:11 UTC (permalink / raw)
To: Andrew Morton; +Cc: Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel, linux-mm
Hi,
Johannes Weiner <hannes@saeurebad.de> writes:
> alloc_bootmem_core has become quite nasty to read over time. This is
> a clean rewrite that keeps the semantics.
Another ->last_success error (missed updating it).
I already have a fixed up series here, will wait a bit for sending it
out to incorporate feedback as well.
Hannes
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH -mm 03/14] bootmem: add documentation to API functions
2008-05-30 19:42 ` [PATCH -mm 03/14] bootmem: add documentation to API functions Johannes Weiner
@ 2008-06-02 12:18 ` Chris Malley
2008-06-02 13:58 ` Johannes Weiner
0 siblings, 1 reply; 22+ messages in thread
From: Chris Malley @ 2008-06-02 12:18 UTC (permalink / raw)
To: Johannes Weiner
Cc: Andrew Morton, Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel,
linux-mm
On Fri, 2008-05-30 at 21:42 +0200, Johannes Weiner wrote:
> Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
> ---
>
> mm/bootmem.c | 147 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 146 insertions(+), 1 deletion(-)
>
> --- a/mm/bootmem.c
> +++ b/mm/bootmem.c
[snip]
>
> +/**
> + * reserve_bootmem_node - mark a page range as reserved
> + * @addr: starting address of the range
> + * @size: size of the range in bytes
kerneldoc arguments don't match the actual function definition.
> + *
> + * Partial pages will be reserved.
> + *
> + * Only physical pages that actually reside on @pgdat are marked.
> + */
> void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
> unsigned long size, int flags)
> {
> @@ -331,6 +390,16 @@ void __init reserve_bootmem_node(pg_data
> }
>
> #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
> +/**
> + * reserve_bootmem - mark a page range as usable
> + * @addr: starting address of the range
> + * @size: size of the range in bytes
and here (missing @flags)
> + *
> + * Partial pages will be reserved.
> + *
> + * All physical pages within the range are marked, no matter what
> + * node they reside on.
> + */
> int __init reserve_bootmem(unsigned long addr, unsigned long size,
> int flags)
> {
> @@ -499,6 +568,19 @@ found:
> return ret;
> }
>
cheers
Chris
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core
2008-05-30 19:42 ` [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core Johannes Weiner
2008-05-30 22:11 ` Johannes Weiner
@ 2008-06-02 12:34 ` Yasunori Goto
2008-06-02 13:57 ` Johannes Weiner
1 sibling, 1 reply; 22+ messages in thread
From: Yasunori Goto @ 2008-06-02 12:34 UTC (permalink / raw)
To: Johannes Weiner
Cc: Andrew Morton, Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel,
linux-mm
Hello.
> + /*
> + * Reserve the area now:
> + */
> + for (i = PFN_DOWN(new_start) + merge; i < PFN_UP(new_end); i++)
> + if (test_and_set_bit(i, bdata->node_bootmem_map))
> + BUG();
> +
> + region = phys_to_virt(bdata->node_boot_start + new_start);
> + memset(region, 0, size);
> + return region;
bdata->last_success doesn't seem to be updated in alloc_bootmem_core(),
it is updated in only __free().
Is it intended? If not, it should be updated, I suppose....
Bye.
--
Yasunori Goto
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core
2008-06-02 12:34 ` Yasunori Goto
@ 2008-06-02 13:57 ` Johannes Weiner
2008-06-02 14:48 ` Yasunori Goto
0 siblings, 1 reply; 22+ messages in thread
From: Johannes Weiner @ 2008-06-02 13:57 UTC (permalink / raw)
To: Yasunori Goto
Cc: Andrew Morton, Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel,
linux-mm
Hi,
Yasunori Goto <y-goto@jp.fujitsu.com> writes:
> Hello.
>
>> + /*
>> + * Reserve the area now:
>> + */
>> + for (i = PFN_DOWN(new_start) + merge; i < PFN_UP(new_end); i++)
>> + if (test_and_set_bit(i, bdata->node_bootmem_map))
>> + BUG();
>> +
>> + region = phys_to_virt(bdata->node_boot_start + new_start);
>> + memset(region, 0, size);
>> + return region;
>
> bdata->last_success doesn't seem to be updated in alloc_bootmem_core(),
> it is updated in only __free().
> Is it intended? If not, it should be updated, I suppose....
Yeah, I forgot that. See my reply to `bootmem: respect goal more
likely'.
Thanks for reviewing!
Hannes
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH -mm 03/14] bootmem: add documentation to API functions
2008-06-02 12:18 ` Chris Malley
@ 2008-06-02 13:58 ` Johannes Weiner
0 siblings, 0 replies; 22+ messages in thread
From: Johannes Weiner @ 2008-06-02 13:58 UTC (permalink / raw)
To: Chris Malley
Cc: Andrew Morton, Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel,
linux-mm
Hi,
Chris Malley <mail@chrismalley.co.uk> writes:
> On Fri, 2008-05-30 at 21:42 +0200, Johannes Weiner wrote:
>
>> Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
>> ---
>>
>> mm/bootmem.c | 147 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
>> 1 file changed, 146 insertions(+), 1 deletion(-)
>>
>> --- a/mm/bootmem.c
>> +++ b/mm/bootmem.c
>
> [snip]
>
>>
>> +/**
>> + * reserve_bootmem_node - mark a page range as reserved
>> + * @addr: starting address of the range
>> + * @size: size of the range in bytes
>
> kerneldoc arguments don't match the actual function definition.
>
>> + *
>> + * Partial pages will be reserved.
>> + *
>> + * Only physical pages that actually reside on @pgdat are marked.
>> + */
>> void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
>> unsigned long size, int flags)
>> {
>> @@ -331,6 +390,16 @@ void __init reserve_bootmem_node(pg_data
>> }
>>
>> #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
>> +/**
>> + * reserve_bootmem - mark a page range as usable
>> + * @addr: starting address of the range
>> + * @size: size of the range in bytes
>
>
> and here (missing @flags)
>
>> + *
>> + * Partial pages will be reserved.
>> + *
>> + * All physical pages within the range are marked, no matter what
>> + * node they reside on.
>> + */
>> int __init reserve_bootmem(unsigned long addr, unsigned long size,
>> int flags)
>> {
>> @@ -499,6 +568,19 @@ found:
>> return ret;
>> }
>>
Will be updated in the next send-out. Thanks for your time.
Hannes
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core
2008-06-02 13:57 ` Johannes Weiner
@ 2008-06-02 14:48 ` Yasunori Goto
0 siblings, 0 replies; 22+ messages in thread
From: Yasunori Goto @ 2008-06-02 14:48 UTC (permalink / raw)
To: Johannes Weiner
Cc: Andrew Morton, Ingo Molnar, Yinghai Lu, Andi Kleen, linux-kernel,
linux-mm
> Hi,
>
> Yasunori Goto <y-goto@jp.fujitsu.com> writes:
>
> > Hello.
> >
> >> + /*
> >> + * Reserve the area now:
> >> + */
> >> + for (i = PFN_DOWN(new_start) + merge; i < PFN_UP(new_end); i++)
> >> + if (test_and_set_bit(i, bdata->node_bootmem_map))
> >> + BUG();
> >> +
> >> + region = phys_to_virt(bdata->node_boot_start + new_start);
> >> + memset(region, 0, size);
> >> + return region;
> >
> > bdata->last_success doesn't seem to be updated in alloc_bootmem_core(),
> > it is updated in only __free().
> > Is it intended? If not, it should be updated, I suppose....
>
> Yeah, I forgot that. See my reply to `bootmem: respect goal more
> likely'.
>
> Thanks for reviewing!
>
> Hannes
Oh, I didn't notice it.
Ok. I'll test newer version on my ia64 box.
Bye.
--
Yasunori Goto
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply [flat|nested] 22+ messages in thread
end of thread, other threads:[~2008-06-02 14:48 UTC | newest]
Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-05-30 19:42 [PATCH -mm 00/14] bootmem rewrite v2 Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 01/14] bootmem: reorder code to match new bootmem structure Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 02/14] bootmem: clean up bootmem.c file header Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 03/14] bootmem: add documentation to API functions Johannes Weiner
2008-06-02 12:18 ` Chris Malley
2008-06-02 13:58 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 04/14] bootmem: add debugging framework Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 05/14] bootmem: revisit bitmap size calculations Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 06/14] bootmem: revisit bootmem descriptor list handling Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 07/14] bootmem: clean up free_all_bootmem_core Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 08/14] bootmem: clean up alloc_bootmem_core Johannes Weiner
2008-05-30 22:11 ` Johannes Weiner
2008-06-02 12:34 ` Yasunori Goto
2008-06-02 13:57 ` Johannes Weiner
2008-06-02 14:48 ` Yasunori Goto
2008-05-30 19:42 ` [PATCH -mm 09/14] bootmem: free/reserve helpers Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 10/14] bootmem: factor out the marking of a PFN range Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 11/14] bootmem: respect goal more likely Johannes Weiner
2008-05-30 20:16 ` Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 12/14] bootmem: Make __alloc_bootmem_low_node fall back to other nodes Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 13/14] bootmem: revisit alloc_bootmem_section Johannes Weiner
2008-05-30 19:42 ` [PATCH -mm 14/14] bootmem: replace node_boot_start in struct bootmem_data Johannes Weiner
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox