Johannes Weiner wrote:
> On Thu, Jun 18, 2009 at 01:24:10PM -0700, Andrew Morton wrote:
>> On Thu, 18 Jun 2009 13:07:13 -0700
>> Joe Perches wrote:
>>
>>> Signed-off-by: Joe Perches
>>>
>>> diff --git a/mm/bootmem.c b/mm/bootmem.c
>>> index 282df0a..09d9c98 100644
>>> --- a/mm/bootmem.c
>>> +++ b/mm/bootmem.c
>>> @@ -536,11 +536,13 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
>>>  		return kzalloc(size, GFP_NOWAIT);
>>>
>>>  #ifdef CONFIG_HAVE_ARCH_BOOTMEM
>>> +	{
>>>  	bootmem_data_t *p_bdata;
>>>
>>>  	p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
>>>  	if (p_bdata)
>>>  		return alloc_bootmem_core(p_bdata, size, align, goal, limit);
>>> +	}
>>>  #endif
>>>  	return NULL;
>>>  }
>>
>> Well yes.
>>
>> We'll be needing some tabs there.
>>
>> Unrelatedly, I'm struggling a bit with bootmem_arch_preferred_node().
>> It's only defined if CONFIG_X86_32=y && CONFIG_NEED_MULTIPLE_NODES=y,
>> but it gets called if CONFIG_HAVE_ARCH_BOOTMEM=y.
>>
>> Is this correct, logical and as simple as we can make it??
>
> x86_32 numa is the only setter of HAVE_ARCH_BOOTMEM.  I don't know why
> this arch has a strict preference/requirement(?) for bootmem on node
> 0.
>
> I found this mail from Yinghai
>
>	http://marc.info/?l=linux-kernel&m=123614990906256&w=2
>
> where he says that it expects all bootmem on node zero but with the
> current code and alloc_arch_preferred_bootmem() failing, we could fall
> back to another node.  Won't this break?  Yinghai?

Not sure it is the same problem.  The fix is already in mainline.

>
> Otherwise, could we perhaps use something as simple as this?
>
> diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
> index ede6998..b68a672 100644
> --- a/arch/x86/include/asm/mmzone_32.h
> +++ b/arch/x86/include/asm/mmzone_32.h
> @@ -92,8 +92,7 @@ static inline int pfn_valid(int pfn)
>
>  #ifdef CONFIG_NEED_MULTIPLE_NODES
>  /* always use node 0 for bootmem on this numa platform */
> -#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit)	\
> -	(NODE_DATA(0)->bdata)
> +#define bootmem_arch_preferred_node	(NODE_DATA(0)->bdata)
>  #endif /* CONFIG_NEED_MULTIPLE_NODES */
>
>  #endif /* _ASM_X86_MMZONE_32_H */
> diff --git a/mm/bootmem.c b/mm/bootmem.c
> index 282df0a..0097fa2 100644
> --- a/mm/bootmem.c
> +++ b/mm/bootmem.c
> @@ -528,23 +528,6 @@ find_block:
>  	return NULL;
>  }
>
> -static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
> -					unsigned long size, unsigned long align,
> -					unsigned long goal, unsigned long limit)
> -{
> -	if (WARN_ON_ONCE(slab_is_available()))
> -		return kzalloc(size, GFP_NOWAIT);
> -
> -#ifdef CONFIG_HAVE_ARCH_BOOTMEM
> -	bootmem_data_t *p_bdata;
> -
> -	p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
> -	if (p_bdata)
> -		return alloc_bootmem_core(p_bdata, size, align, goal, limit);
> -#endif
> -	return NULL;
> -}
> -
>  static void * __init ___alloc_bootmem_nopanic(unsigned long size,
>  					unsigned long align,
>  					unsigned long goal,
> @@ -553,11 +536,15 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size,
>  	bootmem_data_t *bdata;
>  	void *region;
>
> +	if (WARN_ON_ONCE(slab_is_available()))
> +		return kzalloc(size, GFP_NOWAIT);
>  restart:
> -	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
> +#ifdef bootmem_arch_preferred_node
> +	region = alloc_bootmem_core(bootmem_arch_preferred_node,
> +				    size, align, goal, limit);
>  	if (region)
>  		return region;
> -
> +#endif
>  	list_for_each_entry(bdata, &bdata_list, list) {
>  		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
>  			continue;
> @@ -636,13 +623,11 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
>  {
>  	void *ptr;
>
> -	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
> -	if (ptr)
> -		return ptr;
> -
> +#ifndef bootmem_arch_preferred_node
>  	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
>  	if (ptr)
>  		return ptr;
> +#endif
>
>  	return ___alloc_bootmem(size, align, goal, limit);
>  }

Any reason to kill alloc_arch_preferred_bootmem?

YH
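For reference, below is a minimal, self-contained sketch (userspace C, not kernel code) of the #ifdef-on-a-macro pattern Johannes proposes: the arch header either defines a preferred-node macro or it doesn't, and the generic allocator tests for it with #ifdef instead of always calling a wrapper that is empty on most configurations. All names here (alloc_core, alloc_sketch, node0_bdata, node1_bdata) are made up for illustration; only the macro test mirrors the proposed kernel change.

#include <stdio.h>

struct bootmem_data { int node; };

static struct bootmem_data node0_bdata = { .node = 0 };
static struct bootmem_data node1_bdata = { .node = 1 };

/*
 * A hypothetical arch that wants all early allocations on node 0
 * defines this macro in its header; other arches simply leave it out.
 */
#define bootmem_arch_preferred_node	(&node0_bdata)

/* Hypothetical stand-in for alloc_bootmem_core(). */
static void *alloc_core(struct bootmem_data *bdata, unsigned long size)
{
	printf("allocating %lu bytes from node %d\n", size, bdata->node);
	return bdata;
}

static void *alloc_sketch(struct bootmem_data *bdata, unsigned long size)
{
#ifdef bootmem_arch_preferred_node
	/* The arch-preference path is compiled in only when the macro exists. */
	(void)bdata;
	return alloc_core(bootmem_arch_preferred_node, size);
#else
	return alloc_core(bdata, size);
#endif
}

int main(void)
{
	alloc_sketch(&node1_bdata, 64);	/* ends up on node 0 in this sketch */
	return 0;
}

The trade-off Yinghai's question points at is arguably that the #ifdef then lives in the generic callers, whereas the alloc_arch_preferred_bootmem() helper kept the arch hook in a single place.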