* [Patch/RFC]Removing zone and node ID from page->flags[1/3]
2004-09-23 22:55 [Patch/RFC]Removing zone and node ID from page->flags[0/3] Yasunori Goto
@ 2004-09-23 23:00 ` Yasunori Goto
2004-09-23 23:02 ` [Patch/RFC]Make second level zone_table[2/3] Yasunori Goto
` (2 subsequent siblings)
3 siblings, 0 replies; 7+ messages in thread
From: Yasunori Goto @ 2004-09-23 23:00 UTC (permalink / raw)
To: linux-mm, Linux Kernel ML; +Cc: Linux Hotplug Memory Support
This patch removes the zone and node ID from page->flags.
Note:
In this patch, zone is obtained from zone_table array whose
index is offset of page struct's virtual address from PAGE_OFFSET.
An entry must not include 2 zones by its alignment.
To avoid it, one entry of array cannot treat larger area than
1 << (MAX_ORDER - 1). So, array size must be large.
(If the alignment of a zone is smaller than 1 << (MAX_ORDER - 1),
kernel will warn at free_area_init_core().)
Next two patches are to reduce size of it.
--
Yasunori Goto <ygoto at us.fujitsu.com>
---
erase_zoneid-goto/include/asm-ia64/pgtable.h | 3 ++
erase_zoneid-goto/include/linux/mm.h | 32 +++++++++++----------------
erase_zoneid-goto/include/linux/mmzone.h | 11 ---------
erase_zoneid-goto/mm/page_alloc.c | 19 +++++++++++++---
4 files changed, 33 insertions(+), 32 deletions(-)
diff -puN include/asm-ia64/pgtable.h~erase_zoneid include/asm-ia64/pgtable.h
--- erase_zoneid/include/asm-ia64/pgtable.h~erase_zoneid Thu Sep 23 11:20:08 2004
+++ erase_zoneid-goto/include/asm-ia64/pgtable.h Thu Sep 23 11:20:08 2004
@@ -219,6 +219,9 @@ ia64_phys_addr_valid (unsigned long addr
#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000)
#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000)
+#define PAGE_INDEX_OFFSET 0xa000000000000000 /* IA64's mem_maps are mapped
+ in region 5 */
+
/*
* Conversion functions: convert page frame number (pfn) and a protection value to a page
* table entry (pte).
diff -puN include/linux/mm.h~erase_zoneid include/linux/mm.h
--- erase_zoneid/include/linux/mm.h~erase_zoneid Thu Sep 23 11:20:08 2004
+++ erase_zoneid-goto/include/linux/mm.h Thu Sep 23 11:20:08 2004
@@ -374,22 +374,18 @@ static inline void put_page(struct page
* to swap space and (later) to be read back into memory.
*/
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
- * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
- */
-#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
-#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)
+#define PAGEZONE_SHIFT (MAX_ORDER - 1) /* XXX */
+#define PAGEZONE_SIZE (1 << PAGEZONE_SHIFT)
+#define PAGEZONE_MASK (PAGEZONE_SIZE - 1)
-static inline unsigned long page_zonenum(struct page *page)
-{
- return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
-}
-static inline unsigned long page_to_nid(struct page *page)
+#ifndef PAGE_INDEX_OFFSET
+#define PAGE_INDEX_OFFSET PAGE_OFFSET
+#endif
+
+static inline unsigned long page_to_index(struct page *page)
{
- return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
+ unsigned long out = (unsigned long)(page - (struct page *)PAGE_INDEX_OFFSET);
+ return out >> PAGEZONE_SHIFT;
}
struct zone;
@@ -397,14 +393,14 @@ extern struct zone *zone_table[];
static inline struct zone *page_zone(struct page *page)
{
- return zone_table[page->flags >> NODEZONE_SHIFT];
+ return zone_table[ page_to_index(page)];
}
-static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
+static inline unsigned long page_to_nid(struct page *page)
{
- page->flags &= ~(~0UL << NODEZONE_SHIFT);
- page->flags |= nodezone_num << NODEZONE_SHIFT;
+ return page_zone(page)->zone_pgdat->node_id;
}
+
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
diff -puN include/linux/mmzone.h~erase_zoneid include/linux/mmzone.h
--- erase_zoneid/include/linux/mmzone.h~erase_zoneid Thu Sep 23 11:20:08 2004
+++ erase_zoneid-goto/include/linux/mmzone.h Thu Sep 23 11:20:08 2004
@@ -399,17 +399,6 @@ extern struct pglist_data contig_page_da
#endif /* !CONFIG_DISCONTIGMEM */
-#if NODES_SHIFT > MAX_NODES_SHIFT
-#error NODES_SHIFT > MAX_NODES_SHIFT
-#endif
-
-/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */
-#define MAX_ZONES_SHIFT 2
-
-#if ZONES_SHIFT > MAX_ZONES_SHIFT
-#error ZONES_SHIFT > MAX_ZONES_SHIFT
-#endif
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */
diff -puN mm/page_alloc.c~erase_zoneid mm/page_alloc.c
--- erase_zoneid/mm/page_alloc.c~erase_zoneid Thu Sep 23 11:20:08 2004
+++ erase_zoneid-goto/mm/page_alloc.c Thu Sep 23 11:20:08 2004
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(nr_swap_pages);
* Used by page_zone() to look up the address of the struct zone whose
* id is encoded in the upper bits of page->flags
*/
-struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
+struct zone *zone_table[ (~PAGE_OFFSET + 1) >> (PAGEZONE_SHIFT + PAGE_SHIFT) ];
EXPORT_SYMBOL(zone_table);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
@@ -1512,7 +1512,6 @@ void __init memmap_init_zone(unsigned lo
struct page *page;
for (page = start; page < (start + size); page++) {
- set_page_zone(page, NODEZONE(nid, zone));
set_page_count(page, 0);
reset_page_mapcount(page);
SetPageReserved(page);
@@ -1577,6 +1576,19 @@ void zone_init_free_lists(struct pglist_
memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif
+void set_page_zone(struct page *lmem_map, unsigned int size, struct zone *zone)
+{
+ struct zone **entry;
+ entry = &zone_table[page_to_index(lmem_map)];
+
+ size = size + PAGEZONE_MASK; /* round up */
+ size >>= PAGEZONE_SHIFT;
+
+ for ( ; size > 0; entry++, size--)
+ *entry = zone;
+
+}
+
/*
* Set up the zone data structures:
* - mark all pages reserved
@@ -1599,7 +1611,6 @@ static void __init free_area_init_core(s
unsigned long size, realsize;
unsigned long batch;
- zone_table[NODEZONE(nid, j)] = zone;
realsize = size = zones_size[j];
if (zholes_size)
realsize -= zholes_size[j];
@@ -1681,6 +1692,8 @@ static void __init free_area_init_core(s
if ((zone_start_pfn) & (zone_required_alignment-1))
printk("BUG: wrong zone alignment, it will crash\n");
+
+ set_page_zone(zone->zone_mem_map, size, zone);
memmap_init(size, nid, j, zone_start_pfn);
_
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"aart@kvack.org"> aart@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread* [Patch/RFC]Make second level zone_table[2/3]
2004-09-23 22:55 [Patch/RFC]Removing zone and node ID from page->flags[0/3] Yasunori Goto
2004-09-23 23:00 ` [Patch/RFC]Removing zone and node ID from page->flags[1/3] Yasunori Goto
@ 2004-09-23 23:02 ` Yasunori Goto
2004-09-23 23:04 ` [Patch/RFC]Reduce second level zone_table[3/3] Yasunori Goto
2004-09-23 23:27 ` [Patch/RFC]Removing zone and node ID from page->flags[0/3] William Lee Irwin III
3 siblings, 0 replies; 7+ messages in thread
From: Yasunori Goto @ 2004-09-23 23:02 UTC (permalink / raw)
To: linux-mm, Linux Kernel ML; +Cc: Linux Hotplug Memory Support
This patch makes a second level of zone_table to reduce the size of
the first level zone_table, as shown below.
zone_table_directory.
+------------+ zone_table
| |------------>+-----------+
|------------| | |-> zone
| | |-----------|
|------------| | |-> zone
| | +-----------+
+------------+
Yasunori Goto <ygoto at us.fujitsu.com>
---
erase_zoneid-goto/include/linux/mm.h | 37 +++++++++++++++++++++++++++++++---
erase_zoneid-goto/mm/page_alloc.c | 38 +++++++++++++++++++++++++++++------
2 files changed, 66 insertions(+), 9 deletions(-)
diff -puN include/linux/mm.h~double_zone_table include/linux/mm.h
--- erase_zoneid/include/linux/mm.h~double_zone_table Thu Sep 23 11:20:12 2004
+++ erase_zoneid-goto/include/linux/mm.h Thu Sep 23 11:20:12 2004
@@ -378,22 +378,53 @@ static inline void put_page(struct page
#define PAGEZONE_SIZE (1 << PAGEZONE_SHIFT)
#define PAGEZONE_MASK (PAGEZONE_SIZE - 1)
+#define PAGEZONE_DIR_SHIFT 8 /* XXX */
+#define PAGEZONE_DIR_SIZE (1 << PAGEZONE_DIR_SHIFT)
+#define PAGEZONE_DIR_MASK (PAGEZONE_DIR_SIZE - 1)
+
+#define PZDIR_SHIFT (PAGEZONE_SHIFT + PAGEZONE_DIR_SHIFT)
+#define PZDIR_SIZE (1 << PZDIR_SHIFT)
+#define PZDIR_MASK (PZDIR_SIZE - 1)
+
#ifndef PAGE_INDEX_OFFSET
#define PAGE_INDEX_OFFSET PAGE_OFFSET
#endif
static inline unsigned long page_to_index(struct page *page)
{
- unsigned long out = (unsigned long)(page - (struct page *)PAGE_INDEX_OFFSET);
+ return (unsigned long)(page - (struct page *)PAGE_INDEX_OFFSET);
+}
+
+static inline unsigned long page_to_primary_index(struct page *page)
+{
+ return page_to_index(page) >> PZDIR_SHIFT;
+}
+
+static inline unsigned long page_to_secondary_index(struct page *page)
+{
+ unsigned long out = page_to_index(page);
+ out &= PZDIR_MASK;
return out >> PAGEZONE_SHIFT;
}
struct zone;
-extern struct zone *zone_table[];
+struct zone_tbl{
+ union {
+ struct zone *zone;
+ struct zone_tbl *sec_zone_table;
+ };
+};
+
+extern struct zone_tbl pri_zone_table[];
static inline struct zone *page_zone(struct page *page)
{
- return zone_table[ page_to_index(page)];
+ struct zone_tbl *entry;
+
+ entry = pri_zone_table + page_to_primary_index(page);
+ entry = entry->sec_zone_table;
+ entry += page_to_secondary_index(page);
+ return entry->zone;
}
static inline unsigned long page_to_nid(struct page *page)
diff -puN mm/page_alloc.c~double_zone_table mm/page_alloc.c
--- erase_zoneid/mm/page_alloc.c~double_zone_table Thu Sep 23 11:20:12 2004
+++ erase_zoneid-goto/mm/page_alloc.c Thu Sep 23 11:20:12 2004
@@ -52,8 +52,8 @@ EXPORT_SYMBOL(nr_swap_pages);
* Used by page_zone() to look up the address of the struct zone whose
* id is encoded in the upper bits of page->flags
*/
-struct zone *zone_table[ (~PAGE_OFFSET + 1) >> (PAGEZONE_SHIFT + PAGE_SHIFT) ];
-EXPORT_SYMBOL(zone_table);
+struct zone_tbl pri_zone_table[ (~PAGE_OFFSET + 1) >> (PZDIR_SHIFT + PAGE_SHIFT) ];
+EXPORT_SYMBOL(pri_zone_table);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
int min_free_kbytes = 1024;
@@ -1578,15 +1578,41 @@ void zone_init_free_lists(struct pglist_
void set_page_zone(struct page *lmem_map, unsigned int size, struct zone *zone)
{
- struct zone **entry;
- entry = &zone_table[page_to_index(lmem_map)];
+ struct zone_tbl *pri_entry;
+ struct page *page = lmem_map;
+
+ pri_entry = &pri_zone_table[page_to_primary_index(page)];
size = size + PAGEZONE_MASK; /* round up */
size >>= PAGEZONE_SHIFT;
- for ( ; size > 0; entry++, size--)
- *entry = zone;
+ for ( ; size > 0; pri_entry++){
+ struct zone_tbl *sec_entry, *sec_start_entry;
+ unsigned int sec_index, sec_count;
+
+ sec_start_entry = pri_entry->sec_zone_table;
+ if (!sec_start_entry){
+ unsigned int entry_size;
+ entry_size = sizeof(struct zone_tbl) << PAGEZONE_DIR_SHIFT;
+
+ sec_start_entry = alloc_bootmem_node(NODE_DATA(nid), entry_size);
+ memset(sec_start_entry, 0, entry_size);
+ }
+
+ sec_index = page_to_secondary_index(page);
+ sec_entry = sec_start_entry + sec_index;
+
+ for (sec_count = sec_index; sec_count < PAGEZONE_DIR_SIZE;
+ sec_count++, sec_entry++){
+ sec_entry->zone = zone;
+ page += PAGEZONE_SIZE;
+ size--;
+ if (size == 0)
+ break;
+ }
+ pri_entry->sec_zone_table = sec_start_entry;
+ }
}
/*
_
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"aart@kvack.org"> aart@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread* [Patch/RFC]Reduce second level zone_table[3/3]
2004-09-23 22:55 [Patch/RFC]Removing zone and node ID from page->flags[0/3] Yasunori Goto
2004-09-23 23:00 ` [Patch/RFC]Removing zone and node ID from page->flags[1/3] Yasunori Goto
2004-09-23 23:02 ` [Patch/RFC]Make second level zone_table[2/3] Yasunori Goto
@ 2004-09-23 23:04 ` Yasunori Goto
2004-09-23 23:27 ` [Patch/RFC]Removing zone and node ID from page->flags[0/3] William Lee Irwin III
3 siblings, 0 replies; 7+ messages in thread
From: Yasunori Goto @ 2004-09-23 23:04 UTC (permalink / raw)
To: linux-mm, Linux Kernel ML; +Cc: Linux Hotplug Memory Support
This patch reduces the array size of the second level zone_table.
If all entries of a second level zone_table point to the same zone,
that table is not necessary.
zone_table_directory.
+------------+ zone_table
| |------------>+-----------+
|------------| | |-> zone DMA
| | |-----------|
|------------| | |-> zone Normal
| | +-----------+
|------------|
| |------------>+-----------+
+------------+ | |-> zone Normal
|-----------|
| |-> zone Normal
+-----------+
So, in this case, first level zone_table points the zone
directly.
zone_table_directory.
+------------+ zone_table
| Bit on|------------>+-----------+
|------------| | |-> zone DMA
| | |-----------|
|------------| | |-> zone Normal
| | +-----------+
|------------|
| Bit off|-> zone Normal
+------------+
--
Yasunori Goto <ygoto at us.fujitsu.com>
---
erase_zoneid-goto/include/linux/mm.h | 26 ++++++++++++++++++++++++--
erase_zoneid-goto/mm/page_alloc.c | 14 +++++++++++---
2 files changed, 35 insertions(+), 5 deletions(-)
diff -puN include/linux/mm.h~reduce_zone_table include/linux/mm.h
--- erase_zoneid/include/linux/mm.h~reduce_zone_table Thu Sep 23 11:20:15 2004
+++ erase_zoneid-goto/include/linux/mm.h Thu Sep 23 11:20:15 2004
@@ -415,15 +415,37 @@ struct zone_tbl{
};
};
+#define ZONE_TABLE_BIT 0x1
+#define ZONE_TABLE_BITMASK ~(ZONE_TABLE_BIT)
+
extern struct zone_tbl pri_zone_table[];
+static inline struct zone_tbl *get_zone_table(struct zone_tbl *entry)
+{
+ return (struct zone_tbl *)((unsigned long)entry->sec_zone_table
+ & ZONE_TABLE_BITMASK);
+}
+
+static inline unsigned long is_second_zone_table(struct zone_tbl *entry)
+{
+ return (unsigned long)entry->sec_zone_table & ZONE_TABLE_BIT;
+}
+
+static inline void set_zone_table(struct zone_tbl *entry, struct zone_tbl *val)
+{
+ entry->sec_zone_table =
+ (struct zone_tbl *)((unsigned long)val | ZONE_TABLE_BIT);
+}
+
static inline struct zone *page_zone(struct page *page)
{
struct zone_tbl *entry;
entry = pri_zone_table + page_to_primary_index(page);
- entry = entry->sec_zone_table;
- entry += page_to_secondary_index(page);
+ if (is_second_zone_table(entry)){
+ entry = get_zone_table(entry);
+ entry += page_to_secondary_index(page);
+ }
return entry->zone;
}
diff -puN mm/page_alloc.c~reduce_zone_table mm/page_alloc.c
--- erase_zoneid/mm/page_alloc.c~reduce_zone_table Thu Sep 23 11:20:15 2004
+++ erase_zoneid-goto/mm/page_alloc.c Thu Sep 23 11:20:15 2004
@@ -1499,7 +1499,6 @@ static void __init calculate_zone_totalp
printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
-
/*
* Initially all pages are reserved - free ones are freed
* up by free_all_bootmem() once the early boot process is
@@ -1590,7 +1589,16 @@ void set_page_zone(struct page *lmem_map
struct zone_tbl *sec_entry, *sec_start_entry;
unsigned int sec_index, sec_count;
- sec_start_entry = pri_entry->sec_zone_table;
+ if (size / PAGEZONE_DIR_SIZE > 0 &&
+ (PAGEZONE_DIR_MASK & size) == 0){ /* All of second level entry will be same zone.
+ So, Second level isn't necessary. */
+ pri_entry->zone = zone;
+ size -= PAGEZONE_DIR_SIZE;
+ page += PZDIR_SIZE;
+ continue;
+ }
+
+ sec_start_entry = get_zone_table(pri_entry);
if (!sec_start_entry){
unsigned int entry_size;
entry_size = sizeof(struct zone_tbl) << PAGEZONE_DIR_SHIFT;
@@ -1611,7 +1619,7 @@ void set_page_zone(struct page *lmem_map
break;
}
- pri_entry->sec_zone_table = sec_start_entry;
+ set_zone_table(pri_entry, sec_start_entry);
}
}
_
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"aart@kvack.org"> aart@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread* Re: [Patch/RFC]Removing zone and node ID from page->flags[0/3]
2004-09-23 22:55 [Patch/RFC]Removing zone and node ID from page->flags[0/3] Yasunori Goto
` (2 preceding siblings ...)
2004-09-23 23:04 ` [Patch/RFC]Reduce second level zone_table[3/3] Yasunori Goto
@ 2004-09-23 23:27 ` William Lee Irwin III
2004-09-24 3:51 ` Yasunori Goto
3 siblings, 1 reply; 7+ messages in thread
From: William Lee Irwin III @ 2004-09-23 23:27 UTC (permalink / raw)
To: Yasunori Goto; +Cc: linux-mm, Linux Kernel ML, Linux Hotplug Memory Support
On Thu, Sep 23, 2004 at 03:55:16PM -0700, Yasunori Goto wrote:
> I updated my patches which remove zone and node ID from page->flags.
> Page->flags is a 32-bit space and 19 bits of it have already been used on
> the 2.6.9-rc2-mm2 kernel, and the zone and node ID use 8 bits on 32-bit architectures.
> So, only 5 bits remain. In addition, only 3 bits remain
> on the 2.6.8.1 stock kernel.
> But my patches free up 8 more bits of space in page->flags.
> And the kernel can use a larger number of nodes and types of zones.
> These patches are for 2.6.9-rc2-mm2.
Looks relatively innocuous. I wonder if cosmetically we may want
s/struct zone_tbl/struct zone_table/
I like the path compression in the 2-level radix tree.
Thanks.
-- wli
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"aart@kvack.org"> aart@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread* Re: [Patch/RFC]Removing zone and node ID from page->flags[0/3]
2004-09-23 23:27 ` [Patch/RFC]Removing zone and node ID from page->flags[0/3] William Lee Irwin III
@ 2004-09-24 3:51 ` Yasunori Goto
2004-09-24 4:01 ` William Lee Irwin III
0 siblings, 1 reply; 7+ messages in thread
From: Yasunori Goto @ 2004-09-24 3:51 UTC (permalink / raw)
To: William Lee Irwin III
Cc: linux-mm, Linux Kernel ML, Linux Hotplug Memory Support
Thank you for comment.
> Looks relatively innocuous. I wonder if cosmetically we may want
> s/struct zone_tbl/struct zone_table/
Do you mean "struct zone_table" is better as its name?
If so, I'll change it.
> I like the path compression in the 2-level radix tree.
Hmmmm.....
Current radix tree code uses slab allocator.
But, zone_table must be initialized before free_all_bootmem()
and kmem_cache_alloc().
So, if I use it for zone_table, I think I have to change radix tree
code to use bootmem or have to write other original code.
I'm not sure it is better way....
Bye.
--
Yasunori Goto <ygoto at us.fujitsu.com>
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"aart@kvack.org"> aart@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [Patch/RFC]Removing zone and node ID from page->flags[0/3]
2004-09-24 3:51 ` Yasunori Goto
@ 2004-09-24 4:01 ` William Lee Irwin III
0 siblings, 0 replies; 7+ messages in thread
From: William Lee Irwin III @ 2004-09-24 4:01 UTC (permalink / raw)
To: Yasunori Goto; +Cc: linux-mm, Linux Kernel ML, Linux Hotplug Memory Support
On Thu, Sep 23, 2004 at 08:51:58PM -0700, Yasunori Goto wrote:
> Thank you for comment.
At some point in the past, I wrote:
>> Looks relatively innocuous. I wonder if cosmetically we may want
>> s/struct zone_tbl/struct zone_table/
On Thu, Sep 23, 2004 at 08:51:58PM -0700, Yasunori Goto wrote:
> Do you mean "struct zone_table" is better as its name?
> If so, I'll change it.
I'm not extremely picky about naming conventions, and the abbreviation
isn't bad or anything. If there's someone else who also likes it better,
or if you yourself do, I'd change it then.
At some point in the past, I wrote:
>> I like the path compression in the 2-level radix tree.
On Thu, Sep 23, 2004 at 08:51:58PM -0700, Yasunori Goto wrote:
> Hmmmm.....
> Current radix tree code uses slab allocator.
> But, zone_table must be initialized before free_all_bootmem()
> and kmem_cache_alloc().
> So, if I use it for zone_table, I think I have to change radix tree
> code to use bootmem or have to write other original code.
> I'm not sure it is better way....
I meant it as an instance of a radix tree data structure, not to e.g.
be consolidated with the kernel's radix tree library functions (which
have the bootstrap ordering issues you describe preventing their use
for this kind of purpose). The generic software pagetables are also
radix trees, but similarly have constraints (e.g. use on machines with
hardware-interpreted pagetables) preventing consolidation with the
radix tree library code.
-- wli
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"aart@kvack.org"> aart@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread