This very boring patch breaks out struct page into its own header file. This should allow you to do struct page arithmetic in other header files using static inlines instead of horribly complex macros ... by just including <linux/struct_page.h>, which avoids dependency problems. (inlined to read, attached for lower probability of mangling) Martin. diff -purN -X /home/mbligh/.diff.exclude virgin/include/linux/mm.h struct_page/include/linux/mm.h --- virgin/include/linux/mm.h Fri Oct 4 12:15:24 2002 +++ struct_page/include/linux/mm.h Fri Oct 4 23:10:08 2002 @@ -132,55 +132,7 @@ struct vm_operations_struct { struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused); }; -/* forward declaration; pte_chain is meant to be internal to rmap.c */ -struct pte_chain; - -/* - * Each physical page in the system has a struct page associated with - * it to keep track of whatever it is we are using the page for at the - * moment. Note that we have no way to track which tasks are using - * a page. - * - * Try to keep the most commonly accessed fields in single cache lines - * here (16 bytes or greater). This ordering should be particularly - * beneficial on 32-bit processors. - * - * The first line is data used in page cache lookup, the second line - * is used for linear searches (eg. clock algorithm scans). - * - * TODO: make this structure smaller, it could be as small as 32 bytes. - */ -struct page { - unsigned long flags; /* atomic flags, some possibly - updated asynchronously */ - atomic_t count; /* Usage count, see below. */ - struct list_head list; /* ->mapping has some page lists. */ - struct address_space *mapping; /* The inode (or ...) we belong to. */ - unsigned long index; /* Our offset within mapping. */ - struct list_head lru; /* Pageout list, eg. active_list; - protected by zone->lru_lock !! */ - union { - struct pte_chain *chain;/* Reverse pte mapping pointer. 
- * protected by PG_chainlock */ - pte_addr_t direct; - } pte; - unsigned long private; /* mapping-private opaque data */ - - /* - * On machines where all RAM is mapped into kernel address space, - * we can simply calculate the virtual address. On machines with - * highmem some memory is mapped into kernel virtual memory - * dynamically, so we need a place to store that address. - * Note that this field could be 16 bits on x86 ... ;) - * - * Architectures with slow multiplication can define - * WANT_PAGE_VIRTUAL in asm/page.h - */ -#if defined(WANT_PAGE_VIRTUAL) - void *virtual; /* Kernel virtual address (NULL if - not kmapped, ie. highmem) */ -#endif /* CONFIG_HIGMEM || WANT_PAGE_VIRTUAL */ -}; +#include <linux/struct_page.h> /* * FIXME: take this include out, include page-flags.h in diff -purN -X /home/mbligh/.diff.exclude virgin/include/linux/struct_page.h struct_page/include/linux/struct_page.h --- virgin/include/linux/struct_page.h Wed Dec 31 16:00:00 1969 +++ struct_page/include/linux/struct_page.h Fri Oct 4 23:09:15 2002 @@ -0,0 +1,54 @@ +#ifndef _LINUX_STRUCT_PAGE_H +#define _LINUX_STRUCT_PAGE_H + +/* forward declaration; pte_chain is meant to be internal to rmap.c */ +struct pte_chain; + +/* + * Each physical page in the system has a struct page associated with + * it to keep track of whatever it is we are using the page for at the + * moment. Note that we have no way to track which tasks are using + * a page. + * + * Try to keep the most commonly accessed fields in single cache lines + * here (16 bytes or greater). This ordering should be particularly + * beneficial on 32-bit processors. + * + * The first line is data used in page cache lookup, the second line + * is used for linear searches (eg. clock algorithm scans). + * + * TODO: make this structure smaller, it could be as small as 32 bytes. + */ +struct page { + unsigned long flags; /* atomic flags, some possibly + updated asynchronously */ + atomic_t count; /* Usage count, see below. 
*/ + struct list_head list; /* ->mapping has some page lists. */ + struct address_space *mapping; /* The inode (or ...) we belong to. */ + unsigned long index; /* Our offset within mapping. */ + struct list_head lru; /* Pageout list, eg. active_list; + protected by zone->lru_lock !! */ + union { + struct pte_chain *chain;/* Reverse pte mapping pointer. + * protected by PG_chainlock */ + pte_addr_t direct; + } pte; + unsigned long private; /* mapping-private opaque data */ + + /* + * On machines where all RAM is mapped into kernel address space, + * we can simply calculate the virtual address. On machines with + * highmem some memory is mapped into kernel virtual memory + * dynamically, so we need a place to store that address. + * Note that this field could be 16 bits on x86 ... ;) + * + * Architectures with slow multiplication can define + * WANT_PAGE_VIRTUAL in asm/page.h + */ +#if defined(WANT_PAGE_VIRTUAL) + void *virtual; /* Kernel virtual address (NULL if + not kmapped, ie. highmem) */ +#endif /* CONFIG_HIGMEM || WANT_PAGE_VIRTUAL */ +}; + +#endif