Convert some pagetable walking functions over to be inline where they are
only used once.  This is worth a percent or so on lmbench fork.

Signed-off-by: Nick Piggin

---

 linux-2.6-npiggin/mm/memory.c   |    8 ++++----
 linux-2.6-npiggin/mm/msync.c    |    4 ++--
 linux-2.6-npiggin/mm/swapfile.c |    6 +++---
 linux-2.6-npiggin/mm/vmalloc.c  |   12 ++++++------
 4 files changed, 15 insertions(+), 15 deletions(-)

diff -puN mm/memory.c~mm-inline-ptbl-walkers mm/memory.c
--- linux-2.6/mm/memory.c~mm-inline-ptbl-walkers	2004-12-18 17:47:33.000000000 +1100
+++ linux-2.6-npiggin/mm/memory.c	2004-12-18 17:48:14.000000000 +1100
@@ -462,7 +462,7 @@ int copy_page_range(struct mm_struct *ds
 	return err;
 }
 
-static void zap_pte_range(struct mmu_gather *tlb,
+static inline void zap_pte_range(struct mmu_gather *tlb,
 		pmd_t *pmd, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
@@ -545,7 +545,7 @@ static void zap_pte_range(struct mmu_gat
 	pte_unmap(ptep-1);
 }
 
-static void zap_pmd_range(struct mmu_gather *tlb,
+static inline void zap_pmd_range(struct mmu_gather *tlb,
 		pud_t *pud, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
@@ -570,7 +570,7 @@ static void zap_pmd_range(struct mmu_gat
 	} while (address && (address < end));
 }
 
-static void zap_pud_range(struct mmu_gather *tlb,
+static inline void zap_pud_range(struct mmu_gather *tlb,
 		pgd_t * pgd, unsigned long address, unsigned long end,
 		struct zap_details *details)
 {
@@ -973,7 +973,7 @@ out:
 
 EXPORT_SYMBOL(get_user_pages);
 
-static void zeromap_pte_range(pte_t * pte, unsigned long address,
+static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
 			unsigned long size, pgprot_t prot)
 {
 	unsigned long end;
diff -puN mm/msync.c~mm-inline-ptbl-walkers mm/msync.c
--- linux-2.6/mm/msync.c~mm-inline-ptbl-walkers	2004-12-18 17:47:33.000000000 +1100
+++ linux-2.6-npiggin/mm/msync.c	2004-12-18 17:47:33.000000000 +1100
@@ -21,7 +21,7 @@
  * Called with mm->page_table_lock held to protect against other
  * threads/the swapper from ripping pte's out from under us.
  */
-static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
+static inline int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
 	unsigned long address, unsigned int flags)
 {
 	pte_t pte = *ptep;
@@ -38,7 +38,7 @@ static int filemap_sync_pte(pte_t *ptep,
 	return 0;
 }
 
-static int filemap_sync_pte_range(pmd_t * pmd,
+static inline int filemap_sync_pte_range(pmd_t * pmd,
 	unsigned long address, unsigned long end,
 	struct vm_area_struct *vma, unsigned int flags)
 {
diff -puN mm/swapfile.c~mm-inline-ptbl-walkers mm/swapfile.c
--- linux-2.6/mm/swapfile.c~mm-inline-ptbl-walkers	2004-12-18 17:47:33.000000000 +1100
+++ linux-2.6-npiggin/mm/swapfile.c	2004-12-18 17:47:33.000000000 +1100
@@ -427,7 +427,7 @@ void free_swap_and_cache(swp_entry_t ent
  * what to do if a write is requested later.
  */
 /* vma->vm_mm->page_table_lock is held */
-static void
+static inline void
 unuse_pte(struct vm_area_struct *vma, unsigned long address, pte_t *dir,
 	swp_entry_t entry, struct page *page)
 {
@@ -439,7 +439,7 @@ unuse_pte(struct vm_area_struct *vma, un
 }
 
 /* vma->vm_mm->page_table_lock is held */
-static unsigned long unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
+static inline unsigned long unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
 	unsigned long address, unsigned long size, unsigned long offset,
 	swp_entry_t entry, struct page *page)
 {
@@ -486,7 +486,7 @@ static unsigned long unuse_pmd(struct vm
 }
 
 /* vma->vm_mm->page_table_lock is held */
-static unsigned long unuse_pud(struct vm_area_struct * vma, pud_t *pud,
+static inline unsigned long unuse_pud(struct vm_area_struct * vma, pud_t *pud,
 	unsigned long address, unsigned long size, unsigned long offset,
 	swp_entry_t entry, struct page *page)
 {
diff -puN mm/vmalloc.c~mm-inline-ptbl-walkers mm/vmalloc.c
--- linux-2.6/mm/vmalloc.c~mm-inline-ptbl-walkers	2004-12-18 17:47:33.000000000 +1100
+++ linux-2.6-npiggin/mm/vmalloc.c	2004-12-18 17:47:33.000000000 +1100
@@ -23,7 +23,7 @@
 rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
 struct vm_struct *vmlist;
 
-static void unmap_area_pte(pmd_t *pmd, unsigned long address,
+static inline void unmap_area_pte(pmd_t *pmd, unsigned long address,
 			unsigned long size)
 {
 	unsigned long end;
@@ -56,7 +56,7 @@ static void unmap_area_pte(pmd_t *pmd, u
 	} while (address < end);
 }
 
-static void unmap_area_pmd(pud_t *pud, unsigned long address,
+static inline void unmap_area_pmd(pud_t *pud, unsigned long address,
 			unsigned long size)
 {
 	unsigned long end;
@@ -83,7 +83,7 @@ static void unmap_area_pmd(pud_t *pud, u
 	} while (address < end);
 }
 
-static void unmap_area_pud(pgd_t *pgd, unsigned long address,
+static inline void unmap_area_pud(pgd_t *pgd, unsigned long address,
 			unsigned long size)
 {
 	pud_t *pud;
@@ -110,7 +110,7 @@ static void unmap_area_pud(pgd_t *pgd, u
 	} while (address && (address < end));
 }
 
-static int map_area_pte(pte_t *pte, unsigned long address,
+static inline int map_area_pte(pte_t *pte, unsigned long address,
 			unsigned long size, pgprot_t prot,
 			struct page ***pages)
 {
@@ -135,7 +135,7 @@ static int map_area_pte(pte_t *pte, unsi
 	return 0;
 }
 
-static int map_area_pmd(pmd_t *pmd, unsigned long address,
+static inline int map_area_pmd(pmd_t *pmd, unsigned long address,
 			unsigned long size, pgprot_t prot,
 			struct page ***pages)
 {
@@ -160,7 +160,7 @@ static int map_area_pmd(pmd_t *pmd, unsi
 	return 0;
 }
 
-static int map_area_pud(pud_t *pud, unsigned long address,
+static inline int map_area_pud(pud_t *pud, unsigned long address,
 			unsigned long end, pgprot_t prot,
 			struct page ***pages)
 {
_
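For anyone wondering why this buys anything: each of these walkers has a
single caller, so marking it inline lets gcc fold the body into that caller
and drop the call/return and argument shuffling from hot paths (fork's page
table copy being the one lmbench notices), without growing text since the
sole out-of-line copy goes away.  A toy sketch of the idea (hypothetical
code, not taken from the patch or the kernel):

	/* Hypothetical example: a static helper with exactly one caller. */
	static inline void visit_entry(unsigned long *table, unsigned long idx)
	{
		table[idx] |= 1;	/* stands in for the per-entry work */
	}

	static void visit_all(unsigned long *table, unsigned long nr)
	{
		unsigned long idx;

		/* With visit_entry() inlined, the loop runs with no call overhead. */
		for (idx = 0; idx < nr; idx++)
			visit_entry(table, idx);
	}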