--- 2.5.52-mm2-shsent/./include/linux/mm.h	2002-12-20 10:39:44.000000000 -0600
+++ 2.5.52-mm2-shpte/./include/linux/mm.h	2002-12-20 11:09:51.000000000 -0600
@@ -123,9 +123,6 @@
  * low four bits) to a page protection mask..
  */
 extern pgprot_t protection_map[16];
-#ifdef CONFIG_SHAREPTE
-extern pgprot_t protection_pmd[8];
-#endif
 
 /*
  * These are the virtual MM functions - opening of an area, closing and
--- 2.5.52-mm2-shsent/./include/linux/sched.h	2002-12-20 10:39:44.000000000 -0600
+++ 2.5.52-mm2-shpte/./include/linux/sched.h	2002-12-23 10:18:08.000000000 -0600
@@ -183,6 +183,7 @@
 	struct vm_area_struct * mmap_cache;	/* last find_vma result */
 	unsigned long free_area_cache;		/* first hole */
 	pgd_t * pgd;
+	atomic_t ptepages;			/* Number of pte pages allocated */
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
 	int map_count;				/* number of VMAs */
--- 2.5.52-mm2-shsent/./kernel/fork.c	2002-12-20 10:39:44.000000000 -0600
+++ 2.5.52-mm2-shpte/./kernel/fork.c	2002-12-23 10:32:15.000000000 -0600
@@ -238,6 +238,7 @@
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->map_count = 0;
 	mm->rss = 0;
+	atomic_set(&mm->ptepages, 0);
 	mm->cpu_vm_mask = 0;
 	pprev = &mm->mmap;
 
--- 2.5.52-mm2-shsent/./mm/memory.c	2002-12-20 10:39:45.000000000 -0600
+++ 2.5.52-mm2-shpte/./mm/memory.c	2002-12-23 10:22:52.000000000 -0600
@@ -116,6 +116,7 @@
 	pmd_clear(dir);
 
 	pgtable_remove_rmap_locked(ptepage, tlb->mm);
+	atomic_dec(&tlb->mm->ptepages);
 	dec_page_state(nr_page_table_pages);
 
 	ClearPagePtepage(ptepage);
@@ -184,6 +185,7 @@
 		SetPagePtepage(new);
 		pgtable_add_rmap(new, mm, address);
 		pmd_populate(mm, pmd, new);
+		atomic_inc(&mm->ptepages);
 		inc_page_state(nr_page_table_pages);
 	}
 out:
@@ -217,7 +219,6 @@
 #define PTE_TABLE_MASK ((PTRS_PER_PTE-1) * sizeof(pte_t))
 #define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
 
-#ifndef CONFIG_SHAREPTE
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
@@ -354,7 +355,6 @@
 nomem:
 	return -ENOMEM;
 }
-#endif
 
 static void zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
 {
--- 2.5.52-mm2-shsent/./mm/ptshare.c	2002-12-20 10:39:45.000000000 -0600
+++ 2.5.52-mm2-shpte/./mm/ptshare.c	2002-12-23 12:17:23.000000000 -0600
@@ -23,7 +23,7 @@
 /*
  * Protections that can be set on the pmd entry (see discussion in mmap.c).
  */
-pgprot_t protection_pmd[8] = {
+static pgprot_t protection_pmd[8] = {
 	__PMD000, __PMD001, __PMD010, __PMD011,
 	__PMD100, __PMD101, __PMD110, __PMD111
 };
@@ -459,6 +459,28 @@
 }
 
 /**
+ * fork_page_range - Either copy or share a page range at fork time
+ * @dst: the mm_struct of the forked child
+ * @src: the mm_struct of the forked parent
+ * @vma: the vm_area to be shared
+ * @prev_pmd: A pointer to the pmd entry we did at last invocation
+ *
+ * This wrapper decides whether to share page tables on fork or just make
+ * a copy.  The current criterion is whether a page table has more than 3
+ * pte pages, since all forked processes will unshare 3 pte pages after fork,
+ * even the ones doing an immediate exec.  Tests indicate that if a page
+ * table has more than 3 pte pages, it's a performance win to share.
+ */
+int fork_page_range(struct mm_struct *dst, struct mm_struct *src,
+		    struct vm_area_struct *vma, pmd_t **prev_pmd)
+{
+	if (atomic_read(&src->ptepages) > 3)
+		return share_page_range(dst, src, vma, prev_pmd);
+
+	return copy_page_range(dst, src, vma);
+}
+
+/**
+ * unshare_page_range - Make sure no pte pages are shared in a given range
+ * @mm: the mm_struct whose page table we unshare from
+ * @address: the base address of the range