--- 2.5.41-mm2/./include/linux/mm.h	2002-10-10 10:17:56.000000000 -0500
+++ 2.5.41-mm2-shpte/./include/linux/mm.h	2002-10-10 10:34:58.000000000 -0500
@@ -359,8 +359,11 @@
 extern int shmem_zero_setup(struct vm_area_struct *);
 extern void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size);
-extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+#ifdef CONFIG_SHAREPTE
+extern int share_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma, pmd_t **prev_pmd);
+#else
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+#endif
 extern int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
 extern int zeromap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long size, pgprot_t prot);
--- 2.5.41-mm2/./mm/memory.c	2002-10-10 10:17:57.000000000 -0500
+++ 2.5.41-mm2-shpte/./mm/memory.c	2002-10-10 10:35:34.000000000 -0500
@@ -153,6 +153,7 @@
 	} while (--nr);
 }
 
+#ifdef CONFIG_SHAREPTE
 /*
  * This function makes the decision whether a pte page needs to be unshared
  * or not. Note that page_count() == 1 isn't even tested here. The assumption
@@ -166,7 +167,6 @@
 static inline int pte_needs_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	pmd_t *pmd, unsigned long address, int write_access)
 {
-#ifdef CONFIG_SHAREPTE
 	struct page *ptepage;
 
 	/* It's not even there, nothing to unshare. */
@@ -198,9 +198,6 @@
 	 * Ok, we have to unshare.
 	 */
 	return 1;
-#else
-	return 0;
-#endif
 }
 
 /*
@@ -223,7 +220,6 @@
 static pte_t *pte_unshare(struct mm_struct *mm, pmd_t *pmd,
 	unsigned long address)
 {
-#ifdef CONFIG_SHAREPTE
 	pte_t *src_ptb, *dst_ptb;
 	struct page *oldpage, *newpage, *tmppage;
 	struct vm_area_struct *vma;
@@ -359,9 +355,9 @@
 	return dst_ptb + __pte_offset(address);
 
 out_map:
-#endif
 	return pte_offset_map(pmd, address);
 }
+#endif
 
 pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
@@ -417,10 +413,10 @@
 #define PTE_TABLE_MASK ((PTRS_PER_PTE-1) * sizeof(pte_t))
 #define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
 
+#ifdef CONFIG_SHAREPTE
 int share_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma, pmd_t **prev_pmd)
 {
-#ifdef CONFIG_SHAREPTE
 	pgd_t *src_pgd, *dst_pgd;
 	unsigned long address = vma->vm_start;
 	unsigned long end = vma->vm_end;
@@ -505,10 +501,9 @@
 out:
 	return 0;
 nomem:
-#endif
 	return -ENOMEM;
 }
-
+#else
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
@@ -640,6 +635,7 @@
 nomem:
 	return -ENOMEM;
 }
+#endif
 
 static void zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
 {
@@ -668,6 +664,7 @@
 	 */
 	ptepage = pmd_page(*pmd);
 	pte_page_lock(ptepage);
+#ifdef CONFIG_SHAREPTE
 	if (page_count(ptepage) > 1) {
 		if ((offset == 0) && (size == PMD_SIZE)) {
 			pmd_clear(pmd);
@@ -679,9 +676,10 @@
 		}
 		ptep = pte_unshare(tlb->mm, pmd, address);
 		ptepage = pmd_page(*pmd);
-	} else {
+	} else
+#endif
 		ptep = pte_offset_map(pmd, address);
-	}
+
 	for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
 		pte_t pte = *ptep;
 		if (pte_none(pte))
@@ -1839,6 +1837,7 @@
 	if (pmd) {
 		pte_t * pte;
 
+#ifdef CONFIG_SHAREPTE
 		if (pte_needs_unshare(mm, vma, pmd, address, write_access)) {
 			pte_page_lock(pmd_page(*pmd));
 			pte = pte_unshare(mm, pmd, address);
@@ -1846,7 +1845,10 @@
 			pte = pte_alloc_map(mm, pmd, address);
 			pte_page_lock(pmd_page(*pmd));
 		}
-
+#else
+		pte = pte_alloc_map(mm, pmd, address);
+		pte_page_lock(pmd_page(*pmd));
+#endif
 		if (pte) {
 			spin_unlock(&mm->page_table_lock);
 			return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
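
Note on the mm.h change above: with CONFIG_SHAREPTE enabled, the header exports share_page_range() in place of copy_page_range(), so the fork-time caller (dup_mmap() in kernel/fork.c, not included in this excerpt) has to choose between the two and carry a pmd cursor across successive vmas. Below is a minimal sketch of what such a call site could look like; the helper name dup_vma_page_tables() and the exact use of prev_pmd are illustrative assumptions, not code from this patch.

#include <linux/mm.h>

/*
 * Illustrative only -- not part of this patch.  Shows how a fork-path
 * caller would select between the two interfaces declared in mm.h above.
 */
static inline int dup_vma_page_tables(struct mm_struct *dst, struct mm_struct *src,
	struct vm_area_struct *vma, pmd_t **prev_pmd)
{
#ifdef CONFIG_SHAREPTE
	/*
	 * Share the pte pages instead of copying them.  *prev_pmd is a
	 * cursor the caller initializes to NULL once and keeps across
	 * consecutive vmas (assumed usage; see share_page_range()).
	 */
	return share_page_range(dst, src, vma, prev_pmd);
#else
	/* pte sharing configured out: fall back to copying the page range. */
	return copy_page_range(dst, src, vma);
#endif
}

With CONFIG_SHAREPTE the copy work is deferred: at fork time the child only gains a reference on the parent's pte pages, and the actual per-page copy happens later in pte_unshare(), which handle_mm_fault() reaches through pte_needs_unshare() in the hunks above.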