* [PATCH 2.5.48-mm1] Break COW page tables on mmap
@ 2002-11-22 16:40 Dave McCracken
From: Dave McCracken @ 2002-11-22 16:40 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Linux Memory Management

[-- Attachment #1: Type: text/plain, Size: 528 bytes --]


I found a fairly large hole in my unsharing logic.  Pte page COW
behavior breaks down when new objects are mapped over a range whose
pte pages are still shared.  This patch makes sure there aren't any
COW pte pages left in the range of a new mapping at mmap time.
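
For anyone following along, here is a minimal sketch of the unshare
step, factored out of the patch below, which repeats this logic once
at each end of the mapping range.  The function name is mine;
pte_page_lock(), pte_unshare() and pte_page_unlock() are the helpers
from the shared page table series, and the caller is assumed to hold
mm->page_table_lock (as clear_share_range() does):

	/* Sketch only, not in the patch: unshare the pte page
	 * covering one address if it is still COW-shared. */
	static void unshare_one_pte_page(struct mm_struct *mm,
					 unsigned long address)
	{
		pgd_t *pgd = pgd_offset(mm, address);
		pmd_t *pmd;
		struct page *ptepage;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return;
		pmd = pmd_offset(pgd, address);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return;

		ptepage = pmd_page(*pmd);
		pte_page_lock(ptepage);
		if (page_count(ptepage) > 1) {
			/* COW pte page: give this mm a private copy. */
			pte_t *pte = pte_unshare(mm, pmd, address);
			pte_unmap(pte);
			/* The pmd now points at the private copy. */
			ptepage = pmd_page(*pmd);
		}
		pte_page_unlock(ptepage);
	}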

This should fix the KDE problem.  It fixed it on the test machine I've been
using.

Dave McCracken

======================================================================
Dave McCracken          IBM Linux Base Kernel Team      1-512-838-3059
dmccr@us.ibm.com                                        T/L   678-3059

[-- Attachment #2: shpte-2.5.48-mm1-2.diff --]
[-- Type: text/plain, Size: 2321 bytes --]

--- 2.5.48-mm1-shsent/./mm/memory.c	2002-11-21 11:26:32.000000000 -0600
+++ 2.5.48-mm1-shpte/./mm/memory.c	2002-11-22 10:27:39.000000000 -0600
@@ -1080,6 +1080,65 @@
 	tlb_finish_mmu(tlb, 0, TASK_SIZE);
 }
 
+#ifdef CONFIG_SHAREPTE
+void
+clear_share_range(struct mm_struct *mm, unsigned long address, unsigned long len)
+{
+	pgd_t		*pgd;
+	pmd_t		*pmd;
+	struct page	*ptepage;
+	unsigned long	end = address + len;
+
+	spin_lock(&mm->page_table_lock);
+
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || pgd_bad(*pgd))
+		goto skip_start;
+
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		goto skip_start;
+
+	ptepage = pmd_page(*pmd);
+	pte_page_lock(ptepage);
+	if (page_count(ptepage) > 1) {
+		pte_t *pte;
+
+		pte = pte_unshare(mm, pmd, address);
+		pte_unmap(pte);
+		ptepage = pmd_page(*pmd);
+	}
+	pte_page_unlock(ptepage);
+
+skip_start:
+	/* This range is contained in one pte page.  We're done. */
+	if ((address >> PMD_SHIFT) == (end >> PMD_SHIFT))
+		goto out;
+
+	pgd = pgd_offset(mm, end);
+	if (pgd_none(*pgd) || pgd_bad(*pgd))
+		goto out;
+
+	pmd = pmd_offset(pgd, end);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		goto out;
+
+	ptepage = pmd_page(*pmd);
+	pte_page_lock(ptepage);
+	if (page_count(ptepage) > 1) {
+		pte_t *pte;
+
+		pte = pte_unshare(mm, pmd, end);
+		pte_unmap(pte);
+		ptepage = pmd_page(*pmd);
+	}
+	pte_page_unlock(ptepage);
+
+out:
+	spin_unlock(&mm->page_table_lock);
+}
+#endif
+
 /*
  * Do a quick page-table lookup for a single page.
  * mm->page_table_lock must be held.
--- 2.5.48-mm1-shsent/./mm/mmap.c	2002-11-19 09:17:36.000000000 -0600
+++ 2.5.48-mm1-shpte/./mm/mmap.c	2002-11-22 10:26:11.000000000 -0600
@@ -57,6 +57,8 @@
 pgprot_t protection_pmd[8] = {
 	__PMD000, __PMD001, __PMD010, __PMD011, __PMD100, __PMD101, __PMD110, __PMD111
 };
+extern void clear_share_range(struct mm_struct *mm, unsigned long address,
+			      unsigned long len);
 #endif
 
 int sysctl_overcommit_memory = 0;	/* default is heuristic overcommit */
@@ -524,6 +526,9 @@
 			return -ENOMEM;
 		goto munmap_back;
 	}
+#ifdef CONFIG_SHAREPTE
+	clear_share_range(mm, addr, len);
+#endif
 
 	/* Check against address space limit. */
 	if ((mm->total_vm << PAGE_SHIFT) + len
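
A worked example of the single-pte-page fast path in
clear_share_range() above (values are illustrative; PMD_SHIFT is 22
on i386 without PAE, so one pte page covers 4MB of address space):

	/*
	 * address = 0x00400000, len = 0x00300000  =>  end = 0x00700000
	 * address >> PMD_SHIFT == 1, end >> PMD_SHIFT == 1
	 *
	 * Both ends fall inside the same pte page, so only the first
	 * unshare runs and the function returns early; otherwise the
	 * pte pages at both ends of the range are unshared.
	 */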
