linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 2.5.42-mm3] Fix mremap for shared page tables
@ 2002-10-15 22:32 Dave McCracken
  2002-10-16 15:08 ` Hugh Dickins
  0 siblings, 1 reply; 2+ messages in thread
From: Dave McCracken @ 2002-10-15 22:32 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Linux Kernel, Linux Memory Management

[-- Attachment #1: Type: text/plain, Size: 315 bytes --]


Hugh Dickins was right.  mremap was broken wrt shared page tables.  Here's
the fix.

Dave McCracken

======================================================================
Dave McCracken          IBM Linux Base Kernel Team      1-512-838-3059
dmccr@us.ibm.com                                        T/L   678-3059

[-- Attachment #2: shpte-2.5.42-mm3-3.diff --]
[-- Type: text/plain, Size: 2808 bytes --]

--- 2.5.42-mm3-shsent/mm/mremap.c	2002-10-15 09:59:37.000000000 -0500
+++ 2.5.42-mm3-shpte/mm/mremap.c	2002-10-15 17:16:59.000000000 -0500
@@ -15,6 +15,7 @@
 #include <linux/swap.h>
 #include <linux/fs.h>
 #include <linux/highmem.h>
+#include <linux/rmap-locking.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -23,6 +24,7 @@
 
 static pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
 {
+	struct page *ptepage;
 	pgd_t * pgd;
 	pmd_t * pmd;
 	pte_t * pte = NULL;
@@ -45,8 +47,17 @@
 		goto end;
 	}
 
+	ptepage = pmd_page(*pmd);
+	pte_page_lock(ptepage);
+#ifdef CONFIG_SHAREPTE
+	if (page_count(ptepage) > 1) {
+		pte_unshare(mm, pmd, addr);
+		ptepage = pmd_page(*pmd);
+	}
+#endif
 	pte = pte_offset_map_nested(pmd, addr);
 	if (pte_none(*pte)) {
+		pte_page_unlock(ptepage);
 		pte_unmap_nested(pte);
 		pte = NULL;
 	}
@@ -54,6 +65,32 @@
 	return pte;
 }
 
+static inline void drop_pte_nested(struct mm_struct *mm, unsigned long addr, pte_t *pte)
+{
+	struct page *ptepage;
+	pgd_t *pgd;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	pmd = pmd_offset(pgd, addr);
+	ptepage = pmd_page(*pmd);
+	pte_page_unlock(ptepage);
+	pte_unmap_nested(pte);
+}
+
+static inline void drop_pte(struct mm_struct *mm, unsigned long addr, pte_t *pte)
+{
+	struct page *ptepage;
+	pgd_t *pgd;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	pmd = pmd_offset(pgd, addr);
+	ptepage = pmd_page(*pmd);
+	pte_page_unlock(ptepage);
+	pte_unmap(pte);
+}
+
 #ifdef CONFIG_HIGHPTE	/* Save a few cycles on the sane machines */
 static inline int page_table_present(struct mm_struct *mm, unsigned long addr)
 {
@@ -72,12 +109,24 @@
 
 static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
 {
+	struct page *ptepage;
 	pmd_t * pmd;
 	pte_t * pte = NULL;
 
 	pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
-	if (pmd)
+	if (pmd) {
+		ptepage = pmd_page(*pmd);
+#ifdef CONFIG_SHAREPTE
+		pte_page_lock(ptepage);
+		if (page_count(ptepage) > 1) {
+			pte_unshare(mm, pmd, addr);
+			ptepage = pmd_page(*pmd);
+		}
+		pte_page_unlock(ptepage);
+#endif
 		pte = pte_alloc_map(mm, pmd, addr);
+		pte_page_lock(ptepage);
+	}
 	return pte;
 }
 
@@ -121,15 +170,15 @@
 		 * atomic kmap
 		 */
 		if (!page_table_present(mm, new_addr)) {
-			pte_unmap_nested(src);
+			drop_pte_nested(mm, old_addr, src);
 			src = NULL;
 		}
 		dst = alloc_one_pte_map(mm, new_addr);
 		if (src == NULL)
 			src = get_one_pte_map_nested(mm, old_addr);
 		error = copy_one_pte(mm, src, dst);
-		pte_unmap_nested(src);
-		pte_unmap(dst);
+		drop_pte_nested(mm, old_addr, src);
+		drop_pte(mm, new_addr, dst);
 	}
 	flush_tlb_page(vma, old_addr);
 	spin_unlock(&mm->page_table_lock);

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2002-10-16 15:08 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2002-10-15 22:32 [PATCH 2.5.42-mm3] Fix mremap for shared page tables Dave McCracken
2002-10-16 15:08 ` Hugh Dickins

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox