From: Paul Davies <pauld@gelato.unsw.edu.au>
To: linux-mm@kvack.org
Cc: Paul Davies <pauld@gelato.unsw.edu.au>
Subject: [PATCH 8/18] PTI - Page fault handler
Date: Thu, 13 Jul 2006 14:27:55 +1000
Message-ID: <20060713042755.9978.16103.sendpatchset@localhost.localdomain>
In-Reply-To: <20060713042630.9978.66924.sendpatchset@localhost.localdomain>

1) Continues converting the page fault handler functions to call the PTI
interface, abstracting away page table dependent operations.

2) Removes get_locked_pte from memory.c; its callers now use
build_page_table (see the sketch below).

3) Calls lookup_page_table in filemap_xip.c in place of page_check_address.
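
Not part of the patch itself, just review context: a minimal sketch of the
two call site conversions, assuming the PTI helpers (build_page_table,
lookup_page_table, lock_pte/unlock_pte and the pt_path_t cookie) behave as
the hunks below use them.

	spinlock_t *ptl;
	pt_path_t pt_path;
	pte_t *pte;

	/* Today: open-coded walk, the per-PTE spinlock handed back to the caller. */
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	/* ... install or examine the pte ... */
	pte_unmap_unlock(pte, ptl);

	/* PTI: the walk and the locking discipline hide behind pt_path_t. */
	pte = build_page_table(mm, addr, &pt_path);
	if (!pte)
		goto out;
	/* ... install or examine the pte ... */
	unlock_pte(mm, pt_path);
	pte_unmap(pte);

	/* Lookup of an existing pte (as in filemap_xip.c): look up, then lock. */
	pte = lookup_page_table(mm, addr, &pt_path);
	if (pte) {
		lock_pte(mm, pt_path);
		/* ... revalidate and use the pte ... */
		unlock_pte(mm, pt_path);
		pte_unmap(pte);
	}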


Signed-off-by: Paul Davies <pauld@gelato.unsw.edu.au>

---

 include/linux/mm.h |    2 -
 mm/filemap_xip.c   |   26 +++++++++++++++++++---
 mm/memory.c        |   62 +++++++++++++++++++++--------------------------------
 3 files changed, 48 insertions(+), 42 deletions(-)
Index: linux-2.6.17.2/mm/memory.c
===================================================================
--- linux-2.6.17.2.orig/mm/memory.c	2006-07-08 20:35:44.823860064 +1000
+++ linux-2.6.17.2/mm/memory.c	2006-07-08 20:38:43.631677144 +1000
@@ -934,18 +934,6 @@
 	return err;
 }
 
-pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
-{
-	pgd_t * pgd = pgd_offset(mm, addr);
-	pud_t * pud = pud_alloc(mm, pgd, addr);
-	if (pud) {
-		pmd_t * pmd = pmd_alloc(mm, pud, addr);
-		if (pmd)
-			return pte_alloc_map_lock(mm, pmd, addr, ptl);
-	}
-	return NULL;
-}
-
 /*
  * This is the old fallback for page remapping.
  *
@@ -957,14 +945,14 @@
 {
 	int retval;
 	pte_t *pte;
-	spinlock_t *ptl;  
+	pt_path_t pt_path;
 
 	retval = -EINVAL;
 	if (PageAnon(page))
 		goto out;
 	retval = -ENOMEM;
 	flush_dcache_page(page);
-	pte = get_locked_pte(mm, addr, &ptl);
+	pte = build_page_table(mm, addr, &pt_path);
 	if (!pte)
 		goto out;
 	retval = -EBUSY;
@@ -979,7 +967,8 @@
 
 	retval = 0;
 out_unlock:
-	pte_unmap_unlock(pte, ptl);
+	unlock_pte(mm, pt_path);
+	pte_unmap(pte);
 out:
 	return retval;
 }
@@ -1136,17 +1125,13 @@
  * (but do_wp_page is only called after already making such a check;
  * and do_anonymous_page and do_no_page can safely check later on).
  */
-static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
+static inline int pte_unmap_same(struct mm_struct *mm, pt_path_t pt_path,
 				pte_t *page_table, pte_t orig_pte)
 {
 	int same = 1;
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-	if (sizeof(pte_t) > sizeof(unsigned long)) {
-		spinlock_t *ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
-		same = pte_same(*page_table, orig_pte);
-		spin_unlock(ptl);
-	}
+	if (sizeof(pte_t) > sizeof(unsigned long))
+		same = atomic_pte_same(mm, page_table, orig_pte, pt_path);
 #endif
 	pte_unmap(page_table);
 	return same;
@@ -1210,10 +1195,9 @@
  * but allow concurrent faults), with pte both mapped and locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-
 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		spinlock_t *ptl, pte_t orig_pte)
+		unsigned long address, pte_t *page_table, pt_path_t pt_path,
+		pte_t orig_pte)
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
@@ -1243,7 +1227,8 @@
 	 */
 	page_cache_get(old_page);
 gotten:
-	pte_unmap_unlock(page_table, ptl);
+	unlock_pte(mm, pt_path);
+	pte_unmap(page_table);
 
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
@@ -1261,7 +1246,7 @@
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+	page_table = lookup_page_table_fast(mm, pt_path, address);
 	if (likely(pte_same(*page_table, orig_pte))) {
 		if (old_page) {
 			page_remove_rmap(old_page);
@@ -1289,7 +1274,8 @@
 	if (old_page)
 		page_cache_release(old_page);
 unlock:
-	pte_unmap_unlock(page_table, ptl);
+	unlock_pte(mm, pt_path);
+	pte_unmap(page_table);
 	return ret;
 oom:
 	if (old_page)
@@ -1638,16 +1624,15 @@
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		unsigned long address, pte_t *page_table, pt_path_t pt_path,
 		int write_access, pte_t orig_pte)
 {
-	spinlock_t *ptl;
 	struct page *page;
 	swp_entry_t entry;
 	pte_t pte;
 	int ret = VM_FAULT_MINOR;
 
-	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
+	if (!pte_unmap_same(mm, pt_path, page_table, orig_pte))
 		goto out;
 
 	entry = pte_to_swp_entry(orig_pte);
@@ -1661,7 +1646,7 @@
 			 * Back out if somebody else faulted in this pte
 			 * while we released the pte lock.
 			 */
-			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+			page_table = lookup_page_table_fast(mm, pt_path, address);
 			if (likely(pte_same(*page_table, orig_pte)))
 				ret = VM_FAULT_OOM;
 			goto unlock;
@@ -1685,7 +1670,7 @@
 	/*
 	 * Back out if somebody else already faulted in this pte.
 	 */
-	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+	page_table = lookup_page_table_fast(mm, pt_path, address);
 	if (unlikely(!pte_same(*page_table, orig_pte)))
 		goto out_nomap;
 
@@ -1713,8 +1698,8 @@
 	unlock_page(page);
 
 	if (write_access) {
-		if (do_wp_page(mm, vma, address,
-				page_table, pmd, ptl, pte) == VM_FAULT_OOM)
+		if (do_wp_page(mm, vma, address,
+				page_table, pt_path, pte) == VM_FAULT_OOM)
 			ret = VM_FAULT_OOM;
 		goto out;
 	}
@@ -1723,11 +1708,14 @@
 	update_mmu_cache(vma, address, pte);
 	lazy_mmu_prot_update(pte);
 unlock:
-	pte_unmap_unlock(page_table, ptl);
+	unlock_pte(mm, pt_path);
+	pte_unmap(page_table);
 out:
 	return ret;
 out_nomap:
-	pte_unmap_unlock(page_table, ptl);
+	unlock_pte(mm, pt_path);
+	pte_unmap(page_table);
+
 	unlock_page(page);
 	page_cache_release(page);
 	return ret;
Index: linux-2.6.17.2/mm/filemap_xip.c
===================================================================
--- linux-2.6.17.2.orig/mm/filemap_xip.c	2006-07-08 20:35:44.823860064 +1000
+++ linux-2.6.17.2/mm/filemap_xip.c	2006-07-08 20:35:46.151658208 +1000
@@ -16,6 +16,8 @@
 #include <asm/tlbflush.h>
 #include "filemap.h"
 
+#include <linux/pt.h>
+
 /*
  * This is a file read routine for execute in place files, and uses
  * the mapping->a_ops->get_xip_page() function for the actual low-level
@@ -174,7 +176,7 @@
 	unsigned long address;
 	pte_t *pte;
 	pte_t pteval;
-	spinlock_t *ptl;
+	pt_path_t pt_path;
 	struct page *page;
 
 	spin_lock(&mapping->i_mmap_lock);
@@ -184,7 +186,23 @@
 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 		page = ZERO_PAGE(address);
-		pte = page_check_address(page, mm, address, &ptl);
+		pte = lookup_page_table(mm, address, &pt_path);
+		if (!pte)
+			goto out;
+
+		/* Make a quick check before getting the lock */
+		if (!pte_present(*pte)) {
+			pte_unmap(pte);
+			goto out;
+		}
+
+		lock_pte(mm, pt_path);
+		if (!(pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte))) {
+			unlock_pte(mm, pt_path);
+			pte_unmap(pte);
+			goto out;
+		}
+
 		if (pte) {
 			/* Nuke the page table entry. */
 			flush_cache_page(vma, address, pte_pfn(*pte));
@@ -192,10 +210,12 @@
 			page_remove_rmap(page);
 			dec_mm_counter(mm, file_rss);
 			BUG_ON(pte_dirty(pteval));
-			pte_unmap_unlock(pte, ptl);
+			unlock_pte(mm, pt_path);
+			pte_unmap(pte);
 			page_cache_release(page);
 		}
 	}
+out:
 	spin_unlock(&mapping->i_mmap_lock);
 }
 
Index: linux-2.6.17.2/include/linux/mm.h
===================================================================
--- linux-2.6.17.2.orig/include/linux/mm.h	2006-07-08 20:35:44.899848512 +1000
+++ linux-2.6.17.2/include/linux/mm.h	2006-07-08 20:35:46.153657904 +1000
@@ -796,8 +796,6 @@
 	unsigned long private_dirty;
 };
 
-extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
-
 #include <linux/pt-mm.h>
 
 extern void free_area_init(unsigned long * zones_size);
