From: Paul Davies <pauld@gelato.unsw.edu.au>
To: linux-mm@kvack.org
Cc: Paul Davies <pauld@gelato.unsw.edu.au>
Subject: [PATCH 7/18] PTI - Page fault handler
Date: Thu, 13 Jul 2006 14:27:45 +1000
Message-ID: <20060713042745.9978.53008.sendpatchset@localhost.localdomain>
In-Reply-To: <20060713042630.9978.66924.sendpatchset@localhost.localdomain>

1) Converts the page fault handler functions to call the PTI interface,
abstracting the page-table-dependent operations out of them.

2) Moves the page-table-dependent #defines from asm-generic/tlb.h into a
new asm-generic/pt-tlb.h.
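
For reference, the PTI helpers used below (pt_path_t, build_page_table,
lookup_page_table_fast, lock_pte, unlock_pte) are introduced by earlier
patches in this series.  The following is only a rough sketch of the
interface as inferred from the call sites in this patch, not the actual
definitions:

/*
 * Illustrative sketch only -- signatures inferred from how this patch
 * calls them.  For the default page table, pt_path_t presumably records
 * the path walked to reach the pte (e.g. the pmd entry), so the lock
 * helpers can find the pte lock without re-walking from the pgd.
 */
typedef struct {
	pmd_t *pmd;		/* assumed: leaf directory entry for this pte */
} pt_path_t;

/* Allocate any missing levels, map the pte, and record the path taken;
 * returns NULL on allocation failure (caller returns VM_FAULT_OOM). */
pte_t *build_page_table(struct mm_struct *mm, unsigned long address,
			pt_path_t *pt_path);

/* Re-map and lock the pte previously reached via pt_path
 * (stands in for pte_offset_map_lock()). */
pte_t *lookup_page_table_fast(struct mm_struct *mm, pt_path_t pt_path,
			      unsigned long address);

/* Take/drop the pte lock that pt_path identifies (stand in for
 * pte_lockptr()+spin_lock() and for the unlock half of
 * pte_unmap_unlock(); the unmap is now done separately via pte_unmap()). */
void lock_pte(struct mm_struct *mm, pt_path_t pt_path);
void unlock_pte(struct mm_struct *mm, pt_path_t pt_path);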

Signed-off-by: Paul Davies <pauld@gelato.unsw.edu.au>

---

 include/asm-generic/pt-tlb.h |   24 +++++++++++++++
 include/asm-generic/tlb.h    |   22 +------------
 mm/memory.c                  |   68 ++++++++++++++++++-------------------------
 3 files changed, 56 insertions(+), 58 deletions(-)
Index: linux-2.6.17.2/include/asm-generic/pt-tlb.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.17.2/include/asm-generic/pt-tlb.h	2006-07-08 00:46:42.890847200 +1000
@@ -0,0 +1,24 @@
+#ifndef _ASM_GENERIC_PT_TLB_H
+#define _ASM_GENERIC_PT_TLB_H 1
+
+#define pte_free_tlb(tlb, ptep)					\
+	do {							\
+		tlb->need_flush = 1;				\
+		__pte_free_tlb(tlb, ptep);			\
+	} while (0)
+
+#ifndef __ARCH_HAS_4LEVEL_HACK
+#define pud_free_tlb(tlb, pudp)					\
+	do {							\
+		tlb->need_flush = 1;				\
+		__pud_free_tlb(tlb, pudp);			\
+	} while (0)
+#endif
+
+#define pmd_free_tlb(tlb, pmdp)					\
+	do {							\
+		tlb->need_flush = 1;				\
+		__pmd_free_tlb(tlb, pmdp);			\
+	} while (0)
+
+#endif
Index: linux-2.6.17.2/include/asm-generic/tlb.h
===================================================================
--- linux-2.6.17.2.orig/include/asm-generic/tlb.h	2006-07-08 00:46:34.233163368 +1000
+++ linux-2.6.17.2/include/asm-generic/tlb.h	2006-07-08 00:46:42.890847200 +1000
@@ -124,26 +124,8 @@
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
-#define pte_free_tlb(tlb, ptep)					\
-	do {							\
-		tlb->need_flush = 1;				\
-		__pte_free_tlb(tlb, ptep);			\
-	} while (0)
-
-#ifndef __ARCH_HAS_4LEVEL_HACK
-#define pud_free_tlb(tlb, pudp)					\
-	do {							\
-		tlb->need_flush = 1;				\
-		__pud_free_tlb(tlb, pudp);			\
-	} while (0)
-#endif
-
-#define pmd_free_tlb(tlb, pmdp)					\
-	do {							\
-		tlb->need_flush = 1;				\
-		__pmd_free_tlb(tlb, pmdp);			\
-	} while (0)
-
 #define tlb_migrate_finish(mm) do {} while (0)
 
+#include <asm-generic/pt-tlb.h>
+
 #endif /* _ASM_GENERIC__TLB_H */
Index: linux-2.6.17.2/mm/memory.c
===================================================================
--- linux-2.6.17.2.orig/mm/memory.c	2006-07-08 00:46:42.857852216 +1000
+++ linux-2.6.17.2/mm/memory.c	2006-07-08 01:04:37.485483984 +1000
@@ -1210,6 +1210,7 @@
  * but allow concurrent faults), with pte both mapped and locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
+
 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		spinlock_t *ptl, pte_t orig_pte)
@@ -1738,11 +1739,10 @@
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		unsigned long address, pte_t *page_table, pt_path_t pt_path,
 		int write_access)
 {
 	struct page *page;
-	spinlock_t *ptl;
 	pte_t entry;
 
 	if (write_access) {
@@ -1758,7 +1758,7 @@
 		entry = mk_pte(page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
-		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+		page_table = lookup_page_table_fast(mm, pt_path, address);
 		if (!pte_none(*page_table))
 			goto release;
 		inc_mm_counter(mm, anon_rss);
@@ -1770,8 +1770,8 @@
 		page_cache_get(page);
 		entry = mk_pte(page, vma->vm_page_prot);
 
-		ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
+		lock_pte(mm, pt_path);
+
 		if (!pte_none(*page_table))
 			goto release;
 		inc_mm_counter(mm, file_rss);
@@ -1784,7 +1784,8 @@
 	update_mmu_cache(vma, address, entry);
 	lazy_mmu_prot_update(entry);
 unlock:
-	pte_unmap_unlock(page_table, ptl);
+	unlock_pte(mm, pt_path);
+	pte_unmap(page_table);
 	return VM_FAULT_MINOR;
 release:
 	page_cache_release(page);
@@ -1807,10 +1808,9 @@
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		unsigned long address, pte_t *page_table, pt_path_t pt_path,
 		int write_access)
 {
-	spinlock_t *ptl;
 	struct page *new_page;
 	struct address_space *mapping = NULL;
 	pte_t entry;
@@ -1859,14 +1859,17 @@
 		anon = 1;
 	}
 
-	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+	page_table = lookup_page_table_fast(mm, pt_path, address);
+	
 	/*
 	 * For a file-backed vma, someone could have truncated or otherwise
 	 * invalidated this page.  If unmap_mapping_range got called,
 	 * retry getting the page.
 	 */
 	if (mapping && unlikely(sequence != mapping->truncate_count)) {
-		pte_unmap_unlock(page_table, ptl);
+		unlock_pte(mm, pt_path);
+		pte_unmap(page_table);
+
 		page_cache_release(new_page);
 		cond_resched();
 		sequence = mapping->truncate_count;
@@ -1909,7 +1912,8 @@
 	update_mmu_cache(vma, address, entry);
 	lazy_mmu_prot_update(entry);
 unlock:
-	pte_unmap_unlock(page_table, ptl);
+	unlock_pte(mm, pt_path);
+	pte_unmap(page_table);
 	return ret;
 oom:
 	page_cache_release(new_page);
@@ -1926,13 +1930,13 @@
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		unsigned long address, pte_t *page_table, pt_path_t pt_path,
 		int write_access, pte_t orig_pte)
 {
 	pgoff_t pgoff;
 	int err;
 
-	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
+	if (!pte_unmap_same(mm, pt_path, page_table, orig_pte))
 		return VM_FAULT_MINOR;
 
 	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
@@ -1969,36 +1973,35 @@
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
-		pte_t *pte, pmd_t *pmd, int write_access)
+		pte_t *pte, pt_path_t pt_path, int write_access)
 {
 	pte_t entry;
 	pte_t old_entry;
-	spinlock_t *ptl;
 
 	old_entry = entry = *pte;
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
 			if (!vma->vm_ops || !vma->vm_ops->nopage)
 				return do_anonymous_page(mm, vma, address,
-					pte, pmd, write_access);
+					pte, pt_path, write_access);
 			return do_no_page(mm, vma, address,
-					pte, pmd, write_access);
+					pte, pt_path, write_access);
 		}
 		if (pte_file(entry))
 			return do_file_page(mm, vma, address,
-					pte, pmd, write_access, entry);
+					pte, pt_path, write_access, entry);
 		return do_swap_page(mm, vma, address,
-					pte, pmd, write_access, entry);
+					pte, pt_path, write_access, entry);
 	}
 
-	ptl = pte_lockptr(mm, pmd);
-	spin_lock(ptl);
+	lock_pte(mm, pt_path);
+
 	if (unlikely(!pte_same(*pte, entry)))
 		goto unlock;
 	if (write_access) {
 		if (!pte_write(entry))
 			return do_wp_page(mm, vma, address,
-					pte, pmd, ptl, entry);
+					pte, pt_path, entry);
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
@@ -2017,7 +2020,8 @@
 			flush_tlb_page(vma, address);
 	}
 unlock:
-	pte_unmap_unlock(pte, ptl);
+	unlock_pte(mm, pt_path);
+	pte_unmap(pte);
 	return VM_FAULT_MINOR;
 }
 
@@ -2027,30 +2031,18 @@
 int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, int write_access)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
 	pte_t *pte;
+	pt_path_t pt_path;
 
 	__set_current_state(TASK_RUNNING);
 
 	inc_page_state(pgfault);
 
-	if (unlikely(is_vm_hugetlb_page(vma)))
-		return hugetlb_fault(mm, vma, address, write_access);
-
-	pgd = pgd_offset(mm, address);
-	pud = pud_alloc(mm, pgd, address);
-	if (!pud)
-		return VM_FAULT_OOM;
-	pmd = pmd_alloc(mm, pud, address);
-	if (!pmd)
-		return VM_FAULT_OOM;
-	pte = pte_alloc_map(mm, pmd, address);
+	pte = build_page_table(mm, address, &pt_path);
 	if (!pte)
 		return VM_FAULT_OOM;
 
-	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+	return handle_pte_fault(mm, vma, address, pte, pt_path, write_access);
 }
 
 EXPORT_SYMBOL_GPL(__handle_mm_fault);
