From: Paul Cameron Davies <pauld@cse.unsw.EDU.AU>
To: linux-mm@kvack.org
Subject: [PATCH 7/15] PTI: continue calling interface
Date: Sat, 21 May 2005 13:54:13 +1000 (EST)
Message-ID: <Pine.LNX.4.61.0505211352170.28095@wagner.orchestra.cse.unsw.EDU.AU>
In-Reply-To: <Pine.LNX.4.61.0505211344350.24777@wagner.orchestra.cse.unsw.EDU.AU>

Patch 7 of 15.

This patch continues the conversion of callers to the new page table interface (a sketch of the interface functions follows the list below):

 	* lookup_page_table is called in page_check_address in rmap.c.
 	* build_page_table is called in handle_mm_fault.
 	* handle_pte_fault, do_file_page and do_anonymous_page are no
 	  longer passed pmds; lookup_page_table is called later on
 	  instead.
 	* do_no_page is no longer passed a pmd; lookup_page_table is
 	  called instead to get the relevant pte.
 	* do_swap_page and do_wp_page are no longer passed pmds;
 	  lookup_page_table is called instead.
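
The interface functions themselves are not part of this diff; they come
from the earlier patches in this series.  As a rough sketch only,
reconstructed from the open-coded walks removed below rather than copied
from <linux/page_table.h>, they could look something like this.  Both are
assumed to run under mm->page_table_lock, and lookup_page_table's caller
is responsible for the matching pte_unmap:

	/* Hypothetical sketch, not the series' actual definitions. */
	pte_t *lookup_page_table(struct mm_struct *mm, unsigned long address)
	{
		pgd_t *pgd = pgd_offset(mm, address);
		pud_t *pud;
		pmd_t *pmd;

		if (!pgd_present(*pgd))
			return NULL;
		pud = pud_offset(pgd, address);
		if (!pud_present(*pud))
			return NULL;
		pmd = pmd_offset(pud, address);
		if (!pmd_present(*pmd))
			return NULL;
		return pte_offset_map(pmd, address);	/* caller pte_unmaps */
	}

	pte_t *build_page_table(struct mm_struct *mm, unsigned long address)
	{
		pgd_t *pgd = pgd_offset(mm, address);
		pud_t *pud = pud_alloc(mm, pgd, address);
		pmd_t *pmd;

		if (!pud)
			return NULL;
		pmd = pmd_alloc(mm, pud, address);
		if (!pmd)
			return NULL;
		return pte_alloc_map(mm, pmd, address);	/* NULL on OOM */
	}

Either way, the point of the conversion is that an (mm, address) pair is
now enough to re-derive the pte after the page_table_lock has been dropped
and retaken, so the fault handlers no longer have to thread a pmd through
their argument lists.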

  mm/memory.c |   59 ++++++++++++++++++++++++-----------------------------------
  mm/rmap.c   |   27 ++++++++++-----------------
  2 files changed, 34 insertions(+), 52 deletions(-)

Index: linux-2.6.12-rc4/mm/rmap.c
===================================================================
--- linux-2.6.12-rc4.orig/mm/rmap.c	2005-05-19 17:01:14.000000000 +1000
+++ linux-2.6.12-rc4/mm/rmap.c	2005-05-19 18:01:20.000000000 +1000
@@ -53,6 +53,7 @@
  #include <linux/init.h>
  #include <linux/rmap.h>
  #include <linux/rcupdate.h>
+#include <linux/page_table.h>

  #include <asm/tlbflush.h>

@@ -250,9 +251,6 @@
  static pte_t *page_check_address(struct page *page, struct mm_struct *mm,
  					unsigned long address)
  {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
  	pte_t *pte;

  	/*
@@ -260,20 +258,15 @@
  	 * munmap, fork, etc...
  	 */
  	spin_lock(&mm->page_table_lock);
-	pgd = pgd_offset(mm, address);
-	if (likely(pgd_present(*pgd))) {
-		pud = pud_offset(pgd, address);
-		if (likely(pud_present(*pud))) {
-			pmd = pmd_offset(pud, address);
-			if (likely(pmd_present(*pmd))) {
-				pte = pte_offset_map(pmd, address);
-				if (likely(pte_present(*pte) &&
-					   page_to_pfn(page) == pte_pfn(*pte)))
-					return pte;
-				pte_unmap(pte);
-			}
-		}
-	}
+	pte = lookup_page_table(mm, address);
+	if (!pte)
+		goto out_unlock;
+	if (likely(pte_present(*pte) &&
+	   page_to_pfn(page) == pte_pfn(*pte)))
+		return pte;
+	pte_unmap(pte);
+
+out_unlock:
  	spin_unlock(&mm->page_table_lock);
  	return ERR_PTR(-ENOENT);
  }
Index: linux-2.6.12-rc4/mm/memory.c
===================================================================
--- linux-2.6.12-rc4.orig/mm/memory.c	2005-05-19 17:41:04.000000000 +1000
+++ linux-2.6.12-rc4/mm/memory.c	2005-05-19 18:01:20.000000000 +1000
@@ -993,7 +993,7 @@
   * with the page_table_lock released.
   */
  static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
-	unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
+	unsigned long address, pte_t *page_table, pte_t pte)
  {
  	struct page *old_page, *new_page;
  	unsigned long pfn = pte_pfn(pte);
@@ -1053,7 +1053,8 @@
  	 * Re-check the pte - we dropped the lock
  	 */
  	spin_lock(&mm->page_table_lock);
-	page_table = pte_offset_map(pmd, address);
+	page_table = lookup_page_table(mm, address);
+
  	if (likely(pte_same(*page_table, pte))) {
  		if (PageAnon(old_page))
  			dec_mm_counter(mm, anon_rss);
@@ -1405,7 +1406,7 @@
   */
  static int do_swap_page(struct mm_struct * mm,
  	struct vm_area_struct * vma, unsigned long address,
-	pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
+	pte_t *page_table, pte_t orig_pte, int write_access)
  {
  	struct page *page;
  	swp_entry_t entry = pte_to_swp_entry(orig_pte);
@@ -1424,7 +1425,7 @@
  			 * we released the page table lock.
  			 */
  			spin_lock(&mm->page_table_lock);
-			page_table = pte_offset_map(pmd, address);
+			page_table = lookup_page_table(mm, address);
  			if (likely(pte_same(*page_table, orig_pte)))
  				ret = VM_FAULT_OOM;
  			else
@@ -1448,7 +1449,7 @@
  	 * released the page table lock.
  	 */
  	spin_lock(&mm->page_table_lock);
-	page_table = pte_offset_map(pmd, address);
+	page_table = lookup_page_table(mm, address);
  	if (unlikely(!pte_same(*page_table, orig_pte))) {
  		pte_unmap(page_table);
  		spin_unlock(&mm->page_table_lock);
@@ -1478,7 +1479,7 @@

  	if (write_access) {
  		if (do_wp_page(mm, vma, address,
-				page_table, pmd, pte) == VM_FAULT_OOM)
+				page_table, pte) == VM_FAULT_OOM)
  			ret = VM_FAULT_OOM;
  		goto out;
  	}
@@ -1499,7 +1500,7 @@
   */
  static int
  do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		pte_t *page_table, pmd_t *pmd, int write_access,
+		pte_t *page_table, int write_access,
  		unsigned long addr)
  {
  	pte_t entry;
@@ -1521,8 +1522,7 @@
  			goto no_mem;

  		spin_lock(&mm->page_table_lock);
-		page_table = pte_offset_map(pmd, addr);
-
+		page_table = lookup_page_table(mm, addr);
  		if (!pte_none(*page_table)) {
  			pte_unmap(page_table);
  			page_cache_release(page);
@@ -1565,7 +1565,7 @@
   */
  static int
  do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-	unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
+	unsigned long address, int write_access, pte_t *page_table)
  {
  	struct page * new_page;
  	struct address_space *mapping = NULL;
@@ -1576,7 +1576,7 @@

  	if (!vma->vm_ops || !vma->vm_ops->nopage)
  		return do_anonymous_page(mm, vma, page_table,
-					pmd, write_access, address);
+					write_access, address);
  	pte_unmap(page_table);
  	spin_unlock(&mm->page_table_lock);

@@ -1631,7 +1631,7 @@
  		page_cache_release(new_page);
  		goto retry;
  	}
-	page_table = pte_offset_map(pmd, address);
+	page_table = lookup_page_table(mm, address);

  	/*
  	 * This silly early PAGE_DIRTY setting removes a race
@@ -1685,7 +1685,7 @@
   * nonlinear vmas.
   */
  static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
-	unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
+	unsigned long address, int write_access, pte_t *pte)
  {
  	unsigned long pgoff;
  	int err;
@@ -1698,7 +1698,7 @@
  	if (!vma->vm_ops || !vma->vm_ops->populate ||
  			(write_access && !(vma->vm_flags & VM_SHARED))) {
  		pte_clear(mm, address, pte);
-		return do_no_page(mm, vma, address, write_access, pte, pmd);
+		return do_no_page(mm, vma, address, write_access, pte);
  	}

  	pgoff = pte_to_pgoff(*pte);
@@ -1706,7 +1706,8 @@
  	pte_unmap(pte);
  	spin_unlock(&mm->page_table_lock);

-	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
+	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
+		vma->vm_page_prot, pgoff, 0);
  	if (err == -ENOMEM)
  		return VM_FAULT_OOM;
  	if (err)
@@ -1737,8 +1738,8 @@
   */
  static inline int handle_pte_fault(struct mm_struct *mm,
  	struct vm_area_struct * vma, unsigned long address,
-	int write_access, pte_t *pte, pmd_t *pmd)
-{
+	int write_access, pte_t *pte)
+{
  	pte_t entry;

  	entry = *pte;
@@ -1749,15 +1750,15 @@
  		 * drop the lock.
  		 */
  		if (pte_none(entry))
-			return do_no_page(mm, vma, address, write_access, pte, pmd);
+			return do_no_page(mm, vma, address, write_access, pte);
  		if (pte_file(entry))
-			return do_file_page(mm, vma, address, write_access, pte, pmd);
-		return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
+			return do_file_page(mm, vma, address, write_access, pte);
+		return do_swap_page(mm, vma, address, pte, entry, write_access);
  	}

  	if (write_access) {
  		if (!pte_write(entry))
-			return do_wp_page(mm, vma, address, pte, pmd, entry);
+			return do_wp_page(mm, vma, address, pte, entry);

  		entry = pte_mkdirty(entry);
  	}
@@ -1776,9 +1777,6 @@
  int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
  		unsigned long address, int write_access)
  {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
  	pte_t *pte;

  	__set_current_state(TASK_RUNNING);
@@ -1792,22 +1790,13 @@
  	 * We need the page table lock to synchronize with kswapd
  	 * and the SMP-safe atomic PTE updates.
  	 */
-	pgd = pgd_offset(mm, address);
  	spin_lock(&mm->page_table_lock);

-	pud = pud_alloc(mm, pgd, address);
-	if (!pud)
-		goto oom;
-
-	pmd = pmd_alloc(mm, pud, address);
-	if (!pmd)
-		goto oom;
-
-	pte = pte_alloc_map(mm, pmd, address);
+	pte = build_page_table(mm, address);
  	if (!pte)
  		goto oom;

-	return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+	return handle_pte_fault(mm, vma, address, write_access, pte);

   oom:
  	spin_unlock(&mm->page_table_lock);


Thread overview: 19+ messages
2005-05-21  2:43 [PATCH 1/15] PTI: clean page table interface Paul Davies
2005-05-21  2:53 ` [PATCH 2/15] PTI: Add general files and directories Paul Cameron Davies
2005-05-21  3:08   ` [PATCH 3/15] PTI: move mlpt behind interface Paul Cameron Davies
2005-05-21  3:15     ` [PATCH 4/15] PTI: move mlpt behind interface cont Paul Cameron Davies
2005-05-21  3:26       ` [PATCH 5/15] PTI: Finish moving mlpt behind interface Paul Cameron Davies
2005-05-21  3:47         ` [PATCH 6/15] PTI: Start calling the interface Paul Cameron Davies
2005-05-21  3:54           ` Paul Cameron Davies [this message]
2005-05-21  4:04             ` [PATCH 8/15] PTI: Keep calling interface Paul Cameron Davies
2005-05-21  4:12               ` [PATCH 9/15] PTI: Introduce iterators Paul Cameron Davies
2005-05-21  4:19                 ` [PATCH 10/15] PTI: Call iterators Paul Cameron Davies
2005-05-21  4:58                   ` [PATCH 11/15] PTI: Continue calling iterators Paul Cameron Davies
2005-05-21  5:04                     ` [PATCH 12/15] PTI: Finish " Paul Cameron Davies
2005-05-21  5:09                       ` [PATCH 13/15] PTI: Add files and IA64 part of interface Paul Cameron Davies
2005-05-21  5:15                         ` [PATCH 14/15] PTI: Move IA64 mlpt code behind interface Paul Cameron Davies
2005-05-21  5:27                           ` [PATCH 15/15] PTI: Call IA64 interface Paul Cameron Davies
2005-05-21  5:46                             ` PTI: Patch 10/15 URL Paul Cameron Davies
2005-05-21  5:47                               ` PTI: LMbench results Paul Cameron Davies
2005-05-28  8:53 ` [PATCH 1/15] PTI: clean page table interface Christoph Hellwig
2005-05-30  5:16   ` Paul Davies
