linux-mm.kvack.org archive mirror
From: Jan Kara <jack@suse.cz>
To: linux-mm@kvack.org
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-fsdevel@vger.kernel.org, linux-nvdimm@lists.01.org,
	Jan Kara <jack@suse.cz>
Subject: [PATCH 05/20] mm: Trim __do_fault() arguments
Date: Fri, 18 Nov 2016 10:17:09 +0100	[thread overview]
Message-ID: <1479460644-25076-6-git-send-email-jack@suse.cz> (raw)
In-Reply-To: <1479460644-25076-1-git-send-email-jack@suse.cz>

Use the vm_fault structure to pass cow_page, page, and entry into and out
of the function. That reduces the number of __do_fault() arguments from 4 to 1.
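
The idea is that struct vm_fault acts as a shared fault context: callers fill
in the inputs (cow_page) before calling __do_fault() and read the outputs
(page, entry) back from it afterwards. A minimal, self-contained userspace
sketch of that calling convention (illustrative only; the dummy struct page,
the function body, and the values are made up and not the kernel code):

/*
 * Sketch of the pattern: inputs and outputs travel in one context
 * struct instead of extra parameters and **out pointers.
 *
 * Old shape (for comparison):
 *   int __do_fault(struct vm_fault *vmf, struct page *cow_page,
 *                  struct page **page, void **entry);
 */
#include <stdio.h>

struct page { int id; };	/* dummy stand-in for the kernel's struct page */

struct vm_fault {
	unsigned long address;	/* faulting address */
	struct page *cow_page;	/* in:  page to copy into (COW), or NULL */
	struct page *page;	/* out: page provided by the fault handler */
	void *entry;		/* out: DAX entry, when the handler uses one */
};

/* New shape: a single argument; results are read back from *vmf. */
static int do_fault(struct vm_fault *vmf)
{
	static struct page backing = { .id = 42 };

	vmf->page = &backing;	/* handler publishes its result through vmf */
	return 0;
}

int main(void)
{
	struct vm_fault vmf = { .address = 0x1000, .cow_page = NULL };

	if (do_fault(&vmf) == 0)
		printf("fault at %#lx served by page %d\n",
		       vmf.address, vmf.page->id);
	return 0;
}

The patch below applies the same idea: do_read_fault(), do_cow_fault(), and
do_shared_fault() set vmf->cow_page where needed and afterwards work with
vmf->page directly instead of a local fault_page variable.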

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
---
 mm/memory.c | 67 ++++++++++++++++++++++++++-----------------------------------
 1 file changed, 29 insertions(+), 38 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 2d7cdefc0690..acbe77a37aa0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2848,26 +2848,22 @@ static int do_anonymous_page(struct vm_fault *vmf)
  * released depending on flags and vma->vm_ops->fault() return value.
  * See filemap_fault() and __lock_page_retry().
  */
-static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
-		      struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	int ret;
 
-	vmf->cow_page = cow_page;
-
 	ret = vma->vm_ops->fault(vma, vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED) {
-		*entry = vmf->entry;
+	if (ret & VM_FAULT_DAX_LOCKED)
 		return ret;
-	}
 
 	if (unlikely(PageHWPoison(vmf->page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf->page);
 		put_page(vmf->page);
+		vmf->page = NULL;
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2876,7 +2872,6 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-	*page = vmf->page;
 	return ret;
 }
 
@@ -3173,7 +3168,6 @@ static int do_fault_around(struct vm_fault *vmf)
 static int do_read_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	int ret = 0;
 
 	/*
@@ -3187,54 +3181,52 @@ static int do_read_fault(struct vm_fault *vmf)
 			return ret;
 	}
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-	unlock_page(fault_page);
+	unlock_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		put_page(fault_page);
+		put_page(vmf->page);
 	return ret;
 }
 
 static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page, *new_page;
-	void *fault_entry;
 	struct mem_cgroup *memcg;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
-	if (!new_page)
+	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+	if (!vmf->cow_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+	if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
 				&memcg, false)) {
-		put_page(new_page);
+		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
 	if (!(ret & VM_FAULT_DAX_LOCKED))
-		copy_user_highpage(new_page, fault_page, vmf->address, vma);
-	__SetPageUptodate(new_page);
+		copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+	__SetPageUptodate(vmf->cow_page);
 
-	ret |= alloc_set_pte(vmf, memcg, new_page);
+	ret |= alloc_set_pte(vmf, memcg, vmf->cow_page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 	} else {
 		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
 	}
@@ -3242,20 +3234,19 @@ static int do_cow_fault(struct vm_fault *vmf)
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(new_page, memcg, false);
-	put_page(new_page);
+	mem_cgroup_cancel_charge(vmf->cow_page, memcg, false);
+	put_page(vmf->cow_page);
 	return ret;
 }
 
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	struct address_space *mapping;
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3264,26 +3255,26 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * about to become writable
 	 */
 	if (vma->vm_ops->page_mkwrite) {
-		unlock_page(fault_page);
-		tmp = do_page_mkwrite(vma, fault_page, vmf->address);
+		unlock_page(vmf->page);
+		tmp = do_page_mkwrite(vma, vmf->page, vmf->address);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(fault_page);
+			put_page(vmf->page);
 			return tmp;
 		}
 	}
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 					VM_FAULT_RETRY))) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 		return ret;
 	}
 
-	if (set_page_dirty(fault_page))
+	if (set_page_dirty(vmf->page))
 		dirtied = 1;
 	/*
 	 * Take a local copy of the address_space - page.mapping may be zeroed
@@ -3291,8 +3282,8 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
 	 * release semantics to prevent the compiler from undoing this copying.
 	 */
-	mapping = page_rmapping(fault_page);
-	unlock_page(fault_page);
+	mapping = page_rmapping(vmf->page);
+	unlock_page(vmf->page);
 	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
 		/*
 		 * Some device drivers do not set page.mapping but still
-- 
2.6.6



Thread overview: 28+ messages
2016-11-18  9:17 [PATCH 0/20 v5] dax: Clear dirty bits after flushing caches Jan Kara
2016-11-18  9:17 ` [PATCH 01/20] mm: Join struct fault_env and vm_fault Jan Kara
2016-11-18 10:22   ` Kirill A. Shutemov
2016-11-18  9:17 ` [PATCH 02/20] mm: Use vmf->address instead of of vmf->virtual_address Jan Kara
2016-11-18 10:22   ` Kirill A. Shutemov
2016-11-18  9:17 ` [PATCH 03/20] mm: Use pgoff in struct vm_fault instead of passing it separately Jan Kara
2016-11-18  9:17 ` [PATCH 04/20] mm: Use passed vm_fault structure in __do_fault() Jan Kara
2016-11-18  9:17 ` Jan Kara [this message]
2016-11-18  9:17 ` [PATCH 06/20] mm: Use passed vm_fault structure for in wp_pfn_shared() Jan Kara
2016-11-18  9:17 ` [PATCH 07/20] mm: Add orig_pte field into vm_fault Jan Kara
2016-11-18  9:17 ` [PATCH 08/20] mm: Allow full handling of COW faults in ->fault handlers Jan Kara
2016-11-18  9:17 ` [PATCH 09/20] mm: Factor out functionality to finish page faults Jan Kara
2016-11-18  9:17 ` [PATCH 10/20] mm: Move handling of COW faults into DAX code Jan Kara
2016-11-21  4:39   ` Ross Zwisler
2016-11-18  9:17 ` [PATCH 11/20] mm: Factor out common parts of write fault handling Jan Kara
2016-11-18  9:17 ` [PATCH 12/20] mm: Pass vm_fault structure into do_page_mkwrite() Jan Kara
2016-11-18 10:23   ` Kirill A. Shutemov
2016-11-18  9:17 ` [PATCH 13/20] mm: Use vmf->page during WP faults Jan Kara
2016-11-18  9:17 ` [PATCH 14/20] mm: Move part of wp_page_reuse() into the single call site Jan Kara
2016-11-18  9:17 ` [PATCH 15/20] mm: Provide helper for finishing mkwrite faults Jan Kara
2016-11-18  9:17 ` [PATCH 16/20] mm: Change return values of finish_mkwrite_fault() Jan Kara
2016-11-18  9:17 ` [PATCH 17/20] mm: Export follow_pte() Jan Kara
2016-11-18  9:17 ` [PATCH 18/20] dax: Make cache flushing protected by entry lock Jan Kara
2016-11-18  9:17 ` [PATCH 19/20] dax: Protect PTE modification on WP fault by radix tree " Jan Kara
2016-11-18  9:17 ` [PATCH 20/20] dax: Clear dirty entry tags on cache flush Jan Kara
  -- strict thread matches above, loose matches on Subject: below --
2016-09-27 16:08 [PATCH 0/20 v3] dax: Clear dirty bits after flushing caches Jan Kara
2016-09-27 16:08 ` [PATCH 05/20] mm: Trim __do_fault() arguments Jan Kara
2016-10-14 20:31   ` Ross Zwisler
2016-10-17  9:04     ` Jan Kara
