From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Rapoport <rppt@kernel.org>,
Matthew Wilcox <willy@infradead.org>,
David Hildenbrand <david@redhat.com>, <linux-mm@kvack.org>,
<linux-kernel@vger.kernel.org>, <ying.huang@intel.com>,
Zi Yan <ziy@nvidia.com>, Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: [PATCH -next 6/9] mm: make wp_page_reuse() and finish_mkwrite_fault() take a folio
Date: Tue, 26 Sep 2023 08:52:51 +0800
Message-ID: <20230926005254.2861577-7-wangkefeng.wang@huawei.com>
In-Reply-To: <20230926005254.2861577-1-wangkefeng.wang@huawei.com>
Make finish_mkwrite_fault() a static function, and convert
wp_page_reuse() and finish_mkwrite_fault() to take a folio, in
preparation for converting page_cpupid_xchg_last() to a folio
interface.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
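[Note to reviewers, not part of the commit message: a minimal sketch of
the calling convention after this patch, assuming the callers keep
their current structure. Callers that have a normal page behind the PTE
derive the folio from vmf->page with page_folio() and pass it through;
wp_pfn_shared() has no struct page behind the PTE, so it passes NULL
and wp_page_reuse() simply skips the cpupid reset.

	/* caller with a normal page behind the PTE (e.g. do_wp_page()) */
	struct folio *folio = vmf->page ? page_folio(vmf->page) : NULL;

	wp_page_reuse(vmf, folio);	/* clears cpupid when folio != NULL */

	/* pure PFN mapping (wp_pfn_shared()): no folio to pass */
	wp_page_reuse(vmf, NULL);	/* no cpupid update */
]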
include/linux/mm.h | 1 -
mm/memory.c | 37 ++++++++++++++++++++-----------------
2 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index aa7fdda1b56c..9933f6345e66 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1335,7 +1335,6 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
struct page *page, unsigned int nr, unsigned long addr);
vm_fault_t finish_fault(struct vm_fault *vmf);
-vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
diff --git a/mm/memory.c b/mm/memory.c
index 5ab6e8d45a7d..119c40e4465e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3014,23 +3014,24 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
* case, all we need to do here is to mark the page as writable and update
* any related book-keeping.
*/
-static inline void wp_page_reuse(struct vm_fault *vmf)
+static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
__releases(vmf->ptl)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *page = vmf->page;
pte_t entry;
VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
- VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
+ if (folio) {
+ VM_BUG_ON(folio_test_anon(folio) &&
+ !PageAnonExclusive(vmf->page));
- /*
- * Clear the pages cpupid information as the existing
- * information potentially belongs to a now completely
- * unrelated process.
- */
- if (page)
- page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
+ /*
+ * Clear the pages cpupid information as the existing
+ * information potentially belongs to a now completely
+ * unrelated process.
+ */
+ page_cpupid_xchg_last(vmf->page, (1 << LAST_CPUPID_SHIFT) - 1);
+ }
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
entry = pte_mkyoung(vmf->orig_pte);
@@ -3223,6 +3224,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* writeable once the page is prepared
*
* @vmf: structure describing the fault
+ * @folio: the folio of vmf->page
*
* This function handles all that is needed to finish a write page fault in a
* shared mapping due to PTE being read-only once the mapped page is prepared.
@@ -3234,7 +3236,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
* we acquired PTE lock.
*/
-vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
+static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf,
+ struct folio *folio)
{
WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
@@ -3250,7 +3253,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
pte_unmap_unlock(vmf->pte, vmf->ptl);
return VM_FAULT_NOPAGE;
}
- wp_page_reuse(vmf);
+ wp_page_reuse(vmf, folio);
return 0;
}
@@ -3275,9 +3278,9 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
ret = vma->vm_ops->pfn_mkwrite(vmf);
if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
return ret;
- return finish_mkwrite_fault(vmf);
+ return finish_mkwrite_fault(vmf, NULL);
}
- wp_page_reuse(vmf);
+ wp_page_reuse(vmf, NULL);
return 0;
}
@@ -3305,14 +3308,14 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
folio_put(folio);
return tmp;
}
- tmp = finish_mkwrite_fault(vmf);
+ tmp = finish_mkwrite_fault(vmf, folio);
if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
folio_unlock(folio);
folio_put(folio);
return tmp;
}
} else {
- wp_page_reuse(vmf);
+ wp_page_reuse(vmf, folio);
folio_lock(folio);
}
ret |= fault_dirty_shared_page(vmf);
@@ -3436,7 +3439,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
- wp_page_reuse(vmf);
+ wp_page_reuse(vmf, folio);
return 0;
}
copy:
--
2.27.0