linux-mm.kvack.org archive mirror
* [PATCH v3 0/5] mm: cleanup and use more folio in page fault
@ 2023-11-18  2:32 Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 1/5] mm: ksm: use more folio api in ksm_might_need_to_copy() Kefeng Wang
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: Kefeng Wang @ 2023-11-18  2:32 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Matthew Wilcox, David Hildenbrand,
	Sidhartha Kumar, Vishal Moola, Kefeng Wang

Rename page_copy_prealloc() to folio_prealloc() so that it can be used
by more functions, and do more folio conversion in the page fault path.
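
For reference, the consolidated helper ends up with roughly this shape
(a sketch assembled from patch 3; the memcg charge and swaprate
throttle inherited from the old page_copy_prealloc() are reconstructed
here, so treat the tail as illustrative rather than verbatim):

	static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
			struct vm_area_struct *vma, unsigned long addr,
			bool need_zero)
	{
		struct folio *new_folio;

		if (need_zero)
			new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
		else
			new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
						    vma, addr, false);
		if (!new_folio)
			return NULL;

		/* unchanged tail: charge to memcg, then throttle swaprate */
		if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
			folio_put(new_folio);
			return NULL;
		}
		folio_throttle_swaprate(new_folio, GFP_KERNEL);

		return new_folio;
	}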

v3:
- drop patch 6, as small-sized THP for anon will change anon allocation
- correct do_cow_page to do_cow_fault in the subject and changelog
- add RB of Vishal

v2:
- add a folio_test_large() check in ksm_might_need_to_copy() and
  replace page->index with folio->index, per David and Matthew
- add RB of Sidhartha

Kefeng Wang (5):
  mm: ksm: use more folio api in ksm_might_need_to_copy()
  mm: memory: use a folio in validate_page_before_insert()
  mm: memory: rename page_copy_prealloc() to folio_prealloc()
  mm: memory: use a folio in do_cow_fault()
  mm: memory: use folio_prealloc() in wp_page_copy()

 include/linux/ksm.h |  4 ++--
 mm/ksm.c            | 39 ++++++++++++++++--------------
 mm/memory.c         | 58 +++++++++++++++++++++------------------------
 3 files changed, 50 insertions(+), 51 deletions(-)

-- 
2.27.0




* [PATCH v3 1/5] mm: ksm: use more folio api in ksm_might_need_to_copy()
  2023-11-18  2:32 [PATCH v3 0/5] mm: cleanup and use more folio in page fault Kefeng Wang
@ 2023-11-18  2:32 ` Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 2/5] mm: memory: use a folio in validate_page_before_insert() Kefeng Wang
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Kefeng Wang @ 2023-11-18  2:32 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Matthew Wilcox, David Hildenbrand,
	Sidhartha Kumar, Vishal Moola, Kefeng Wang

Since KSM only supports normal pages, and there is no swap-out/in of
large KSM folios either, add a large folio check in
ksm_might_need_to_copy(); also convert page->index to folio->index,
as page->index is going away.

Then convert ksm_might_need_to_copy() to use more of the folio API,
which saves nine compound_head() calls, and shorten 'address' to
'addr' to stay within the maximum line length.
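
For illustration, each page-based flag test re-derives the head page,
which the folio variants skip; a rough sketch of the pattern (not the
exact macro expansion):

	struct folio *folio = page_folio(page);	/* one head lookup */

	/* page API: each call hides its own compound_head(page) */
	if (!PageUptodate(page))
		return page;

	/* folio API: operates on the already-known head, no lookup */
	if (!folio_test_uptodate(folio))
		return page;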

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/ksm.h |  4 ++--
 mm/ksm.c            | 39 +++++++++++++++++++++------------------
 2 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index c2dd786a30e1..4643d5244e77 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -77,7 +77,7 @@ static inline void ksm_exit(struct mm_struct *mm)
  * but what if the vma was unmerged while the page was swapped out?
  */
 struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
+			struct vm_area_struct *vma, unsigned long addr);
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
@@ -130,7 +130,7 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 }
 
 static inline struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	return page;
 }
diff --git a/mm/ksm.c b/mm/ksm.c
index 6a831009b4cb..6d841c22642b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2876,48 +2876,51 @@ void __ksm_exit(struct mm_struct *mm)
 }
 
 struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct folio *folio = page_folio(page);
 	struct anon_vma *anon_vma = folio_anon_vma(folio);
-	struct page *new_page;
+	struct folio *new_folio;
 
-	if (PageKsm(page)) {
-		if (page_stable_node(page) &&
+	if (folio_test_large(folio))
+		return page;
+
+	if (folio_test_ksm(folio)) {
+		if (folio_stable_node(folio) &&
 		    !(ksm_run & KSM_RUN_UNMERGE))
 			return page;	/* no need to copy it */
 	} else if (!anon_vma) {
 		return page;		/* no need to copy it */
-	} else if (page->index == linear_page_index(vma, address) &&
+	} else if (folio->index == linear_page_index(vma, addr) &&
 			anon_vma->root == vma->anon_vma->root) {
 		return page;		/* still no need to copy it */
 	}
 	if (PageHWPoison(page))
 		return ERR_PTR(-EHWPOISON);
-	if (!PageUptodate(page))
+	if (!folio_test_uptodate(folio))
 		return page;		/* let do_swap_page report the error */
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-	if (new_page &&
-	    mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
-		put_page(new_page);
-		new_page = NULL;
+	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	if (new_folio &&
+	    mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
+		folio_put(new_folio);
+		new_folio = NULL;
 	}
-	if (new_page) {
-		if (copy_mc_user_highpage(new_page, page, address, vma)) {
-			put_page(new_page);
+	if (new_folio) {
+		if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) {
+			folio_put(new_folio);
 			memory_failure_queue(page_to_pfn(page), 0);
 			return ERR_PTR(-EHWPOISON);
 		}
-		SetPageDirty(new_page);
-		__SetPageUptodate(new_page);
-		__SetPageLocked(new_page);
+		folio_set_dirty(new_folio);
+		__folio_mark_uptodate(new_folio);
+		__folio_set_locked(new_folio);
 #ifdef CONFIG_SWAP
 		count_vm_event(KSM_SWPIN_COPY);
 #endif
 	}
 
-	return new_page;
+	return new_folio ? &new_folio->page : NULL;
 }
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
-- 
2.27.0




* [PATCH v3 2/5] mm: memory: use a folio in validate_page_before_insert()
  2023-11-18  2:32 [PATCH v3 0/5] mm: cleanup and use more folio in page fault Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 1/5] mm: ksm: use more folio api in ksm_might_need_to_copy() Kefeng Wang
@ 2023-11-18  2:32 ` Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 3/5] mm: memory: rename page_copy_prealloc() to folio_prealloc() Kefeng Wang
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Kefeng Wang @ 2023-11-18  2:32 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Matthew Wilcox, David Hildenbrand,
	Sidhartha Kumar, Vishal Moola, Kefeng Wang

Use a folio in validate_page_before_insert() to save two
compound_head() calls.

Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 4de9fa7d7073..dffd39c9b3af 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1839,9 +1839,12 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
 
 static int validate_page_before_insert(struct page *page)
 {
-	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
+	struct folio *folio = page_folio(page);
+
+	if (folio_test_anon(folio) || folio_test_slab(folio) ||
+	    page_has_type(page))
 		return -EINVAL;
-	flush_dcache_page(page);
+	flush_dcache_folio(folio);
 	return 0;
 }
 
-- 
2.27.0




* [PATCH v3 3/5] mm: memory: rename page_copy_prealloc() to folio_prealloc()
  2023-11-18  2:32 [PATCH v3 0/5] mm: cleanup and use more folio in page fault Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 1/5] mm: ksm: use more folio api in ksm_might_need_to_copy() Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 2/5] mm: memory: use a folio in validate_page_before_insert() Kefeng Wang
@ 2023-11-18  2:32 ` Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 4/5] mm: memory: use a folio in do_cow_fault() Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 5/5] mm: memory: use folio_prealloc() in wp_page_copy() Kefeng Wang
  4 siblings, 0 replies; 7+ messages in thread
From: Kefeng Wang @ 2023-11-18  2:32 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Matthew Wilcox, David Hildenbrand,
	Sidhartha Kumar, Vishal Moola, Kefeng Wang

Let's rename page_copy_prealloc() to folio_prealloc(), so that it can
be reused by more functions. Since some callers may need the new folio
zeroed, pass a new need_zero argument and call
vma_alloc_zeroed_movable_folio() when need_zero is true.
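
For example, a caller that wants the new folio pre-zeroed just passes
need_zero=true; a usage sketch (patch 5 uses this form for the
zero-pfn case in wp_page_copy()):

	/* zeroed, memcg-charged folio, or NULL on failure */
	new_folio = folio_prealloc(mm, vma, vmf->address, true);
	if (!new_folio)
		return VM_FAULT_OOM;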

Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index dffd39c9b3af..93c9ed01fc86 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -990,12 +990,17 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	return 0;
 }
 
-static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
-		struct vm_area_struct *vma, unsigned long addr)
+static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
+		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
 {
 	struct folio *new_folio;
 
-	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	if (need_zero)
+		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
+	else
+		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+					    addr, false);
+
 	if (!new_folio)
 		return NULL;
 
@@ -1127,7 +1132,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	} else if (ret == -EBUSY) {
 		goto out;
 	} else if (ret ==  -EAGAIN) {
-		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
+		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
 		if (!prealloc)
 			return -ENOMEM;
 	} else if (ret) {
-- 
2.27.0




* [PATCH v3 4/5] mm: memory: use a folio in do_cow_fault()
  2023-11-18  2:32 [PATCH v3 0/5] mm: cleanup and use more folio in page fault Kefeng Wang
                   ` (2 preceding siblings ...)
  2023-11-18  2:32 ` [PATCH v3 3/5] mm: memory: rename page_copy_prealloc() to folio_prealloc() Kefeng Wang
@ 2023-11-18  2:32 ` Kefeng Wang
  2023-11-18  2:32 ` [PATCH v3 5/5] mm: memory: use folio_prealloc() in wp_page_copy() Kefeng Wang
  4 siblings, 0 replies; 7+ messages in thread
From: Kefeng Wang @ 2023-11-18  2:32 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Matthew Wilcox, David Hildenbrand,
	Sidhartha Kumar, Vishal Moola, Kefeng Wang

Use the folio_prealloc() helper and convert do_cow_fault() to use a
folio, which saves five compound_head() calls.
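
A minimal sketch of the resulting flow (assuming the folio_prealloc()
from patch 3, which also handles the memcg charge, so a charge failure
surfaces as an allocation failure here):

	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
	if (!folio)
		return VM_FAULT_OOM;
	vmf->cow_page = &folio->page;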

Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 93c9ed01fc86..868a2fc54549 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4739,6 +4739,7 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	struct folio *folio;
 	vm_fault_t ret;
 
 	ret = vmf_can_call_fault(vmf);
@@ -4747,16 +4748,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 	if (ret)
 		return ret;
 
-	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
-	if (!vmf->cow_page)
+	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
+	if (!folio)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
-				GFP_KERNEL)) {
-		put_page(vmf->cow_page);
-		return VM_FAULT_OOM;
-	}
-	folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
+	vmf->cow_page = &folio->page;
 
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
@@ -4765,7 +4761,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 		return ret;
 
 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
-	__SetPageUptodate(vmf->cow_page);
+	__folio_mark_uptodate(folio);
 
 	ret |= finish_fault(vmf);
 	unlock_page(vmf->page);
@@ -4774,7 +4770,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	put_page(vmf->cow_page);
+	folio_put(folio);
 	return ret;
 }
 
-- 
2.27.0




* [PATCH v3 5/5] mm: memory: use folio_prealloc() in wp_page_copy()
  2023-11-18  2:32 [PATCH v3 0/5] mm: cleanup and use more folio in page fault Kefeng Wang
                   ` (3 preceding siblings ...)
  2023-11-18  2:32 ` [PATCH v3 4/5] mm: memory: use a folio in do_cow_fault() Kefeng Wang
@ 2023-11-18  2:32 ` Kefeng Wang
  2023-11-18  2:43   ` Kefeng Wang
  4 siblings, 1 reply; 7+ messages in thread
From: Kefeng Wang @ 2023-11-18  2:32 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Matthew Wilcox, David Hildenbrand,
	Sidhartha Kumar, Vishal Moola, Kefeng Wang

Use the folio_prealloc() helper to simplify the code a bit.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory.c | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 868a2fc54549..98d9c7094cab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3112,6 +3112,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	int page_copied = 0;
 	struct mmu_notifier_range range;
 	vm_fault_t ret;
+	bool pfn_is_zero;
 
 	delayacct_wpcopy_start();
 
@@ -3121,16 +3122,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	if (unlikely(ret))
 		goto out;
 
-	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
-		if (!new_folio)
-			goto oom;
-	} else {
+	pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
+	new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
+	if (!new_folio)
+		goto oom;
+
+	if (!pfn_is_zero) {
 		int err;
-		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
-				vmf->address, false);
-		if (!new_folio)
-			goto oom;
 
 		err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
 		if (err) {
@@ -3151,10 +3149,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		kmsan_copy_page_meta(&new_folio->page, vmf->page);
 	}
 
-	if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
-		goto oom_free_new;
-	folio_throttle_swaprate(new_folio, GFP_KERNEL);
-
 	__folio_mark_uptodate(new_folio);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
@@ -3253,8 +3247,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 
 	delayacct_wpcopy_end();
 	return 0;
-oom_free_new:
-	folio_put(new_folio);
 oom:
 	ret = VM_FAULT_OOM;
 out:
-- 
2.27.0




* Re: [PATCH v3 5/5] mm: memory: use folio_prealloc() in wp_page_copy()
  2023-11-18  2:32 ` [PATCH v3 5/5] mm: memory: use folio_prealloc() in wp_page_copy() Kefeng Wang
@ 2023-11-18  2:43   ` Kefeng Wang
  0 siblings, 0 replies; 7+ messages in thread
From: Kefeng Wang @ 2023-11-18  2:43 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Matthew Wilcox, David Hildenbrand,
	Sidhartha Kumar, Vishal Moola



On 2023/11/18 10:32, Kefeng Wang wrote:
> Use the folio_prealloc() helper to simplify the code a bit.
> 
Forgot to add Vishal's RB; adding it:

Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> [...]

