linux-mm.kvack.org archive mirror
* [PATCH 1/2] mm: convert clear_huge_page() to clear_large_folio()
From: Kefeng Wang @ 2024-06-13 10:53 UTC
  To: Andrew Morton
  Cc: Muchun Song, Matthew Wilcox (Oracle),
	David Hildenbrand, linux-mm, Kefeng Wang

Replace clear_huge_page() with clear_large_folio(), which takes a folio
instead of a page. The number of pages is obtained directly from
folio_nr_pages(), so the pages_per_huge_page argument can be dropped.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 fs/hugetlbfs/inode.c |  2 +-
 include/linux/mm.h   |  4 +---
 mm/huge_memory.c     |  4 ++--
 mm/hugetlb.c         |  3 +--
 mm/memory.c          | 21 +++++++++------------
 5 files changed, 14 insertions(+), 20 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 986c1df63361..0e71ee8fee4a 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -893,7 +893,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 			error = PTR_ERR(folio);
 			goto out;
 		}
-		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
+		clear_large_folio(folio, addr);
 		__folio_mark_uptodate(folio);
 		error = hugetlb_add_to_page_cache(folio, mapping, index);
 		if (unlikely(error)) {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 106bb0310352..4c5b20ee1106 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4071,9 +4071,7 @@ enum mf_action_page_type {
 };
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-extern void clear_huge_page(struct page *page,
-			    unsigned long addr_hint,
-			    unsigned int pages_per_huge_page);
+void clear_large_folio(struct folio *folio, unsigned long addr_hint);
 int copy_user_large_folio(struct folio *dst, struct folio *src,
 			  unsigned long addr_hint,
 			  struct vm_area_struct *vma);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f409ea9fcc18..0a33eda80790 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -943,10 +943,10 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		goto release;
 	}
 
-	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
+	clear_large_folio(folio, vmf->address);
 	/*
 	 * The memory barrier inside __folio_mark_uptodate makes sure that
-	 * clear_huge_page writes become visible before the set_pmd_at()
+	 * clear_large_folio writes become visible before the set_pmd_at()
 	 * write.
 	 */
 	__folio_mark_uptodate(folio);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3518321f6598..99d8cd0f7f11 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6296,8 +6296,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 				ret = 0;
 			goto out;
 		}
-		clear_huge_page(&folio->page, vmf->real_address,
-				pages_per_huge_page(h));
+		clear_large_folio(folio, vmf->real_address);
 		__folio_mark_uptodate(folio);
 		new_folio = true;
 
diff --git a/mm/memory.c b/mm/memory.c
index 3f11664590d2..6ef84cd0b2bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4488,7 +4488,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 				goto next;
 			}
 			folio_throttle_swaprate(folio, gfp);
-			clear_huge_page(&folio->page, vmf->address, 1 << order);
+			clear_large_folio(folio, vmf->address);
 			return folio;
 		}
 next:
@@ -6419,41 +6419,38 @@ static inline int process_huge_page(
 	return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-				unsigned long addr,
+static void clear_gigantic_page(struct folio *folio, unsigned long addr,
 				unsigned int pages_per_huge_page)
 {
 	int i;
-	struct page *p;
 
 	might_sleep();
 	for (i = 0; i < pages_per_huge_page; i++) {
-		p = nth_page(page, i);
 		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
+		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
 	}
 }
 
 static int clear_subpage(unsigned long addr, int idx, void *arg)
 {
-	struct page *page = arg;
+	struct folio *folio = arg;
 
-	clear_user_highpage(nth_page(page, idx), addr);
+	clear_user_highpage(folio_page(folio, idx), addr);
 	return 0;
 }
 
-void clear_huge_page(struct page *page,
-		     unsigned long addr_hint, unsigned int pages_per_huge_page)
+void clear_large_folio(struct folio *folio, unsigned long addr_hint)
 {
+	unsigned int pages_per_huge_page = folio_nr_pages(folio);
 	unsigned long addr = addr_hint &
 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, addr, pages_per_huge_page);
+		clear_gigantic_page(folio, addr, pages_per_huge_page);
 		return;
 	}
 
-	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, folio);
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
-- 
2.27.0




* [PATCH 2/2] mm: memory: use folio in struct copy_subpage_arg
From: Kefeng Wang @ 2024-06-13 10:53 UTC
  To: Andrew Morton
  Cc: Muchun Song, Matthew Wilcox (Oracle),
	David Hildenbrand, linux-mm, Kefeng Wang

Use folios directly in struct copy_subpage_arg, which lets
copy_subpage() use folio_page() instead of nth_page().

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 6ef84cd0b2bf..ca44da80fd47 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6475,16 +6475,16 @@ static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
 }
 
 struct copy_subpage_arg {
-	struct page *dst;
-	struct page *src;
+	struct folio *dst;
+	struct folio *src;
 	struct vm_area_struct *vma;
 };
 
 static int copy_subpage(unsigned long addr, int idx, void *arg)
 {
 	struct copy_subpage_arg *copy_arg = arg;
-	struct page *dst = nth_page(copy_arg->dst, idx);
-	struct page *src = nth_page(copy_arg->src, idx);
+	struct page *dst = folio_page(copy_arg->dst, idx);
+	struct page *src = folio_page(copy_arg->src, idx);
 
 	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
 		return -EHWPOISON;
@@ -6498,8 +6498,8 @@ int copy_user_large_folio(struct folio *dst, struct folio *src,
 	unsigned long addr = addr_hint &
 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 	struct copy_subpage_arg arg = {
-		.dst = &dst->page,
-		.src = &src->page,
+		.dst = dst,
+		.src = src,
 		.vma = vma,
 	};
 
-- 
2.27.0




* Re: [PATCH 1/2] mm: convert clear_huge_page() to clear_large_folio()
From: Matthew Wilcox @ 2024-06-13 14:51 UTC
  To: Kefeng Wang; +Cc: Andrew Morton, Muchun Song, David Hildenbrand, linux-mm

On Thu, Jun 13, 2024 at 06:53:43PM +0800, Kefeng Wang wrote:
> +++ b/include/linux/mm.h
> @@ -4071,9 +4071,7 @@ enum mf_action_page_type {
>  };
>  
>  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
> -extern void clear_huge_page(struct page *page,
> -			    unsigned long addr_hint,
> -			    unsigned int pages_per_huge_page);
> +void clear_large_folio(struct folio *folio, unsigned long addr_hint);

I think this is properly called

void folio_zero_user(struct folio *folio, unsigned long user_addr);

> -void clear_huge_page(struct page *page,
> -		     unsigned long addr_hint, unsigned int pages_per_huge_page)
> +void clear_large_folio(struct folio *folio, unsigned long addr_hint)
>  {
> +	unsigned int pages_per_huge_page = folio_nr_pages(folio);

I think it's worth renaming to nr_pages here.

>  	unsigned long addr = addr_hint &
>  		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

This can just be:

	unsigned long addr = user_addr & ~(folio_size(folio) - 1);

Umm.  Except this assumes that the folio is always mapped to userspace
at its natural alignment.  And that's true for hugetlb, but not true
for THP.  So I think this needs to be moved into the callers for which
it is true, and we need documentation that user_addr is expected to be
the base address that the folio is mapped at.
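
To make that concrete, a rough sketch of the suggested split
(hypothetical and untested): document that user_addr is the base
address the folio is mapped at, and move the alignment mask into the
callers where natural alignment actually holds, e.g. hugetlb:

	/* user_addr must be the base address the folio is mapped at */
	void folio_zero_user(struct folio *folio, unsigned long user_addr);

	/* hugetlb caller: folios are mapped at their natural alignment,
	 * so the base address can be derived from the fault address. */
	folio_zero_user(folio,
			vmf->real_address & ~(folio_size(folio) - 1));

A THP caller that cannot make that assumption would instead pass the
base address it already knows from its own fault handling.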



* Re: [PATCH 1/2] mm: convert clear_huge_page() to clear_large_folio()
From: Kefeng Wang @ 2024-06-14  5:29 UTC
  To: Matthew Wilcox, Huang Ying
  Cc: Andrew Morton, Muchun Song, David Hildenbrand, linux-mm



On 2024/6/13 22:51, Matthew Wilcox wrote:
> On Thu, Jun 13, 2024 at 06:53:43PM +0800, Kefeng Wang wrote:
>> +++ b/include/linux/mm.h
>> @@ -4071,9 +4071,7 @@ enum mf_action_page_type {
>>   };
>>   
>>   #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
>> -extern void clear_huge_page(struct page *page,
>> -			    unsigned long addr_hint,
>> -			    unsigned int pages_per_huge_page);
>> +void clear_large_folio(struct folio *folio, unsigned long addr_hint);
> 
> I think this is properly called
> 
> void folio_zero_user(struct folio *folio, unsigned long user_addr);

OK,

> 
>> -void clear_huge_page(struct page *page,
>> -		     unsigned long addr_hint, unsigned int pages_per_huge_page)
>> +void clear_large_folio(struct folio *folio, unsigned long addr_hint)
>>   {
>> +	unsigned int pages_per_huge_page = folio_nr_pages(folio);
> 
> I think it's worth renaming to nr_pages here.
>
>>   	unsigned long addr = addr_hint &
>>   		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
> 
> This can just be:
> 
> 	unsigned long addr = user_addr & ~(folio_size(folio) - 1);
> 
> Umm.  Except this assumes that the folio is always mapped to userspace
> at its natural alignment.  And that's true for hugetlb, but not true
> for THP.  So I think this needs to be moved into the callers for which
> it is true, and we need documentation that user_addr is expected to be
> the base address that the folio is mapped at.
> 

It's better to move it into the callers. But one more question: the
user_addr should be the address the app will access, or the base
address if we can't know it. So even in hugetlb we should use
vmf->real_address in hugetlb_no_page(). Also Cc'd Huang Ying; correct
me if I'm wrong.
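
For context, the hint exists because of the cache-locality optimization
in process_huge_page(): subpages far from the faulting address are
cleared first and the faulting subpage last, so it is still cache-hot
for the access that triggered the fault. A simplified sketch of that
idea (hypothetical helper; the real code walks the subpages in a more
elaborate left/right order):

	static void clear_folio_simple(unsigned long addr_hint,
				       struct folio *folio)
	{
		unsigned int nr = folio_nr_pages(folio);
		unsigned long base = addr_hint &
			~(((unsigned long)nr << PAGE_SHIFT) - 1);
		unsigned int target = (addr_hint - base) >> PAGE_SHIFT;
		unsigned int i;

		for (i = 0; i < nr; i++) {
			if (i == target)	/* save the hot page for last */
				continue;
			clear_user_highpage(folio_page(folio, i),
					    base + i * PAGE_SIZE);
			cond_resched();
		}
		/* the faulting subpage last, while the app still wants it */
		clear_user_highpage(folio_page(folio, target), addr_hint);
	}

So if the hint were the base address rather than the faulting address,
that optimization would be lost.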

Thanks.



* Re: [PATCH 2/2] mm: memory: use folio in struct copy_subpage_arg
From: David Hildenbrand @ 2024-06-14 12:26 UTC
  To: Kefeng Wang, Andrew Morton; +Cc: Muchun Song, Matthew Wilcox (Oracle), linux-mm

On 13.06.24 12:53, Kefeng Wang wrote:
> Directly use folio in struct copy_subpage_arg.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---

Acked-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb



