* [PATCH] rmap: Add folio parameter to __page_set_anon_rmap()
@ 2023-01-16 19:29 Matthew Wilcox (Oracle)
From: Matthew Wilcox (Oracle) @ 2023-01-16 19:29 UTC
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Avoid the compound_head() call in PageAnon() by passing in the folio that
all callers have.  Also save me from wondering whether page->mapping
can ever be overwritten on a tail page (I don't think it can, but I'm
not 100% sure).
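
For reference, a rough sketch of what is being avoided, paraphrased from
include/linux/page-flags.h rather than quoted verbatim:

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	/* page_folio() has to go through compound_head(page) */
	return folio_test_anon(page_folio(page));
}

With the folio passed in explicitly, the anon test and the ->mapping and
->index stores all operate on the folio itself and never touch a tail page.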

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/rmap.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index b26fbbcc9257..ab2246e6f20a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1131,19 +1131,20 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 
 /**
  * __page_set_anon_rmap - set up new anonymous rmap
- * @page:	Page or Hugepage to add to rmap
+ * @folio:	Folio which contains page.
+ * @page:	Page to add to rmap.
  * @vma:	VM area to add page to.
  * @address:	User virtual address of the mapping	
  * @exclusive:	the page is exclusively owned by the current process
  */
-static void __page_set_anon_rmap(struct page *page,
+static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
 
-	if (PageAnon(page))
+	if (folio_test_anon(folio))
 		goto out;
 
 	/*
@@ -1155,14 +1156,14 @@ static void __page_set_anon_rmap(struct page *page,
 		anon_vma = anon_vma->root;
 
 	/*
-	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
+	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
 	 * Make sure the compiler doesn't split the stores of anon_vma and
 	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
 	 * could mistake the mapping for a struct address_space and crash.
 	 */
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
-	page->index = linear_page_index(vma, address);
+	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
+	folio->index = linear_page_index(vma, address);
 out:
 	if (exclusive)
 		SetPageAnonExclusive(page);
@@ -1254,7 +1255,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	if (likely(!folio_test_ksm(folio))) {
 		/* address might be in next vma when migration races vma_adjust */
 		if (first)
-			__page_set_anon_rmap(page, vma, address,
+			__page_set_anon_rmap(folio, page, vma, address,
 					     !!(flags & RMAP_EXCLUSIVE));
 		else
 			__page_check_anon_rmap(page, vma, address);
@@ -1297,7 +1298,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(&folio->page, vma, address, 1);
+	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }
 
 /**
@@ -2542,7 +2543,7 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 	if (first)
-		__page_set_anon_rmap(page, vma, address,
+		__page_set_anon_rmap(folio, page, vma, address,
 				     !!(flags & RMAP_EXCLUSIVE));
 }
 
@@ -2555,6 +2556,6 @@ void hugepage_add_new_anon_rmap(struct page *page,
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
 	folio_clear_hugetlb_restore_reserve(folio);
-	__page_set_anon_rmap(page, vma, address, 1);
+	__page_set_anon_rmap(folio, page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
-- 
2.35.1




* Re: [PATCH] rmap: Add folio parameter to __page_set_anon_rmap()
@ 2023-01-18  5:55 William Kucharski
From: William Kucharski @ 2023-01-18  5:55 UTC
  To: Matthew Wilcox (Oracle); +Cc: Andrew Morton, linux-mm

Looks good.

Reviewed-by: William Kucharski <william.kucharski@oracle.com>

