From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>, linux-mm@kvack.org
Subject: [PATCH 16/21] mm/shmem: Convert shmem_add_to_page_cache to take a folio
Date: Fri, 29 Apr 2022 20:23:24 +0100
Message-ID: <20220429192329.3034378-17-willy@infradead.org>
In-Reply-To: <20220429192329.3034378-1-willy@infradead.org>

Shrinks shmem_add_to_page_cache() by 16 bytes.  All the callers grow,
but this is temporary as they will all be converted to folios soon.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
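
For reviewers, a sketch of the interim call-site pattern this patch
introduces (illustrative only; identifiers as in the diff below).
Each caller keeps working with a struct page and converts at the
boundary:

	struct folio *folio;

	/* Wrap the head page in its folio before inserting. */
	folio = page_folio(page);
	error = shmem_add_to_page_cache(folio, mapping, index,
					NULL, gfp, charge_mm);

The page_folio() call and the extra local are why each caller grows;
both go away once the callers are themselves converted to folios.
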
 mm/shmem.c | 57 +++++++++++++++++++++++++++++-------------------------
 1 file changed, 31 insertions(+), 26 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 3461bdec6b38..4331a4daac01 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -695,36 +695,35 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
-static int shmem_add_to_page_cache(struct page *page,
+static int shmem_add_to_page_cache(struct folio *folio,
 				   struct address_space *mapping,
 				   pgoff_t index, void *expected, gfp_t gfp,
 				   struct mm_struct *charge_mm)
 {
-	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
-	unsigned long nr = compound_nr(page);
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
+	long nr = folio_nr_pages(folio);
 	int error;
 
-	VM_BUG_ON_PAGE(PageTail(page), page);
-	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-	VM_BUG_ON(expected && PageTransHuge(page));
+	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
+	VM_BUG_ON(expected && folio_test_large(folio));
 
-	page_ref_add(page, nr);
-	page->mapping = mapping;
-	page->index = index;
+	folio_ref_add(folio, nr);
+	folio->mapping = mapping;
+	folio->index = index;
 
-	if (!PageSwapCache(page)) {
-		error = mem_cgroup_charge(page_folio(page), charge_mm, gfp);
+	if (!folio_test_swapcache(folio)) {
+		error = mem_cgroup_charge(folio, charge_mm, gfp);
 		if (error) {
-			if (PageTransHuge(page)) {
+			if (folio_test_large(folio)) {
 				count_vm_event(THP_FILE_FALLBACK);
 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
 			}
 			goto error;
 		}
 	}
-	cgroup_throttle_swaprate(page, gfp);
+	folio_throttle_swaprate(folio, gfp);
 
 	do {
 		xas_lock_irq(&xas);
@@ -736,16 +735,16 @@ static int shmem_add_to_page_cache(struct page *page,
 			xas_set_err(&xas, -EEXIST);
 			goto unlock;
 		}
-		xas_store(&xas, page);
+		xas_store(&xas, folio);
 		if (xas_error(&xas))
 			goto unlock;
-		if (PageTransHuge(page)) {
+		if (folio_test_large(folio)) {
 			count_vm_event(THP_FILE_ALLOC);
-			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
+			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
 		}
 		mapping->nrpages += nr;
-		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
-		__mod_lruvec_page_state(page, NR_SHMEM, nr);
+		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -757,8 +756,8 @@ static int shmem_add_to_page_cache(struct page *page,
 
 	return 0;
 error:
-	page->mapping = NULL;
-	page_ref_sub(page, nr);
+	folio->mapping = NULL;
+	folio_ref_sub(folio, nr);
 	return error;
 }
 
@@ -1690,7 +1689,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-	struct page *page;
+	struct page *page = NULL;
+	struct folio *folio;
 	swp_entry_t swap;
 	int error;
 
@@ -1740,7 +1740,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 			goto failed;
 	}
 
-	error = shmem_add_to_page_cache(page, mapping, index,
+	folio = page_folio(page);
+	error = shmem_add_to_page_cache(folio, mapping, index,
 					swp_to_radix_entry(swap), gfp,
 					charge_mm);
 	if (error)
@@ -1791,6 +1792,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct shmem_sb_info *sbinfo;
 	struct mm_struct *charge_mm;
+	struct folio *folio;
 	struct page *page;
 	pgoff_t hindex = index;
 	gfp_t huge_gfp;
@@ -1905,7 +1907,8 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	if (sgp == SGP_WRITE)
 		__SetPageReferenced(page);
 
-	error = shmem_add_to_page_cache(page, mapping, hindex,
+	folio = page_folio(page);
+	error = shmem_add_to_page_cache(folio, mapping, hindex,
 					NULL, gfp & GFP_RECLAIM_MASK,
 					charge_mm);
 	if (error)
@@ -2327,6 +2330,7 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
 	void *page_kaddr;
+	struct folio *folio;
 	struct page *page;
 	int ret;
 	pgoff_t max_off;
@@ -2385,7 +2389,8 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (unlikely(pgoff >= max_off))
 		goto out_release;
 
-	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+	folio = page_folio(page);
+	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
 				      gfp & GFP_RECLAIM_MASK, dst_mm);
 	if (ret)
 		goto out_release;
-- 
2.34.1




Thread overview: 29+ messages
2022-04-29 19:23 [PATCH 00/21] Folio patches for 5.19 Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 01/21] shmem: Convert shmem_alloc_hugepage() to use vma_alloc_folio() Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 02/21] mm/huge_memory: Convert do_huge_pmd_anonymous_page() " Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 03/21] mm: Remove alloc_pages_vma() Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 04/21] vmscan: Use folio_mapped() in shrink_page_list() Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 05/21] vmscan: Convert the writeback handling in shrink_page_list() to folios Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 06/21] swap: Turn get_swap_page() into folio_alloc_swap() Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 07/21] swap: Convert add_to_swap() to take a folio Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 08/21] vmscan: Convert dirty page handling to folios Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 09/21] vmscan: Convert page buffer handling to use folios Matthew Wilcox (Oracle)
2022-04-29 19:50   ` Andrew Morton
2022-04-29 19:23 ` [PATCH 10/21] vmscan: Convert lazy freeing to folios Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 11/21] vmscan: Move initialisation of mapping down Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 12/21] vmscan: Convert the activate_locked portion of shrink_page_list to folios Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 13/21] vmscan: Remove remaining uses of page in shrink_page_list Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 14/21] mm/shmem: Use a folio in shmem_unused_huge_shrink Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 15/21] mm/swap: Add folio_throttle_swaprate Matthew Wilcox (Oracle)
2022-04-29 19:23 ` Matthew Wilcox (Oracle) [this message]
2022-05-03 11:10   ` [PATCH 16/21] mm/shmem: Convert shmem_add_to_page_cache to take a folio Sebastian Andrzej Siewior
2022-05-03 12:48     ` Matthew Wilcox
2022-05-03 13:00       ` Sebastian Andrzej Siewior
2022-05-03 13:05         ` Matthew Wilcox
2022-05-03 13:09           ` Sebastian Andrzej Siewior
2022-04-29 19:23 ` [PATCH 17/21] mm/shmem: Turn shmem_should_replace_page into shmem_should_replace_folio Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 18/21] mm/shmem: Turn shmem_alloc_page() into shmem_alloc_folio() Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 19/21] mm/shmem: Convert shmem_alloc_and_acct_page to use a folio Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 20/21] mm/shmem: Convert shmem_getpage_gfp " Matthew Wilcox (Oracle)
2022-04-29 19:23 ` [PATCH 21/21] mm/shmem: Convert shmem_swapin_page() to shmem_swapin_folio() Matthew Wilcox (Oracle)
2022-05-03 15:14 ` [PATCH 00/21] Folio patches for 5.19 Nathan Chancellor

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save this message as an mbox file, import it into your mail client,
  and reply-to-all from there.

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220429192329.3034378-17-willy@infradead.org \
    --to=willy@infradead.org \
    --cc=akpm@linux-foundation.org \
    --cc=linux-mm@kvack.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, reply through a mailto: link to this message.

Be sure your reply has a Subject: header at the top and a blank line
before the message body.