* [PATCH 00/13] More swap folio conversions
@ 2023-12-13 21:58 Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 01/13] mm: Return the folio from __read_swap_cache_async() Matthew Wilcox (Oracle)
                   ` (12 more replies)
  0 siblings, 13 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

These all seem like fairly straightforward conversions to me.
A lot of compound_head() calls get removed, and page_swap_info()
goes away entirely, which is nice.  The series passes my fairly
limited testing.
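
In case it's not obvious where the hidden compound_head() calls come
from, here is a rough sketch (illustrative only, not the real
page-flags.h/mm.h helpers) of a page-based flag helper next to its
folio counterpart:

static inline void set_page_flag(struct page *page, int bit)
{
	/* A page-based helper must first resolve a possible tail page
	 * to its head page before it can touch the flag word. */
	set_bit(bit, &compound_head(page)->flags);
}

static inline void set_folio_flag(struct folio *folio, int bit)
{
	/* A folio is never a tail page, so no lookup is needed. */
	set_bit(bit, &folio->flags);
}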

Matthew Wilcox (Oracle) (13):
  mm: Return the folio from __read_swap_cache_async()
  mm: Pass a folio to __swap_writepage()
  mm: Pass a folio to swap_writepage_fs()
  mm: Pass a folio to swap_writepage_bdev_sync()
  mm: Pass a folio to swap_writepage_bdev_async()
  mm: Pass a folio to swap_readpage_fs()
  mm: Pass a folio to swap_readpage_bdev_sync()
  mm: Pass a folio to swap_readpage_bdev_async()
  mm: Convert swap_page_sector() to swap_folio_sector()
  mm: Convert swap_readpage() to swap_read_folio()
  mm: Remove page_swap_info()
  mm: Return a folio from read_swap_cache_async()
  mm: Convert swap_cluster_readahead and swap_vma_readahead to return a
    folio

 include/linux/swap.h  |   5 +--
 include/linux/zswap.h |   4 +-
 mm/madvise.c          |  22 +++++-----
 mm/memory.c           |   4 +-
 mm/page_io.c          |  79 ++++++++++++++++-----------------
 mm/shmem.c            |   8 ++--
 mm/swap.h             |  29 ++++++------
 mm/swap_state.c       | 100 ++++++++++++++++++++----------------------
 mm/swapfile.c         |  16 +++----
 mm/zswap.c            |  48 ++++++++++----------
 10 files changed, 148 insertions(+), 167 deletions(-)

-- 
2.42.0




* [PATCH 01/13] mm: Return the folio from __read_swap_cache_async()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 23:14   ` Andrew Morton
  2023-12-13 21:58 ` [PATCH 02/13] mm: Pass a folio to __swap_writepage() Matthew Wilcox (Oracle)
                   ` (11 subsequent siblings)
  12 siblings, 1 reply; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Move the folio->page conversion into the callers that actually want it.
Most of the callers are happier with the folio anyway.  If the
page_allocated boolean is set, the newly allocated folio is order-0,
so it is safe to pass the page directly to swap_readpage().
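
As a sketch of the caller pattern this enables (the names match the
patch; the hunks below are the real thing):

	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (page_allocated)
		/* A freshly allocated swap cache folio is order-0, so
		 * &folio->page covers the whole folio and can be handed
		 * straight to swap_readpage(). */
		swap_readpage(&folio->page, false, plug);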

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/zswap.h |  4 +--
 mm/swap.h             |  7 ++--
 mm/swap_state.c       | 75 ++++++++++++++++++++-----------------------
 mm/zswap.c            | 48 +++++++++++++--------------
 4 files changed, 64 insertions(+), 70 deletions(-)

diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 08c240e16a01..e88572d4c720 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -34,7 +34,7 @@ void zswap_swapon(int type);
 void zswap_swapoff(int type);
 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
 void zswap_lruvec_state_init(struct lruvec *lruvec);
-void zswap_page_swapin(struct page *page);
+void zswap_folio_swapin(struct folio *folio);
 #else
 
 struct zswap_lruvec_state {};
@@ -54,7 +54,7 @@ static inline void zswap_swapon(int type) {}
 static inline void zswap_swapoff(int type) {}
 static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
 static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
-static inline void zswap_page_swapin(struct page *page) {}
+static inline void zswap_folio_swapin(struct folio *folio) {}
 #endif
 
 #endif /* _LINUX_ZSWAP_H */
diff --git a/mm/swap.h b/mm/swap.h
index c0dc73e10e91..a60ab1cfcaf2 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -49,10 +49,9 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				   struct vm_area_struct *vma,
 				   unsigned long addr,
 				   struct swap_iocb **plug);
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-				     struct mempolicy *mpol, pgoff_t ilx,
-				     bool *new_page_allocated,
-				     bool skip_if_exists);
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
+		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+		bool skip_if_exists);
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 				    struct mempolicy *mpol, pgoff_t ilx);
 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7d775d0b1312..d4e25d9b5dc6 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -419,14 +419,12 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
 	return folio;
 }
 
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-				     struct mempolicy *mpol, pgoff_t ilx,
-				     bool *new_page_allocated,
-				     bool skip_if_exists)
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+		bool skip_if_exists)
 {
 	struct swap_info_struct *si;
 	struct folio *folio;
-	struct page *page;
 	void *shadow = NULL;
 
 	*new_page_allocated = false;
@@ -443,10 +441,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 */
 		folio = filemap_get_folio(swap_address_space(entry),
 						swp_offset(entry));
-		if (!IS_ERR(folio)) {
-			page = folio_file_page(folio, swp_offset(entry));
-			goto got_page;
-		}
+		if (!IS_ERR(folio))
+			goto got_folio;
 
 		/*
 		 * Just skip read ahead for unused swap slot.
@@ -460,7 +456,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			goto fail_put_swap;
 
 		/*
-		 * Get a new page to read into from swap.  Allocate it now,
+		 * Get a new folio to read into from swap.  Allocate it now,
 		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
 		 * cause any racers to loop around until we add it to cache.
 		 */
@@ -496,13 +492,13 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
 		 * has not yet been cleared.  Or race against another
 		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
-		 * in swap_map, but not yet added its page to swap cache.
+		 * in swap_map, but not yet added its folio to swap cache.
 		 */
 		schedule_timeout_uninterruptible(1);
 	}
 
 	/*
-	 * The swap entry is ours to swap in. Prepare the new page.
+	 * The swap entry is ours to swap in. Prepare the new folio.
 	 */
 
 	__folio_set_locked(folio);
@@ -523,10 +519,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	/* Caller will initiate read into locked folio */
 	folio_add_lru(folio);
 	*new_page_allocated = true;
-	page = &folio->page;
-got_page:
+got_folio:
 	put_swap_device(si);
-	return page;
+	return folio;
 
 fail_unlock:
 	put_swap_folio(folio, entry);
@@ -554,16 +549,16 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	bool page_allocated;
 	struct mempolicy *mpol;
 	pgoff_t ilx;
-	struct page *page;
+	struct folio *folio;
 
 	mpol = get_vma_policy(vma, addr, 0, &ilx);
-	page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 					&page_allocated, false);
 	mpol_cond_put(mpol);
 
 	if (page_allocated)
-		swap_readpage(page, false, plug);
-	return page;
+		swap_readpage(&folio->page, false, plug);
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 static unsigned int __swapin_nr_pages(unsigned long prev_offset,
@@ -648,7 +643,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 				    struct mempolicy *mpol, pgoff_t ilx)
 {
-	struct page *page;
+	struct folio *folio;
 	unsigned long entry_offset = swp_offset(entry);
 	unsigned long offset = entry_offset;
 	unsigned long start_offset, end_offset;
@@ -673,31 +668,31 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	blk_start_plug(&plug);
 	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
-		page = __read_swap_cache_async(
+		folio = __read_swap_cache_async(
 				swp_entry(swp_type(entry), offset),
 				gfp_mask, mpol, ilx, &page_allocated, false);
-		if (!page)
+		if (!folio)
 			continue;
 		if (page_allocated) {
-			swap_readpage(page, false, &splug);
+			swap_readpage(&folio->page, false, &splug);
 			if (offset != entry_offset) {
-				SetPageReadahead(page);
+				folio_set_readahead(folio);
 				count_vm_event(SWAP_RA);
 			}
 		}
-		put_page(page);
+		folio_put(folio);
 	}
 	blk_finish_plug(&plug);
 	swap_read_unplug(splug);
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
 	/* The page was likely read above, so no need for plugging here */
-	page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 					&page_allocated, false);
 	if (unlikely(page_allocated))
-		swap_readpage(page, false, NULL);
-	zswap_page_swapin(page);
-	return page;
+		swap_readpage(&folio->page, false, NULL);
+	zswap_folio_swapin(folio);
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -815,7 +810,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 {
 	struct blk_plug plug;
 	struct swap_iocb *splug = NULL;
-	struct page *page;
+	struct folio *folio;
 	pte_t *pte = NULL, pentry;
 	unsigned long addr;
 	swp_entry_t entry;
@@ -848,18 +843,18 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 			continue;
 		pte_unmap(pte);
 		pte = NULL;
-		page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 						&page_allocated, false);
-		if (!page)
+		if (!folio)
 			continue;
 		if (page_allocated) {
-			swap_readpage(page, false, &splug);
+			swap_readpage(&folio->page, false, &splug);
 			if (i != ra_info.offset) {
-				SetPageReadahead(page);
+				folio_set_readahead(folio);
 				count_vm_event(SWAP_RA);
 			}
 		}
-		put_page(page);
+		folio_put(folio);
 	}
 	if (pte)
 		pte_unmap(pte);
@@ -867,13 +862,13 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	swap_read_unplug(splug);
 	lru_add_drain();
 skip:
-	/* The page was likely read above, so no need for plugging here */
-	page = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
+	/* The folio was likely read above, so no need for plugging here */
+	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
 					&page_allocated, false);
 	if (unlikely(page_allocated))
-		swap_readpage(page, false, NULL);
-	zswap_page_swapin(page);
-	return page;
+		swap_readpage(&folio->page, false, NULL);
+	zswap_folio_swapin(folio);
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 /**
diff --git a/mm/zswap.c b/mm/zswap.c
index d49aad0359d2..08b5212aa6b8 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -368,12 +368,12 @@ void zswap_lruvec_state_init(struct lruvec *lruvec)
 	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
 }
 
-void zswap_page_swapin(struct page *page)
+void zswap_folio_swapin(struct folio *folio)
 {
 	struct lruvec *lruvec;
 
-	if (page) {
-		lruvec = folio_lruvec(page_folio(page));
+	if (folio) {
+		lruvec = folio_lruvec(folio);
 		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 	}
 }
@@ -1379,14 +1379,14 @@ static int zswap_enabled_param_set(const char *val,
 * writeback code
 **********************************/
 /*
- * Attempts to free an entry by adding a page to the swap cache,
- * decompressing the entry data into the page, and issuing a
- * bio write to write the page back to the swap device.
+ * Attempts to free an entry by adding a folio to the swap cache,
+ * decompressing the entry data into the folio, and issuing a
+ * bio write to write the folio back to the swap device.
  *
- * This can be thought of as a "resumed writeback" of the page
+ * This can be thought of as a "resumed writeback" of the folio
  * to the swap device.  We are basically resuming the same swap
  * writeback path that was intercepted with the zswap_store()
- * in the first place.  After the page has been decompressed into
+ * in the first place.  After the folio has been decompressed into
  * the swap cache, the compressed version stored by zswap can be
  * freed.
  */
@@ -1394,7 +1394,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 				 struct zswap_tree *tree)
 {
 	swp_entry_t swpentry = entry->swpentry;
-	struct page *page;
+	struct folio *folio;
 	struct mempolicy *mpol;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
@@ -1413,18 +1413,18 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 			return -ENOMEM;
 	}
 
-	/* try to allocate swap cache page */
+	/* try to allocate swap cache folio */
 	mpol = get_task_policy(current);
-	page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
+	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
 				NO_INTERLEAVE_INDEX, &page_was_allocated, true);
-	if (!page) {
+	if (!folio) {
 		ret = -ENOMEM;
 		goto fail;
 	}
 
-	/* Found an existing page, we raced with load/swapin */
+	/* Found an existing folio, we raced with load/swapin */
 	if (!page_was_allocated) {
-		put_page(page);
+		folio_put(folio);
 		ret = -EEXIST;
 		goto fail;
 	}
@@ -1434,12 +1434,12 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	 * concurrent swapping to and from the slot. Verify that the
 	 * swap entry hasn't been invalidated and recycled behind our
 	 * backs (our zswap_entry reference doesn't prevent that), to
-	 * avoid overwriting a new swap page with old compressed data.
+	 * avoid overwriting a new swap folio with old compressed data.
 	 */
 	spin_lock(&tree->lock);
 	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
 		spin_unlock(&tree->lock);
-		delete_from_swap_cache(page_folio(page));
+		delete_from_swap_cache(folio);
 		ret = -ENOMEM;
 		goto fail;
 	}
@@ -1459,7 +1459,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	mutex_lock(acomp_ctx->mutex);
 	sg_init_one(&input, src, entry->length);
 	sg_init_table(&output, 1);
-	sg_set_page(&output, page, PAGE_SIZE, 0);
+	sg_set_page(&output, &folio->page, PAGE_SIZE, 0);
 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
 	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
 	dlen = acomp_ctx->req->dlen;
@@ -1473,20 +1473,20 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	BUG_ON(ret);
 	BUG_ON(dlen != PAGE_SIZE);
 
-	/* page is up to date */
-	SetPageUptodate(page);
+	/* folio is up to date */
+	folio_mark_uptodate(folio);
 
 	/* move it to the tail of the inactive list after end_writeback */
-	SetPageReclaim(page);
+	folio_set_reclaim(folio);
 
-	if (!PageLRU(page)) {
+	if (!folio_test_lru(folio)) {
 		/* drain lru cache to help folio_rotate_reclaimable() */
 		lru_add_drain();
 	}
 
 	/* start writeback */
-	__swap_writepage(page, &wbc);
-	put_page(page);
+	__swap_writepage(&folio->page, &wbc);
+	folio_put(folio);
 
 	return ret;
 
@@ -1495,7 +1495,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 		kfree(tmp);
 
 	/*
-	 * If we get here because the page is already in swapcache, a
+	 * If we get here because the folio is already in swapcache, a
 	 * load may be happening concurrently. It is safe and okay to
 	 * not free the entry. It is also okay to return !0.
 	 */
-- 
2.42.0




* [PATCH 02/13] mm: Pass a folio to __swap_writepage()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 01/13] mm: Return the folio from __read_swap_cache_async() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 03/13] mm: Pass a folio to swap_writepage_fs() Matthew Wilcox (Oracle)
                   ` (10 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Both callers now have a folio, so pass that in instead of the page.
Removes a few hidden calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_io.c | 14 +++++++-------
 mm/swap.h    |  2 +-
 mm/zswap.c   |  2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index cb559ae324c6..e0d59f36b70b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -201,7 +201,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		folio_end_writeback(folio);
 		return 0;
 	}
-	__swap_writepage(&folio->page, wbc);
+	__swap_writepage(folio, wbc);
 	return 0;
 }
 
@@ -368,22 +368,22 @@ static void swap_writepage_bdev_async(struct page *page,
 	submit_bio(bio);
 }
 
-void __swap_writepage(struct page *page, struct writeback_control *wbc)
+void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
 {
-	struct swap_info_struct *sis = page_swap_info(page);
+	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 
-	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
 	/*
 	 * ->flags can be updated non-atomicially (scan_swap_map_slots),
 	 * but that will never affect SWP_FS_OPS, so the data_race
 	 * is safe.
 	 */
 	if (data_race(sis->flags & SWP_FS_OPS))
-		swap_writepage_fs(page, wbc);
+		swap_writepage_fs(&folio->page, wbc);
 	else if (sis->flags & SWP_SYNCHRONOUS_IO)
-		swap_writepage_bdev_sync(page, wbc, sis);
+		swap_writepage_bdev_sync(&folio->page, wbc, sis);
 	else
-		swap_writepage_bdev_async(page, wbc, sis);
+		swap_writepage_bdev_async(&folio->page, wbc, sis);
 }
 
 void swap_write_unplug(struct swap_iocb *sio)
diff --git a/mm/swap.h b/mm/swap.h
index a60ab1cfcaf2..b81587740cf1 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -19,7 +19,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
 }
 void swap_write_unplug(struct swap_iocb *sio);
 int swap_writepage(struct page *page, struct writeback_control *wbc);
-void __swap_writepage(struct page *page, struct writeback_control *wbc);
+void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
 
 /* linux/mm/swap_state.c */
 /* One swap address space for each 64M swap space */
diff --git a/mm/zswap.c b/mm/zswap.c
index 08b5212aa6b8..0aa3b434d9b5 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1485,7 +1485,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	}
 
 	/* start writeback */
-	__swap_writepage(&folio->page, &wbc);
+	__swap_writepage(folio, &wbc);
 	folio_put(folio);
 
 	return ret;
-- 
2.42.0




* [PATCH 03/13] mm: Pass a folio to swap_writepage_fs()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 01/13] mm: Return the folio from __read_swap_cache_async() Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 02/13] mm: Pass a folio to __swap_writepage() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 04/13] mm: Pass a folio to swap_writepage_bdev_sync() Matthew Wilcox (Oracle)
                   ` (9 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Saves several calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_io.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index e0d59f36b70b..7f67d8e2ed9a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -288,16 +288,16 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
 	mempool_free(sio, sio_pool);
 }
 
-static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
+static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
 {
 	struct swap_iocb *sio = NULL;
-	struct swap_info_struct *sis = page_swap_info(page);
+	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 	struct file *swap_file = sis->swap_file;
-	loff_t pos = page_file_offset(page);
+	loff_t pos = folio_file_pos(folio);
 
-	count_swpout_vm_event(page_folio(page));
-	set_page_writeback(page);
-	unlock_page(page);
+	count_swpout_vm_event(folio);
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 	if (wbc->swap_plug)
 		sio = *wbc->swap_plug;
 	if (sio) {
@@ -315,8 +315,8 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
 		sio->pages = 0;
 		sio->len = 0;
 	}
-	bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
-	sio->len += thp_size(page);
+	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+	sio->len += folio_size(folio);
 	sio->pages += 1;
 	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
 		swap_write_unplug(sio);
@@ -379,7 +379,7 @@ void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
 	 * is safe.
 	 */
 	if (data_race(sis->flags & SWP_FS_OPS))
-		swap_writepage_fs(&folio->page, wbc);
+		swap_writepage_fs(folio, wbc);
 	else if (sis->flags & SWP_SYNCHRONOUS_IO)
 		swap_writepage_bdev_sync(&folio->page, wbc, sis);
 	else
-- 
2.42.0




* [PATCH 04/13] mm: Pass a folio to swap_writepage_bdev_sync()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (2 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 03/13] mm: Pass a folio to swap_writepage_fs() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 05/13] mm: Pass a folio to swap_writepage_bdev_async() Matthew Wilcox (Oracle)
                   ` (8 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Saves a call to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_io.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index 7f67d8e2ed9a..1f57e26fa282 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -326,17 +326,16 @@ static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc
 		*wbc->swap_plug = sio;
 }
 
-static void swap_writepage_bdev_sync(struct page *page,
+static void swap_writepage_bdev_sync(struct folio *folio,
 		struct writeback_control *wbc, struct swap_info_struct *sis)
 {
 	struct bio_vec bv;
 	struct bio bio;
-	struct folio *folio = page_folio(page);
 
 	bio_init(&bio, sis->bdev, &bv, 1,
 		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
-	bio.bi_iter.bi_sector = swap_page_sector(page);
-	__bio_add_page(&bio, page, thp_size(page), 0);
+	bio.bi_iter.bi_sector = swap_page_sector(&folio->page);
+	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
 
 	bio_associate_blkg_from_page(&bio, folio);
 	count_swpout_vm_event(folio);
@@ -381,7 +380,7 @@ void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
 	if (data_race(sis->flags & SWP_FS_OPS))
 		swap_writepage_fs(folio, wbc);
 	else if (sis->flags & SWP_SYNCHRONOUS_IO)
-		swap_writepage_bdev_sync(&folio->page, wbc, sis);
+		swap_writepage_bdev_sync(folio, wbc, sis);
 	else
 		swap_writepage_bdev_async(&folio->page, wbc, sis);
 }
-- 
2.42.0




* [PATCH 05/13] mm: Pass a folio to swap_writepage_bdev_async()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (3 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 04/13] mm: Pass a folio to swap_writepage_bdev_sync() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 06/13] mm: Pass a folio to swap_readpage_fs() Matthew Wilcox (Oracle)
                   ` (7 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Saves a call to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_io.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index 1f57e26fa282..454ab67b33e3 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -347,18 +347,17 @@ static void swap_writepage_bdev_sync(struct folio *folio,
 	__end_swap_bio_write(&bio);
 }
 
-static void swap_writepage_bdev_async(struct page *page,
+static void swap_writepage_bdev_async(struct folio *folio,
 		struct writeback_control *wbc, struct swap_info_struct *sis)
 {
 	struct bio *bio;
-	struct folio *folio = page_folio(page);
 
 	bio = bio_alloc(sis->bdev, 1,
 			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
 			GFP_NOIO);
-	bio->bi_iter.bi_sector = swap_page_sector(page);
+	bio->bi_iter.bi_sector = swap_page_sector(&folio->page);
 	bio->bi_end_io = end_swap_bio_write;
-	__bio_add_page(bio, page, thp_size(page), 0);
+	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
 
 	bio_associate_blkg_from_page(bio, folio);
 	count_swpout_vm_event(folio);
@@ -382,7 +381,7 @@ void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
 	else if (sis->flags & SWP_SYNCHRONOUS_IO)
 		swap_writepage_bdev_sync(folio, wbc, sis);
 	else
-		swap_writepage_bdev_async(&folio->page, wbc, sis);
+		swap_writepage_bdev_async(folio, wbc, sis);
 }
 
 void swap_write_unplug(struct swap_iocb *sio)
-- 
2.42.0




* [PATCH 06/13] mm: Pass a folio to swap_readpage_fs()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (4 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 05/13] mm: Pass a folio to swap_writepage_bdev_async() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 07/13] mm: Pass a folio to swap_readpage_bdev_sync() Matthew Wilcox (Oracle)
                   ` (6 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Saves a call to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_io.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index 454ab67b33e3..864a558b7b68 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -420,12 +420,11 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
 	mempool_free(sio, sio_pool);
 }
 
-static void swap_readpage_fs(struct page *page,
-			     struct swap_iocb **plug)
+static void swap_readpage_fs(struct folio *folio, struct swap_iocb **plug)
 {
-	struct swap_info_struct *sis = page_swap_info(page);
+	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 	struct swap_iocb *sio = NULL;
-	loff_t pos = page_file_offset(page);
+	loff_t pos = folio_file_pos(folio);
 
 	if (plug)
 		sio = *plug;
@@ -444,8 +443,8 @@ static void swap_readpage_fs(struct page *page,
 		sio->pages = 0;
 		sio->len = 0;
 	}
-	bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
-	sio->len += thp_size(page);
+	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+	sio->len += folio_size(folio);
 	sio->pages += 1;
 	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
 		swap_read_unplug(sio);
@@ -515,7 +514,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
 		folio_mark_uptodate(folio);
 		folio_unlock(folio);
 	} else if (data_race(sis->flags & SWP_FS_OPS)) {
-		swap_readpage_fs(page, plug);
+		swap_readpage_fs(folio, plug);
 	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
 		swap_readpage_bdev_sync(page, sis);
 	} else {
-- 
2.42.0




* [PATCH 07/13] mm: Pass a folio to swap_readpage_bdev_sync()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (5 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 06/13] mm: Pass a folio to swap_readpage_fs() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 08/13] mm: Pass a folio to swap_readpage_bdev_async() Matthew Wilcox (Oracle)
                   ` (5 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Make it plain that this takes the head page (which before this point
was just an assumption, but is now enforced by the compiler).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_io.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index 864a558b7b68..0bc52e16452d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -454,15 +454,15 @@ static void swap_readpage_fs(struct folio *folio, struct swap_iocb **plug)
 		*plug = sio;
 }
 
-static void swap_readpage_bdev_sync(struct page *page,
+static void swap_readpage_bdev_sync(struct folio *folio,
 		struct swap_info_struct *sis)
 {
 	struct bio_vec bv;
 	struct bio bio;
 
 	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
-	bio.bi_iter.bi_sector = swap_page_sector(page);
-	__bio_add_page(&bio, page, thp_size(page), 0);
+	bio.bi_iter.bi_sector = swap_page_sector(&folio->page);
+	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
 	/*
 	 * Keep this task valid during swap readpage because the oom killer may
 	 * attempt to access it in the page fault retry time check.
@@ -516,7 +516,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
 	} else if (data_race(sis->flags & SWP_FS_OPS)) {
 		swap_readpage_fs(folio, plug);
 	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
-		swap_readpage_bdev_sync(page, sis);
+		swap_readpage_bdev_sync(folio, sis);
 	} else {
 		swap_readpage_bdev_async(page, sis);
 	}
-- 
2.42.0




* [PATCH 08/13] mm: Pass a folio to swap_readpage_bdev_async()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (6 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 07/13] mm: Pass a folio to swap_readpage_bdev_sync() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 09/13] mm: Convert swap_page_sector() to swap_folio_sector() Matthew Wilcox (Oracle)
                   ` (4 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Make it plain that this takes the head page (which before this point
was just an assumption, but is now enforced by the compiler).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_io.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index 0bc52e16452d..e18afcd9c19a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -474,15 +474,15 @@ static void swap_readpage_bdev_sync(struct folio *folio,
 	put_task_struct(current);
 }
 
-static void swap_readpage_bdev_async(struct page *page,
+static void swap_readpage_bdev_async(struct folio *folio,
 		struct swap_info_struct *sis)
 {
 	struct bio *bio;
 
 	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
-	bio->bi_iter.bi_sector = swap_page_sector(page);
+	bio->bi_iter.bi_sector = swap_page_sector(&folio->page);
 	bio->bi_end_io = end_swap_bio_read;
-	__bio_add_page(bio, page, thp_size(page), 0);
+	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
 	count_vm_event(PSWPIN);
 	submit_bio(bio);
 }
@@ -518,7 +518,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
 	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
 		swap_readpage_bdev_sync(folio, sis);
 	} else {
-		swap_readpage_bdev_async(page, sis);
+		swap_readpage_bdev_async(folio, sis);
 	}
 
 	if (workingset) {
-- 
2.42.0




* [PATCH 09/13] mm: Convert swap_page_sector() to swap_folio_sector()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (7 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 08/13] mm: Pass a folio to swap_readpage_bdev_async() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 10/13] mm: Convert swap_readpage() to swap_read_folio() Matthew Wilcox (Oracle)
                   ` (3 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

All callers have a folio, so pass it in.  Saves a couple of calls to
compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/swap.h | 2 +-
 mm/page_io.c         | 8 ++++----
 mm/swapfile.c        | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index edc0f2c8ce01..77c43715ad5b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -493,7 +493,7 @@ struct backing_dev_info;
 extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
 extern void exit_swap_address_space(unsigned int type);
 extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
-sector_t swap_page_sector(struct page *page);
+sector_t swap_folio_sector(struct folio *folio);
 
 static inline void put_swap_device(struct swap_info_struct *si)
 {
diff --git a/mm/page_io.c b/mm/page_io.c
index e18afcd9c19a..6736c56526bf 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -334,7 +334,7 @@ static void swap_writepage_bdev_sync(struct folio *folio,
 
 	bio_init(&bio, sis->bdev, &bv, 1,
 		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
-	bio.bi_iter.bi_sector = swap_page_sector(&folio->page);
+	bio.bi_iter.bi_sector = swap_folio_sector(folio);
 	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
 
 	bio_associate_blkg_from_page(&bio, folio);
@@ -355,7 +355,7 @@ static void swap_writepage_bdev_async(struct folio *folio,
 	bio = bio_alloc(sis->bdev, 1,
 			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
 			GFP_NOIO);
-	bio->bi_iter.bi_sector = swap_page_sector(&folio->page);
+	bio->bi_iter.bi_sector = swap_folio_sector(folio);
 	bio->bi_end_io = end_swap_bio_write;
 	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
 
@@ -461,7 +461,7 @@ static void swap_readpage_bdev_sync(struct folio *folio,
 	struct bio bio;
 
 	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
-	bio.bi_iter.bi_sector = swap_page_sector(&folio->page);
+	bio.bi_iter.bi_sector = swap_folio_sector(folio);
 	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
 	/*
 	 * Keep this task valid during swap readpage because the oom killer may
@@ -480,7 +480,7 @@ static void swap_readpage_bdev_async(struct folio *folio,
 	struct bio *bio;
 
 	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
-	bio->bi_iter.bi_sector = swap_page_sector(&folio->page);
+	bio->bi_iter.bi_sector = swap_folio_sector(folio);
 	bio->bi_end_io = end_swap_bio_read;
 	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
 	count_vm_event(PSWPIN);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1501bc956456..b22c47b11d65 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -227,14 +227,14 @@ offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
 	BUG();
 }
 
-sector_t swap_page_sector(struct page *page)
+sector_t swap_folio_sector(struct folio *folio)
 {
-	struct swap_info_struct *sis = page_swap_info(page);
+	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 	struct swap_extent *se;
 	sector_t sector;
 	pgoff_t offset;
 
-	offset = __page_file_index(page);
+	offset = swp_offset(folio->swap);
 	se = offset_to_swap_extent(sis, offset);
 	sector = se->start_block + (offset - se->start_page);
 	return sector << (PAGE_SHIFT - 9);
-- 
2.42.0




* [PATCH 10/13] mm: Convert swap_readpage() to swap_read_folio()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (8 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 09/13] mm: Convert swap_page_sector() to swap_folio_sector() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 11/13] mm: Remove page_swap_info() Matthew Wilcox (Oracle)
                   ` (2 subsequent siblings)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

All callers have a folio, so pass it in, saving two calls to
compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory.c     |  4 ++--
 mm/page_io.c    | 18 +++++++++---------
 mm/swap.h       |  5 +++--
 mm/swap_state.c | 12 ++++++------
 mm/swapfile.c   |  2 +-
 5 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index e402340e3f46..2f7b212b7d71 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3882,9 +3882,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 				folio_add_lru(folio);
 
-				/* To provide entry to swap_readpage() */
+				/* To provide entry to swap_read_folio() */
 				folio->swap = entry;
-				swap_readpage(page, true, NULL);
+				swap_read_folio(folio, true, NULL);
 				folio->private = NULL;
 			}
 		} else {
diff --git a/mm/page_io.c b/mm/page_io.c
index 6736c56526bf..09c6a4f316f3 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -420,7 +420,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
 	mempool_free(sio, sio_pool);
 }
 
-static void swap_readpage_fs(struct folio *folio, struct swap_iocb **plug)
+static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
 {
 	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 	struct swap_iocb *sio = NULL;
@@ -454,7 +454,7 @@ static void swap_readpage_fs(struct folio *folio, struct swap_iocb **plug)
 		*plug = sio;
 }
 
-static void swap_readpage_bdev_sync(struct folio *folio,
+static void swap_read_folio_bdev_sync(struct folio *folio,
 		struct swap_info_struct *sis)
 {
 	struct bio_vec bv;
@@ -474,7 +474,7 @@ static void swap_readpage_bdev_sync(struct folio *folio,
 	put_task_struct(current);
 }
 
-static void swap_readpage_bdev_async(struct folio *folio,
+static void swap_read_folio_bdev_async(struct folio *folio,
 		struct swap_info_struct *sis)
 {
 	struct bio *bio;
@@ -487,10 +487,10 @@ static void swap_readpage_bdev_async(struct folio *folio,
 	submit_bio(bio);
 }
 
-void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
+void swap_read_folio(struct folio *folio, bool synchronous,
+		struct swap_iocb **plug)
 {
-	struct folio *folio = page_folio(page);
-	struct swap_info_struct *sis = page_swap_info(page);
+	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 	bool workingset = folio_test_workingset(folio);
 	unsigned long pflags;
 	bool in_thrashing;
@@ -514,11 +514,11 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
 		folio_mark_uptodate(folio);
 		folio_unlock(folio);
 	} else if (data_race(sis->flags & SWP_FS_OPS)) {
-		swap_readpage_fs(folio, plug);
+		swap_read_folio_fs(folio, plug);
 	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
-		swap_readpage_bdev_sync(folio, sis);
+		swap_read_folio_bdev_sync(folio, sis);
 	} else {
-		swap_readpage_bdev_async(folio, sis);
+		swap_read_folio_bdev_async(folio, sis);
 	}
 
 	if (workingset) {
diff --git a/mm/swap.h b/mm/swap.h
index b81587740cf1..859ae8f0fd2d 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -10,7 +10,8 @@ struct mempolicy;
 /* linux/mm/page_io.c */
 int sio_pool_init(void);
 struct swap_iocb;
-void swap_readpage(struct page *page, bool do_poll, struct swap_iocb **plug);
+void swap_read_folio(struct folio *folio, bool do_poll,
+		struct swap_iocb **plug);
 void __swap_read_unplug(struct swap_iocb *plug);
 static inline void swap_read_unplug(struct swap_iocb *plug)
 {
@@ -63,7 +64,7 @@ static inline unsigned int folio_swap_flags(struct folio *folio)
 }
 #else /* CONFIG_SWAP */
 struct swap_iocb;
-static inline void swap_readpage(struct page *page, bool do_poll,
+static inline void swap_read_folio(struct folio *folio, bool do_poll,
 		struct swap_iocb **plug)
 {
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d4e25d9b5dc6..efff7148a59d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -539,7 +539,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * the swap entry is no longer in use.
  *
  * get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_readpage() holds the
+ * __read_swap_cache_async() call them and swap_read_folio() holds the
  * swap cache folio lock.
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
@@ -557,7 +557,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	mpol_cond_put(mpol);
 
 	if (page_allocated)
-		swap_readpage(&folio->page, false, plug);
+		swap_read_folio(folio, false, plug);
 	return folio_file_page(folio, swp_offset(entry));
 }
 
@@ -674,7 +674,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 		if (!folio)
 			continue;
 		if (page_allocated) {
-			swap_readpage(&folio->page, false, &splug);
+			swap_read_folio(folio, false, &splug);
 			if (offset != entry_offset) {
 				folio_set_readahead(folio);
 				count_vm_event(SWAP_RA);
@@ -690,7 +690,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 					&page_allocated, false);
 	if (unlikely(page_allocated))
-		swap_readpage(&folio->page, false, NULL);
+		swap_read_folio(folio, false, NULL);
 	zswap_folio_swapin(folio);
 	return folio_file_page(folio, swp_offset(entry));
 }
@@ -848,7 +848,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 		if (!folio)
 			continue;
 		if (page_allocated) {
-			swap_readpage(&folio->page, false, &splug);
+			swap_read_folio(folio, false, &splug);
 			if (i != ra_info.offset) {
 				folio_set_readahead(folio);
 				count_vm_event(SWAP_RA);
@@ -866,7 +866,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
 					&page_allocated, false);
 	if (unlikely(page_allocated))
-		swap_readpage(&folio->page, false, NULL);
+		swap_read_folio(folio, false, NULL);
 	zswap_folio_swapin(folio);
 	return folio_file_page(folio, swp_offset(entry));
 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b22c47b11d65..f3e23a3d26ae 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2225,7 +2225,7 @@ EXPORT_SYMBOL_GPL(add_swap_extent);
 /*
  * A `swap extent' is a simple thing which maps a contiguous range of pages
  * onto a contiguous range of disk blocks.  A rbtree of swap extents is
- * built at swapon time and is then used at swap_writepage/swap_readpage
+ * built at swapon time and is then used at swap_writepage/swap_read_folio
  * time for locating where on disk a page belongs.
  *
  * If the swapfile is an S_ISBLK block device, a single extent is installed.
-- 
2.42.0




* [PATCH 11/13] mm: Remove page_swap_info()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (9 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 10/13] mm: Convert swap_readpage() to swap_read_folio() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 12/13] mm: Return a folio from read_swap_cache_async() Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 13/13] mm: Convert swap_cluster_readahead and swap_vma_readahead to return a folio Matthew Wilcox (Oracle)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

It's more efficient to get the swap_info_struct by calling
swp_swap_info() directly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/swap.h | 3 +--
 mm/swap.h            | 2 +-
 mm/swapfile.c        | 8 +-------
 3 files changed, 3 insertions(+), 10 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 77c43715ad5b..fac9b80b5e32 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -487,8 +487,7 @@ extern sector_t swapdev_block(int, pgoff_t);
 extern int __swap_count(swp_entry_t entry);
 extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
 extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
-extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
+struct swap_info_struct *swp_swap_info(swp_entry_t entry);
 struct backing_dev_info;
 extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
 extern void exit_swap_address_space(unsigned int type);
diff --git a/mm/swap.h b/mm/swap.h
index 859ae8f0fd2d..6bf25342589f 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -60,7 +60,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
 
 static inline unsigned int folio_swap_flags(struct folio *folio)
 {
-	return page_swap_info(&folio->page)->flags;
+	return swp_swap_info(folio->swap)->flags;
 }
 #else /* CONFIG_SWAP */
 struct swap_iocb;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f3e23a3d26ae..2f877ca44513 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3369,18 +3369,12 @@ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
 	return swap_type_to_swap_info(swp_type(entry));
 }
 
-struct swap_info_struct *page_swap_info(struct page *page)
-{
-	swp_entry_t entry = page_swap_entry(page);
-	return swp_swap_info(entry);
-}
-
 /*
  * out-of-line methods to avoid include hell.
  */
 struct address_space *swapcache_mapping(struct folio *folio)
 {
-	return page_swap_info(&folio->page)->swap_file->f_mapping;
+	return swp_swap_info(folio->swap)->swap_file->f_mapping;
 }
 EXPORT_SYMBOL_GPL(swapcache_mapping);
 
-- 
2.42.0




* [PATCH 12/13] mm: Return a folio from read_swap_cache_async()
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (10 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 11/13] mm: Remove page_swap_info() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-13 21:58 ` [PATCH 13/13] mm: Convert swap_cluster_readahead and swap_vma_readahead to return a folio Matthew Wilcox (Oracle)
  12 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

The only two callers simply call put_page() on the page returned, so
they're happier calling folio_put().  Saves two calls to compound_head().
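
Roughly speaking (a sketch, not the exact mm.h code), put_page() has to
look the folio up before it can drop the reference, and that lookup is
what calling folio_put() directly avoids:

/* Sketch only; see put_page() and folio_put() in include/linux/mm.h. */
static inline void put_page_sketch(struct page *page)
{
	/* page_folio() is the hidden compound_head() lookup. */
	folio_put(page_folio(page));
}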

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/madvise.c    | 22 +++++++++++-----------
 mm/swap.h       |  7 +++----
 mm/swap_state.c |  8 ++++----
 3 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index 6214a1ab5654..912155a94ed5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -180,7 +180,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		pte_t pte;
 		swp_entry_t entry;
-		struct page *page;
+		struct folio *folio;
 
 		if (!ptep++) {
 			ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -198,10 +198,10 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 		pte_unmap_unlock(ptep, ptl);
 		ptep = NULL;
 
-		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
+		folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
 					     vma, addr, &splug);
-		if (page)
-			put_page(page);
+		if (folio)
+			folio_put(folio);
 	}
 
 	if (ptep)
@@ -223,17 +223,17 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
 {
 	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
 	pgoff_t end_index = linear_page_index(vma, end) - 1;
-	struct page *page;
+	struct folio *folio;
 	struct swap_iocb *splug = NULL;
 
 	rcu_read_lock();
-	xas_for_each(&xas, page, end_index) {
+	xas_for_each(&xas, folio, end_index) {
 		unsigned long addr;
 		swp_entry_t entry;
 
-		if (!xa_is_value(page))
+		if (!xa_is_value(folio))
 			continue;
-		entry = radix_to_swp_entry(page);
+		entry = radix_to_swp_entry(folio);
 		/* There might be swapin error entries in shmem mapping. */
 		if (non_swap_entry(entry))
 			continue;
@@ -243,10 +243,10 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
 		xas_pause(&xas);
 		rcu_read_unlock();
 
-		page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
+		folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
 					     vma, addr, &splug);
-		if (page)
-			put_page(page);
+		if (folio)
+			folio_put(folio);
 
 		rcu_read_lock();
 	}
diff --git a/mm/swap.h b/mm/swap.h
index 6bf25342589f..82c68ccb5ab1 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -46,10 +46,9 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
 		pgoff_t index);
 
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-				   struct vm_area_struct *vma,
-				   unsigned long addr,
-				   struct swap_iocb **plug);
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+		struct vm_area_struct *vma, unsigned long addr,
+		struct swap_iocb **plug);
 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
 		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
 		bool skip_if_exists);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index efff7148a59d..1cb1d5d0583e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -542,9 +542,9 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * __read_swap_cache_async() call them and swap_read_folio() holds the
  * swap cache folio lock.
  */
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-				   struct vm_area_struct *vma,
-				   unsigned long addr, struct swap_iocb **plug)
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+		struct vm_area_struct *vma, unsigned long addr,
+		struct swap_iocb **plug)
 {
 	bool page_allocated;
 	struct mempolicy *mpol;
@@ -558,7 +558,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 
 	if (page_allocated)
 		swap_read_folio(folio, false, plug);
-	return folio_file_page(folio, swp_offset(entry));
+	return folio;
 }
 
 static unsigned int __swapin_nr_pages(unsigned long prev_offset,
-- 
2.42.0




* [PATCH 13/13] mm: Convert swap_cluster_readahead and swap_vma_readahead to return a folio
  2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
                   ` (11 preceding siblings ...)
  2023-12-13 21:58 ` [PATCH 12/13] mm: Return a folio from read_swap_cache_async() Matthew Wilcox (Oracle)
@ 2023-12-13 21:58 ` Matthew Wilcox (Oracle)
  2023-12-16 13:58   ` Kairui Song
  12 siblings, 1 reply; 17+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-13 21:58 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

shmem_swapin_cluster() immediately converts the page back to a folio,
and swapin_readahead() may as well call folio_file_page() once instead
of having each function call it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/shmem.c      |  8 +++-----
 mm/swap.h       |  6 +++---
 mm/swap_state.c | 21 ++++++++++-----------
 3 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index c62f904ba1ca..a4d388973021 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1570,15 +1570,13 @@ static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
 {
 	struct mempolicy *mpol;
 	pgoff_t ilx;
-	struct page *page;
+	struct folio *folio;
 
 	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
-	page = swap_cluster_readahead(swap, gfp, mpol, ilx);
+	folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
 	mpol_cond_put(mpol);
 
-	if (!page)
-		return NULL;
-	return page_folio(page);
+	return folio;
 }
 
 /*
diff --git a/mm/swap.h b/mm/swap.h
index 82c68ccb5ab1..758c46ca671e 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -52,8 +52,8 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
 		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
 		bool skip_if_exists);
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
-				    struct mempolicy *mpol, pgoff_t ilx);
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+		struct mempolicy *mpol, pgoff_t ilx);
 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
 			      struct vm_fault *vmf);
 
@@ -80,7 +80,7 @@ static inline void show_swap_cache_info(void)
 {
 }
 
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
 			gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
 {
 	return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 1cb1d5d0583e..793b5b9e4f96 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -629,7 +629,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * @mpol: NUMA memory allocation policy to be applied
  * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
  *
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
  *
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
@@ -640,7 +640,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * are used for every page of the readahead: neighbouring pages on swap
  * are fairly likely to have been swapped out from the same node.
  */
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 				    struct mempolicy *mpol, pgoff_t ilx)
 {
 	struct folio *folio;
@@ -692,7 +692,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	if (unlikely(page_allocated))
 		swap_read_folio(folio, false, NULL);
 	zswap_folio_swapin(folio);
-	return folio_file_page(folio, swp_offset(entry));
+	return folio;
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -796,7 +796,7 @@ static void swap_ra_info(struct vm_fault *vmf,
  * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
  * @vmf: fault information
  *
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
  *
  * Primitive swap readahead code. We simply read in a few pages whose
  * virtual addresses are around the fault address in the same vma.
@@ -804,9 +804,8 @@ static void swap_ra_info(struct vm_fault *vmf,
  * Caller must hold read mmap_lock if vmf->vma is not NULL.
  *
  */
-static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
-				       struct mempolicy *mpol, pgoff_t targ_ilx,
-				       struct vm_fault *vmf)
+static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
+		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
 {
 	struct blk_plug plug;
 	struct swap_iocb *splug = NULL;
@@ -868,7 +867,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	if (unlikely(page_allocated))
 		swap_read_folio(folio, false, NULL);
 	zswap_folio_swapin(folio);
-	return folio_file_page(folio, swp_offset(entry));
+	return folio;
 }
 
 /**
@@ -888,14 +887,14 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 {
 	struct mempolicy *mpol;
 	pgoff_t ilx;
-	struct page *page;
+	struct folio *folio;
 
 	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
-	page = swap_use_vma_readahead() ?
+	folio = swap_use_vma_readahead() ?
 		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
 		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
 	mpol_cond_put(mpol);
-	return page;
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 #ifdef CONFIG_SYSFS
-- 
2.42.0



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 01/13] mm: Return the folio from __read_swap_cache_async()
  2023-12-13 21:58 ` [PATCH 01/13] mm: Return the folio from __read_swap_cache_async() Matthew Wilcox (Oracle)
@ 2023-12-13 23:14   ` Andrew Morton
  0 siblings, 0 replies; 17+ messages in thread
From: Andrew Morton @ 2023-12-13 23:14 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle); +Cc: linux-mm

On Wed, 13 Dec 2023 21:58:30 +0000 "Matthew Wilcox (Oracle)" <willy@infradead.org> wrote:

> Move the folio->page conversion into the callers that actually want
> that.  Most of the callers are happier with the folio anyway.
> If the page_allocated boolean is set, the folio allocated is of order-0,
> so it is safe to pass the page directly to swap_readpage().

Lots of rejects in mm-unstable's mm/zswap.c.  I think I got everything;
please check the result.  Pushed out now.



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 13/13] mm: Convert swap_cluster_readahead and swap_vma_readahead to return a folio
  2023-12-13 21:58 ` [PATCH 13/13] mm: Convert swap_cluster_readahead and swap_vma_readahead to return a folio Matthew Wilcox (Oracle)
@ 2023-12-16 13:58   ` Kairui Song
  2023-12-20  0:54     ` Matthew Wilcox
  0 siblings, 1 reply; 17+ messages in thread
From: Kairui Song @ 2023-12-16 13:58 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle); +Cc: Andrew Morton, linux-mm

Matthew Wilcox (Oracle) <willy@infradead.org> wrote on Thursday, 14 December 2023 at 05:59:
>
> shmem_swapin_cluster() immediately converts the page back to a folio,
> and swapin_readahead() may as well call folio_file_page() once instead
> of having each function call it.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/shmem.c      |  8 +++-----
>  mm/swap.h       |  6 +++---
>  mm/swap_state.c | 21 ++++++++++-----------
>  3 files changed, 16 insertions(+), 19 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index c62f904ba1ca..a4d388973021 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1570,15 +1570,13 @@ static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
>  {
>         struct mempolicy *mpol;
>         pgoff_t ilx;
> -       struct page *page;
> +       struct folio *folio;
>
>         mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
> -       page = swap_cluster_readahead(swap, gfp, mpol, ilx);
> +       folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
>         mpol_cond_put(mpol);
>
> -       if (!page)
> -               return NULL;
> -       return page_folio(page);
> +       return folio;
>  }
>
>  /*
> diff --git a/mm/swap.h b/mm/swap.h
> index 82c68ccb5ab1..758c46ca671e 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -52,8 +52,8 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
>                 struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
>                 bool skip_if_exists);
> -struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
> -                                   struct mempolicy *mpol, pgoff_t ilx);
> +struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
> +               struct mempolicy *mpol, pgoff_t ilx);
>  struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
>                               struct vm_fault *vmf);
>
> @@ -80,7 +80,7 @@ static inline void show_swap_cache_info(void)
>  {
>  }
>
> -static inline struct page *swap_cluster_readahead(swp_entry_t entry,
> +static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
>                         gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
>  {
>         return NULL;
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index 1cb1d5d0583e..793b5b9e4f96 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -629,7 +629,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
>   * @mpol: NUMA memory allocation policy to be applied
>   * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
>   *
> - * Returns the struct page for entry and addr, after queueing swapin.
> + * Returns the struct folio for entry and addr, after queueing swapin.
>   *
>   * Primitive swap readahead code. We simply read an aligned block of
>   * (1 << page_cluster) entries in the swap area. This method is chosen
> @@ -640,7 +640,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
>   * are used for every page of the readahead: neighbouring pages on swap
>   * are fairly likely to have been swapped out from the same node.
>   */
> -struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
> +struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
>                                     struct mempolicy *mpol, pgoff_t ilx)
>  {
>         struct folio *folio;
> @@ -692,7 +692,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
>         if (unlikely(page_allocated))
>                 swap_read_folio(folio, false, NULL);
>         zswap_folio_swapin(folio);
> -       return folio_file_page(folio, swp_offset(entry));
> +       return folio;
>  }
>
>  int init_swap_address_space(unsigned int type, unsigned long nr_pages)
> @@ -796,7 +796,7 @@ static void swap_ra_info(struct vm_fault *vmf,
>   * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
>   * @vmf: fault information
>   *
> - * Returns the struct page for entry and addr, after queueing swapin.
> + * Returns the struct folio for entry and addr, after queueing swapin.
>   *
>   * Primitive swap readahead code. We simply read in a few pages whose
>   * virtual addresses are around the fault address in the same vma.
> @@ -804,9 +804,8 @@ static void swap_ra_info(struct vm_fault *vmf,
>   * Caller must hold read mmap_lock if vmf->vma is not NULL.
>   *
>   */
> -static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
> -                                      struct mempolicy *mpol, pgoff_t targ_ilx,
> -                                      struct vm_fault *vmf)
> +static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
> +               struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
>  {
>         struct blk_plug plug;
>         struct swap_iocb *splug = NULL;
> @@ -868,7 +867,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
>         if (unlikely(page_allocated))
>                 swap_read_folio(folio, false, NULL);
>         zswap_folio_swapin(folio);
> -       return folio_file_page(folio, swp_offset(entry));
> +       return folio;
>  }
>
>  /**
> @@ -888,14 +887,14 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
>  {
>         struct mempolicy *mpol;
>         pgoff_t ilx;
> -       struct page *page;
> +       struct folio *folio;
>
>         mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
> -       page = swap_use_vma_readahead() ?
> +       folio = swap_use_vma_readahead() ?
>                 swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
>                 swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
>         mpol_cond_put(mpol);
> -       return page;
> +       return folio_file_page(folio, swp_offset(entry));

Hi Matthew,

There is a bug here: folio can be NULL, which would cause a NULL pointer dereference.

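To make the failure concrete, here is a rough sketch of why the NULL return
oopses inside swapin_readahead() itself, before any caller can check the
result (the folio_file_page() body below is paraphrased from pagemap.h,
not copied exactly):

/* Sketch only: roughly what folio_file_page() does with the folio */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* folio_nr_pages() reads folio->flags, so a NULL folio faults here */
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/* and, with this patch applied, swapin_readahead() ends with: */
	folio = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);
	/*
	 * Both readahead paths can return NULL (e.g. on allocation
	 * failure), so this dereferences a NULL folio in that case:
	 */
	return folio_file_page(folio, swp_offset(entry));
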

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 13/13] mm: Convert swap_cluster_readahead and swap_vma_readahead to return a folio
  2023-12-16 13:58   ` Kairui Song
@ 2023-12-20  0:54     ` Matthew Wilcox
  0 siblings, 0 replies; 17+ messages in thread
From: Matthew Wilcox @ 2023-12-20  0:54 UTC (permalink / raw)
  To: Kairui Song; +Cc: Andrew Morton, linux-mm

On Sat, Dec 16, 2023 at 09:58:03PM +0800, Kairui Song wrote:
> > @@ -888,14 +887,14 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
> >  {
> >         struct mempolicy *mpol;
> >         pgoff_t ilx;
> > -       struct page *page;
> > +       struct folio *folio;
> >
> >         mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
> > -       page = swap_use_vma_readahead() ?
> > +       folio = swap_use_vma_readahead() ?
> >                 swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
> >                 swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
> >         mpol_cond_put(mpol);
> > -       return page;
> > +       return folio_file_page(folio, swp_offset(entry));
> 
> Hi Matthew,
> 
> There is a bug here: folio can be NULL, which would cause a NULL pointer dereference.

Andrew, syzbot has also picked up on this.  Please add this -fix patch?

diff --git a/mm/swap_state.c b/mm/swap_state.c
index 793b5b9e4f96..8a3a8f1ab20a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -894,6 +894,9 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
 		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
 	mpol_cond_put(mpol);
+
+	if (!folio)
+		return NULL;
 	return folio_file_page(folio, swp_offset(entry));
 }
 


^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2023-12-20  0:54 UTC | newest]

Thread overview: 17+ messages
2023-12-13 21:58 [PATCH 00/13] More swap folio conversions Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 01/13] mm: Return the folio from __read_swap_cache_async() Matthew Wilcox (Oracle)
2023-12-13 23:14   ` Andrew Morton
2023-12-13 21:58 ` [PATCH 02/13] mm: Pass a folio to __swap_writepage() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 03/13] mm: Pass a folio to swap_writepage_fs() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 04/13] mm: Pass a folio to swap_writepage_bdev_sync() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 05/13] mm: Pass a folio to swap_writepage_bdev_async() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 06/13] mm: Pass a folio to swap_readpage_fs() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 07/13] mm: Pass a folio to swap_readpage_bdev_sync() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 08/13] mm: Pass a folio to swap_readpage_bdev_async() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 09/13] mm: Convert swap_page_sector() to swap_folio_sector() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 10/13] mm: Convert swap_readpage() to swap_read_folio() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 11/13] mm: Remove page_swap_info() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 12/13] mm: Return a folio from read_swap_cache_async() Matthew Wilcox (Oracle)
2023-12-13 21:58 ` [PATCH 13/13] mm: Convert swap_cluster_readahead and swap_vma_readahead to return a folio Matthew Wilcox (Oracle)
2023-12-16 13:58   ` Kairui Song
2023-12-20  0:54     ` Matthew Wilcox
