* stop passing a writeback_control to swap/shmem writeout v2
@ 2025-05-16  7:40 Christoph Hellwig
  2025-05-16  7:40 ` [PATCH 1/5] mm: split out a writeout helper from pageout Christoph Hellwig
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Christoph Hellwig @ 2025-05-16  7:40 UTC
  To: Andrew Morton, Hugh Dickins, Johannes Weiner, Yosry Ahmed, Nhat Pham
  Cc: Matthew Wilcox, Chengming Zhou, Baolin Wang, linux-mm

Hi all,

this series was intended to remove the last remaining users of
AOP_WRITEPAGE_ACTIVATE after my other pending patches removed the
rest, but spectacularly failed at that.

But instead it nicely improves the code, and removes two pointers
from struct writeback_control.
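
For reference, these are the two writeback_control members that are gone
by the end of the series (condensed from the include/linux/writeback.h
hunks in patches 2 and 5, not a complete listing of the structure):

  struct writeback_control {
          /* ... */

          /* Target list for splitting a large folio; removed in patch 2,
           * shmem_writeout() now takes the list as an explicit argument. */
          struct list_head *list;

          /* Cookie for batching swap writes to non-block-device backends;
           * removed in patch 5, swap_writeout() now takes it explicitly. */
          struct swap_iocb **swap_plug;

          /* ... */
  };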

Changes since v1:
 - drop the patch to be merged through a different tree or later
 - fix !CONFIG_SWAP compilation

Diffstat:
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c |    2 
 drivers/gpu/drm/ttm/ttm_backup.c          |    9 ---
 include/linux/shmem_fs.h                  |    5 +
 include/linux/writeback.h                 |   10 ---
 mm/page_io.c                              |   71 +++++++++++--------------
 mm/shmem.c                                |   17 ++----
 mm/swap.h                                 |    9 ++-
 mm/vmscan.c                               |   84 +++++++++++++-----------------
 mm/zswap.c                                |    5 -
 9 files changed, 91 insertions(+), 121 deletions(-)



* [PATCH 1/5] mm: split out a writeout helper from pageout
  2025-05-16  7:40 stop passing a writeback_control to swap/shmem writeout v2 Christoph Hellwig
@ 2025-05-16  7:40 ` Christoph Hellwig
  2025-05-16  7:40 ` [PATCH 2/5] mm: stop passing a writeback_control structure to shmem_writeout Christoph Hellwig
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2025-05-16  7:40 UTC
  To: Andrew Morton, Hugh Dickins, Johannes Weiner, Yosry Ahmed, Nhat Pham
  Cc: Matthew Wilcox, Chengming Zhou, Baolin Wang, linux-mm

Move the code to write back swap / shmem folios into a self-contained
helper to prepare for refactoring it.
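
The resulting call chain for dirty swap and shmem backed folios, condensed
from the diff below rather than a complete listing:

  shrink_folio_list()
    pageout(folio, mapping, plug, folio_list)
      writeout(folio, mapping, plug, folio_list)
        shmem_writeout(folio, &wbc)    /* tmpfs/shmem folios */
        swap_writeout(folio, &wbc)     /* anonymous folios */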

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 mm/vmscan.c | 94 +++++++++++++++++++++++++++--------------------------
 1 file changed, 48 insertions(+), 46 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0eda493fc383..52e6eee4d896 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -652,14 +652,55 @@ typedef enum {
 	PAGE_CLEAN,
 } pageout_t;
 
+static pageout_t writeout(struct folio *folio, struct address_space *mapping,
+		struct swap_iocb **plug, struct list_head *folio_list)
+{
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_NONE,
+		.nr_to_write = SWAP_CLUSTER_MAX,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+		.for_reclaim = 1,
+		.swap_plug = plug,
+	};
+	int res;
+
+	folio_set_reclaim(folio);
+
+	/*
+	 * The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
+	 * or we failed to allocate contiguous swap entries.
+	 */
+	if (shmem_mapping(mapping)) {
+		if (folio_test_large(folio))
+			wbc.list = folio_list;
+		res = shmem_writeout(folio, &wbc);
+	} else {
+		res = swap_writeout(folio, &wbc);
+	}
+
+	if (res < 0)
+		handle_write_error(mapping, folio, res);
+	if (res == AOP_WRITEPAGE_ACTIVATE) {
+		folio_clear_reclaim(folio);
+		return PAGE_ACTIVATE;
+	}
+
+	/* synchronous write? */
+	if (!folio_test_writeback(folio))
+		folio_clear_reclaim(folio);
+
+	trace_mm_vmscan_write_folio(folio);
+	node_stat_add_folio(folio, NR_VMSCAN_WRITE);
+	return PAGE_SUCCESS;
+}
+
 /*
  * pageout is called by shrink_folio_list() for each dirty folio.
  */
 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 			 struct swap_iocb **plug, struct list_head *folio_list)
 {
-	int (*writeout)(struct folio *, struct writeback_control *);
-
 	/*
 	 * We no longer attempt to writeback filesystem folios here, other
 	 * than tmpfs/shmem.  That's taken care of in page-writeback.
@@ -690,51 +731,12 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		}
 		return PAGE_KEEP;
 	}
-	if (shmem_mapping(mapping))
-		writeout = shmem_writeout;
-	else if (folio_test_anon(folio))
-		writeout = swap_writeout;
-	else
-		return PAGE_ACTIVATE;
-
-	if (folio_clear_dirty_for_io(folio)) {
-		int res;
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_NONE,
-			.nr_to_write = SWAP_CLUSTER_MAX,
-			.range_start = 0,
-			.range_end = LLONG_MAX,
-			.for_reclaim = 1,
-			.swap_plug = plug,
-		};
 
-		/*
-		 * The large shmem folio can be split if CONFIG_THP_SWAP is
-		 * not enabled or contiguous swap entries are failed to
-		 * allocate.
-		 */
-		if (shmem_mapping(mapping) && folio_test_large(folio))
-			wbc.list = folio_list;
-
-		folio_set_reclaim(folio);
-		res = writeout(folio, &wbc);
-		if (res < 0)
-			handle_write_error(mapping, folio, res);
-		if (res == AOP_WRITEPAGE_ACTIVATE) {
-			folio_clear_reclaim(folio);
-			return PAGE_ACTIVATE;
-		}
-
-		if (!folio_test_writeback(folio)) {
-			/* synchronous write? */
-			folio_clear_reclaim(folio);
-		}
-		trace_mm_vmscan_write_folio(folio);
-		node_stat_add_folio(folio, NR_VMSCAN_WRITE);
-		return PAGE_SUCCESS;
-	}
-
-	return PAGE_CLEAN;
+	if (!shmem_mapping(mapping) && !folio_test_anon(folio))
+		return PAGE_ACTIVATE;
+	if (!folio_clear_dirty_for_io(folio))
+		return PAGE_CLEAN;
+	return writeout(folio, mapping, plug, folio_list);
 }
 
 /*
-- 
2.47.2




* [PATCH 2/5] mm: stop passing a writeback_control structure to shmem_writeout
  2025-05-16  7:40 stop passing a writeback_control to swap/shmem writeout v2 Christoph Hellwig
  2025-05-16  7:40 ` [PATCH 1/5] mm: split out a writeout helper from pageout Christoph Hellwig
@ 2025-05-16  7:40 ` Christoph Hellwig
  2025-05-16  7:40 ` [PATCH 3/5] mm: tidy up swap_writeout Christoph Hellwig
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2025-05-16  7:40 UTC
  To: Andrew Morton, Hugh Dickins, Johannes Weiner, Yosry Ahmed, Nhat Pham
  Cc: Matthew Wilcox, Chengming Zhou, Baolin Wang, linux-mm

shmem_writeout only needs the swap_iocb cookie and the split folio
list.  Pass those explicitly and remove the now unused list member
from struct writeback_control.
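
To illustrate the new calling convention (condensed from the hunks below,
not a complete listing):

  int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
                  struct list_head *folio_list);

  /* reclaim passes its swap plug and the split-folio list through: */
  res = shmem_writeout(folio, plug, folio_list);

  /* the i915 and ttm callers have neither, so they pass NULL for both: */
  error = shmem_writeout(folio, NULL, NULL);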

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c |  2 +-
 drivers/gpu/drm/ttm/ttm_backup.c          |  9 +-------
 include/linux/shmem_fs.h                  |  5 ++++-
 include/linux/writeback.h                 |  3 ---
 mm/shmem.c                                | 25 ++++++++++++++---------
 mm/vmscan.c                               | 12 +++++------
 6 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 19a3eb82dc6a..24d8daa4fdb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -317,7 +317,7 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
 		if (folio_mapped(folio))
 			folio_redirty_for_writepage(&wbc, folio);
 		else
-			error = shmem_writeout(folio, &wbc);
+			error = shmem_writeout(folio, NULL, NULL);
 	}
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index ffaab68bd5dd..6f2e58be4f3e 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -112,15 +112,8 @@ ttm_backup_backup_page(struct file *backup, struct page *page,
 
 	if (writeback && !folio_mapped(to_folio) &&
 	    folio_clear_dirty_for_io(to_folio)) {
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_NONE,
-			.nr_to_write = SWAP_CLUSTER_MAX,
-			.range_start = 0,
-			.range_end = LLONG_MAX,
-			.for_reclaim = 1,
-		};
 		folio_set_reclaim(to_folio);
-		ret = shmem_writeout(to_folio, &wbc);
+		ret = shmem_writeout(to_folio, NULL, NULL);
 		if (!folio_test_writeback(to_folio))
 			folio_clear_reclaim(to_folio);
 		/*
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 5f03a39a26f7..6d0f9c599ff7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -11,6 +11,8 @@
 #include <linux/fs_parser.h>
 #include <linux/userfaultfd_k.h>
 
+struct swap_iocb;
+
 /* inode in-kernel data */
 
 #ifdef CONFIG_TMPFS_QUOTA
@@ -107,7 +109,8 @@ static inline bool shmem_mapping(struct address_space *mapping)
 void shmem_unlock_mapping(struct address_space *mapping);
 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 					pgoff_t index, gfp_t gfp_mask);
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+		struct list_head *folio_list);
 void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 int shmem_unuse(unsigned int type);
 
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index eda4b62511f7..82f217970092 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,9 +79,6 @@ struct writeback_control {
 	 */
 	struct swap_iocb **swap_plug;
 
-	/* Target list for splitting a large folio */
-	struct list_head *list;
-
 	/* internal fields used by the ->writepages implementation: */
 	struct folio_batch fbatch;
 	pgoff_t index;
diff --git a/mm/shmem.c b/mm/shmem.c
index 858cee02ca49..941b9b29e78a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1540,10 +1540,13 @@ int shmem_unuse(unsigned int type)
  * shmem_writeout - Write the folio to swap
  * @folio: The folio to write
  * @wbc: How writeback is to be done
+ * @plug: swap plug
+ * @folio_list: list to put back folios on split
  *
  * Move the folio from the page cache to the swap cache.
  */
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+		struct list_head *folio_list)
 {
 	struct address_space *mapping = folio->mapping;
 	struct inode *inode = mapping->host;
@@ -1553,9 +1556,6 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 	int nr_pages;
 	bool split = false;
 
-	if (WARN_ON_ONCE(!wbc->for_reclaim))
-		goto redirty;
-
 	if ((info->flags & VM_LOCKED) || sbinfo->noswap)
 		goto redirty;
 
@@ -1582,7 +1582,7 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 try_split:
 		/* Ensure the subpages are still dirty */
 		folio_test_set_dirty(folio);
-		if (split_folio_to_list(folio, wbc->list))
+		if (split_folio_to_list(folio, folio_list))
 			goto redirty;
 		folio_clear_dirty(folio);
 	}
@@ -1635,13 +1635,21 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 		list_add(&info->swaplist, &shmem_swaplist);
 
 	if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+		struct writeback_control wbc = {
+			.sync_mode	= WB_SYNC_NONE,
+			.nr_to_write	= SWAP_CLUSTER_MAX,
+			.range_start	= 0,
+			.range_end	= LLONG_MAX,
+			.for_reclaim	= 1,
+			.swap_plug	= plug,
+		};
 		shmem_recalc_inode(inode, 0, nr_pages);
 		swap_shmem_alloc(folio->swap, nr_pages);
 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
 
 		mutex_unlock(&shmem_swaplist_mutex);
 		BUG_ON(folio_mapped(folio));
-		return swap_writeout(folio, wbc);
+		return swap_writeout(folio, &wbc);
 	}
 
 	list_del_init(&info->swaplist);
@@ -1650,10 +1658,7 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 		goto try_split;
 redirty:
 	folio_mark_dirty(folio);
-	if (wbc->for_reclaim)
-		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
-	folio_unlock(folio);
-	return 0;
+	return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
 }
 EXPORT_SYMBOL_GPL(shmem_writeout);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 52e6eee4d896..2cf954006d6d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -669,15 +669,13 @@ static pageout_t writeout(struct folio *folio, struct address_space *mapping,
 
 	/*
 	 * The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
-	 * or we failed to allocate contiguous swap entries.
+	 * or we failed to allocate contiguous swap entries, in which case
+	 * the split out folios get added back to folio_list.
 	 */
-	if (shmem_mapping(mapping)) {
-		if (folio_test_large(folio))
-			wbc.list = folio_list;
-		res = shmem_writeout(folio, &wbc);
-	} else {
+	if (shmem_mapping(mapping))
+		res = shmem_writeout(folio, plug, folio_list);
+	else
 		res = swap_writeout(folio, &wbc);
-	}
 
 	if (res < 0)
 		handle_write_error(mapping, folio, res);
-- 
2.47.2




* [PATCH 3/5] mm: tidy up swap_writeout
  2025-05-16  7:40 stop passing a writeback_control to swap/shmem writeout v2 Christoph Hellwig
  2025-05-16  7:40 ` [PATCH 1/5] mm: split out a writeout helper from pageout Christoph Hellwig
  2025-05-16  7:40 ` [PATCH 2/5] mm: stop passing a writeback_control structure to shmem_writeout Christoph Hellwig
@ 2025-05-16  7:40 ` Christoph Hellwig
  2025-05-16  7:40 ` [PATCH 4/5] mm: stop passing a writeback_control structure to __swap_writepage Christoph Hellwig
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2025-05-16  7:40 UTC
  To: Andrew Morton, Hugh Dickins, Johannes Weiner, Yosry Ahmed, Nhat Pham
  Cc: Matthew Wilcox, Chengming Zhou, Baolin Wang, linux-mm

Use a goto label to consolidate the folio unlock and return pattern, and
don't bother with an else after a return / goto.
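
The shape of the change, condensed from the diff below: each early
"unlock and return" branch now jumps to a single exit label.

  /* before */
  if (folio_free_swap(folio)) {
          folio_unlock(folio);
          return 0;
  }

  /* after */
  if (folio_free_swap(folio))
          goto out_unlock;
  ...
  out_unlock:
          folio_unlock(folio);
          return ret;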

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 mm/page_io.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index f7716b6569fa..c420b0aa0f22 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -239,12 +239,11 @@ static void swap_zeromap_folio_clear(struct folio *folio)
  */
 int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 {
-	int ret;
+	int ret = 0;
+
+	if (folio_free_swap(folio))
+		goto out_unlock;
 
-	if (folio_free_swap(folio)) {
-		folio_unlock(folio);
-		return 0;
-	}
 	/*
 	 * Arch code may have to preserve more data than just the page
 	 * contents, e.g. memory tags.
@@ -252,8 +251,7 @@ int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 	ret = arch_prepare_to_swap(folio);
 	if (ret) {
 		folio_mark_dirty(folio);
-		folio_unlock(folio);
-		return ret;
+		goto out_unlock;
 	}
 
 	/*
@@ -264,20 +262,19 @@ int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 	 */
 	if (is_folio_zero_filled(folio)) {
 		swap_zeromap_folio_set(folio);
-		folio_unlock(folio);
-		return 0;
-	} else {
-		/*
-		 * Clear bits this folio occupies in the zeromap to prevent
-		 * zero data being read in from any previous zero writes that
-		 * occupied the same swap entries.
-		 */
-		swap_zeromap_folio_clear(folio);
+		goto out_unlock;
 	}
+
+	/*
+	 * Clear bits this folio occupies in the zeromap to prevent zero data
+	 * being read in from any previous zero writes that occupied the same
+	 * swap entries.
+	 */
+	swap_zeromap_folio_clear(folio);
+
 	if (zswap_store(folio)) {
 		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
-		folio_unlock(folio);
-		return 0;
+		goto out_unlock;
 	}
 	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
 		folio_mark_dirty(folio);
@@ -286,6 +283,9 @@ int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 
 	__swap_writepage(folio, wbc);
 	return 0;
+out_unlock:
+	folio_unlock(folio);
+	return ret;
 }
 
 static inline void count_swpout_vm_event(struct folio *folio)
-- 
2.47.2




* [PATCH 4/5] mm: stop passing a writeback_control structure to __swap_writepage
  2025-05-16  7:40 stop passing a writeback_control to swap/shmem writeout v2 Christoph Hellwig
                   ` (2 preceding siblings ...)
  2025-05-16  7:40 ` [PATCH 3/5] mm: tidy up swap_writeout Christoph Hellwig
@ 2025-05-16  7:40 ` Christoph Hellwig
  2025-05-16  7:40 ` [PATCH 5/5] mm: stop passing a writeback_control structure to swap_writeout Christoph Hellwig
  2025-05-19 23:54 ` stop passing a writeback_control to swap/shmem writeout v2 Andrew Morton
  5 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2025-05-16  7:40 UTC
  To: Andrew Morton, Hugh Dickins, Johannes Weiner, Yosry Ahmed, Nhat Pham
  Cc: Matthew Wilcox, Chengming Zhou, Baolin Wang, linux-mm

__swap_writepage only needs the swap_iocb cookie from the
writeback_control structure, so pass it explicitly and remove the
now unused swap_iocb member from struct writeback_control.
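
The new interface and its callers, condensed from the hunks below:

  void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug);

  /* swap_writeout() forwards the cookie from its writeback_control: */
  __swap_writepage(folio, wbc->swap_plug);

  /* zswap_writeback_entry() never set a swap plug and now passes NULL: */
  __swap_writepage(folio, NULL);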

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 mm/page_io.c | 33 ++++++++++++++-------------------
 mm/swap.h    |  2 +-
 mm/zswap.c   |  5 +----
 3 files changed, 16 insertions(+), 24 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index c420b0aa0f22..fb52bedcc966 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -281,7 +281,7 @@ int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 		return AOP_WRITEPAGE_ACTIVATE;
 	}
 
-	__swap_writepage(folio, wbc);
+	__swap_writepage(folio, wbc->swap_plug);
 	return 0;
 out_unlock:
 	folio_unlock(folio);
@@ -371,9 +371,9 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
 	mempool_free(sio, sio_pool);
 }
 
-static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
+static void swap_writepage_fs(struct folio *folio, struct swap_iocb **swap_plug)
 {
-	struct swap_iocb *sio = NULL;
+	struct swap_iocb *sio = swap_plug ? *swap_plug : NULL;
 	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 	struct file *swap_file = sis->swap_file;
 	loff_t pos = swap_dev_pos(folio->swap);
@@ -381,8 +381,6 @@ static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc
 	count_swpout_vm_event(folio);
 	folio_start_writeback(folio);
 	folio_unlock(folio);
-	if (wbc->swap_plug)
-		sio = *wbc->swap_plug;
 	if (sio) {
 		if (sio->iocb.ki_filp != swap_file ||
 		    sio->iocb.ki_pos + sio->len != pos) {
@@ -401,22 +399,21 @@ static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc
 	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
 	sio->len += folio_size(folio);
 	sio->pages += 1;
-	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
+	if (sio->pages == ARRAY_SIZE(sio->bvec) || !swap_plug) {
 		swap_write_unplug(sio);
 		sio = NULL;
 	}
-	if (wbc->swap_plug)
-		*wbc->swap_plug = sio;
+	if (swap_plug)
+		*swap_plug = sio;
 }
 
 static void swap_writepage_bdev_sync(struct folio *folio,
-		struct writeback_control *wbc, struct swap_info_struct *sis)
+		struct swap_info_struct *sis)
 {
 	struct bio_vec bv;
 	struct bio bio;
 
-	bio_init(&bio, sis->bdev, &bv, 1,
-		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
+	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP);
 	bio.bi_iter.bi_sector = swap_folio_sector(folio);
 	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
 
@@ -431,13 +428,11 @@ static void swap_writepage_bdev_sync(struct folio *folio,
 }
 
 static void swap_writepage_bdev_async(struct folio *folio,
-		struct writeback_control *wbc, struct swap_info_struct *sis)
+		struct swap_info_struct *sis)
 {
 	struct bio *bio;
 
-	bio = bio_alloc(sis->bdev, 1,
-			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
-			GFP_NOIO);
+	bio = bio_alloc(sis->bdev, 1, REQ_OP_WRITE | REQ_SWAP, GFP_NOIO);
 	bio->bi_iter.bi_sector = swap_folio_sector(folio);
 	bio->bi_end_io = end_swap_bio_write;
 	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
@@ -449,7 +444,7 @@ static void swap_writepage_bdev_async(struct folio *folio,
 	submit_bio(bio);
 }
 
-void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
+void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug)
 {
 	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 
@@ -460,16 +455,16 @@ void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
 	 * is safe.
 	 */
 	if (data_race(sis->flags & SWP_FS_OPS))
-		swap_writepage_fs(folio, wbc);
+		swap_writepage_fs(folio, swap_plug);
 	/*
 	 * ->flags can be updated non-atomicially (scan_swap_map_slots),
 	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
 	 * is safe.
 	 */
 	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
-		swap_writepage_bdev_sync(folio, wbc, sis);
+		swap_writepage_bdev_sync(folio, sis);
 	else
-		swap_writepage_bdev_async(folio, wbc, sis);
+		swap_writepage_bdev_async(folio, sis);
 }
 
 void swap_write_unplug(struct swap_iocb *sio)
diff --git a/mm/swap.h b/mm/swap.h
index 2269eb9df0af..045415e2eb4f 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -21,7 +21,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
 }
 void swap_write_unplug(struct swap_iocb *sio);
 int swap_writeout(struct folio *folio, struct writeback_control *wbc);
-void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
+void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug);
 
 /* linux/mm/swap_state.c */
 /* One swap address space for each 64M swap space */
diff --git a/mm/zswap.c b/mm/zswap.c
index 455e9425c5f5..3c0fd8a13718 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1070,9 +1070,6 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	struct mempolicy *mpol;
 	bool folio_was_allocated;
 	struct swap_info_struct *si;
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_NONE,
-	};
 	int ret = 0;
 
 	/* try to allocate swap cache folio */
@@ -1134,7 +1131,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	folio_set_reclaim(folio);
 
 	/* start writeback */
-	__swap_writepage(folio, &wbc);
+	__swap_writepage(folio, NULL);
 
 out:
 	if (ret && ret != -EEXIST) {
-- 
2.47.2




* [PATCH 5/5] mm: stop passing a writeback_control structure to swap_writeout
  2025-05-16  7:40 stop passing a writeback_control to swap/shmem writeout v2 Christoph Hellwig
                   ` (3 preceding siblings ...)
  2025-05-16  7:40 ` [PATCH 4/5] mm: stop passing a writeback_control structure to __swap_writepage Christoph Hellwig
@ 2025-05-16  7:40 ` Christoph Hellwig
  2025-05-19 23:54 ` stop passing a writeback_control to swap/shmem writeout v2 Andrew Morton
  5 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2025-05-16  7:40 UTC
  To: Andrew Morton, Hugh Dickins, Johannes Weiner, Yosry Ahmed, Nhat Pham
  Cc: Matthew Wilcox, Chengming Zhou, Baolin Wang, linux-mm

swap_writeout only needs the swap_iocb cookie from the writeback_control
structure, so pass it explicitly.
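
The resulting prototype, including its !CONFIG_SWAP stub (condensed from
the mm/swap.h hunks below):

  #ifdef CONFIG_SWAP
  int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug);
  #else
  static inline int swap_writeout(struct folio *folio,
                  struct swap_iocb **swap_plug)
  {
          return 0;
  }
  #endif

  /* shmem_writeout() and the vmscan writeout() helper now pass their
   * swap plug straight through, e.g.: */
  return swap_writeout(folio, plug);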

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/writeback.h |  7 -------
 mm/page_io.c              |  4 ++--
 mm/shmem.c                | 10 +---------
 mm/swap.h                 |  7 +++++--
 mm/vmscan.c               | 10 +---------
 5 files changed, 9 insertions(+), 29 deletions(-)

diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 82f217970092..9e960f2faf79 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -72,13 +72,6 @@ struct writeback_control {
 	 */
 	unsigned no_cgroup_owner:1;
 
-	/* To enable batching of swap writes to non-block-device backends,
-	 * "plug" can be set point to a 'struct swap_iocb *'.  When all swap
-	 * writes have been submitted, if with swap_iocb is not NULL,
-	 * swap_write_unplug() should be called.
-	 */
-	struct swap_iocb **swap_plug;
-
 	/* internal fields used by the ->writepages implementation: */
 	struct folio_batch fbatch;
 	pgoff_t index;
diff --git a/mm/page_io.c b/mm/page_io.c
index fb52bedcc966..a2056a5ecb13 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -237,7 +237,7 @@ static void swap_zeromap_folio_clear(struct folio *folio)
  * We may have stale swap cache pages in memory: notice
  * them here and get rid of the unnecessary final write.
  */
-int swap_writeout(struct folio *folio, struct writeback_control *wbc)
+int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug)
 {
 	int ret = 0;
 
@@ -281,7 +281,7 @@ int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 		return AOP_WRITEPAGE_ACTIVATE;
 	}
 
-	__swap_writepage(folio, wbc->swap_plug);
+	__swap_writepage(folio, swap_plug);
 	return 0;
 out_unlock:
 	folio_unlock(folio);
diff --git a/mm/shmem.c b/mm/shmem.c
index 941b9b29e78a..2033d2e3d35d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1635,21 +1635,13 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
 		list_add(&info->swaplist, &shmem_swaplist);
 
 	if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
-		struct writeback_control wbc = {
-			.sync_mode	= WB_SYNC_NONE,
-			.nr_to_write	= SWAP_CLUSTER_MAX,
-			.range_start	= 0,
-			.range_end	= LLONG_MAX,
-			.for_reclaim	= 1,
-			.swap_plug	= plug,
-		};
 		shmem_recalc_inode(inode, 0, nr_pages);
 		swap_shmem_alloc(folio->swap, nr_pages);
 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
 
 		mutex_unlock(&shmem_swaplist_mutex);
 		BUG_ON(folio_mapped(folio));
-		return swap_writeout(folio, &wbc);
+		return swap_writeout(folio, plug);
 	}
 
 	list_del_init(&info->swaplist);
diff --git a/mm/swap.h b/mm/swap.h
index 045415e2eb4f..e87a0f19a0ee 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -3,6 +3,8 @@
 #define _MM_SWAP_H
 
 struct mempolicy;
+struct swap_iocb;
+
 extern int page_cluster;
 
 #ifdef CONFIG_SWAP
@@ -20,7 +22,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
 		__swap_read_unplug(plug);
 }
 void swap_write_unplug(struct swap_iocb *sio);
-int swap_writeout(struct folio *folio, struct writeback_control *wbc);
+int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug);
 void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug);
 
 /* linux/mm/swap_state.c */
@@ -141,7 +143,8 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
 	return NULL;
 }
 
-static inline int swap_writeout(struct folio *f, struct writeback_control *wbc)
+static inline int swap_writeout(struct folio *folio,
+		struct swap_iocb **swap_plug)
 {
 	return 0;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cf954006d6d..0c2a762fe729 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -655,14 +655,6 @@ typedef enum {
 static pageout_t writeout(struct folio *folio, struct address_space *mapping,
 		struct swap_iocb **plug, struct list_head *folio_list)
 {
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_NONE,
-		.nr_to_write = SWAP_CLUSTER_MAX,
-		.range_start = 0,
-		.range_end = LLONG_MAX,
-		.for_reclaim = 1,
-		.swap_plug = plug,
-	};
 	int res;
 
 	folio_set_reclaim(folio);
@@ -675,7 +667,7 @@ static pageout_t writeout(struct folio *folio, struct address_space *mapping,
 	if (shmem_mapping(mapping))
 		res = shmem_writeout(folio, plug, folio_list);
 	else
-		res = swap_writeout(folio, &wbc);
+		res = swap_writeout(folio, plug);
 
 	if (res < 0)
 		handle_write_error(mapping, folio, res);
-- 
2.47.2




* Re: stop passing a writeback_control to swap/shmem writeout v2
  2025-05-16  7:40 stop passing a writeback_control to swap/shmem writeout v2 Christoph Hellwig
                   ` (4 preceding siblings ...)
  2025-05-16  7:40 ` [PATCH 5/5] mm: stop passing a writeback_control structure to swap_writeout Christoph Hellwig
@ 2025-05-19 23:54 ` Andrew Morton
  5 siblings, 0 replies; 7+ messages in thread
From: Andrew Morton @ 2025-05-19 23:54 UTC
  To: Christoph Hellwig
  Cc: Hugh Dickins, Johannes Weiner, Yosry Ahmed, Nhat Pham,
	Matthew Wilcox, Chengming Zhou, Baolin Wang, linux-mm

On Fri, 16 May 2025 09:40:34 +0200 Christoph Hellwig <hch@lst.de> wrote:

> this series was intended to remove the last remaining users of
> AOP_WRITEPAGE_ACTIVATE after my other pending patches removed the
> rest, but spectacularly failed at that.
> 
> But instead it nicely improves the code, and removes two pointers
> from struct writeback_control.
> 
> Changes since v1:
>  - drop the patch to be merged through a different tree or later
>  - fix !CONFIG_SWAP compilation
> 
> Diffstat:
>  drivers/gpu/drm/i915/gem/i915_gem_shmem.c |    2 
>  drivers/gpu/drm/ttm/ttm_backup.c          |    9 ---
>  include/linux/shmem_fs.h                  |    5 +
>  include/linux/writeback.h                 |   10 ---
>  mm/page_io.c                              |   71 +++++++++++--------------
>  mm/shmem.c                                |   17 ++----
>  mm/swap.h                                 |    9 ++-
>  mm/vmscan.c                               |   84 +++++++++++++-----------------
>  mm/zswap.c                                |    5 -
>  9 files changed, 91 insertions(+), 121 deletions(-)

I can't merge this without a lot of messing about because Matthew's
series "Remove aops->writepage" is sitting over in the vfs tree.

We're at -rc7 anyway, so I'll park this series for after -rc1, thanks.


