From: Ryan Roberts <ryan.roberts@arm.com>
To: Will Deacon <will@kernel.org>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Nick Piggin <npiggin@gmail.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	Sven Schnelle <svens@linux.ibm.com>,
	Arnd Bergmann <arnd@arndb.de>,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	David Hildenbrand <david@redhat.com>, Yu Zhao <yuzhao@google.com>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	Yin Fengwei <fengwei.yin@intel.com>,
	Yang Shi <shy828301@gmail.com>,
	"Huang, Ying" <ying.huang@intel.com>, Zi Yan <ziy@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v2 4/5] mm: Refactor release_pages()
Date: Wed, 30 Aug 2023 10:50:10 +0100	[thread overview]
Message-ID: <20230830095011.1228673-5-ryan.roberts@arm.com> (raw)
In-Reply-To: <20230830095011.1228673-1-ryan.roberts@arm.com>

In preparation for implementing folios_put_refs() in the next patch,
refactor release_pages() into a set of reusable helper functions. The
primary difference between release_pages() and folios_put_refs() is how
they iterate over the set of folios; the per-folio actions are
identical.

No functional change intended.
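
For illustration, the expectation is that folios_put_refs() in the next
patch reduces to a thin loop around these helpers, roughly as below.
This is a sketch only; the folio_batch-based signature is an assumption
for illustration and is not part of this patch:

	/* Sketch: drop refs[i] references from each folio in the batch. */
	void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
	{
		struct folios_put_refs_ctx ctx;
		unsigned int i;

		__folios_put_refs_init(&ctx);

		for (i = 0; i < folios->nr; i++) {
			/* refs == NULL means put one reference per folio. */
			__folios_put_refs_do_one(&ctx, folios->folios[i],
						 refs ? refs[i] : 1);
		}

		__folios_put_refs_complete(&ctx);
	}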

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 mm/swap.c | 167 +++++++++++++++++++++++++++++++-----------------------
 1 file changed, 97 insertions(+), 70 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index b05cce475202..5d3e35668929 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -945,6 +945,98 @@ void lru_cache_disable(void)
 #endif
 }
 
+struct folios_put_refs_ctx {
+	struct list_head pages_to_free;
+	struct lruvec *lruvec;
+	unsigned long flags;
+	unsigned int lock_batch;
+};
+
+static void __folios_put_refs_init(struct folios_put_refs_ctx *ctx)
+{
+	*ctx = (struct folios_put_refs_ctx) {
+		.pages_to_free = LIST_HEAD_INIT(ctx->pages_to_free),
+		.lruvec = NULL,
+		.flags = 0,
+	};
+}
+
+static void __folios_put_refs_complete(struct folios_put_refs_ctx *ctx)
+{
+	if (ctx->lruvec)
+		unlock_page_lruvec_irqrestore(ctx->lruvec, ctx->flags);
+
+	mem_cgroup_uncharge_list(&ctx->pages_to_free);
+	free_unref_page_list(&ctx->pages_to_free);
+}
+
+static void __folios_put_refs_do_one(struct folios_put_refs_ctx *ctx,
+					struct folio *folio, int refs)
+{
+	/*
+	 * Make sure the IRQ-safe lock-holding time does not get
+	 * excessive with a continuous string of pages from the
+	 * same lruvec. The lock is held only if lruvec != NULL.
+	 */
+	if (ctx->lruvec && ++ctx->lock_batch == SWAP_CLUSTER_MAX) {
+		unlock_page_lruvec_irqrestore(ctx->lruvec, ctx->flags);
+		ctx->lruvec = NULL;
+	}
+
+	if (is_huge_zero_page(&folio->page))
+		return;
+
+	if (folio_is_zone_device(folio)) {
+		if (ctx->lruvec) {
+			unlock_page_lruvec_irqrestore(ctx->lruvec, ctx->flags);
+			ctx->lruvec = NULL;
+		}
+		if (put_devmap_managed_page_refs(&folio->page, refs))
+			return;
+		if (folio_ref_sub_and_test(folio, refs))
+			free_zone_device_page(&folio->page);
+		return;
+	}
+
+	if (!folio_ref_sub_and_test(folio, refs))
+		return;
+
+	if (folio_test_large(folio)) {
+		if (ctx->lruvec) {
+			unlock_page_lruvec_irqrestore(ctx->lruvec, ctx->flags);
+			ctx->lruvec = NULL;
+		}
+		__folio_put_large(folio);
+		return;
+	}
+
+	if (folio_test_lru(folio)) {
+		struct lruvec *prev_lruvec = ctx->lruvec;
+
+		ctx->lruvec = folio_lruvec_relock_irqsave(folio, ctx->lruvec,
+								&ctx->flags);
+		if (prev_lruvec != ctx->lruvec)
+			ctx->lock_batch = 0;
+
+		lruvec_del_folio(ctx->lruvec, folio);
+		__folio_clear_lru_flags(folio);
+	}
+
+	/*
+	 * In rare cases, when truncation or holepunching raced with
+	 * munlock after VM_LOCKED was cleared, Mlocked may still be
+	 * found set here.  This does not indicate a problem, unless
+	 * "unevictable_pgs_cleared" appears worryingly large.
+	 */
+	if (unlikely(folio_test_mlocked(folio))) {
+		__folio_clear_mlocked(folio);
+		zone_stat_sub_folio(folio, NR_MLOCK);
+		count_vm_event(UNEVICTABLE_PGCLEARED);
+	}
+
+	list_add(&folio->lru, &ctx->pages_to_free);
+}
+
 /**
  * release_pages - batched put_page()
  * @arg: array of pages to release
@@ -959,10 +1051,9 @@ void release_pages(release_pages_arg arg, int nr)
 {
 	int i;
 	struct page **pages = arg.pages;
-	LIST_HEAD(pages_to_free);
-	struct lruvec *lruvec = NULL;
-	unsigned long flags = 0;
-	unsigned int lock_batch;
+	struct folios_put_refs_ctx ctx;
+
+	__folios_put_refs_init(&ctx);
 
 	for (i = 0; i < nr; i++) {
 		struct folio *folio;
@@ -970,74 +1061,10 @@ void release_pages(release_pages_arg arg, int nr)
 		/* Turn any of the argument types into a folio */
 		folio = page_folio(pages[i]);
 
-		/*
-		 * Make sure the IRQ-safe lock-holding time does not get
-		 * excessive with a continuous string of pages from the
-		 * same lruvec. The lock is held only if lruvec != NULL.
-		 */
-		if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
-			unlock_page_lruvec_irqrestore(lruvec, flags);
-			lruvec = NULL;
-		}
-
-		if (is_huge_zero_page(&folio->page))
-			continue;
-
-		if (folio_is_zone_device(folio)) {
-			if (lruvec) {
-				unlock_page_lruvec_irqrestore(lruvec, flags);
-				lruvec = NULL;
-			}
-			if (put_devmap_managed_page(&folio->page))
-				continue;
-			if (folio_put_testzero(folio))
-				free_zone_device_page(&folio->page);
-			continue;
-		}
-
-		if (!folio_put_testzero(folio))
-			continue;
-
-		if (folio_test_large(folio)) {
-			if (lruvec) {
-				unlock_page_lruvec_irqrestore(lruvec, flags);
-				lruvec = NULL;
-			}
-			__folio_put_large(folio);
-			continue;
-		}
-
-		if (folio_test_lru(folio)) {
-			struct lruvec *prev_lruvec = lruvec;
-
-			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
-									&flags);
-			if (prev_lruvec != lruvec)
-				lock_batch = 0;
-
-			lruvec_del_folio(lruvec, folio);
-			__folio_clear_lru_flags(folio);
-		}
-
-		/*
-		 * In rare cases, when truncation or holepunching raced with
-		 * munlock after VM_LOCKED was cleared, Mlocked may still be
-		 * found set here.  This does not indicate a problem, unless
-		 * "unevictable_pgs_cleared" appears worryingly large.
-		 */
-		if (unlikely(folio_test_mlocked(folio))) {
-			__folio_clear_mlocked(folio);
-			zone_stat_sub_folio(folio, NR_MLOCK);
-			count_vm_event(UNEVICTABLE_PGCLEARED);
-		}
-
-		list_add(&folio->lru, &pages_to_free);
+		__folios_put_refs_do_one(&ctx, folio, 1);
 	}
-	if (lruvec)
-		unlock_page_lruvec_irqrestore(lruvec, flags);
 
-	mem_cgroup_uncharge_list(&pages_to_free);
-	free_unref_page_list(&pages_to_free);
+	__folios_put_refs_complete(&ctx);
 }
 EXPORT_SYMBOL(release_pages);
 
-- 
2.25.1




Thread overview: 15+ messages
2023-08-30  9:50 [PATCH v2 0/5] Optimize mmap_exit for large folios Ryan Roberts
2023-08-30  9:50 ` [PATCH v2 1/5] mm: Implement folio_remove_rmap_range() Ryan Roberts
2023-08-30 14:51   ` Matthew Wilcox
2023-08-30 15:42     ` Ryan Roberts
2023-08-30 16:24       ` David Hildenbrand
2023-08-30  9:50 ` [PATCH v2 2/5] mm/mmu_gather: generalize mmu_gather rmap removal mechanism Ryan Roberts
2023-08-30  9:50 ` [PATCH v2 3/5] mm/mmu_gather: Remove encoded_page infrastructure Ryan Roberts
2023-08-30  9:50 ` Ryan Roberts [this message]
2023-08-30 19:11   ` [PATCH v2 4/5] mm: Refactor release_pages() Matthew Wilcox
2023-08-30 21:17     ` Matthew Wilcox
2023-08-31 19:57     ` Ryan Roberts
2023-09-01  4:36       ` Matthew Wilcox
2023-08-30  9:50 ` [PATCH v2 5/5] mm/mmu_gather: Store and process pages in contig ranges Ryan Roberts
2023-08-30 15:07   ` Matthew Wilcox
2023-08-30 15:32     ` Ryan Roberts
