From: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>,
Andrew Morton <akpm@linux-foundation.org>,
Axel Rasmussen <axelrasmussen@google.com>,
Brendan Jackman <jackmanb@google.com>,
David Hildenbrand <david@kernel.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
Michal Hocko <mhocko@suse.com>, Mike Rapoport <rppt@kernel.org>,
Qi Zheng <zhengqi.arch@bytedance.com>,
Shakeel Butt <shakeel.butt@linux.dev>,
Suren Baghdasaryan <surenb@google.com>,
Vlastimil Babka <vbabka@suse.cz>, Wei Xu <weixugc@google.com>,
Yuanchu Xie <yuanchu@google.com>, Zi Yan <ziy@nvidia.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC LPC2025 PATCH 2/4] mm/vmscan/page_alloc: Remove node_reclaim
Date: Fri, 5 Dec 2025 15:32:13 -0800
Message-ID: <20251205233217.3344186-3-joshua.hahnjy@gmail.com>
In-Reply-To: <20251205233217.3344186-1-joshua.hahnjy@gmail.com>
node_reclaim() is currently only called from get_page_from_freelist(),
when the zone_reclaim_mode sysctl is set and the current node is full.

With the zone_reclaim_mode sysctl being deprecated later in the series,
there are no remaining callsites for node_reclaim(). Remove node_reclaim()
and its associated return values NODE_RECLAIM_{NOSCAN, FULL, SOME,
SUCCESS}, as well as the zone_reclaim_{success, failed} vmstat items.

We can also remove zone_allows_reclaim(): with node_reclaim_enabled()
always returning false, it is never evaluated.
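
For context, node_reclaim_enabled() is a small helper in
include/linux/swap.h that just tests the RECLAIM_* bits of
node_reclaim_mode; roughly:

	static inline bool node_reclaim_enabled(void)
	{
		/* Is any node_reclaim_mode bit set? */
		return node_reclaim_mode & (RECLAIM_ZONE |
					    RECLAIM_WRITE |
					    RECLAIM_UNMAP);
	}

Once the sysctl is gone and node_reclaim_mode can no longer be set,
this is constant false, so the zone_allows_reclaim() distance check
becomes dead code.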
Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
---
 include/linux/vm_event_item.h |  4 ---
 mm/internal.h                 | 11 ------
 mm/page_alloc.c               | 34 ------------------
 mm/vmscan.c                   | 67 -----------------------------------
 mm/vmstat.c                   |  4 ---
 5 files changed, 120 deletions(-)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 92f80b4d69a6..2520200b65f0 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -53,10 +53,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSCAN_FILE,
PGSTEAL_ANON,
PGSTEAL_FILE,
-#ifdef CONFIG_NUMA
- PGSCAN_ZONE_RECLAIM_SUCCESS,
- PGSCAN_ZONE_RECLAIM_FAILED,
-#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
PAGEOUTRUN, PGROTATED,
diff --git a/mm/internal.h b/mm/internal.h
index 04c307ee33ae..743fcebe53a8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1196,24 +1196,13 @@ static inline void mminit_verify_zonelist(void)
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
-#define NODE_RECLAIM_NOSCAN -2
-#define NODE_RECLAIM_FULL -1
-#define NODE_RECLAIM_SOME 0
-#define NODE_RECLAIM_SUCCESS 1
-
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
-extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
#define node_reclaim_mode 0
-static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
- unsigned int order)
-{
- return NODE_RECLAIM_NOSCAN;
-}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
return NUMA_NO_NODE;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d0f026ec10b6..010a035e81bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3684,17 +3684,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
#ifdef CONFIG_NUMA
int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
-
-static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
-{
- return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
- node_reclaim_distance;
-}
-#else /* CONFIG_NUMA */
-static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
-{
- return true;
-}
#endif /* CONFIG_NUMA */
/*
@@ -3868,8 +3857,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
if (!zone_watermark_fast(zone, order, mark,
ac->highest_zoneidx, alloc_flags,
gfp_mask)) {
- int ret;
-
if (cond_accept_memory(zone, order, alloc_flags))
goto try_this_zone;
@@ -3885,27 +3872,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
if (alloc_flags & ALLOC_NO_WATERMARKS)
goto try_this_zone;
-
- if (!node_reclaim_enabled() ||
- !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
- continue;
-
- ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
- switch (ret) {
- case NODE_RECLAIM_NOSCAN:
- /* did not scan */
- continue;
- case NODE_RECLAIM_FULL:
- /* scanned but unreclaimable */
- continue;
- default:
- /* did we reclaim enough */
- if (zone_watermark_ok(zone, order, mark,
- ac->highest_zoneidx, alloc_flags))
- goto try_this_zone;
-
- continue;
- }
}
try_this_zone:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3b85652a42b9..d07acd76fdea 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7537,13 +7537,6 @@ module_init(kswapd_init)
*/
int node_reclaim_mode __read_mostly;
-/*
- * Priority for NODE_RECLAIM. This determines the fraction of pages
- * of a node considered for each zone_reclaim. 4 scans 1/16th of
- * a zone.
- */
-#define NODE_RECLAIM_PRIORITY 4
-
/*
* Percentage of pages in a zone that must be unmapped for node_reclaim to
* occur.
@@ -7646,66 +7639,6 @@ static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
return sc->nr_reclaimed;
}
-int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
-{
- int ret;
- /* Minimum pages needed in order to stay on node */
- const unsigned long nr_pages = 1 << order;
- struct scan_control sc = {
- .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
- .gfp_mask = current_gfp_context(gfp_mask),
- .order = order,
- .priority = NODE_RECLAIM_PRIORITY,
- .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
- .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
- .may_swap = 1,
- .reclaim_idx = gfp_zone(gfp_mask),
- };
-
- /*
- * Node reclaim reclaims unmapped file backed pages and
- * slab pages if we are over the defined limits.
- *
- * A small portion of unmapped file backed pages is needed for
- * file I/O otherwise pages read by file I/O will be immediately
- * thrown out if the node is overallocated. So we do not reclaim
- * if less than a specified percentage of the node is used by
- * unmapped file backed pages.
- */
- if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
- node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
- pgdat->min_slab_pages)
- return NODE_RECLAIM_FULL;
-
- /*
- * Do not scan if the allocation should not be delayed.
- */
- if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
- return NODE_RECLAIM_NOSCAN;
-
- /*
- * Only run node reclaim on the local node or on nodes that do not
- * have associated processors. This will favor the local processor
- * over remote processors and spread off node memory allocations
- * as wide as possible.
- */
- if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
- return NODE_RECLAIM_NOSCAN;
-
- if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
- return NODE_RECLAIM_NOSCAN;
-
- ret = __node_reclaim(pgdat, gfp_mask, nr_pages, &sc) >= nr_pages;
- clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
-
- if (ret)
- count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
- else
- count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
-
- return ret;
-}
-
enum {
MEMORY_RECLAIM_SWAPPINESS = 0,
MEMORY_RECLAIM_SWAPPINESS_MAX,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 65de88cdf40e..3564bc62325a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1349,10 +1349,6 @@ const char * const vmstat_text[] = {
[I(PGSTEAL_ANON)] = "pgsteal_anon",
[I(PGSTEAL_FILE)] = "pgsteal_file",
-#ifdef CONFIG_NUMA
- [I(PGSCAN_ZONE_RECLAIM_SUCCESS)] = "zone_reclaim_success",
- [I(PGSCAN_ZONE_RECLAIM_FAILED)] = "zone_reclaim_failed",
-#endif
[I(PGINODESTEAL)] = "pginodesteal",
[I(SLABS_SCANNED)] = "slabs_scanned",
[I(KSWAPD_INODESTEAL)] = "kswapd_inodesteal",
--
2.47.3