From: Kairui Song via B4 Relay <devnull+kasong.tencent.com@kernel.org>
To: linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>, Zi Yan <ziy@nvidia.com>,
Baolin Wang <baolin.wang@linux.alibaba.com>,
Barry Song <baohua@kernel.org>, Hugh Dickins <hughd@google.com>,
Chris Li <chrisl@kernel.org>,
Kemeng Shi <shikemeng@huaweicloud.com>,
Nhat Pham <nphamcs@gmail.com>, Baoquan He <bhe@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Youngjun Park <youngjun.park@lge.com>,
Chengming Zhou <chengming.zhou@linux.dev>,
Roman Gushchin <roman.gushchin@linux.dev>,
Shakeel Butt <shakeel.butt@linux.dev>,
Muchun Song <muchun.song@linux.dev>,
Qi Zheng <zhengqi.arch@bytedance.com>,
linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
Kairui Song <kasong@tencent.com>, Yosry Ahmed <yosry@kernel.org>,
Lorenzo Stoakes <ljs@kernel.org>, Dev Jain <dev.jain@arm.com>,
Lance Yang <lance.yang@linux.dev>,
Michal Hocko <mhocko@suse.com>, Michal Hocko <mhocko@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Axel Rasmussen <axelrasmussen@google.com>,
Lorenzo Stoakes <ljs@kernel.org>, Yosry Ahmed <yosry@kernel.org>
Subject: [PATCH v3 09/12] mm, swap: consolidate cluster allocation helpers
Date: Tue, 21 Apr 2026 14:16:53 +0800 [thread overview]
Message-ID: <20260421-swap-table-p4-v3-9-2f23759a76bc@tencent.com> (raw)
In-Reply-To: <20260421-swap-table-p4-v3-0-2f23759a76bc@tencent.com>
From: Kairui Song <kasong@tencent.com>
Swap cluster table management is spread across several narrow
helpers. As a result, the allocation and fallback sequences are
open-coded in multiple places.
A few more per-cluster tables will be added soon, so avoid
duplicating these sequences per table type. Fold the existing
pairs into cluster-oriented helpers, and rename for consistency.
No functional change intended; only a few sanity checks are slightly adjusted.
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/swapfile.c | 110 ++++++++++++++++++++++++++--------------------------------
1 file changed, 49 insertions(+), 61 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8d3d22c463f3..2d16aa89a4fd 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -411,20 +411,7 @@ static inline unsigned int cluster_offset(struct swap_info_struct *si,
return cluster_index(si, ci) * SWAPFILE_CLUSTER;
}
-static struct swap_table *swap_table_alloc(gfp_t gfp)
-{
- struct folio *folio;
-
- if (!SWP_TABLE_USE_PAGE)
- return kmem_cache_zalloc(swap_table_cachep, gfp);
-
- folio = folio_alloc(gfp | __GFP_ZERO, 0);
- if (folio)
- return folio_address(folio);
- return NULL;
-}
-
-static void swap_table_free_folio_rcu_cb(struct rcu_head *head)
+static void swap_cluster_free_table_folio_rcu_cb(struct rcu_head *head)
{
struct folio *folio;
@@ -432,15 +419,46 @@ static void swap_table_free_folio_rcu_cb(struct rcu_head *head)
folio_put(folio);
}
-static void swap_table_free(struct swap_table *table)
+static void swap_cluster_free_table(struct swap_cluster_info *ci)
{
+ struct swap_table *table;
+
+ table = (struct swap_table *)rcu_dereference_protected(ci->table, true);
+ if (!table)
+ return;
+
+ rcu_assign_pointer(ci->table, NULL);
if (!SWP_TABLE_USE_PAGE) {
kmem_cache_free(swap_table_cachep, table);
return;
}
call_rcu(&(folio_page(virt_to_folio(table), 0)->rcu_head),
- swap_table_free_folio_rcu_cb);
+ swap_cluster_free_table_folio_rcu_cb);
+}
+
+static int swap_cluster_alloc_table(struct swap_cluster_info *ci, gfp_t gfp)
+{
+ struct swap_table *table = NULL;
+ struct folio *folio;
+
+ /* The cluster must be empty and not on any list during allocation. */
+ VM_WARN_ON_ONCE(ci->flags || !cluster_is_empty(ci));
+ if (rcu_access_pointer(ci->table))
+ return 0;
+
+ if (SWP_TABLE_USE_PAGE) {
+ folio = folio_alloc(gfp | __GFP_ZERO, 0);
+ if (folio)
+ table = folio_address(folio);
+ } else {
+ table = kmem_cache_zalloc(swap_table_cachep, gfp);
+ }
+ if (!table)
+ return -ENOMEM;
+
+ rcu_assign_pointer(ci->table, table);
+ return 0;
}
/*
@@ -471,27 +489,15 @@ static void swap_cluster_assert_empty(struct swap_cluster_info *ci,
WARN_ON_ONCE(nr == SWAPFILE_CLUSTER && ci->extend_table);
}
-static void swap_cluster_free_table(struct swap_cluster_info *ci)
-{
- struct swap_table *table;
-
- /* Only empty cluster's table is allow to be freed */
- lockdep_assert_held(&ci->lock);
- table = (void *)rcu_dereference_protected(ci->table, true);
- rcu_assign_pointer(ci->table, NULL);
-
- swap_table_free(table);
-}
-
/*
* Allocate swap table for one cluster. Attempt an atomic allocation first,
* then fallback to sleeping allocation.
*/
static struct swap_cluster_info *
-swap_cluster_alloc_table(struct swap_info_struct *si,
+swap_cluster_populate(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
- struct swap_table *table;
+ int ret;
/*
* Only cluster isolation from the allocator does table allocation.
@@ -502,14 +508,9 @@ swap_cluster_alloc_table(struct swap_info_struct *si,
lockdep_assert_held(&si->global_cluster_lock);
lockdep_assert_held(&ci->lock);
- /* The cluster must be free and was just isolated from the free list. */
- VM_WARN_ON_ONCE(ci->flags || !cluster_is_empty(ci));
-
- table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (table) {
- rcu_assign_pointer(ci->table, table);
+ if (!swap_cluster_alloc_table(ci, __GFP_HIGH | __GFP_NOMEMALLOC |
+ __GFP_NOWARN))
return ci;
- }
/*
* Try a sleep allocation. Each isolated free cluster may cause
@@ -521,7 +522,8 @@ swap_cluster_alloc_table(struct swap_info_struct *si,
spin_unlock(&si->global_cluster_lock);
local_unlock(&percpu_swap_cluster.lock);
- table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | GFP_KERNEL);
+ ret = swap_cluster_alloc_table(ci, __GFP_HIGH | __GFP_NOMEMALLOC |
+ GFP_KERNEL);
/*
* Back to atomic context. We might have migrated to a new CPU with a
@@ -536,20 +538,11 @@ swap_cluster_alloc_table(struct swap_info_struct *si,
spin_lock(&si->global_cluster_lock);
spin_lock(&ci->lock);
- /* Nothing except this helper should touch a dangling empty cluster. */
- if (WARN_ON_ONCE(cluster_table_is_alloced(ci))) {
- if (table)
- swap_table_free(table);
- return ci;
- }
-
- if (!table) {
+ if (ret) {
move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
spin_unlock(&ci->lock);
return NULL;
}
-
- rcu_assign_pointer(ci->table, table);
return ci;
}
@@ -621,12 +614,11 @@ static struct swap_cluster_info *isolate_lock_cluster(
}
spin_unlock(&si->lock);
- if (found && !cluster_table_is_alloced(found)) {
- /* Only an empty free cluster's swap table can be freed. */
- VM_WARN_ON_ONCE(flags != CLUSTER_FLAG_FREE);
+ /* Cluster's table is freed when and only when it's on the free list. */
+ if (found && flags == CLUSTER_FLAG_FREE) {
VM_WARN_ON_ONCE(list != &si->free_clusters);
- VM_WARN_ON_ONCE(!cluster_is_empty(found));
- return swap_cluster_alloc_table(si, found);
+ VM_WARN_ON_ONCE(cluster_table_is_alloced(found));
+ return swap_cluster_populate(si, found);
}
return found;
@@ -769,7 +761,6 @@ static int swap_cluster_setup_bad_slot(struct swap_info_struct *si,
unsigned int ci_off = offset % SWAPFILE_CLUSTER;
unsigned long idx = offset / SWAPFILE_CLUSTER;
struct swap_cluster_info *ci;
- struct swap_table *table;
int ret = 0;
/* si->max may got shrunk by swap swap_activate() */
@@ -790,12 +781,9 @@ static int swap_cluster_setup_bad_slot(struct swap_info_struct *si,
}
ci = cluster_info + idx;
- if (!ci->table) {
- table = swap_table_alloc(GFP_KERNEL);
- if (!table)
- return -ENOMEM;
- rcu_assign_pointer(ci->table, table);
- }
+ /* Need to allocate swap table first for initial bad slot marking. */
+ if (!ci->count && swap_cluster_alloc_table(ci, GFP_KERNEL))
+ return -ENOMEM;
spin_lock(&ci->lock);
/* Check for duplicated bad swap slots. */
if (__swap_table_xchg(ci, ci_off, SWP_TB_BAD) != SWP_TB_NULL) {
@@ -2992,7 +2980,7 @@ static void free_swap_cluster_info(struct swap_cluster_info *cluster_info,
ci = cluster_info + i;
/* Cluster with bad marks count will have a remaining table */
spin_lock(&ci->lock);
- if (rcu_dereference_protected(ci->table, true)) {
+ if (cluster_table_is_alloced(ci)) {
swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, true);
swap_cluster_free_table(ci);
}
--
2.53.0
next prev parent reply other threads:[~2026-04-21 6:17 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-21 6:16 [PATCH v3 00/12] mm, swap: swap table phase IV: unify allocation and reduce static metadata Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 01/12] mm, swap: simplify swap cache allocation helper Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 02/12] mm, swap: move common swap cache operations into standalone helpers Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 03/12] mm/huge_memory: move THP gfp limit helper into header Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 04/12] mm, swap: add support for stable large allocation in swap cache directly Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 05/12] mm, swap: unify large folio allocation Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 06/12] mm/memcg, swap: tidy up cgroup v1 memsw swap helpers Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 07/12] mm, swap: support flexible batch freeing of slots in different memcgs Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 08/12] mm, swap: delay and unify memcg lookup and charging for swapin Kairui Song via B4 Relay
2026-04-21 6:16 ` Kairui Song via B4 Relay [this message]
2026-04-21 6:16 ` [PATCH v3 10/12] mm/memcg, swap: store cgroup id in cluster table directly Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 11/12] mm/memcg: remove no longer used swap cgroup array Kairui Song via B4 Relay
2026-04-21 6:16 ` [PATCH v3 12/12] mm, swap: merge zeromap into swap table Kairui Song via B4 Relay
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260421-swap-table-p4-v3-9-2f23759a76bc@tencent.com \
--to=devnull+kasong.tencent.com@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=axelrasmussen@google.com \
--cc=baohua@kernel.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=bhe@redhat.com \
--cc=cgroups@vger.kernel.org \
--cc=chengming.zhou@linux.dev \
--cc=chrisl@kernel.org \
--cc=david@kernel.org \
--cc=dev.jain@arm.com \
--cc=hannes@cmpxchg.org \
--cc=hughd@google.com \
--cc=kasong@tencent.com \
--cc=lance.yang@linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ljs@kernel.org \
--cc=mhocko@kernel.org \
--cc=mhocko@suse.com \
--cc=muchun.song@linux.dev \
--cc=nphamcs@gmail.com \
--cc=roman.gushchin@linux.dev \
--cc=shakeel.butt@linux.dev \
--cc=shikemeng@huaweicloud.com \
--cc=surenb@google.com \
--cc=yosry@kernel.org \
--cc=youngjun.park@lge.com \
--cc=zhengqi.arch@bytedance.com \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox