From: Nhat Pham <nphamcs@gmail.com>
To: linux-mm@kvack.org
Cc: akpm@linux-foundation.org, hannes@cmpxchg.org, hughd@google.com,
yosry.ahmed@linux.dev, mhocko@kernel.org,
roman.gushchin@linux.dev, shakeel.butt@linux.dev,
muchun.song@linux.dev, len.brown@intel.com,
chengming.zhou@linux.dev, kasong@tencent.com, chrisl@kernel.org,
huang.ying.caritas@gmail.com, ryan.roberts@arm.com,
shikemeng@huaweicloud.com, viro@zeniv.linux.org.uk,
baohua@kernel.org, bhe@redhat.com, osalvador@suse.de,
lorenzo.stoakes@oracle.com, christophe.leroy@csgroup.eu,
pavel@kernel.org, kernel-team@meta.com,
linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
linux-pm@vger.kernel.org, peterx@redhat.com, riel@surriel.com,
joshua.hahnjy@gmail.com, npache@redhat.com, gourry@gourry.net,
axelrasmussen@google.com, yuanchu@google.com, weixugc@google.com,
rafael@kernel.org, jannh@google.com, pfalcato@suse.de,
zhengqi.arch@bytedance.com
Subject: [PATCH v3 08/20] zswap: prepare zswap for swap virtualization
Date: Sun, 8 Feb 2026 13:58:21 -0800 [thread overview]
Message-ID: <20260208215839.87595-9-nphamcs@gmail.com> (raw)
In-Reply-To: <20260208215839.87595-1-nphamcs@gmail.com>
The zswap tree code, specifically the range partition logic, can no
longer easily be reused for the new virtual swap space design. Use a
simple unified zswap tree in the new implementation for now.
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
include/linux/zswap.h | 7 -----
mm/swapfile.c | 9 +-----
mm/zswap.c | 69 +++++++------------------------------------
3 files changed, 11 insertions(+), 74 deletions(-)
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 30c193a1207e1..1a04caf283dc8 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -28,8 +28,6 @@ unsigned long zswap_total_pages(void);
bool zswap_store(struct folio *folio);
int zswap_load(struct folio *folio);
void zswap_invalidate(swp_entry_t swp);
-int zswap_swapon(int type, unsigned long nr_pages);
-void zswap_swapoff(int type);
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
void zswap_lruvec_state_init(struct lruvec *lruvec);
void zswap_folio_swapin(struct folio *folio);
@@ -50,11 +48,6 @@ static inline int zswap_load(struct folio *folio)
}
static inline void zswap_invalidate(swp_entry_t swp) {}
-static inline int zswap_swapon(int type, unsigned long nr_pages)
-{
- return 0;
-}
-static inline void zswap_swapoff(int type) {}
static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
static inline void zswap_folio_swapin(struct folio *folio) {}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4b4126d4e2769..3f70df488c1da 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2970,7 +2970,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
arch_swap_invalidate_area(p->type);
- zswap_swapoff(p->type);
mutex_unlock(&swapon_mutex);
kfree(p->global_cluster);
p->global_cluster = NULL;
@@ -3613,10 +3612,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
}
}
- error = zswap_swapon(si->type, maxpages);
- if (error)
- goto bad_swap_unlock_inode;
-
/*
* Flush any pending IO and dirty mappings before we start using this
* swap device.
@@ -3625,7 +3620,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = inode_drain_writes(inode);
if (error) {
inode->i_flags &= ~S_SWAPFILE;
- goto free_swap_zswap;
+ goto bad_swap_unlock_inode;
}
mutex_lock(&swapon_mutex);
@@ -3648,8 +3643,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = 0;
goto out;
-free_swap_zswap:
- zswap_swapoff(si->type);
bad_swap_unlock_inode:
inode_unlock(inode);
bad_swap:
diff --git a/mm/zswap.c b/mm/zswap.c
index a5a3f068bd1a6..f7313261673ff 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -197,8 +197,6 @@ struct zswap_entry {
struct list_head lru;
};
-static struct xarray *zswap_trees[MAX_SWAPFILES];
-static unsigned int nr_zswap_trees[MAX_SWAPFILES];
/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
@@ -225,45 +223,35 @@ static bool zswap_has_pool;
* helpers and fwd declarations
**********************************/
-/* One swap address space for each 64M swap space */
-#define ZSWAP_ADDRESS_SPACE_SHIFT 14
-#define ZSWAP_ADDRESS_SPACE_PAGES (1 << ZSWAP_ADDRESS_SPACE_SHIFT)
-static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
-{
- return &zswap_trees[swp_type(swp)][swp_offset(swp)
- >> ZSWAP_ADDRESS_SPACE_SHIFT];
-}
+static DEFINE_XARRAY(zswap_tree);
+
+#define zswap_tree_index(entry) (entry.val)
static inline void *zswap_entry_store(swp_entry_t swpentry,
struct zswap_entry *entry)
{
- struct xarray *tree = swap_zswap_tree(swpentry);
- pgoff_t offset = swp_offset(swpentry);
+ pgoff_t offset = zswap_tree_index(swpentry);
- return xa_store(tree, offset, entry, GFP_KERNEL);
+ return xa_store(&zswap_tree, offset, entry, GFP_KERNEL);
}
static inline void *zswap_entry_load(swp_entry_t swpentry)
{
- struct xarray *tree = swap_zswap_tree(swpentry);
- pgoff_t offset = swp_offset(swpentry);
+ pgoff_t offset = zswap_tree_index(swpentry);
- return xa_load(tree, offset);
+ return xa_load(&zswap_tree, offset);
}
static inline void *zswap_entry_erase(swp_entry_t swpentry)
{
- struct xarray *tree = swap_zswap_tree(swpentry);
- pgoff_t offset = swp_offset(swpentry);
+ pgoff_t offset = zswap_tree_index(swpentry);
- return xa_erase(tree, offset);
+ return xa_erase(&zswap_tree, offset);
}
static inline bool zswap_empty(swp_entry_t swpentry)
{
- struct xarray *tree = swap_zswap_tree(swpentry);
-
- return xa_empty(tree);
+ return xa_empty(&zswap_tree);
}
#define zswap_pool_debug(msg, p) \
@@ -1691,43 +1679,6 @@ void zswap_invalidate(swp_entry_t swp)
zswap_entry_free(entry);
}
-int zswap_swapon(int type, unsigned long nr_pages)
-{
- struct xarray *trees, *tree;
- unsigned int nr, i;
-
- nr = DIV_ROUND_UP(nr_pages, ZSWAP_ADDRESS_SPACE_PAGES);
- trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
- if (!trees) {
- pr_err("alloc failed, zswap disabled for swap type %d\n", type);
- return -ENOMEM;
- }
-
- for (i = 0; i < nr; i++)
- xa_init(trees + i);
-
- nr_zswap_trees[type] = nr;
- zswap_trees[type] = trees;
- return 0;
-}
-
-void zswap_swapoff(int type)
-{
- struct xarray *trees = zswap_trees[type];
- unsigned int i;
-
- if (!trees)
- return;
-
- /* try_to_unuse() invalidated all the entries already */
- for (i = 0; i < nr_zswap_trees[type]; i++)
- WARN_ON_ONCE(!xa_empty(trees + i));
-
- kvfree(trees);
- nr_zswap_trees[type] = 0;
- zswap_trees[type] = NULL;
-}
-
/*********************************
* debugfs functions
**********************************/
--
2.47.3
next prev parent reply other threads:[~2026-02-08 21:59 UTC|newest]
Thread overview: 52+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-08 21:58 [PATCH v3 00/20] Virtual Swap Space Nhat Pham
2026-02-08 21:58 ` [PATCH v3 01/20] mm/swap: decouple swap cache from physical swap infrastructure Nhat Pham
2026-02-08 22:26 ` [PATCH v3 00/20] Virtual Swap Space Nhat Pham
2026-02-10 17:59 ` Kairui Song
2026-02-10 18:52 ` Johannes Weiner
2026-02-10 19:11 ` Nhat Pham
2026-02-10 19:23 ` Nhat Pham
2026-02-12 5:07 ` Chris Li
2026-02-17 23:36 ` Nhat Pham
2026-02-10 21:58 ` Chris Li
2026-02-20 21:05 ` [PATCH] vswap: fix poor batching behavior of vswap free path Nhat Pham
2026-02-08 22:31 ` [PATCH v3 00/20] Virtual Swap Space Nhat Pham
2026-02-09 12:20 ` Chris Li
2026-02-10 2:36 ` Johannes Weiner
2026-02-10 21:24 ` Chris Li
2026-02-10 23:01 ` Johannes Weiner
2026-02-10 18:00 ` Nhat Pham
2026-02-10 23:17 ` Chris Li
2026-02-08 22:39 ` Nhat Pham
2026-02-09 2:22 ` [PATCH v3 01/20] mm/swap: decouple swap cache from physical swap infrastructure kernel test robot
2026-02-08 21:58 ` [PATCH v3 02/20] swap: rearrange the swap header file Nhat Pham
2026-02-08 21:58 ` [PATCH v3 03/20] mm: swap: add an abstract API for locking out swapoff Nhat Pham
2026-02-08 21:58 ` [PATCH v3 04/20] zswap: add new helpers for zswap entry operations Nhat Pham
2026-02-08 21:58 ` [PATCH v3 05/20] mm/swap: add a new function to check if a swap entry is in the swap cache Nhat Pham
2026-02-08 21:58 ` [PATCH v3 06/20] mm: swap: add a separate type for physical swap slots Nhat Pham
2026-02-08 21:58 ` [PATCH v3 07/20] mm: create scaffolds for the new virtual swap implementation Nhat Pham
2026-02-08 21:58 ` Nhat Pham [this message]
2026-02-08 21:58 ` [PATCH v3 09/20] mm: swap: allocate a virtual swap slot for each swapped out page Nhat Pham
2026-02-09 17:12 ` kernel test robot
2026-02-11 13:42 ` kernel test robot
2026-02-08 21:58 ` [PATCH v3 10/20] swap: move swap cache to virtual swap descriptor Nhat Pham
2026-02-08 21:58 ` [PATCH v3 11/20] zswap: move zswap entry management to the " Nhat Pham
2026-02-08 21:58 ` [PATCH v3 12/20] swap: implement the swap_cgroup API using virtual swap Nhat Pham
2026-02-08 21:58 ` [PATCH v3 13/20] swap: manage swap entry lifecycle at the virtual swap layer Nhat Pham
2026-02-08 21:58 ` [PATCH v3 14/20] mm: swap: decouple virtual swap slot from backing store Nhat Pham
2026-02-10 6:31 ` Dan Carpenter
2026-02-08 21:58 ` [PATCH v3 15/20] zswap: do not start zswap shrinker if there is no physical swap slots Nhat Pham
2026-02-08 21:58 ` [PATCH v3 16/20] swap: do not unnecessarily pin readahead swap entries Nhat Pham
2026-02-08 21:58 ` [PATCH v3 17/20] swapfile: remove zeromap bitmap Nhat Pham
2026-02-08 21:58 ` [PATCH v3 18/20] memcg: swap: only charge physical swap slots Nhat Pham
2026-02-09 2:01 ` kernel test robot
2026-02-09 2:12 ` kernel test robot
2026-02-08 21:58 ` [PATCH v3 19/20] swap: simplify swapoff using virtual swap Nhat Pham
2026-02-08 21:58 ` [PATCH v3 20/20] swapfile: replace the swap map with bitmaps Nhat Pham
2026-02-08 22:51 ` [PATCH v3 00/20] Virtual Swap Space Nhat Pham
2026-02-12 12:23 ` David Hildenbrand (Arm)
2026-02-12 17:29 ` Nhat Pham
2026-02-12 17:39 ` Nhat Pham
2026-02-12 20:11 ` David Hildenbrand (Arm)
2026-02-12 17:41 ` David Hildenbrand (Arm)
2026-02-12 17:45 ` Nhat Pham
2026-02-10 15:45 ` [syzbot ci] " syzbot ci
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260208215839.87595-9-nphamcs@gmail.com \
--to=nphamcs@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=axelrasmussen@google.com \
--cc=baohua@kernel.org \
--cc=bhe@redhat.com \
--cc=cgroups@vger.kernel.org \
--cc=chengming.zhou@linux.dev \
--cc=chrisl@kernel.org \
--cc=christophe.leroy@csgroup.eu \
--cc=gourry@gourry.net \
--cc=hannes@cmpxchg.org \
--cc=huang.ying.caritas@gmail.com \
--cc=hughd@google.com \
--cc=jannh@google.com \
--cc=joshua.hahnjy@gmail.com \
--cc=kasong@tencent.com \
--cc=kernel-team@meta.com \
--cc=len.brown@intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-pm@vger.kernel.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mhocko@kernel.org \
--cc=muchun.song@linux.dev \
--cc=npache@redhat.com \
--cc=osalvador@suse.de \
--cc=pavel@kernel.org \
--cc=peterx@redhat.com \
--cc=pfalcato@suse.de \
--cc=rafael@kernel.org \
--cc=riel@surriel.com \
--cc=roman.gushchin@linux.dev \
--cc=ryan.roberts@arm.com \
--cc=shakeel.butt@linux.dev \
--cc=shikemeng@huaweicloud.com \
--cc=viro@zeniv.linux.org.uk \
--cc=weixugc@google.com \
--cc=yosry.ahmed@linux.dev \
--cc=yuanchu@google.com \
--cc=zhengqi.arch@bytedance.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox