From: Nhat Pham <nphamcs@gmail.com>
To: linux-mm@kvack.org
Cc: akpm@linux-foundation.org, hannes@cmpxchg.org, hughd@google.com,
yosry.ahmed@linux.dev, mhocko@kernel.org,
roman.gushchin@linux.dev, shakeel.butt@linux.dev,
muchun.song@linux.dev, len.brown@intel.com,
chengming.zhou@linux.dev, kasong@tencent.com, chrisl@kernel.org,
huang.ying.caritas@gmail.com, ryan.roberts@arm.com,
shikemeng@huaweicloud.com, viro@zeniv.linux.org.uk,
baohua@kernel.org, bhe@redhat.com, osalvador@suse.de,
lorenzo.stoakes@oracle.com, christophe.leroy@csgroup.eu,
pavel@kernel.org, kernel-team@meta.com,
linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
linux-pm@vger.kernel.org, peterx@redhat.com, riel@surriel.com,
joshua.hahnjy@gmail.com, npache@redhat.com, gourry@gourry.net,
axelrasmussen@google.com, yuanchu@google.com, weixugc@google.com,
rafael@kernel.org, jannh@google.com, pfalcato@suse.de,
zhengqi.arch@bytedance.com
Subject: [PATCH v3 12/20] swap: implement the swap_cgroup API using virtual swap
Date: Sun, 8 Feb 2026 13:58:25 -0800 [thread overview]
Message-ID: <20260208215839.87595-13-nphamcs@gmail.com> (raw)
In-Reply-To: <20260208215839.87595-1-nphamcs@gmail.com>
Once we decouple a swap entry from its backing store via the virtual
swap, we can no longer statically allocate an array to store the swap
entries' cgroup information. Move it to the swap descriptor.
Note that the memory overhead for swap cgroup information is now on
demand, i.e., dynamically incurred when the virtual swap cluster is
allocated. This helps reduce the memory overhead in a huge but
sparsely used swap space.
For instance, a 2 TB swapfile consists of 536870912 swap slots, each
incurring 2 bytes of overhead for swap cgroup, for a total of 1 GB. If
we only utilize 10% of the swapfile, we will save 900 MB.
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
include/linux/swap_cgroup.h | 13 ---
mm/Makefile | 3 -
mm/swap_cgroup.c | 174 ------------------------------------
mm/swapfile.c | 7 --
mm/vswap.c | 95 ++++++++++++++++++++
5 files changed, 95 insertions(+), 197 deletions(-)
delete mode 100644 mm/swap_cgroup.c
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index 91cdf12190a03..a2abb4d6fa085 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -9,8 +9,6 @@
extern void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent);
extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
-extern int swap_cgroup_swapon(int type, unsigned long max_pages);
-extern void swap_cgroup_swapoff(int type);
#else
@@ -31,17 +29,6 @@ unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
return 0;
}
-static inline int
-swap_cgroup_swapon(int type, unsigned long max_pages)
-{
- return 0;
-}
-
-static inline void swap_cgroup_swapoff(int type)
-{
- return;
-}
-
#endif
#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/mm/Makefile b/mm/Makefile
index 67fa4586e7e18..a7538784191bf 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -103,9 +103,6 @@ obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
obj-$(CONFIG_LIVEUPDATE) += memfd_luo.o
obj-$(CONFIG_MEMCG_V1) += memcontrol-v1.o
obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
-ifdef CONFIG_SWAP
-obj-$(CONFIG_MEMCG) += swap_cgroup.o
-endif
obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
obj-$(CONFIG_GUP_TEST) += gup_test.o
obj-$(CONFIG_DMAPOOL_TEST) += dmapool_test.o
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
deleted file mode 100644
index 77ce1d66c318d..0000000000000
--- a/mm/swap_cgroup.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/swap_cgroup.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-
-#include <linux/swapops.h> /* depends on mm.h include */
-
-static DEFINE_MUTEX(swap_cgroup_mutex);
-
-/* Pack two cgroup id (short) of two entries in one swap_cgroup (atomic_t) */
-#define ID_PER_SC (sizeof(struct swap_cgroup) / sizeof(unsigned short))
-#define ID_SHIFT (BITS_PER_TYPE(unsigned short))
-#define ID_MASK (BIT(ID_SHIFT) - 1)
-struct swap_cgroup {
- atomic_t ids;
-};
-
-struct swap_cgroup_ctrl {
- struct swap_cgroup *map;
-};
-
-static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
-
-static unsigned short __swap_cgroup_id_lookup(struct swap_cgroup *map,
- pgoff_t offset)
-{
- unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
- unsigned int old_ids = atomic_read(&map[offset / ID_PER_SC].ids);
-
- BUILD_BUG_ON(!is_power_of_2(ID_PER_SC));
- BUILD_BUG_ON(sizeof(struct swap_cgroup) != sizeof(atomic_t));
-
- return (old_ids >> shift) & ID_MASK;
-}
-
-static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map,
- pgoff_t offset,
- unsigned short new_id)
-{
- unsigned short old_id;
- struct swap_cgroup *sc = &map[offset / ID_PER_SC];
- unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
- unsigned int new_ids, old_ids = atomic_read(&sc->ids);
-
- do {
- old_id = (old_ids >> shift) & ID_MASK;
- new_ids = (old_ids & ~(ID_MASK << shift));
- new_ids |= ((unsigned int)new_id) << shift;
- } while (!atomic_try_cmpxchg(&sc->ids, &old_ids, new_ids));
-
- return old_id;
-}
-
-/**
- * swap_cgroup_record - record mem_cgroup for a set of swap entries.
- * These entries must belong to one single folio, and that folio
- * must be being charged for swap space (swap out), and these
- * entries must not have been charged
- *
- * @folio: the folio that the swap entry belongs to
- * @id: mem_cgroup ID to be recorded
- * @ent: the first swap entry to be recorded
- */
-void swap_cgroup_record(struct folio *folio, unsigned short id,
- swp_entry_t ent)
-{
- unsigned int nr_ents = folio_nr_pages(folio);
- swp_slot_t slot = swp_entry_to_swp_slot(ent);
- struct swap_cgroup *map;
- pgoff_t offset, end;
- unsigned short old;
-
- offset = swp_slot_offset(slot);
- end = offset + nr_ents;
- map = swap_cgroup_ctrl[swp_slot_type(slot)].map;
-
- do {
- old = __swap_cgroup_id_xchg(map, offset, id);
- VM_BUG_ON(old);
- } while (++offset != end);
-}
-
-/**
- * swap_cgroup_clear - clear mem_cgroup for a set of swap entries.
- * These entries must be being uncharged from swap. They either
- * belongs to one single folio in the swap cache (swap in for
- * cgroup v1), or no longer have any users (slot freeing).
- *
- * @ent: the first swap entry to be recorded into
- * @nr_ents: number of swap entries to be recorded
- *
- * Returns the existing old value.
- */
-unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents)
-{
- swp_slot_t slot = swp_entry_to_swp_slot(ent);
- pgoff_t offset = swp_slot_offset(slot);
- pgoff_t end = offset + nr_ents;
- struct swap_cgroup *map;
- unsigned short old, iter = 0;
-
- map = swap_cgroup_ctrl[swp_slot_type(slot)].map;
-
- do {
- old = __swap_cgroup_id_xchg(map, offset, 0);
- if (!iter)
- iter = old;
- VM_BUG_ON(iter != old);
- } while (++offset != end);
-
- return old;
-}
-
-/**
- * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
- * @ent: swap entry to be looked up.
- *
- * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
- */
-unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
-{
- struct swap_cgroup_ctrl *ctrl;
- swp_slot_t slot = swp_entry_to_swp_slot(ent);
-
- if (mem_cgroup_disabled())
- return 0;
-
- ctrl = &swap_cgroup_ctrl[swp_slot_type(slot)];
- return __swap_cgroup_id_lookup(ctrl->map, swp_slot_offset(slot));
-}
-
-int swap_cgroup_swapon(int type, unsigned long max_pages)
-{
- struct swap_cgroup *map;
- struct swap_cgroup_ctrl *ctrl;
-
- if (mem_cgroup_disabled())
- return 0;
-
- BUILD_BUG_ON(sizeof(unsigned short) * ID_PER_SC !=
- sizeof(struct swap_cgroup));
- map = vzalloc(DIV_ROUND_UP(max_pages, ID_PER_SC) *
- sizeof(struct swap_cgroup));
- if (!map)
- goto nomem;
-
- ctrl = &swap_cgroup_ctrl[type];
- mutex_lock(&swap_cgroup_mutex);
- ctrl->map = map;
- mutex_unlock(&swap_cgroup_mutex);
-
- return 0;
-nomem:
- pr_info("couldn't allocate enough memory for swap_cgroup\n");
- pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
- return -ENOMEM;
-}
-
-void swap_cgroup_swapoff(int type)
-{
- struct swap_cgroup *map;
- struct swap_cgroup_ctrl *ctrl;
-
- if (mem_cgroup_disabled())
- return;
-
- mutex_lock(&swap_cgroup_mutex);
- ctrl = &swap_cgroup_ctrl[type];
- map = ctrl->map;
- ctrl->map = NULL;
- mutex_unlock(&swap_cgroup_mutex);
-
- vfree(map);
-}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 68ec5d9f05848..345877786e432 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2931,8 +2931,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
vfree(swap_map);
kvfree(zeromap);
free_cluster_info(cluster_info, maxpages);
- /* Destroy swap account information */
- swap_cgroup_swapoff(p->type);
inode = mapping->host;
@@ -3497,10 +3495,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap_unlock_inode;
}
- error = swap_cgroup_swapon(si->type, maxpages);
- if (error)
- goto bad_swap_unlock_inode;
-
error = setup_swap_map(si, swap_header, swap_map, maxpages);
if (error)
goto bad_swap_unlock_inode;
@@ -3605,7 +3599,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
si->global_cluster = NULL;
inode = NULL;
destroy_swap_extents(si);
- swap_cgroup_swapoff(si->type);
spin_lock(&swap_lock);
si->swap_file = NULL;
si->flags = 0;
diff --git a/mm/vswap.c b/mm/vswap.c
index 9bb733f00fd21..64747493ca9f7 100644
--- a/mm/vswap.c
+++ b/mm/vswap.c
@@ -41,6 +41,7 @@
* @zswap_entry: The zswap entry associated with this swap slot.
* @swap_cache: The folio in swap cache.
* @shadow: The shadow entry.
+ * @memcgid: The memcg id of the owning memcg, if any.
*/
struct swp_desc {
swp_slot_t slot;
@@ -49,6 +50,9 @@ struct swp_desc {
struct folio *swap_cache;
void *shadow;
};
+#ifdef CONFIG_MEMCG
+ unsigned short memcgid;
+#endif
};
#define VSWAP_CLUSTER_SHIFT HPAGE_PMD_ORDER
@@ -242,6 +246,9 @@ static void __vswap_alloc_from_cluster(struct vswap_cluster *cluster, int start)
desc = &cluster->descriptors[start + i];
desc->slot.val = 0;
desc->zswap_entry = NULL;
+#ifdef CONFIG_MEMCG
+ desc->memcgid = 0;
+#endif
}
cluster->count += nr;
}
@@ -1109,6 +1116,94 @@ bool zswap_empty(swp_entry_t swpentry)
}
#endif /* CONFIG_ZSWAP */
+#ifdef CONFIG_MEMCG
+static unsigned short vswap_cgroup_record(swp_entry_t entry,
+ unsigned short memcgid, unsigned int nr_ents)
+{
+ struct vswap_cluster *cluster = NULL;
+ struct swp_desc *desc;
+ unsigned short oldid, iter = 0;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < nr_ents; i++) {
+ desc = vswap_iter(&cluster, entry.val + i);
+ VM_WARN_ON(!desc);
+ oldid = desc->memcgid;
+ desc->memcgid = memcgid;
+ if (!iter)
+ iter = oldid;
+ VM_WARN_ON(iter != oldid);
+ }
+ spin_unlock(&cluster->lock);
+ rcu_read_unlock();
+
+ return oldid;
+}
+
+/**
+ * swap_cgroup_record - record mem_cgroup for a set of swap entries.
+ * These entries must belong to one single folio, and that folio
+ * must be being charged for swap space (swap out), and these
+ * entries must not have been charged
+ *
+ * @folio: the folio that the swap entry belongs to
+ * @memcgid: mem_cgroup ID to be recorded
+ * @entry: the first swap entry to be recorded
+ */
+void swap_cgroup_record(struct folio *folio, unsigned short memcgid,
+ swp_entry_t entry)
+{
+ unsigned short oldid =
+ vswap_cgroup_record(entry, memcgid, folio_nr_pages(folio));
+
+ VM_WARN_ON(oldid);
+}
+
+/**
+ * swap_cgroup_clear - clear mem_cgroup for a set of swap entries.
+ * These entries must be being uncharged from swap. They either
+ * belongs to one single folio in the swap cache (swap in for
+ * cgroup v1), or no longer have any users (slot freeing).
+ *
+ * @entry: the first swap entry to be recorded into
+ * @nr_ents: number of swap entries to be recorded
+ *
+ * Returns the existing old value.
+ */
+unsigned short swap_cgroup_clear(swp_entry_t entry, unsigned int nr_ents)
+{
+ return vswap_cgroup_record(entry, 0, nr_ents);
+}
+
+/**
+ * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
+ * @entry: swap entry to be looked up.
+ *
+ * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
+ */
+unsigned short lookup_swap_cgroup_id(swp_entry_t entry)
+{
+ struct vswap_cluster *cluster = NULL;
+ struct swp_desc *desc;
+ unsigned short ret;
+
+ /*
+ * Note that the virtual swap slot can be freed under us, for instance in
+ * the invocation of mem_cgroup_swapin_charge_folio. We need to wrap the
+ * entire lookup in RCU read-side critical section, and double check the
+ * existence of the swap descriptor.
+ */
+ rcu_read_lock();
+ desc = vswap_iter(&cluster, entry.val);
+ ret = desc ? desc->memcgid : 0;
+ if (cluster)
+ spin_unlock(&cluster->lock);
+ rcu_read_unlock();
+ return ret;
+}
+#endif /* CONFIG_MEMCG */
+
int vswap_init(void)
{
int i;
--
2.47.3
next prev parent reply other threads:[~2026-02-08 21:59 UTC|newest]
Thread overview: 52+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-08 21:58 [PATCH v3 00/20] Virtual Swap Space Nhat Pham
2026-02-08 21:58 ` [PATCH v3 01/20] mm/swap: decouple swap cache from physical swap infrastructure Nhat Pham
2026-02-08 22:26 ` [PATCH v3 00/20] Virtual Swap Space Nhat Pham
2026-02-10 17:59 ` Kairui Song
2026-02-10 18:52 ` Johannes Weiner
2026-02-10 19:11 ` Nhat Pham
2026-02-10 19:23 ` Nhat Pham
2026-02-12 5:07 ` Chris Li
2026-02-17 23:36 ` Nhat Pham
2026-02-10 21:58 ` Chris Li
2026-02-20 21:05 ` [PATCH] vswap: fix poor batching behavior of vswap free path Nhat Pham
2026-02-08 22:31 ` [PATCH v3 00/20] Virtual Swap Space Nhat Pham
2026-02-09 12:20 ` Chris Li
2026-02-10 2:36 ` Johannes Weiner
2026-02-10 21:24 ` Chris Li
2026-02-10 23:01 ` Johannes Weiner
2026-02-10 18:00 ` Nhat Pham
2026-02-10 23:17 ` Chris Li
2026-02-08 22:39 ` Nhat Pham
2026-02-09 2:22 ` [PATCH v3 01/20] mm/swap: decouple swap cache from physical swap infrastructure kernel test robot
2026-02-08 21:58 ` [PATCH v3 02/20] swap: rearrange the swap header file Nhat Pham
2026-02-08 21:58 ` [PATCH v3 03/20] mm: swap: add an abstract API for locking out swapoff Nhat Pham
2026-02-08 21:58 ` [PATCH v3 04/20] zswap: add new helpers for zswap entry operations Nhat Pham
2026-02-08 21:58 ` [PATCH v3 05/20] mm/swap: add a new function to check if a swap entry is in swap cached Nhat Pham
2026-02-08 21:58 ` [PATCH v3 06/20] mm: swap: add a separate type for physical swap slots Nhat Pham
2026-02-08 21:58 ` [PATCH v3 07/20] mm: create scaffolds for the new virtual swap implementation Nhat Pham
2026-02-08 21:58 ` [PATCH v3 08/20] zswap: prepare zswap for swap virtualization Nhat Pham
2026-02-08 21:58 ` [PATCH v3 09/20] mm: swap: allocate a virtual swap slot for each swapped out page Nhat Pham
2026-02-09 17:12 ` kernel test robot
2026-02-11 13:42 ` kernel test robot
2026-02-08 21:58 ` [PATCH v3 10/20] swap: move swap cache to virtual swap descriptor Nhat Pham
2026-02-08 21:58 ` [PATCH v3 11/20] zswap: move zswap entry management to the " Nhat Pham
2026-02-08 21:58 ` Nhat Pham [this message]
2026-02-08 21:58 ` [PATCH v3 13/20] swap: manage swap entry lifecycle at the virtual swap layer Nhat Pham
2026-02-08 21:58 ` [PATCH v3 14/20] mm: swap: decouple virtual swap slot from backing store Nhat Pham
2026-02-10 6:31 ` Dan Carpenter
2026-02-08 21:58 ` [PATCH v3 15/20] zswap: do not start zswap shrinker if there is no physical swap slots Nhat Pham
2026-02-08 21:58 ` [PATCH v3 16/20] swap: do not unnecesarily pin readahead swap entries Nhat Pham
2026-02-08 21:58 ` [PATCH v3 17/20] swapfile: remove zeromap bitmap Nhat Pham
2026-02-08 21:58 ` [PATCH v3 18/20] memcg: swap: only charge physical swap slots Nhat Pham
2026-02-09 2:01 ` kernel test robot
2026-02-09 2:12 ` kernel test robot
2026-02-08 21:58 ` [PATCH v3 19/20] swap: simplify swapoff using virtual swap Nhat Pham
2026-02-08 21:58 ` [PATCH v3 20/20] swapfile: replace the swap map with bitmaps Nhat Pham
2026-02-08 22:51 ` [PATCH v3 00/20] Virtual Swap Space Nhat Pham
2026-02-12 12:23 ` David Hildenbrand (Arm)
2026-02-12 17:29 ` Nhat Pham
2026-02-12 17:39 ` Nhat Pham
2026-02-12 20:11 ` David Hildenbrand (Arm)
2026-02-12 17:41 ` David Hildenbrand (Arm)
2026-02-12 17:45 ` Nhat Pham
2026-02-10 15:45 ` [syzbot ci] " syzbot ci
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260208215839.87595-13-nphamcs@gmail.com \
--to=nphamcs@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=axelrasmussen@google.com \
--cc=baohua@kernel.org \
--cc=bhe@redhat.com \
--cc=cgroups@vger.kernel.org \
--cc=chengming.zhou@linux.dev \
--cc=chrisl@kernel.org \
--cc=christophe.leroy@csgroup.eu \
--cc=gourry@gourry.net \
--cc=hannes@cmpxchg.org \
--cc=huang.ying.caritas@gmail.com \
--cc=hughd@google.com \
--cc=jannh@google.com \
--cc=joshua.hahnjy@gmail.com \
--cc=kasong@tencent.com \
--cc=kernel-team@meta.com \
--cc=len.brown@intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-pm@vger.kernel.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mhocko@kernel.org \
--cc=muchun.song@linux.dev \
--cc=npache@redhat.com \
--cc=osalvador@suse.de \
--cc=pavel@kernel.org \
--cc=peterx@redhat.com \
--cc=pfalcato@suse.de \
--cc=rafael@kernel.org \
--cc=riel@surriel.com \
--cc=roman.gushchin@linux.dev \
--cc=ryan.roberts@arm.com \
--cc=shakeel.butt@linux.dev \
--cc=shikemeng@huaweicloud.com \
--cc=viro@zeniv.linux.org.uk \
--cc=weixugc@google.com \
--cc=yosry.ahmed@linux.dev \
--cc=yuanchu@google.com \
--cc=zhengqi.arch@bytedance.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox