From: Nhat Pham <nphamcs@gmail.com>
To: linux-mm@kvack.org
Cc: akpm@linux-foundation.org, hannes@cmpxchg.org, hughd@google.com,
yosry.ahmed@linux.dev, mhocko@kernel.org,
roman.gushchin@linux.dev, shakeel.butt@linux.dev,
muchun.song@linux.dev, len.brown@intel.com,
chengming.zhou@linux.dev, kasong@tencent.com, chrisl@kernel.org,
huang.ying.caritas@gmail.com, ryan.roberts@arm.com,
shikemeng@huaweicloud.com, viro@zeniv.linux.org.uk,
baohua@kernel.org, bhe@redhat.com, osalvador@suse.de,
lorenzo.stoakes@oracle.com, christophe.leroy@csgroup.eu,
pavel@kernel.org, kernel-team@meta.com,
linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
linux-pm@vger.kernel.org, peterx@redhat.com, riel@surriel.com,
joshua.hahnjy@gmail.com, npache@redhat.com, gourry@gourry.net,
axelrasmussen@google.com, yuanchu@google.com, weixugc@google.com,
rafael@kernel.org, jannh@google.com, pfalcato@suse.de,
zhengqi.arch@bytedance.com
Subject: [PATCH v3 18/20] memcg: swap: only charge physical swap slots
Date: Sun, 8 Feb 2026 13:58:31 -0800
Message-ID: <20260208215839.87595-19-nphamcs@gmail.com>
In-Reply-To: <20260208215839.87595-1-nphamcs@gmail.com>
Now that zswap and the zero-filled swap page optimization no longer
take up any physical swap space, we should not charge towards the swap
usage and limits of the memcg in these cases. Only record the memcg id
at virtual swap slot allocation time, and defer physical swap charging
(i.e., towards memory.swap.current) until the virtual swap slot is
backed by an actual physical swap slot (on zswap store failure fallback
or on zswap writeback).
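The resulting charging lifecycle looks as follows (a simplified sketch
of the calls touched by this patch; error paths omitted):

	folio_alloc_swap():
	    mem_cgroup_record_swap()      /* record memcg id; no charge */
	vswap_alloc_swap_slot():          /* zswap fallback or writeback */
	    mem_cgroup_try_charge_swap()  /* charge memory.swap.current */
	physical swap slot release:
	    mem_cgroup_uncharge_swap()    /* uncharge physical swap */
	vswap_free():
	    mem_cgroup_clear_swap()       /* drop the recorded memcg id */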
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
include/linux/swap.h | 16 +++++++++
mm/memcontrol-v1.c | 6 ++++
mm/memcontrol.c | 83 ++++++++++++++++++++++++++++++++------------
mm/vswap.c | 39 +++++++++------------
4 files changed, 98 insertions(+), 46 deletions(-)
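Note on memcg id lifetimes: the id references taken in
__mem_cgroup_record_swap() are held for the lifetime of the virtual
swap slot and are only dropped in __mem_cgroup_clear_swap(), so the
id-based lookups in the charge/uncharge paths below need no pinning
beyond RCU. Roughly (illustrative snippet mirroring the code in this
patch):

	rcu_read_lock();
	memcg = mem_cgroup_from_id(lookup_swap_cgroup_id(entry));
	rcu_read_unlock();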
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9cd45eab313f8..a30d382fb5ee1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -613,6 +613,22 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
#endif
#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
+void __mem_cgroup_record_swap(struct folio *folio, swp_entry_t entry);
+static inline void mem_cgroup_record_swap(struct folio *folio,
+ swp_entry_t entry)
+{
+ if (!mem_cgroup_disabled())
+ __mem_cgroup_record_swap(folio, entry);
+}
+
+void __mem_cgroup_clear_swap(swp_entry_t entry, unsigned int nr_pages);
+static inline void mem_cgroup_clear_swap(swp_entry_t entry,
+ unsigned int nr_pages)
+{
+ if (!mem_cgroup_disabled())
+ __mem_cgroup_clear_swap(entry, nr_pages);
+}
+
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 6eed14bff7426..4580a034dcf72 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -680,6 +680,12 @@ void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
* memory+swap charge, drop the swap entry duplicate.
*/
mem_cgroup_uncharge_swap(entry, nr_pages);
+
+ /*
+ * Clear the cgroup association now to prevent double memsw
+ * uncharging when the backends are released later.
+ */
+ mem_cgroup_clear_swap(entry, nr_pages);
}
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2ba5811e7edba..50be8066bebec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5172,6 +5172,49 @@ int __init mem_cgroup_init(void)
}
#ifdef CONFIG_SWAP
+/**
+ * __mem_cgroup_record_swap - record the folio's cgroup for the swap entries.
+ * @folio: folio being swapped out.
+ * @entry: the first swap entry in the range.
+ */
+void __mem_cgroup_record_swap(struct folio *folio, swp_entry_t entry)
+{
+ unsigned int nr_pages = folio_nr_pages(folio);
+ struct mem_cgroup *memcg;
+
+ /* Recording will be done by memcg1_swapout(). */
+ if (do_memsw_account())
+ return;
+
+ memcg = folio_memcg(folio);
+
+ VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
+ if (!memcg)
+ return;
+
+ memcg = mem_cgroup_id_get_online(memcg);
+ if (nr_pages > 1)
+ mem_cgroup_id_get_many(memcg, nr_pages - 1);
+ swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
+}
+
+/**
+ * __mem_cgroup_clear_swap - clear cgroup information of the swap entries.
+ * @entry: the first swap entry in the range.
+ * @nr_pages: number of swap entries in the range.
+ */
+void __mem_cgroup_clear_swap(swp_entry_t entry, unsigned int nr_pages)
+{
+ unsigned short id = swap_cgroup_clear(entry, nr_pages);
+ struct mem_cgroup *memcg;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_id(id);
+ if (memcg)
+ mem_cgroup_id_put_many(memcg, nr_pages);
+ rcu_read_unlock();
+}
+
/**
* __mem_cgroup_try_charge_swap - try charging swap space for a folio
* @folio: folio being added to swap
@@ -5190,34 +5233,24 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
if (do_memsw_account())
return 0;
- memcg = folio_memcg(folio);
-
- VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
- if (!memcg)
- return 0;
-
- if (!entry.val) {
- memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
- return 0;
- }
-
- memcg = mem_cgroup_id_get_online(memcg);
+	/*
+	 * The cgroup was already recorded at virtual swap slot
+	 * allocation time. The virtual swap slot holds a reference
+	 * to the memcg, so this lookup is safe.
+	 */
+ rcu_read_lock();
+ memcg = mem_cgroup_from_id(lookup_swap_cgroup_id(entry));
+ rcu_read_unlock();
if (!mem_cgroup_is_root(memcg) &&
!page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
memcg_memory_event(memcg, MEMCG_SWAP_MAX);
memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
- mem_cgroup_id_put(memcg);
return -ENOMEM;
}
- /* Get references for the tail pages, too */
- if (nr_pages > 1)
- mem_cgroup_id_get_many(memcg, nr_pages - 1);
mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
- swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
-
return 0;
}
@@ -5231,7 +5264,8 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
struct mem_cgroup *memcg;
unsigned short id;
- id = swap_cgroup_clear(entry, nr_pages);
+ id = lookup_swap_cgroup_id(entry);
+
rcu_read_lock();
memcg = mem_cgroup_from_id(id);
if (memcg) {
@@ -5242,7 +5276,6 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
page_counter_uncharge(&memcg->swap, nr_pages);
}
mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
- mem_cgroup_id_put_many(memcg, nr_pages);
}
rcu_read_unlock();
}
@@ -5251,14 +5284,18 @@ static bool mem_cgroup_may_zswap(struct mem_cgroup *original_memcg);
long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
- long nr_swap_pages, nr_zswap_pages = 0;
+ long nr_swap_pages;
if (zswap_is_enabled() && (mem_cgroup_disabled() || do_memsw_account() ||
mem_cgroup_may_zswap(memcg))) {
- nr_zswap_pages = PAGE_COUNTER_MAX;
+		/*
+		 * No need to check the memcg's swap limits, since zswap
+		 * usage is not charged towards swap consumption.
+		 */
+ return PAGE_COUNTER_MAX;
}
- nr_swap_pages = max_t(long, nr_zswap_pages, get_nr_swap_pages());
+ nr_swap_pages = get_nr_swap_pages();
if (mem_cgroup_disabled() || do_memsw_account())
return nr_swap_pages;
for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
diff --git a/mm/vswap.c b/mm/vswap.c
index 7563107eb8eee..2a071d5ae173c 100644
--- a/mm/vswap.c
+++ b/mm/vswap.c
@@ -543,6 +543,7 @@ void vswap_rmap_set(struct swap_cluster_info *ci, swp_slot_t slot,
struct vswap_cluster *cluster = NULL;
struct swp_desc *desc;
unsigned long flush_nr, phys_swap_start = 0, phys_swap_end = 0;
+ unsigned long phys_swap_released = 0;
unsigned int phys_swap_type = 0;
bool need_flushing_phys_swap = false;
swp_slot_t flush_slot;
@@ -572,6 +573,7 @@ void vswap_rmap_set(struct swap_cluster_info *ci, swp_slot_t slot,
if (desc->type == VSWAP_ZSWAP && desc->zswap_entry) {
zswap_entry_free(desc->zswap_entry);
} else if (desc->type == VSWAP_SWAPFILE) {
+ phys_swap_released++;
if (!phys_swap_start) {
/* start a new contiguous range of phys swap */
phys_swap_start = swp_slot_offset(desc->slot);
@@ -602,6 +604,9 @@ void vswap_rmap_set(struct swap_cluster_info *ci, swp_slot_t slot,
flush_nr = phys_swap_end - phys_swap_start;
swap_slot_free_nr(flush_slot, flush_nr);
}
+
+ if (phys_swap_released)
+ mem_cgroup_uncharge_swap(entry, phys_swap_released);
}
/*
@@ -629,7 +634,7 @@ static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
spin_unlock(&cluster->lock);
release_backing(entry, 1);
- mem_cgroup_uncharge_swap(entry, 1);
+ mem_cgroup_clear_swap(entry, 1);
/* erase forward mapping and release the virtual slot for reallocation */
spin_lock(&cluster->lock);
@@ -644,9 +649,6 @@ static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
*/
int folio_alloc_swap(struct folio *folio)
{
- struct vswap_cluster *cluster = NULL;
- int i, nr = folio_nr_pages(folio);
- struct swp_desc *desc;
swp_entry_t entry;
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -656,25 +658,7 @@ int folio_alloc_swap(struct folio *folio)
if (!entry.val)
return -ENOMEM;
- /*
- * XXX: for now, we charge towards the memory cgroup's swap limit on virtual
- * swap slots allocation. This will be changed soon - we will only charge on
- * physical swap slots allocation.
- */
- if (mem_cgroup_try_charge_swap(folio, entry)) {
- rcu_read_lock();
- for (i = 0; i < nr; i++) {
- desc = vswap_iter(&cluster, entry.val + i);
- VM_WARN_ON(!desc);
- vswap_free(cluster, desc, (swp_entry_t){ entry.val + i });
- }
- spin_unlock(&cluster->lock);
- rcu_read_unlock();
- atomic_add(nr, &vswap_alloc_reject);
- entry.val = 0;
- return -ENOMEM;
- }
-
+ mem_cgroup_record_swap(folio, entry);
swap_cache_add_folio(folio, entry, NULL);
return 0;
@@ -716,6 +700,15 @@ bool vswap_alloc_swap_slot(struct folio *folio)
if (!slot.val)
return false;
+ if (mem_cgroup_try_charge_swap(folio, entry)) {
+		/*
+		 * The backing type of the virtual swap slot has not been
+		 * updated yet, so simply free the physical swap slots here.
+		 */
+ swap_slot_free_nr(slot, nr);
+ return false;
+ }
+
/* establish the vrtual <-> physical swap slots linkages. */
si = __swap_slot_to_info(slot);
ci = swap_cluster_lock(si, swp_slot_offset(slot));
--
2.47.3