From: Joshua Hahn <joshua.hahnjy@gmail.com>
To: Minchan Kim <minchan@kernel.org>,
Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
Yosry Ahmed <yosry.ahmed@linux.dev>,
Nhat Pham <hoangnhat.pham@linux.dev>,
Nhat Pham <nphamcs@gmail.com>,
Chengming Zhou <chengming.zhou@linux.dev>,
Andrew Morton <akpm@linux-foundation.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
kernel-team@meta.com
Subject: [PATCH 5/8] mm/zsmalloc,zswap: Redirect zswap_entry->objcg to zpdesc
Date: Thu, 26 Feb 2026 11:29:28 -0800 [thread overview]
Message-ID: <20260226192936.3190275-6-joshua.hahnjy@gmail.com> (raw)
In-Reply-To: <20260226192936.3190275-1-joshua.hahnjy@gmail.com>
Now that obj_cgroups are tracked in zpdesc, redirect the zswap layer to
use the pointer stored in the zpdesc and remove the pointer in
struct zswap_entry.
This offsets the temporary memory increase caused by the duplicate
storage of the obj_cgroup pointer and results in a net zero memory
footprint change. The lifetime and charging of the obj_cgroup is still
handled in the zswap layer.
Remove mem_cgroup_from_entry, which has no more callers.
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
---
include/linux/zsmalloc.h | 1 +
mm/zsmalloc.c | 29 +++++++++++++++++++++++
mm/zswap.c | 51 ++++++++++++++++++----------------------
3 files changed, 53 insertions(+), 28 deletions(-)
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 22f3baa13f24..05b2b163a427 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -38,6 +38,7 @@ unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);
unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
+struct obj_cgroup *zs_lookup_objcg(struct zs_pool *pool, unsigned long handle);
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e5ae9a0fc78a..067215a6ddcc 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -977,6 +977,30 @@ static void migrate_obj_objcg(unsigned long used_obj, unsigned long free_obj,
zpdesc_set_obj_cgroup(d_zpdesc, d_obj_idx, size, objcg);
zpdesc_set_obj_cgroup(s_zpdesc, s_obj_idx, size, NULL);
}
+
+struct obj_cgroup *zs_lookup_objcg(struct zs_pool *pool, unsigned long handle)
+{
+ unsigned long obj;
+ struct zpdesc *zpdesc;
+ struct zspage *zspage;
+ struct size_class *class;
+ struct obj_cgroup *objcg;
+ unsigned int obj_idx;
+
+ read_lock(&pool->lock);
+ obj = handle_to_obj(handle);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+
+ zspage = get_zspage(zpdesc);
+ zspage_read_lock(zspage);
+ read_unlock(&pool->lock);
+
+ class = zspage_class(pool, zspage);
+ objcg = zpdesc_obj_cgroup(zpdesc, obj_idx, class->size);
+ zspage_read_unlock(zspage);
+
+ return objcg;
+}
#else
static inline struct obj_cgroup *zpdesc_obj_cgroup(struct zpdesc *zpdesc,
unsigned int offset,
@@ -996,6 +1020,11 @@ static bool alloc_zspage_objcgs(struct size_class *class, gfp_t gfp,
static void migrate_obj_objcg(unsigned long used_obj, unsigned long free_obj,
int size) {}
+
+struct obj_cgroup *zs_lookup_objcg(struct zs_pool *pool, unsigned long handle)
+{
+ return NULL;
+}
#endif
static void create_page_chain(struct size_class *class, struct zspage *zspage,
diff --git a/mm/zswap.c b/mm/zswap.c
index 1e2d60f47919..55161a5c9d4c 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -193,7 +193,6 @@ struct zswap_entry {
bool referenced;
struct zswap_pool *pool;
unsigned long handle;
- struct obj_cgroup *objcg;
struct list_head lru;
};
@@ -601,25 +600,13 @@ static int zswap_enabled_param_set(const char *val,
* lru functions
**********************************/
-/* should be called under RCU */
-#ifdef CONFIG_MEMCG
-static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
-{
- return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
-}
-#else
-static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
-{
- return NULL;
-}
-#endif
-
static inline int entry_to_nid(struct zswap_entry *entry)
{
return page_to_nid(virt_to_page(entry));
}
-static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
+static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry,
+ struct obj_cgroup *objcg)
{
int nid = entry_to_nid(entry);
struct mem_cgroup *memcg;
@@ -636,19 +623,20 @@ static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
* Similar reasoning holds for list_lru_del().
*/
rcu_read_lock();
- memcg = mem_cgroup_from_entry(entry);
+ memcg = objcg ? obj_cgroup_memcg(objcg) : NULL;
/* will always succeed */
list_lru_add(list_lru, &entry->lru, nid, memcg);
rcu_read_unlock();
}
-static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
+static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry,
+ struct obj_cgroup *objcg)
{
int nid = entry_to_nid(entry);
struct mem_cgroup *memcg;
rcu_read_lock();
- memcg = mem_cgroup_from_entry(entry);
+ memcg = objcg ? obj_cgroup_memcg(objcg) : NULL;
/* will always succeed */
list_lru_del(list_lru, &entry->lru, nid, memcg);
rcu_read_unlock();
@@ -716,12 +704,16 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
*/
static void zswap_entry_free(struct zswap_entry *entry)
{
- zswap_lru_del(&zswap_list_lru, entry);
+ struct obj_cgroup *objcg = zs_lookup_objcg(entry->pool->zs_pool,
+ entry->handle);
+
+ zswap_lru_del(&zswap_list_lru, entry, objcg);
zs_free(entry->pool->zs_pool, entry->handle);
zswap_pool_put(entry->pool);
- if (entry->objcg) {
- obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
- obj_cgroup_put(entry->objcg);
+
+ if (objcg) {
+ obj_cgroup_uncharge_zswap(objcg, entry->length);
+ obj_cgroup_put(objcg);
}
if (entry->length == PAGE_SIZE)
atomic_long_dec(&zswap_stored_incompressible_pages);
@@ -994,6 +986,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
struct mempolicy *mpol;
bool folio_was_allocated;
struct swap_info_struct *si;
+ struct obj_cgroup *objcg;
int ret = 0;
/* try to allocate swap cache folio */
@@ -1043,8 +1036,9 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
xa_erase(tree, offset);
count_vm_event(ZSWPWB);
- if (entry->objcg)
- count_objcg_events(entry->objcg, ZSWPWB, 1);
+ objcg = zs_lookup_objcg(entry->pool->zs_pool, entry->handle);
+ if (objcg)
+ count_objcg_events(objcg, ZSWPWB, 1);
zswap_entry_free(entry);
@@ -1463,11 +1457,10 @@ static bool zswap_store_page(struct page *page,
*/
entry->pool = pool;
entry->swpentry = page_swpentry;
- entry->objcg = objcg;
entry->referenced = true;
if (entry->length) {
INIT_LIST_HEAD(&entry->lru);
- zswap_lru_add(&zswap_list_lru, entry);
+ zswap_lru_add(&zswap_list_lru, entry, objcg);
}
return true;
@@ -1592,6 +1585,7 @@ int zswap_load(struct folio *folio)
bool swapcache = folio_test_swapcache(folio);
struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
+ struct obj_cgroup *objcg;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
@@ -1620,8 +1614,9 @@ int zswap_load(struct folio *folio)
folio_mark_uptodate(folio);
count_vm_event(ZSWPIN);
- if (entry->objcg)
- count_objcg_events(entry->objcg, ZSWPIN, 1);
+ objcg = zs_lookup_objcg(entry->pool->zs_pool, entry->handle);
+ if (objcg)
+ count_objcg_events(objcg, ZSWPIN, 1);
/*
* When reading into the swapcache, invalidate our entry. The
--
2.47.3
next prev parent reply other threads:[~2026-02-26 19:29 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-26 19:29 [PATCH 0/8] mm/zswap, zsmalloc: Per-memcg-lruvec zswap accounting Joshua Hahn
2026-02-26 19:29 ` [PATCH 1/8] mm/zsmalloc: Rename zs_object_copy to zs_obj_copy Joshua Hahn
2026-02-26 19:29 ` [PATCH 2/8] mm/zsmalloc: Make all obj_idx unsigned ints Joshua Hahn
2026-02-26 19:29 ` [PATCH 3/8] mm/zsmalloc: Introduce objcgs pointer in struct zpdesc Joshua Hahn
2026-02-26 21:37 ` Shakeel Butt
2026-02-26 21:43 ` Joshua Hahn
2026-02-26 19:29 ` [PATCH 4/8] mm/zsmalloc: Store obj_cgroup pointer in zpdesc Joshua Hahn
2026-02-26 19:29 ` Joshua Hahn [this message]
2026-02-26 23:13 ` [PATCH 5/8] mm/zsmalloc,zswap: Redirect zswap_entry->objcg to zpdesc kernel test robot
2026-02-26 19:29 ` [PATCH 6/8] mm/zsmalloc, zswap: Handle objcg charging and lifetime in zsmalloc Joshua Hahn
2026-02-26 19:29 ` [PATCH 7/8] mm/memcontrol: Track MEMCG_ZSWAPPED in bytes Joshua Hahn
2026-02-26 19:29 ` [PATCH 8/8] mm/vmstat, memcontrol: Track ZSWAP_B, ZSWAPPED_B per-memcg-lruvec Joshua Hahn
2026-02-26 22:40 ` kernel test robot
2026-02-26 23:02 ` kernel test robot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260226192936.3190275-6-joshua.hahnjy@gmail.com \
--to=joshua.hahnjy@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=chengming.zhou@linux.dev \
--cc=hannes@cmpxchg.org \
--cc=hoangnhat.pham@linux.dev \
--cc=kernel-team@meta.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=minchan@kernel.org \
--cc=nphamcs@gmail.com \
--cc=senozhatsky@chromium.org \
--cc=yosry.ahmed@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox