linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Joshua Hahn <joshua.hahnjy@gmail.com>
To: Minchan Kim <minchan@kernel.org>,
	Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
	Yosry Ahmed <yosry.ahmed@linux.dev>,
	Nhat Pham <hoangnhat.pham@linux.dev>,
	Nhat Pham <nphamcs@gmail.com>,
	Chengming Zhou <chengming.zhou@linux.dev>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	kernel-team@meta.com
Subject: [PATCH 06/11] mm/zsmalloc, zswap: Redirect zswap_entry->objcg to zspage
Date: Wed, 11 Mar 2026 12:51:43 -0700	[thread overview]
Message-ID: <20260311195153.4013476-7-joshua.hahnjy@gmail.com> (raw)
In-Reply-To: <20260311195153.4013476-1-joshua.hahnjy@gmail.com>

Now that obj_cgroups are tracked in the zspage, redirect the zswap layer
to use the pointer stored in the zspage and remove the pointer in
struct zswap_entry.

This offsets the temporary memory increase caused by the duplicate
storage of the obj_cgroup pointer and results in a net zero memory
footprint change (aside from the array pointer and flags in zspage).

The lifetime and charging of the obj_cgroup is still handled in the
zswap layer.

Remove mem_cgroup_from_entry(), which has no remaining callers.

Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
---
 include/linux/memcontrol.h |  5 ++++
 include/linux/zsmalloc.h   |  1 +
 mm/zsmalloc.c              | 25 +++++++++++++++++++
 mm/zswap.c                 | 50 +++++++++++++++++---------------------
 4 files changed, 53 insertions(+), 28 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 70b685a85bf4..0652db4ff2d5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1072,6 +1072,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *ob
 	return NULL;
 }
 
+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+{
+	return NULL;
+}
+
 static inline bool folio_memcg_kmem(struct folio *folio)
 {
 	return false;
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 645957a156c4..6010d8dac9ff 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -41,6 +41,7 @@ unsigned long zs_get_total_pages(struct zs_pool *pool);
 unsigned long zs_compact(struct zs_pool *pool);
 
 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
+struct obj_cgroup *zs_lookup_objcg(struct zs_pool *pool, unsigned long handle);
 
 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index d4735451c273..a94ca8c26ad9 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1049,6 +1049,31 @@ unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
 }
 EXPORT_SYMBOL_GPL(zs_lookup_class_index);
 
+struct obj_cgroup *zs_lookup_objcg(struct zs_pool *pool, unsigned long handle)
+{
+	unsigned long obj;
+	struct zpdesc *zpdesc;
+	struct zspage *zspage;
+	struct obj_cgroup *objcg;
+	unsigned int obj_idx;
+
+	if (!pool->memcg_aware)
+		return NULL;
+
+	read_lock(&pool->lock);
+	obj = handle_to_obj(handle);
+	obj_to_location(obj, &zpdesc, &obj_idx);
+
+	zspage = get_zspage(zpdesc);
+	zspage_read_lock(zspage);
+	read_unlock(&pool->lock);
+
+	objcg = zspage->objcgs[obj_idx];
+	zspage_read_unlock(zspage);
+
+	return objcg;
+}
+
 unsigned long zs_get_total_pages(struct zs_pool *pool)
 {
 	return atomic_long_read(&pool->pages_allocated);
diff --git a/mm/zswap.c b/mm/zswap.c
index 68b87c3cc326..436066965413 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -193,7 +193,6 @@ struct zswap_entry {
 	bool referenced;
 	struct zswap_pool *pool;
 	unsigned long handle;
-	struct obj_cgroup *objcg;
 	struct list_head lru;
 };
 
@@ -602,25 +601,13 @@ static int zswap_enabled_param_set(const char *val,
 * lru functions
 **********************************/
 
-/* should be called under RCU */
-#ifdef CONFIG_MEMCG
-static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
-{
-	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
-}
-#else
-static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
-{
-	return NULL;
-}
-#endif
-
 static inline int entry_to_nid(struct zswap_entry *entry)
 {
 	return page_to_nid(virt_to_page(entry));
 }
 
-static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
+static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry,
+			  struct obj_cgroup *objcg)
 {
 	int nid = entry_to_nid(entry);
 	struct mem_cgroup *memcg;
@@ -637,19 +624,20 @@ static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
 	 * Similar reasoning holds for list_lru_del().
 	 */
 	rcu_read_lock();
-	memcg = mem_cgroup_from_entry(entry);
+	memcg = objcg ? obj_cgroup_memcg(objcg) : NULL;
 	/* will always succeed */
 	list_lru_add(list_lru, &entry->lru, nid, memcg);
 	rcu_read_unlock();
 }
 
-static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
+static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry,
+			  struct obj_cgroup *objcg)
 {
 	int nid = entry_to_nid(entry);
 	struct mem_cgroup *memcg;
 
 	rcu_read_lock();
-	memcg = mem_cgroup_from_entry(entry);
+	memcg = objcg ? obj_cgroup_memcg(objcg) : NULL;
 	/* will always succeed */
 	list_lru_del(list_lru, &entry->lru, nid, memcg);
 	rcu_read_unlock();
@@ -717,12 +705,15 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
  */
 static void zswap_entry_free(struct zswap_entry *entry)
 {
-	zswap_lru_del(&zswap_list_lru, entry);
+	struct obj_cgroup *objcg = zs_lookup_objcg(entry->pool->zs_pool,
+						   entry->handle);
+
+	zswap_lru_del(&zswap_list_lru, entry, objcg);
 	zs_free(entry->pool->zs_pool, entry->handle);
 	zswap_pool_put(entry->pool);
-	if (entry->objcg) {
-		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
-		obj_cgroup_put(entry->objcg);
+	if (objcg) {
+		obj_cgroup_uncharge_zswap(objcg, entry->length);
+		obj_cgroup_put(objcg);
 	}
 	if (entry->length == PAGE_SIZE)
 		atomic_long_dec(&zswap_stored_incompressible_pages);
@@ -995,6 +986,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	struct mempolicy *mpol;
 	bool folio_was_allocated;
 	struct swap_info_struct *si;
+	struct obj_cgroup *objcg;
 	int ret = 0;
 
 	/* try to allocate swap cache folio */
@@ -1044,8 +1036,9 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	xa_erase(tree, offset);
 
 	count_vm_event(ZSWPWB);
-	if (entry->objcg)
-		count_objcg_events(entry->objcg, ZSWPWB, 1);
+	objcg = zs_lookup_objcg(entry->pool->zs_pool, entry->handle);
+	if (objcg)
+		count_objcg_events(objcg, ZSWPWB, 1);
 
 	zswap_entry_free(entry);
 
@@ -1464,11 +1457,10 @@ static bool zswap_store_page(struct page *page,
 	 */
 	entry->pool = pool;
 	entry->swpentry = page_swpentry;
-	entry->objcg = objcg;
 	entry->referenced = true;
 	if (entry->length) {
 		INIT_LIST_HEAD(&entry->lru);
-		zswap_lru_add(&zswap_list_lru, entry);
+		zswap_lru_add(&zswap_list_lru, entry, objcg);
 	}
 
 	return true;
@@ -1593,6 +1585,7 @@ int zswap_load(struct folio *folio)
 	bool swapcache = folio_test_swapcache(folio);
 	struct xarray *tree = swap_zswap_tree(swp);
 	struct zswap_entry *entry;
+	struct obj_cgroup *objcg;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
@@ -1621,8 +1614,9 @@ int zswap_load(struct folio *folio)
 	folio_mark_uptodate(folio);
 
 	count_vm_event(ZSWPIN);
-	if (entry->objcg)
-		count_objcg_events(entry->objcg, ZSWPIN, 1);
+	objcg = zs_lookup_objcg(entry->pool->zs_pool, entry->handle);
+	if (objcg)
+		count_objcg_events(objcg, ZSWPIN, 1);
 
 	/*
 	 * When reading into the swapcache, invalidate our entry. The
-- 
2.52.0



  parent reply	other threads:[~2026-03-11 19:52 UTC|newest]

Thread overview: 33+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-11 19:51 [PATCH 00/11] mm/zswap, zsmalloc: Per-memcg-lruvec zswap accounting Joshua Hahn
2026-03-11 19:51 ` [PATCH 01/11] mm/zsmalloc: Rename zs_object_copy to zs_obj_copy Joshua Hahn
2026-03-11 19:56   ` Yosry Ahmed
2026-03-11 20:00   ` Nhat Pham
2026-03-11 19:51 ` [PATCH 02/11] mm/zsmalloc: Make all obj_idx unsigned ints Joshua Hahn
2026-03-11 19:58   ` Yosry Ahmed
2026-03-11 20:01   ` Nhat Pham
2026-03-11 19:51 ` [PATCH 03/11] mm/zsmalloc: Introduce conditional memcg awareness to zs_pool Joshua Hahn
2026-03-11 20:12   ` Nhat Pham
2026-03-11 20:16   ` Johannes Weiner
2026-03-11 20:19     ` Yosry Ahmed
2026-03-11 20:20     ` Joshua Hahn
2026-03-11 19:51 ` [PATCH 04/11] mm/zsmalloc: Introduce objcgs pointer in struct zspage Joshua Hahn
2026-03-11 20:17   ` Nhat Pham
2026-03-11 20:22     ` Joshua Hahn
2026-03-11 19:51 ` [PATCH 05/11] mm/zsmalloc: Store obj_cgroup pointer in zspage Joshua Hahn
2026-03-11 20:17   ` Yosry Ahmed
2026-03-11 20:24     ` Joshua Hahn
2026-03-11 19:51 ` Joshua Hahn [this message]
2026-03-11 19:51 ` [PATCH 07/11] mm/zsmalloc, zswap: Handle objcg charging and lifetime in zsmalloc Joshua Hahn
2026-03-12 21:42   ` Johannes Weiner
2026-03-13 15:34     ` Joshua Hahn
2026-03-13 16:49       ` Johannes Weiner
2026-03-11 19:51 ` [PATCH 08/11] mm/memcontrol: Track MEMCG_ZSWAPPED in bytes Joshua Hahn
2026-03-11 20:33   ` Nhat Pham
2026-03-17 19:13     ` Joshua Hahn
2026-03-11 19:51 ` [PATCH 09/11] mm/vmstat, memcontrol: Track ZSWAP_B, ZSWAPPED_B per-memcg-lruvec Joshua Hahn
2026-03-11 19:51 ` [PATCH 10/11] mm/zsmalloc: Handle single object charge migration in migrate_zspage Joshua Hahn
2026-03-12  3:51   ` kernel test robot
2026-03-12  3:51   ` kernel test robot
2026-03-12 16:56     ` Joshua Hahn
2026-03-11 19:51 ` [PATCH 11/11] mm/zsmalloc: Handle charge migration in zpdesc substitution Joshua Hahn
2026-03-11 19:54 ` [PATCH 00/11] mm/zswap, zsmalloc: Per-memcg-lruvec zswap accounting Joshua Hahn

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260311195153.4013476-7-joshua.hahnjy@gmail.com \
    --to=joshua.hahnjy@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=chengming.zhou@linux.dev \
    --cc=hannes@cmpxchg.org \
    --cc=hoangnhat.pham@linux.dev \
    --cc=kernel-team@meta.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=minchan@kernel.org \
    --cc=nphamcs@gmail.com \
    --cc=senozhatsky@chromium.org \
    --cc=yosry.ahmed@linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox