linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [RFC PATCH] zsmalloc: make common caches global
@ 2026-01-16  4:48 Sergey Senozhatsky
  2026-01-16  5:52 ` Sergey Senozhatsky
                   ` (2 more replies)
  0 siblings, 3 replies; 14+ messages in thread
From: Sergey Senozhatsky @ 2026-01-16  4:48 UTC (permalink / raw)
  To: Andrew Morton, Yosry Ahmed
  Cc: Minchan Kim, Nhat Pham, Johannes Weiner, Brian Geffon,
	linux-kernel, linux-mm, Sergey Senozhatsky

Currently, zsmalloc creates separate kmem_cache instances for handles
and zspages for each pool, which may be suboptimal from the memory
usage point of view (extra internal fragmentation per pool).  Systems
that create multiple zsmalloc pools may benefit from shared common
zsmalloc caches.

Make the handle and zspage kmem caches global.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 mm/zsmalloc.c | 95 ++++++++++++++++++++++-----------------------------
 1 file changed, 40 insertions(+), 55 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5abb8bc0956a..05ed3539aa1e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -198,12 +198,13 @@ struct link_free {
 	};
 };
 
+static struct kmem_cache *handle_cachep;
+static struct kmem_cache *zspage_cachep;
+
 struct zs_pool {
 	const char *name;
 
 	struct size_class *size_class[ZS_SIZE_CLASSES];
-	struct kmem_cache *handle_cachep;
-	struct kmem_cache *zspage_cachep;
 
 	atomic_long_t pages_allocated;
 
@@ -376,60 +377,28 @@ static void init_deferred_free(struct zs_pool *pool) {}
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
 #endif
 
-static int create_cache(struct zs_pool *pool)
+static unsigned long cache_alloc_handle(gfp_t gfp)
 {
-	char *name;
-
-	name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name);
-	if (!name)
-		return -ENOMEM;
-	pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE,
-						0, 0, NULL);
-	kfree(name);
-	if (!pool->handle_cachep)
-		return -EINVAL;
-
-	name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name);
-	if (!name)
-		return -ENOMEM;
-	pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage),
-						0, 0, NULL);
-	kfree(name);
-	if (!pool->zspage_cachep) {
-		kmem_cache_destroy(pool->handle_cachep);
-		pool->handle_cachep = NULL;
-		return -EINVAL;
-	}
-
-	return 0;
-}
+	gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE);
 
-static void destroy_cache(struct zs_pool *pool)
-{
-	kmem_cache_destroy(pool->handle_cachep);
-	kmem_cache_destroy(pool->zspage_cachep);
+	return (unsigned long)kmem_cache_alloc(handle_cachep, gfp);
 }
 
-static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
+static void cache_free_handle(unsigned long handle)
 {
-	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
-			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+	kmem_cache_free(handle_cachep, (void *)handle);
 }
 
-static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
+static struct zspage *cache_alloc_zspage(gfp_t gfp)
 {
-	kmem_cache_free(pool->handle_cachep, (void *)handle);
-}
+	gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE);
 
-static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
-{
-	return kmem_cache_zalloc(pool->zspage_cachep,
-			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+	return kmem_cache_zalloc(zspage_cachep, gfp);
 }
 
-static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+static void cache_free_zspage(struct zspage *zspage)
 {
-	kmem_cache_free(pool->zspage_cachep, zspage);
+	kmem_cache_free(zspage_cachep, zspage);
 }
 
 /* class->lock(which owns the handle) synchronizes races */
@@ -858,7 +827,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 		zpdesc = next;
 	} while (zpdesc != NULL);
 
-	cache_free_zspage(pool, zspage);
+	cache_free_zspage(zspage);
 
 	class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
 	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
@@ -971,7 +940,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 {
 	int i;
 	struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
-	struct zspage *zspage = cache_alloc_zspage(pool, gfp);
+	struct zspage *zspage = cache_alloc_zspage(gfp);
 
 	if (!zspage)
 		return NULL;
@@ -993,7 +962,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 				zpdesc_dec_zone_page_state(zpdescs[i]);
 				free_zpdesc(zpdescs[i]);
 			}
-			cache_free_zspage(pool, zspage);
+			cache_free_zspage(zspage);
 			return NULL;
 		}
 		__zpdesc_set_zsmalloc(zpdesc);
@@ -1346,7 +1315,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp,
 	if (unlikely(size > ZS_MAX_ALLOC_SIZE))
 		return (unsigned long)ERR_PTR(-ENOSPC);
 
-	handle = cache_alloc_handle(pool, gfp);
+	handle = cache_alloc_handle(gfp);
 	if (!handle)
 		return (unsigned long)ERR_PTR(-ENOMEM);
 
@@ -1370,7 +1339,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp,
 
 	zspage = alloc_zspage(pool, class, gfp, nid);
 	if (!zspage) {
-		cache_free_handle(pool, handle);
+		cache_free_handle(handle);
 		return (unsigned long)ERR_PTR(-ENOMEM);
 	}
 
@@ -1450,7 +1419,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 		free_zspage(pool, class, zspage);
 
 	spin_unlock(&class->lock);
-	cache_free_handle(pool, handle);
+	cache_free_handle(handle);
 }
 EXPORT_SYMBOL_GPL(zs_free);
 
@@ -2112,9 +2081,6 @@ struct zs_pool *zs_create_pool(const char *name)
 	if (!pool->name)
 		goto err;
 
-	if (create_cache(pool))
-		goto err;
-
 	/*
 	 * Iterate reversely, because, size of size_class that we want to use
 	 * for merging should be larger or equal to current size.
@@ -2236,7 +2202,6 @@ void zs_destroy_pool(struct zs_pool *pool)
 		kfree(class);
 	}
 
-	destroy_cache(pool);
 	kfree(pool->name);
 	kfree(pool);
 }
@@ -2246,10 +2211,28 @@ static int __init zs_init(void)
 {
 	int rc __maybe_unused;
 
+	handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, 0, 0,
+					  NULL);
+	if (!handle_cachep)
+		return -ENOMEM;
+
+	zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage), 0,
+					  0, NULL);
+	if (!zspage_cachep) {
+		kmem_cache_destroy(handle_cachep);
+		handle_cachep = NULL;
+		return -ENOMEM;
+	}
+
 #ifdef CONFIG_COMPACTION
 	rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
-	if (rc)
+	if (rc) {
+		kmem_cache_destroy(zspage_cachep);
+		kmem_cache_destroy(handle_cachep);
+		zspage_cachep = NULL;
+		handle_cachep = NULL;
 		return rc;
+	}
 #endif
 	zs_stat_init();
 	return 0;
@@ -2261,6 +2244,8 @@ static void __exit zs_exit(void)
 	set_movable_ops(NULL, PGTY_zsmalloc);
 #endif
 	zs_stat_exit();
+	kmem_cache_destroy(zspage_cachep);
+	kmem_cache_destroy(handle_cachep);
 }
 
 module_init(zs_init);
-- 
2.52.0.457.g6b5491de43-goog



^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2026-01-22  3:55 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-01-16  4:48 [RFC PATCH] zsmalloc: make common caches global Sergey Senozhatsky
2026-01-16  5:52 ` Sergey Senozhatsky
2026-01-19 21:44   ` Nhat Pham
2026-01-21  3:41     ` Sergey Senozhatsky
2026-01-21 23:58       ` Yosry Ahmed
2026-01-22  3:28         ` Sergey Senozhatsky
2026-01-22  3:39           ` Yosry Ahmed
2026-01-22  3:55             ` Sergey Senozhatsky
2026-01-16 20:49 ` Yosry Ahmed
2026-01-17  2:24   ` Sergey Senozhatsky
2026-01-21  1:30     ` Yosry Ahmed
2026-01-21  1:56       ` Sergey Senozhatsky
2026-01-19 21:43 ` Nhat Pham
2026-01-20  1:19   ` Sergey Senozhatsky

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox