From: Haifeng Xu <haifeng.xu@shopee.com>
To: akpm@linux-foundation.org, david@fromorbit.com, roman.gushchin@linux.dev
Cc: zhengqi.arch@bytedance.com, muchun.song@linux.dev,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Haifeng Xu <haifeng.xu@shopee.com>
Subject: [PATCH 2/3] mm: shrinker: optimize the allocation of shrinker_info when setting cgroup_memory_nokmem
Date: Fri, 6 Mar 2026 15:57:55 +0800 [thread overview]
Message-ID: <20260306075757.198887-3-haifeng.xu@shopee.com> (raw)
In-Reply-To: <20260306075757.198887-1-haifeng.xu@shopee.com>
When kmem is disabled, memcg slab shrink only calls non-slab shrinkers,
so we just allocate shrinker info for non-slab shrinkers in non-root memcgs.
Therefore, if memcg_kmem_online() is true, everything stays the same as before.
Otherwise, root memcg allocates id from shrinker_idr to identify each
shrinker and non-root memcgs use nonslab_id to identify non-slab shrinkers.
The size of shrinker_info in non-root memcgs can be very small because the
number of shrinkers marked as SHRINKER_NONSLAB | SHRINKER_MEMCG_AWARE is
small. Also, the time spent in expand_shrinker_info() can be reduced a lot.
When setting shrinker bit or updating nr_deferred, use nonslab_id for
non-root memcgs if the shrinker is marked as SHRINKER_NONSLAB.
Signed-off-by: Haifeng Xu <haifeng.xu@shopee.com>
---
include/linux/shrinker.h | 3 +
mm/huge_memory.c | 21 ++++--
mm/shrinker.c | 135 ++++++++++++++++++++++++++++++++++-----
3 files changed, 138 insertions(+), 21 deletions(-)
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 1a00be90d93a..df53008ed8b5 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -107,6 +107,9 @@ struct shrinker {
#ifdef CONFIG_MEMCG
/* ID in shrinker_idr */
int id;
+
+ /* ID in shrinker_nonslab_idr */
+ int nonslab_id;
#endif
#ifdef CONFIG_SHRINKER_DEBUG
int debugfs_id;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8e2746ea74ad..319349b5da5d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4351,9 +4351,14 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
memcg = folio_split_queue_memcg(folio, ds_queue);
list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
ds_queue->split_queue_len++;
- if (memcg)
- set_shrinker_bit(memcg, folio_nid(folio),
- shrinker_id(deferred_split_shrinker));
+ if (memcg) {
+ int id = deferred_split_shrinker->id;
+
+ if (!memcg_kmem_online() && memcg != root_mem_cgroup)
+ id = deferred_split_shrinker->nonslab_id;
+
+ set_shrinker_bit(memcg, folio_nid(folio), id);
+ }
}
split_queue_unlock_irqrestore(ds_queue, flags);
}
@@ -4508,8 +4513,14 @@ void reparent_deferred_split_queue(struct mem_cgroup *memcg)
parent_ds_queue->split_queue_len += ds_queue->split_queue_len;
ds_queue->split_queue_len = 0;
- for_each_node(nid)
- set_shrinker_bit(parent, nid, shrinker_id(deferred_split_shrinker));
+ for_each_node(nid) {
+ int id = deferred_split_shrinker->id;
+
+ if (!memcg_kmem_online() && parent != root_mem_cgroup)
+ id = deferred_split_shrinker->nonslab_id;
+
+ set_shrinker_bit(parent, nid, id);
+ }
unlock:
spin_unlock(&parent_ds_queue->split_queue_lock);
diff --git a/mm/shrinker.c b/mm/shrinker.c
index f0c6dfa026b0..52ea0e6391af 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -12,6 +12,7 @@ DEFINE_MUTEX(shrinker_mutex);
#ifdef CONFIG_MEMCG
static int shrinker_nr_max;
+static int shrinker_nonslab_nr_max;
static inline int shrinker_unit_size(int nr_items)
{
@@ -78,15 +79,25 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
{
int nid, ret = 0;
int array_size = 0;
+ int alloc_nr_max;
+
+ if (memcg_kmem_online()) {
+ alloc_nr_max = shrinker_nr_max;
+ } else {
+ if (memcg == root_mem_cgroup)
+ alloc_nr_max = shrinker_nr_max;
+ else
+ alloc_nr_max = shrinker_nonslab_nr_max;
+ }
mutex_lock(&shrinker_mutex);
- array_size = shrinker_unit_size(shrinker_nr_max);
+ array_size = shrinker_unit_size(alloc_nr_max);
for_each_node(nid) {
struct shrinker_info *info = kvzalloc_node(sizeof(*info) + array_size,
GFP_KERNEL, nid);
if (!info)
goto err;
- info->map_nr_max = shrinker_nr_max;
+ info->map_nr_max = alloc_nr_max;
if (shrinker_unit_alloc(info, NULL, nid)) {
kvfree(info);
goto err;
@@ -147,33 +158,47 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
return 0;
}
-static int expand_shrinker_info(int new_id)
+static int expand_shrinker_info(int new_id, bool full, bool root)
{
int ret = 0;
int new_nr_max = round_up(new_id + 1, SHRINKER_UNIT_BITS);
int new_size, old_size = 0;
struct mem_cgroup *memcg;
+ struct mem_cgroup *start = NULL;
+ int old_nr_max = shrinker_nr_max;
if (!root_mem_cgroup)
goto out;
lockdep_assert_held(&shrinker_mutex);
+ if (!full && !root) {
+ start = root_mem_cgroup;
+ old_nr_max = shrinker_nonslab_nr_max;
+ }
+
new_size = shrinker_unit_size(new_nr_max);
- old_size = shrinker_unit_size(shrinker_nr_max);
+ old_size = shrinker_unit_size(old_nr_max);
+
+ memcg = mem_cgroup_iter(NULL, start, NULL);
+ if (!memcg)
+ goto out;
- memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
ret = expand_one_shrinker_info(memcg, new_size, old_size,
new_nr_max);
- if (ret) {
+ if (ret || (root && memcg == root_mem_cgroup)) {
mem_cgroup_iter_break(NULL, memcg);
goto out;
}
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
out:
- if (!ret)
- shrinker_nr_max = new_nr_max;
+ if (!ret) {
+ if (!full && !root)
+ shrinker_nonslab_nr_max = new_nr_max;
+ else
+ shrinker_nr_max = new_nr_max;
+ }
return ret;
}
@@ -195,7 +220,13 @@ static inline int calc_shrinker_id(int index, int offset)
static inline int get_shrinker_id(struct mem_cgroup *memcg, struct shrinker *shrinker)
{
- return shrinker->id;
+ int id = shrinker->id;
+
+ if (!memcg_kmem_online() && (shrinker->flags & SHRINKER_NONSLAB) &&
+ memcg != root_mem_cgroup)
+ id = shrinker->nonslab_id;
+
+ return id;
}
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
@@ -217,6 +248,8 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
}
static DEFINE_IDR(shrinker_idr);
+static DEFINE_IDR(shrinker_nonslab_idr);
+
static int shrinker_memcg_alloc(struct shrinker *shrinker)
{
@@ -230,10 +263,46 @@ static int shrinker_memcg_alloc(struct shrinker *shrinker)
if (id < 0)
goto unlock;
- if (id >= shrinker_nr_max) {
- if (expand_shrinker_info(id)) {
- idr_remove(&shrinker_idr, id);
- goto unlock;
+ shrinker->nonslab_id = -1;
+
+ if (!mem_cgroup_kmem_disabled()) {
+ if (id >= shrinker_nr_max) {
+ /* expand shrinker info for all memory cgroups */
+ if (expand_shrinker_info(id, true, false)) {
+ idr_remove(&shrinker_idr, id);
+ goto unlock;
+ }
+ }
+ } else {
+ /*
+ * If cgroup_memory_nokmem is set, every shrinker needs to be recorded in
+ * the root memory cgroup because global slab shrink traverses all shrinkers.
+ * For non-root memcgs, record shrinkers with SHRINKER_NONSLAB because memcg
+ * slab shrink only calls non-slab shrinkers.
+ */
+ if (id >= shrinker_nr_max) {
+ /* expand shrinker info for root memory cgroup only */
+ if (expand_shrinker_info(id, false, true)) {
+ idr_remove(&shrinker_idr, id);
+ goto unlock;
+ }
+ }
+
+ if (shrinker->flags & SHRINKER_NONSLAB) {
+ int nonslab_id;
+
+ nonslab_id = idr_alloc(&shrinker_nonslab_idr, shrinker, 0, 0, GFP_KERNEL);
+ if (nonslab_id < 0)
+ goto unlock;
+
+ if (nonslab_id >= shrinker_nonslab_nr_max) {
+ /* expand shrinker info for non-root memory cgroups */
+ if (expand_shrinker_info(nonslab_id, false, false)) {
+ idr_remove(&shrinker_nonslab_idr, id);
+ goto unlock;
+ }
+ }
+ shrinker->nonslab_id = nonslab_id;
}
}
shrinker->id = id;
@@ -252,6 +321,12 @@ static void shrinker_memcg_remove(struct shrinker *shrinker)
lockdep_assert_held(&shrinker_mutex);
idr_remove(&shrinker_idr, id);
+
+ if (shrinker->flags & SHRINKER_NONSLAB) {
+ id = shrinker->nonslab_id;
+ if (id >= 0)
+ idr_remove(&shrinker_nonslab_idr, id);
+ }
}
static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
@@ -310,10 +385,33 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
parent_info = shrinker_info_protected(parent, nid);
for (index = 0; index < shrinker_id_to_index(child_info->map_nr_max); index++) {
child_unit = child_info->unit[index];
- parent_unit = parent_info->unit[index];
for (offset = 0; offset < SHRINKER_UNIT_BITS; offset++) {
nr = atomic_long_read(&child_unit->nr_deferred[offset]);
- atomic_long_add(nr, &parent_unit->nr_deferred[offset]);
+
+ /*
+ * If memcg_kmem_online() is false, non-root memcgs use
+ * nonslab_id but the root memory cgroup uses id. When reparenting
+ * shrinker info to it, we must convert the nonslab_id to id.
+ */
+ if (!memcg_kmem_online() && parent == root_mem_cgroup) {
+ int id, p_index, p_off;
+ struct shrinker *shrinker;
+
+ id = calc_shrinker_id(index, offset);
+ shrinker = idr_find(&shrinker_nonslab_idr, id);
+ if (shrinker) {
+ id = shrinker->id;
+ p_index = shrinker_id_to_index(id);
+ p_off = shrinker_id_to_offset(id);
+
+ parent_unit = parent_info->unit[p_index];
+ atomic_long_add(nr,
+ &parent_unit->nr_deferred[p_off]);
+ }
+ } else {
+ parent_unit = parent_info->unit[index];
+ atomic_long_add(nr, &parent_unit->nr_deferred[offset]);
+ }
}
}
}
@@ -543,7 +641,12 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
int shrinker_id = calc_shrinker_id(index, offset);
rcu_read_lock();
- shrinker = idr_find(&shrinker_idr, shrinker_id);
+
+ if (memcg_kmem_online())
+ shrinker = idr_find(&shrinker_idr, shrinker_id);
+ else
+ shrinker = idr_find(&shrinker_nonslab_idr, shrinker_id);
+
if (unlikely(!shrinker || !shrinker_try_get(shrinker))) {
clear_bit(offset, unit->map);
rcu_read_unlock();
--
2.43.0
next prev parent reply other threads:[~2026-03-06 7:59 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-06 7:57 [PATCH 0/3] record non-slab shrinkers for non-root memcgs when kmem is disabled Haifeng Xu
2026-03-06 7:57 ` [PATCH 1/3] mm: shrinker: introduce new function get_shrinker_id() Haifeng Xu
2026-03-06 8:24 ` Qi Zheng
2026-03-06 11:21 ` Haifeng Xu
2026-03-06 7:57 ` Haifeng Xu [this message]
2026-03-06 7:57 ` [PATCH 3/3] mm: shrinker: remove unnecessary check in shrink_slab_memcg() Haifeng Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260306075757.198887-3-haifeng.xu@shopee.com \
--to=haifeng.xu@shopee.com \
--cc=akpm@linux-foundation.org \
--cc=david@fromorbit.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=muchun.song@linux.dev \
--cc=roman.gushchin@linux.dev \
--cc=zhengqi.arch@bytedance.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox