From: Muchun Song <songmuchun@bytedance.com>
To: willy@infradead.org, akpm@linux-foundation.org,
hannes@cmpxchg.org, mhocko@kernel.org, vdavydov.dev@gmail.com,
shakeelb@google.com, guro@fb.com, shy828301@gmail.com,
alexs@kernel.org, richard.weiyang@gmail.com, david@fromorbit.com,
trond.myklebust@hammerspace.com, anna.schumaker@netapp.com
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, linux-nfs@vger.kernel.org,
zhengqi.arch@bytedance.com, duanxiongchun@bytedance.com,
fam.zheng@bytedance.com, Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v2 04/21] mm: memcontrol: move memcg_online_kmem() to mem_cgroup_css_online()
Date: Thu, 27 May 2021 14:21:31 +0800
Message-ID: <20210527062148.9361-5-songmuchun@bytedance.com>
In-Reply-To: <20210527062148.9361-1-songmuchun@bytedance.com>
If kmem is brought online in mem_cgroup_css_online(), we no longer need to
reset ->kmemcg_id when kmem goes offline, and memcg_free_kmem() can be
removed entirely. Since mem_cgroup_css_online() is also called for the root
memcg (unlike the old call site in mem_cgroup_css_alloc(), which sat below
the "does not apply to the root" comment), memcg_online_kmem() and
memcg_offline_kmem() now bail out early for the root cgroup instead. Do
this to simplify the code.
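For reference, a condensed sketch of mem_cgroup_css_online() as it looks
after this patch, assembled from the hunks below (comments abbreviated);
it is illustrative only, not a verbatim copy of the resulting function:

static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/* kmem is now brought online here rather than in css_alloc(). */
	if (memcg_online_kmem(memcg))
		goto remove_id;

	/*
	 * A memcg must be visible for expand_shrinker_info() by the time
	 * the maps are allocated, so allocate them here.
	 */
	if (alloc_shrinker_info(memcg))
		goto offline_kmem;

	/* Online state pins memcg ID, memcg ID pins CSS. */
	refcount_set(&memcg->id.ref, 1);
	css_get(css);
	return 0;

offline_kmem:
	memcg_offline_kmem(memcg);
remove_id:
	mem_cgroup_id_remove(memcg);
	return -ENOMEM;
}

With the error unwinding concentrated here, css_alloc() no longer needs its
fail: path and css_free() no longer needs memcg_free_kmem().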
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
mm/memcontrol.c | 42 +++++++++++++++---------------------------
1 file changed, 15 insertions(+), 27 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 23a9fc8dc143..377ec9847179 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3629,7 +3629,8 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
if (cgroup_memory_nokmem)
return 0;
- BUG_ON(memcg->kmemcg_id >= 0);
+ if (unlikely(mem_cgroup_is_root(memcg)))
+ return 0;
memcg_id = memcg_alloc_cache_id();
if (memcg_id < 0)
@@ -3658,6 +3659,9 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
if (cgroup_memory_nokmem)
return;
+ if (unlikely(mem_cgroup_is_root(memcg)))
+ return;
+
parent = parent_mem_cgroup(memcg);
if (!parent)
parent = root_mem_cgroup;
@@ -3665,20 +3669,11 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
memcg_reparent_objcgs(memcg, parent);
kmemcg_id = memcg->kmemcg_id;
- BUG_ON(kmemcg_id < 0);
/* memcg_reparent_objcgs() must be called before this. */
memcg_drain_all_list_lrus(kmemcg_id, parent);
memcg_free_cache_id(kmemcg_id);
- memcg->kmemcg_id = -1;
-}
-
-static void memcg_free_kmem(struct mem_cgroup *memcg)
-{
- /* css_alloc() failed, offlining didn't happen */
- if (unlikely(memcg->kmemcg_id != -1))
- memcg_offline_kmem(memcg);
}
#else
static int memcg_online_kmem(struct mem_cgroup *memcg)
@@ -3688,9 +3683,6 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
-static void memcg_free_kmem(struct mem_cgroup *memcg)
-{
-}
#endif /* CONFIG_MEMCG_KMEM */
static int memcg_update_kmem_max(struct mem_cgroup *memcg,
@@ -5219,7 +5211,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
struct mem_cgroup *memcg, *old_memcg;
- long error = -ENOMEM;
old_memcg = set_active_memcg(parent);
memcg = mem_cgroup_alloc();
@@ -5249,38 +5240,36 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
}
/* The following stuff does not apply to the root */
- error = memcg_online_kmem(memcg);
- if (error)
- goto fail;
-
if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
static_branch_inc(&memcg_sockets_enabled_key);
return &memcg->css;
-fail:
- mem_cgroup_id_remove(memcg);
- mem_cgroup_free(memcg);
- return ERR_PTR(error);
}
static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ if (memcg_online_kmem(memcg))
+ goto remove_id;
+
/*
* A memcg must be visible for expand_shrinker_info()
* by the time the maps are allocated. So, we allocate maps
* here, when for_each_mem_cgroup() can't skip it.
*/
- if (alloc_shrinker_info(memcg)) {
- mem_cgroup_id_remove(memcg);
- return -ENOMEM;
- }
+ if (alloc_shrinker_info(memcg))
+ goto offline_kmem;
/* Online state pins memcg ID, memcg ID pins CSS */
refcount_set(&memcg->id.ref, 1);
css_get(css);
return 0;
+offline_kmem:
+ memcg_offline_kmem(memcg);
+remove_id:
+ mem_cgroup_id_remove(memcg);
+ return -ENOMEM;
}
static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
@@ -5338,7 +5327,6 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
cancel_work_sync(&memcg->high_work);
mem_cgroup_remove_from_trees(memcg);
free_shrinker_info(memcg);
- memcg_free_kmem(memcg);
mem_cgroup_free(memcg);
}
--
2.11.0
Thread overview: 26+ messages
2021-05-27 6:21 [PATCH v2 00/21] Optimize list lru memory consumption Muchun Song
2021-05-27 6:21 ` [PATCH v2 01/21] mm: list_lru: fix list_lru_count_one() return value Muchun Song
2021-05-27 6:21 ` [PATCH v2 02/21] mm: memcontrol: remove kmemcg_id reparenting Muchun Song
2021-05-27 6:21 ` [PATCH v2 03/21] mm: memcontrol: remove the kmem states Muchun Song
2021-05-27 6:21 ` Muchun Song [this message]
2021-05-27 6:21 ` [PATCH v2 05/21] mm: list_lru: remove lru node locking from memcg_update_list_lru_node Muchun Song
2021-05-27 6:21 ` [PATCH v2 06/21] mm: list_lru: only add the memcg aware lrus to the list_lrus Muchun Song
2021-05-27 6:21 ` [PATCH v2 07/21] mm: list_lru: optimize the array of per memcg lists memory consumption Muchun Song
2021-05-27 6:21 ` [PATCH v2 08/21] mm: list_lru: remove memcg_aware field from struct list_lru Muchun Song
2021-05-27 6:21 ` [PATCH v2 09/21] mm: introduce kmem_cache_alloc_lru Muchun Song
2021-05-27 6:21 ` [PATCH v2 10/21] fs: introduce alloc_inode_sb() to allocate filesystems specific inode Muchun Song
2021-05-27 6:21 ` [PATCH v2 11/21] mm: dcache: use kmem_cache_alloc_lru() to allocate dentry Muchun Song
2021-05-27 6:21 ` [PATCH v2 12/21] xarray: use kmem_cache_alloc_lru to allocate xa_node Muchun Song
2021-05-27 6:21 ` [PATCH v2 13/21] mm: workingset: use xas_set_lru() to pass shadow_nodes Muchun Song
2021-05-27 6:21 ` [PATCH v2 14/21] nfs42: use a specific kmem_cache to allocate nfs4_xattr_entry Muchun Song
2021-05-27 6:21 ` [PATCH v2 15/21] mm: list_lru: allocate list_lru_one only when needed Muchun Song
2021-05-27 6:21 ` [PATCH v2 16/21] mm: list_lru: rename memcg_drain_all_list_lrus to memcg_reparent_list_lrus Muchun Song
2021-05-27 6:21 ` [PATCH v2 17/21] mm: list_lru: replace linear array with xarray Muchun Song
2021-05-27 12:07 ` Matthew Wilcox
2021-05-28 3:43 ` [External] " Muchun Song
2021-05-28 8:04 ` Muchun Song
2021-05-27 6:21 ` [PATCH v2 18/21] mm: memcontrol: reuse memory cgroup ID for kmem ID Muchun Song
2021-05-27 6:21 ` [PATCH v2 19/21] mm: memcontrol: fix cannot alloc the maximum memcg ID Muchun Song
2021-05-27 6:21 ` [PATCH v2 20/21] mm: list_lru: rename list_lru_per_memcg to list_lru_memcg Muchun Song
2021-05-27 6:21 ` [PATCH v2 21/21] mm: memcontrol: rename memcg_cache_id to memcg_kmem_id Muchun Song
2021-06-16 10:53 ` [PATCH v2 00/21] Optimize list lru memory consumption Muchun Song