From: Qi Zheng <qi.zheng@linux.dev>
To: hannes@cmpxchg.org, hughd@google.com, mhocko@suse.com,
roman.gushchin@linux.dev, shakeel.butt@linux.dev,
muchun.song@linux.dev, david@kernel.org,
lorenzo.stoakes@oracle.com, ziy@nvidia.com, harry.yoo@oracle.com,
yosry.ahmed@linux.dev, imran.f.khan@oracle.com,
kamalesh.babulal@oracle.com, axelrasmussen@google.com,
yuanchu@google.com, weixugc@google.com,
chenridong@huaweicloud.com, mkoutny@suse.com,
akpm@linux-foundation.org, hamzamahfooz@linux.microsoft.com,
apais@linux.microsoft.com, lance.yang@linux.dev, bhe@redhat.com,
usamaarif642@gmail.com
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
cgroups@vger.kernel.org, Muchun Song <songmuchun@bytedance.com>,
Qi Zheng <zhengqi.arch@bytedance.com>
Subject: [PATCH v5 10/32] writeback: prevent memory cgroup release in writeback module
Date: Wed, 25 Feb 2026 15:48:43 +0800 [thread overview]
Message-ID: <645f99bc344575417f67def3744f975596df2793.1772005110.git.zhengqi.arch@bytedance.com> (raw)
In-Reply-To: <cover.1772005110.git.zhengqi.arch@bytedance.com>
From: Muchun Song <songmuchun@bytedance.com>
In the near future, a folio will no longer pin its corresponding
memory cgroup. To ensure safety, it will then only be valid to use
the memory cgroup returned by folio_memcg() while holding the RCU
read lock or after acquiring a reference to it, thereby preventing
it from being released.
In this patch, the function get_mem_cgroup_css_from_folio() and the
RCU read lock are employed to safeguard against the release of the
memory cgroup.
This serves as a preparatory measure for the reparenting of the
LRU pages.
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
---
fs/fs-writeback.c | 22 +++++++++++-----------
include/linux/memcontrol.h | 9 +++++++--
include/trace/events/writeback.h | 3 +++
mm/memcontrol.c | 14 ++++++++------
4 files changed, 29 insertions(+), 19 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 7c75ed7e89799..c3442a38450ca 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -280,15 +280,13 @@ void __inode_attach_wb(struct inode *inode, struct folio *folio)
if (inode_cgwb_enabled(inode)) {
struct cgroup_subsys_state *memcg_css;
- if (folio) {
- memcg_css = mem_cgroup_css_from_folio(folio);
- wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
- } else {
- /* must pin memcg_css, see wb_get_create() */
+ /* must pin memcg_css, see wb_get_create() */
+ if (folio)
+ memcg_css = get_mem_cgroup_css_from_folio(folio);
+ else
memcg_css = task_get_css(current, memory_cgrp_id);
- wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
- css_put(memcg_css);
- }
+ wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
+ css_put(memcg_css);
}
if (!wb)
@@ -979,16 +977,16 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio
if (!wbc->wb || wbc->no_cgroup_owner)
return;
- css = mem_cgroup_css_from_folio(folio);
+ css = get_mem_cgroup_css_from_folio(folio);
/* dead cgroups shouldn't contribute to inode ownership arbitration */
if (!css_is_online(css))
- return;
+ goto out;
id = css->id;
if (id == wbc->wb_id) {
wbc->wb_bytes += bytes;
- return;
+ goto out;
}
if (id == wbc->wb_lcand_id)
@@ -1001,6 +999,8 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio
wbc->wb_tcand_bytes += bytes;
else
wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
+out:
+ css_put(css);
}
EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f4b6158b77d8e..20d38262b984b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -895,7 +895,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
+struct cgroup_subsys_state *get_mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -1564,9 +1564,14 @@ static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
if (mem_cgroup_disabled())
return;
+ if (!folio_memcg_charged(folio))
+ return;
+
+ rcu_read_lock();
memcg = folio_memcg(folio);
- if (unlikely(memcg && &memcg->css != wb->memcg_css))
+ if (unlikely(&memcg->css != wb->memcg_css))
mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
+ rcu_read_unlock();
}
void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 4d3d8c8f3a1bc..b849b8cc96b1e 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -294,7 +294,10 @@ TRACE_EVENT(track_foreign_dirty,
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+
+ rcu_read_lock();
__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
+ rcu_read_unlock();
),
TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4820919c0d219..a4bb8b8b2c457 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -244,7 +244,7 @@ DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);
/**
- * mem_cgroup_css_from_folio - css of the memcg associated with a folio
+ * get_mem_cgroup_css_from_folio - acquire a css of the memcg associated with a folio
* @folio: folio of interest
*
* If memcg is bound to the default hierarchy, css of the memcg associated
@@ -254,14 +254,16 @@ EXPORT_SYMBOL(memcg_bpf_enabled_key);
* If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
* is returned.
*/
-struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
+struct cgroup_subsys_state *get_mem_cgroup_css_from_folio(struct folio *folio)
{
- struct mem_cgroup *memcg = folio_memcg(folio);
+ struct mem_cgroup *memcg;
- if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
- memcg = root_mem_cgroup;
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return &root_mem_cgroup->css;
- return &memcg->css;
+ memcg = get_mem_cgroup_from_folio(folio);
+
+ return memcg ? &memcg->css : &root_mem_cgroup->css;
}
/**
--
2.20.1
next prev parent reply other threads:[~2026-02-25 7:51 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-25 7:48 [PATCH v5 00/32] Eliminate Dying Memory Cgroup Qi Zheng
2026-02-25 7:48 ` [PATCH v5 01/32] mm: memcontrol: remove dead code of checking parent memory cgroup Qi Zheng
2026-02-25 7:48 ` [PATCH v5 02/32] mm: workingset: use folio_lruvec() in workingset_refault() Qi Zheng
2026-02-25 7:48 ` [PATCH v5 03/32] mm: rename unlock_page_lruvec_irq and its variants Qi Zheng
2026-02-25 7:48 ` [PATCH v5 04/32] mm: vmscan: prepare for the refactoring the move_folios_to_lru() Qi Zheng
2026-02-25 7:48 ` [PATCH v5 05/32] mm: vmscan: refactor move_folios_to_lru() Qi Zheng
2026-02-25 7:48 ` [PATCH v5 06/32] mm: memcontrol: allocate object cgroup for non-kmem case Qi Zheng
2026-02-25 7:48 ` [PATCH v5 07/32] mm: memcontrol: return root object cgroup for root memory cgroup Qi Zheng
2026-02-25 7:48 ` [PATCH v5 08/32] mm: memcontrol: prevent memory cgroup release in get_mem_cgroup_from_folio() Qi Zheng
2026-02-25 7:48 ` [PATCH v5 09/32] buffer: prevent memory cgroup release in folio_alloc_buffers() Qi Zheng
2026-02-25 7:48 ` Qi Zheng [this message]
2026-02-25 7:48 ` [PATCH v5 11/32] mm: memcontrol: prevent memory cgroup release in count_memcg_folio_events() Qi Zheng
2026-02-25 7:48 ` [PATCH v5 12/32] mm: page_io: prevent memory cgroup release in page_io module Qi Zheng
2026-02-25 7:52 ` [PATCH v5 13/32] mm: migrate: prevent memory cgroup release in folio_migrate_mapping() Qi Zheng
2026-02-25 7:52 ` [PATCH v5 14/32] mm: mglru: prevent memory cgroup release in mglru Qi Zheng
2026-02-25 7:52 ` [PATCH v5 15/32] mm: memcontrol: prevent memory cgroup release in mem_cgroup_swap_full() Qi Zheng
2026-02-25 7:52 ` [PATCH v5 16/32] mm: workingset: prevent memory cgroup release in lru_gen_eviction() Qi Zheng
2026-02-25 7:53 ` [PATCH v5 17/32] mm: thp: prevent memory cgroup release in folio_split_queue_lock{_irqsave}() Qi Zheng
2026-02-25 7:53 ` [PATCH v5 18/32] mm: zswap: prevent memory cgroup release in zswap_compress() Qi Zheng
2026-02-25 7:53 ` [PATCH v5 19/32] mm: workingset: prevent lruvec release in workingset_refault() Qi Zheng
2026-02-25 7:53 ` [PATCH v5 20/32] mm: zswap: prevent lruvec release in zswap_folio_swapin() Qi Zheng
2026-02-25 7:53 ` [PATCH v5 21/32] mm: swap: prevent lruvec release in lru_gen_clear_refs() Qi Zheng
2026-02-25 7:53 ` [PATCH v5 22/32] mm: workingset: prevent lruvec release in workingset_activation() Qi Zheng
2026-02-25 7:53 ` [PATCH v5 23/32] mm: do not open-code lruvec lock Qi Zheng
2026-02-25 7:53 ` [PATCH v5 24/32] mm: memcontrol: prepare for reparenting LRU pages for " Qi Zheng
2026-02-25 7:53 ` [PATCH v5 25/32] mm: vmscan: prepare for reparenting traditional LRU folios Qi Zheng
2026-02-25 7:53 ` [PATCH v5 26/32] mm: vmscan: prepare for reparenting MGLRU folios Qi Zheng
2026-02-25 7:53 ` [PATCH v5 27/32] mm: memcontrol: refactor memcg_reparent_objcgs() Qi Zheng
2026-02-25 7:53 ` [PATCH v5 28/32] mm: workingset: use lruvec_lru_size() to get the number of lru pages Qi Zheng
2026-02-25 7:53 ` [PATCH v5 29/32] mm: memcontrol: prepare for reparenting non-hierarchical stats Qi Zheng
2026-02-25 7:53 ` [PATCH v5 30/32] mm: memcontrol: convert objcg to be per-memcg per-node type Qi Zheng
2026-02-25 9:44 ` [PATCH v5 update " Qi Zheng
2026-02-25 7:53 ` [PATCH v5 31/32] mm: memcontrol: eliminate the problem of dying memory cgroup for LRU folios Qi Zheng
2026-02-25 7:53 ` [PATCH v5 32/32] mm: lru: add VM_WARN_ON_ONCE_FOLIO to lru maintenance helpers Qi Zheng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=645f99bc344575417f67def3744f975596df2793.1772005110.git.zhengqi.arch@bytedance.com \
--to=qi.zheng@linux.dev \
--cc=akpm@linux-foundation.org \
--cc=apais@linux.microsoft.com \
--cc=axelrasmussen@google.com \
--cc=bhe@redhat.com \
--cc=cgroups@vger.kernel.org \
--cc=chenridong@huaweicloud.com \
--cc=david@kernel.org \
--cc=hamzamahfooz@linux.microsoft.com \
--cc=hannes@cmpxchg.org \
--cc=harry.yoo@oracle.com \
--cc=hughd@google.com \
--cc=imran.f.khan@oracle.com \
--cc=kamalesh.babulal@oracle.com \
--cc=lance.yang@linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mhocko@suse.com \
--cc=mkoutny@suse.com \
--cc=muchun.song@linux.dev \
--cc=roman.gushchin@linux.dev \
--cc=shakeel.butt@linux.dev \
--cc=songmuchun@bytedance.com \
--cc=usamaarif642@gmail.com \
--cc=weixugc@google.com \
--cc=yosry.ahmed@linux.dev \
--cc=yuanchu@google.com \
--cc=zhengqi.arch@bytedance.com \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox