From: Muchun Song <songmuchun@bytedance.com>
To: hannes@cmpxchg.org, mhocko@kernel.org, roman.gushchin@linux.dev,
shakeel.butt@linux.dev, muchun.song@linux.dev,
akpm@linux-foundation.org, david@fromorbit.com,
zhengqi.arch@bytedance.com, yosry.ahmed@linux.dev,
nphamcs@gmail.com, chengming.zhou@linux.dev
Cc: linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
linux-mm@kvack.org, hamzamahfooz@linux.microsoft.com,
apais@linux.microsoft.com, Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH RFC 06/28] mm: thp: introduce folio_split_queue_lock and its variants
Date: Tue, 15 Apr 2025 10:45:10 +0800 [thread overview]
Message-ID: <20250415024532.26632-7-songmuchun@bytedance.com> (raw)
In-Reply-To: <20250415024532.26632-1-songmuchun@bytedance.com>
In the upcoming memcg removal work, the binding between a folio and its memcg
may change, making the split queue lock embedded in the memcg unstable while held.
A new approach is required to reparent the split queue to the parent memcg. This
patch begins by introducing a unified way to acquire the split queue lock, in
preparation for that future work.
It's a code-only refactoring with no functional changes.
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
include/linux/memcontrol.h | 10 ++++
mm/huge_memory.c | 100 +++++++++++++++++++++++++++----------
2 files changed, 83 insertions(+), 27 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index a045819bcf40..bb4f203733f3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1639,6 +1639,11 @@ int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return shrinker->id;
+}
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1652,6 +1657,11 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id)
{
}
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return -1;
+}
#endif
#ifdef CONFIG_MEMCG
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a81e89987ca2..70820fa75c1f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1059,26 +1059,75 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
#ifdef CONFIG_MEMCG
static inline
-struct deferred_split *get_deferred_split_queue(struct folio *folio)
+struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+ struct deferred_split *queue)
+{
+ if (mem_cgroup_disabled())
+ return NULL;
+ if (&NODE_DATA(folio_nid(folio))->deferred_split_queue == queue)
+ return NULL;
+ return container_of(queue, struct mem_cgroup, deferred_split_queue);
+}
+
+static inline struct deferred_split *folio_memcg_split_queue(struct folio *folio)
{
struct mem_cgroup *memcg = folio_memcg(folio);
- struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
- if (memcg)
- return &memcg->deferred_split_queue;
- else
- return &pgdat->deferred_split_queue;
+ return memcg ? &memcg->deferred_split_queue : NULL;
}
#else
static inline
-struct deferred_split *get_deferred_split_queue(struct folio *folio)
+struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+ struct deferred_split *queue)
{
- struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
+ return NULL;
+}
- return &pgdat->deferred_split_queue;
+static inline struct deferred_split *folio_memcg_split_queue(struct folio *folio)
+{
+ return NULL;
}
#endif
+static struct deferred_split *folio_split_queue(struct folio *folio)
+{
+ struct deferred_split *queue = folio_memcg_split_queue(folio);
+
+ return queue ? : &NODE_DATA(folio_nid(folio))->deferred_split_queue;
+}
+
+static struct deferred_split *folio_split_queue_lock(struct folio *folio)
+{
+ struct deferred_split *queue;
+
+ queue = folio_split_queue(folio);
+ spin_lock(&queue->split_queue_lock);
+
+ return queue;
+}
+
+static struct deferred_split *
+folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
+{
+ struct deferred_split *queue;
+
+ queue = folio_split_queue(folio);
+ spin_lock_irqsave(&queue->split_queue_lock, *flags);
+
+ return queue;
+}
+
+static inline void split_queue_unlock(struct deferred_split *queue)
+{
+ spin_unlock(&queue->split_queue_lock);
+}
+
+static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
+ unsigned long flags)
+{
+ spin_unlock_irqrestore(&queue->split_queue_lock, flags);
+}
+
static inline bool is_transparent_hugepage(const struct folio *folio)
{
if (!folio_test_large(folio))
@@ -3723,7 +3772,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
struct page *split_at, struct page *lock_at,
struct list_head *list, bool uniform_split)
{
- struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+ struct deferred_split *ds_queue;
XA_STATE(xas, &folio->mapping->i_pages, folio->index);
bool is_anon = folio_test_anon(folio);
struct address_space *mapping = NULL;
@@ -3857,7 +3906,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
}
/* Prevent deferred_split_scan() touching ->_refcount */
- spin_lock(&ds_queue->split_queue_lock);
+ ds_queue = folio_split_queue_lock(folio);
if (folio_ref_freeze(folio, 1 + extra_pins)) {
if (folio_order(folio) > 1 &&
!list_empty(&folio->_deferred_list)) {
@@ -3875,7 +3924,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
*/
list_del_init(&folio->_deferred_list);
}
- spin_unlock(&ds_queue->split_queue_lock);
+ split_queue_unlock(ds_queue);
if (mapping) {
int nr = folio_nr_pages(folio);
@@ -3896,7 +3945,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
split_at, lock_at, list, end, &xas, mapping,
uniform_split);
} else {
- spin_unlock(&ds_queue->split_queue_lock);
+ split_queue_unlock(ds_queue);
fail:
if (mapping)
xas_unlock(&xas);
@@ -4050,8 +4099,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
WARN_ON_ONCE(folio_ref_count(folio));
WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
- ds_queue = get_deferred_split_queue(folio);
- spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
if (!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
if (folio_test_partially_mapped(folio)) {
@@ -4062,7 +4110,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
list_del_init(&folio->_deferred_list);
unqueued = true;
}
- spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+ split_queue_unlock_irqrestore(ds_queue, flags);
return unqueued; /* useful for debug warnings */
}
@@ -4070,10 +4118,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
/* partially_mapped=false won't clear PG_partially_mapped folio flag */
void deferred_split_folio(struct folio *folio, bool partially_mapped)
{
- struct deferred_split *ds_queue = get_deferred_split_queue(folio);
-#ifdef CONFIG_MEMCG
- struct mem_cgroup *memcg = folio_memcg(folio);
-#endif
+ struct deferred_split *ds_queue;
unsigned long flags;
/*
@@ -4096,7 +4141,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
if (folio_test_swapcache(folio))
return;
- spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
if (partially_mapped) {
if (!folio_test_partially_mapped(folio)) {
folio_set_partially_mapped(folio);
@@ -4111,15 +4156,16 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
}
if (list_empty(&folio->_deferred_list)) {
+ struct mem_cgroup *memcg;
+
+ memcg = folio_split_queue_memcg(folio, ds_queue);
list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
ds_queue->split_queue_len++;
-#ifdef CONFIG_MEMCG
if (memcg)
set_shrinker_bit(memcg, folio_nid(folio),
- deferred_split_shrinker->id);
-#endif
+ shrinker_id(deferred_split_shrinker));
}
- spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+ split_queue_unlock_irqrestore(ds_queue, flags);
}
static unsigned long deferred_split_count(struct shrinker *shrink,
@@ -4202,7 +4248,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
if (!--sc->nr_to_scan)
break;
}
- spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+ split_queue_unlock_irqrestore(ds_queue, flags);
list_for_each_entry_safe(folio, next, &list, _deferred_list) {
bool did_split = false;
@@ -4251,7 +4297,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
list_splice_tail(&list, &ds_queue->split_queue);
ds_queue->split_queue_len -= removed;
- spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+ split_queue_unlock_irqrestore(ds_queue, flags);
if (prev)
folio_put(prev);
--
2.20.1
next prev parent reply other threads:[~2025-04-15 2:46 UTC|newest]
Thread overview: 69+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-04-15 2:45 [PATCH RFC 00/28] Eliminate Dying Memory Cgroup Muchun Song
2025-04-15 2:45 ` [PATCH RFC 01/28] mm: memcontrol: remove dead code of checking parent memory cgroup Muchun Song
2025-04-17 14:35 ` Johannes Weiner
2025-04-15 2:45 ` [PATCH RFC 02/28] mm: memcontrol: use folio_memcg_charged() to avoid potential rcu lock holding Muchun Song
2025-04-17 14:48 ` Johannes Weiner
2025-04-18 2:38 ` Muchun Song
2025-04-15 2:45 ` [PATCH RFC 03/28] mm: workingset: use folio_lruvec() in workingset_refault() Muchun Song
2025-04-17 14:52 ` Johannes Weiner
2025-04-15 2:45 ` [PATCH RFC 04/28] mm: rename unlock_page_lruvec_irq and its variants Muchun Song
2025-04-17 14:53 ` Johannes Weiner
2025-04-15 2:45 ` [PATCH RFC 05/28] mm: thp: replace folio_memcg() with folio_memcg_charged() Muchun Song
2025-04-17 14:54 ` Johannes Weiner
2025-04-15 2:45 ` Muchun Song [this message]
2025-04-17 14:58 ` [PATCH RFC 06/28] mm: thp: introduce folio_split_queue_lock and its variants Johannes Weiner
2025-04-18 19:50 ` Johannes Weiner
2025-04-19 14:20 ` Muchun Song
2025-04-15 2:45 ` [PATCH RFC 07/28] mm: thp: use folio_batch to handle THP splitting in deferred_split_scan() Muchun Song
2025-04-30 14:37 ` Johannes Weiner
2025-05-06 6:44 ` Hugh Dickins
2025-05-06 21:44 ` Hugh Dickins
2025-05-07 3:30 ` Muchun Song
2025-04-15 2:45 ` [PATCH RFC 08/28] mm: vmscan: refactor move_folios_to_lru() Muchun Song
2025-04-30 14:49 ` Johannes Weiner
2025-04-15 2:45 ` [PATCH RFC 09/28] mm: memcontrol: allocate object cgroup for non-kmem case Muchun Song
2025-04-15 2:45 ` [PATCH RFC 10/28] mm: memcontrol: return root object cgroup for root memory cgroup Muchun Song
2025-06-28 3:09 ` Chen Ridong
2025-06-30 7:16 ` Muchun Song
2025-04-15 2:45 ` [PATCH RFC 11/28] mm: memcontrol: prevent memory cgroup release in get_mem_cgroup_from_folio() Muchun Song
2025-04-15 2:45 ` [PATCH RFC 12/28] buffer: prevent memory cgroup release in folio_alloc_buffers() Muchun Song
2025-04-15 2:45 ` [PATCH RFC 13/28] writeback: prevent memory cgroup release in writeback module Muchun Song
2025-04-15 2:45 ` [PATCH RFC 14/28] mm: memcontrol: prevent memory cgroup release in count_memcg_folio_events() Muchun Song
2025-04-15 2:45 ` [PATCH RFC 15/28] mm: page_io: prevent memory cgroup release in page_io module Muchun Song
2025-04-15 2:45 ` [PATCH RFC 16/28] mm: migrate: prevent memory cgroup release in folio_migrate_mapping() Muchun Song
2025-04-15 2:45 ` [PATCH RFC 17/28] mm: mglru: prevent memory cgroup release in mglru Muchun Song
2025-04-15 2:45 ` [PATCH RFC 18/28] mm: memcontrol: prevent memory cgroup release in mem_cgroup_swap_full() Muchun Song
2025-04-15 2:45 ` [PATCH RFC 19/28] mm: workingset: prevent memory cgroup release in lru_gen_eviction() Muchun Song
2025-04-15 2:45 ` [PATCH RFC 20/28] mm: workingset: prevent lruvec release in workingset_refault() Muchun Song
2025-04-15 2:45 ` [PATCH RFC 21/28] mm: zswap: prevent lruvec release in zswap_folio_swapin() Muchun Song
2025-04-17 17:39 ` Nhat Pham
2025-04-18 2:36 ` Chengming Zhou
2025-04-15 2:45 ` [PATCH RFC 22/28] mm: swap: prevent lruvec release in swap module Muchun Song
2025-04-15 2:45 ` [PATCH RFC 23/28] mm: workingset: prevent lruvec release in workingset_activation() Muchun Song
2025-04-15 2:45 ` [PATCH RFC 24/28] mm: memcontrol: prepare for reparenting LRU pages for lruvec lock Muchun Song
2025-04-15 2:45 ` [PATCH RFC 25/28] mm: thp: prepare for reparenting LRU pages for split queue lock Muchun Song
2025-04-15 2:45 ` [PATCH RFC 26/28] mm: memcontrol: introduce memcg_reparent_ops Muchun Song
2025-06-30 12:47 ` Harry Yoo
2025-07-01 22:12 ` Harry Yoo
2025-07-07 9:29 ` [External] " Muchun Song
2025-07-09 0:14 ` Harry Yoo
2025-04-15 2:45 ` [PATCH RFC 27/28] mm: memcontrol: eliminate the problem of dying memory cgroup for LRU folios Muchun Song
2025-05-20 11:27 ` Harry Yoo
2025-05-22 2:31 ` Muchun Song
2025-05-23 1:24 ` Harry Yoo
2025-04-15 2:45 ` [PATCH RFC 28/28] mm: lru: add VM_WARN_ON_ONCE_FOLIO to lru maintenance helpers Muchun Song
2025-04-15 2:53 ` [PATCH RFC 00/28] Eliminate Dying Memory Cgroup Muchun Song
2025-04-15 6:19 ` Kairui Song
2025-04-15 8:01 ` Muchun Song
2025-04-17 18:22 ` Kairui Song
2025-04-17 19:04 ` Johannes Weiner
2025-06-27 8:50 ` Chen Ridong
2025-04-17 21:45 ` Roman Gushchin
2025-04-28 3:43 ` Kairui Song
2025-06-27 9:02 ` Chen Ridong
2025-06-27 18:54 ` Kairui Song
2025-06-27 19:14 ` Shakeel Butt
2025-06-28 9:21 ` Chen Ridong
2025-04-22 14:20 ` Yosry Ahmed
2025-05-23 1:23 ` Harry Yoo
2025-05-23 2:39 ` Muchun Song
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250415024532.26632-7-songmuchun@bytedance.com \
--to=songmuchun@bytedance.com \
--cc=akpm@linux-foundation.org \
--cc=apais@linux.microsoft.com \
--cc=cgroups@vger.kernel.org \
--cc=chengming.zhou@linux.dev \
--cc=david@fromorbit.com \
--cc=hamzamahfooz@linux.microsoft.com \
--cc=hannes@cmpxchg.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mhocko@kernel.org \
--cc=muchun.song@linux.dev \
--cc=nphamcs@gmail.com \
--cc=roman.gushchin@linux.dev \
--cc=shakeel.butt@linux.dev \
--cc=yosry.ahmed@linux.dev \
--cc=zhengqi.arch@bytedance.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox