From: Qi Zheng <qi.zheng@linux.dev>
To: hannes@cmpxchg.org, hughd@google.com, mhocko@suse.com,
roman.gushchin@linux.dev, shakeel.butt@linux.dev,
muchun.song@linux.dev, david@kernel.org,
lorenzo.stoakes@oracle.com, ziy@nvidia.com, harry.yoo@oracle.com,
yosry.ahmed@linux.dev, imran.f.khan@oracle.com,
kamalesh.babulal@oracle.com, axelrasmussen@google.com,
yuanchu@google.com, weixugc@google.com,
chenridong@huaweicloud.com, mkoutny@suse.com,
akpm@linux-foundation.org, hamzamahfooz@linux.microsoft.com,
apais@linux.microsoft.com, lance.yang@linux.dev, bhe@redhat.com
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
cgroups@vger.kernel.org, Qi Zheng <zhengqi.arch@bytedance.com>
Subject: [PATCH v4 23/31] mm: do not open-code lruvec lock
Date: Thu, 5 Feb 2026 17:01:42 +0800 [thread overview]
Message-ID: <679b1c28f5ee8f40911195d7984b287c5da39e05.1770279888.git.zhengqi.arch@bytedance.com> (raw)
In-Reply-To: <cover.1770279888.git.zhengqi.arch@bytedance.com>
From: Qi Zheng <zhengqi.arch@bytedance.com>
Now we have lruvec_unlock(), lruvec_unlock_irq() and
lruvec_unlock_irqrestore(), but not the paired lruvec_lock(),
lruvec_lock_irq() and lruvec_lock_irqsave().
There is currently no use case for lruvec_lock_irqsave(), so only
introduce lruvec_lock_irq(), and change all open-coded places to use
this helper function. This looks cleaner and prepares for reparenting
LRU pages, preventing users from missing RCU lock calls due to
open-coding the lruvec lock.
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Muchun Song <muchun.song@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
---
include/linux/memcontrol.h | 5 +++++
mm/vmscan.c | 38 +++++++++++++++++++-------------------
2 files changed, 24 insertions(+), 19 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f1556759d0d3f..4b6f20dc694ba 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1499,6 +1499,11 @@ static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
+static inline void lruvec_lock_irq(struct lruvec *lruvec)
+{
+ spin_lock_irq(&lruvec->lru_lock);
+}
+
static inline void lruvec_unlock(struct lruvec *lruvec)
{
spin_unlock(&lruvec->lru_lock);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6a7eacd39bc5f..f904231e33ec0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2003,7 +2003,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
lru_add_drain();
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
&nr_scanned, sc, lru);
@@ -2015,7 +2015,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
__count_vm_events(PGSCAN_ANON + file, nr_scanned);
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
if (nr_taken == 0)
return 0;
@@ -2034,7 +2034,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
nr_scanned - nr_reclaimed);
@@ -2113,7 +2113,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
lru_add_drain();
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
&nr_scanned, sc, lru);
@@ -2124,7 +2124,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
__count_vm_events(PGREFILL, nr_scanned);
count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
while (!list_empty(&l_hold)) {
struct folio *folio;
@@ -2180,7 +2180,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
lru_note_cost_unlock_irq(lruvec, file, 0, nr_rotated);
trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
nr_deactivate, nr_rotated, sc->priority, file);
@@ -3801,9 +3801,9 @@ static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
}
if (walk->batched) {
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
reset_batch_size(walk);
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
}
cond_resched();
@@ -3962,7 +3962,7 @@ static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness
if (seq < READ_ONCE(lrugen->max_seq))
return false;
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
@@ -3977,7 +3977,7 @@ static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness
if (inc_min_seq(lruvec, type, swappiness))
continue;
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
cond_resched();
goto restart;
}
@@ -4012,7 +4012,7 @@ static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness
/* make sure preceding modifications appear */
smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
unlock:
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
return success;
}
@@ -4708,7 +4708,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
scanned = isolate_folios(nr_to_scan, lruvec, sc, swappiness, &type, &list);
@@ -4717,7 +4717,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq)
scanned = 0;
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
if (list_empty(&list))
return scanned;
@@ -4755,9 +4755,9 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
walk = current->reclaim_state->mm_walk;
if (walk && walk->batched) {
walk->lruvec = lruvec;
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
reset_batch_size(walk);
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
}
mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
@@ -5195,7 +5195,7 @@ static void lru_gen_change_state(bool enabled)
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
VM_WARN_ON_ONCE(!state_is_valid(lruvec));
@@ -5203,12 +5203,12 @@ static void lru_gen_change_state(bool enabled)
lruvec->lrugen.enabled = enabled;
while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
cond_resched();
- spin_lock_irq(&lruvec->lru_lock);
+ lruvec_lock_irq(lruvec);
}
- spin_unlock_irq(&lruvec->lru_lock);
+ lruvec_unlock_irq(lruvec);
}
cond_resched();
--
2.20.1
next prev parent reply other threads:[~2026-02-05 9:04 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-05 8:54 [PATCH v4 00/31] Eliminate Dying Memory Cgroup Qi Zheng
2026-02-05 8:54 ` [PATCH v4 01/31] mm: memcontrol: remove dead code of checking parent memory cgroup Qi Zheng
2026-02-05 8:54 ` [PATCH v4 02/31] mm: workingset: use folio_lruvec() in workingset_refault() Qi Zheng
2026-02-05 8:54 ` [PATCH v4 03/31] mm: rename unlock_page_lruvec_irq and its variants Qi Zheng
2026-02-05 8:54 ` [PATCH v4 04/31] mm: vmscan: prepare for the refactoring the move_folios_to_lru() Qi Zheng
2026-02-05 8:54 ` [PATCH v4 05/31] mm: vmscan: refactor move_folios_to_lru() Qi Zheng
2026-02-05 8:54 ` [PATCH v4 06/31] mm: memcontrol: allocate object cgroup for non-kmem case Qi Zheng
2026-02-05 8:54 ` [PATCH v4 07/31] mm: memcontrol: return root object cgroup for root memory cgroup Qi Zheng
2026-02-05 9:01 ` [PATCH v4 08/31] mm: memcontrol: prevent memory cgroup release in get_mem_cgroup_from_folio() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 09/31] buffer: prevent memory cgroup release in folio_alloc_buffers() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 10/31] writeback: prevent memory cgroup release in writeback module Qi Zheng
2026-02-05 9:01 ` [PATCH v4 11/31] mm: memcontrol: prevent memory cgroup release in count_memcg_folio_events() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 12/31] mm: page_io: prevent memory cgroup release in page_io module Qi Zheng
2026-02-05 9:01 ` [PATCH v4 13/31] mm: migrate: prevent memory cgroup release in folio_migrate_mapping() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 14/31] mm: mglru: prevent memory cgroup release in mglru Qi Zheng
2026-02-05 9:01 ` [PATCH v4 15/31] mm: memcontrol: prevent memory cgroup release in mem_cgroup_swap_full() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 16/31] mm: workingset: prevent memory cgroup release in lru_gen_eviction() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 17/31] mm: thp: prevent memory cgroup release in folio_split_queue_lock{_irqsave}() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 18/31] mm: zswap: prevent memory cgroup release in zswap_compress() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 19/31] mm: workingset: prevent lruvec release in workingset_refault() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 20/31] mm: zswap: prevent lruvec release in zswap_folio_swapin() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 21/31] mm: swap: prevent lruvec release in lru_gen_clear_refs() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 22/31] mm: workingset: prevent lruvec release in workingset_activation() Qi Zheng
2026-02-05 9:01 ` Qi Zheng [this message]
2026-02-05 9:01 ` [PATCH v4 24/31] mm: memcontrol: prepare for reparenting LRU pages for lruvec lock Qi Zheng
2026-02-05 15:02 ` kernel test robot
2026-02-05 15:02 ` kernel test robot
2026-02-06 6:13 ` Qi Zheng
2026-02-06 23:34 ` Shakeel Butt
2026-02-05 9:01 ` [PATCH v4 25/31] mm: vmscan: prepare for reparenting traditional LRU folios Qi Zheng
2026-02-07 1:28 ` Shakeel Butt
2026-02-05 9:01 ` [PATCH v4 26/31] mm: vmscan: prepare for reparenting MGLRU folios Qi Zheng
2026-02-12 8:46 ` Harry Yoo
2026-02-15 7:28 ` Qi Zheng
2026-02-05 9:01 ` [PATCH v4 27/31] mm: memcontrol: refactor memcg_reparent_objcgs() Qi Zheng
2026-02-05 9:01 ` [PATCH v4 28/31] mm: workingset: use lruvec_lru_size() to get the number of lru pages Qi Zheng
2026-02-07 1:48 ` Shakeel Butt
2026-02-07 3:59 ` Muchun Song
2026-02-05 9:01 ` [PATCH v4 29/31] mm: memcontrol: prepare for reparenting non-hierarchical stats Qi Zheng
2026-02-07 2:19 ` Shakeel Butt
2026-02-10 6:47 ` Qi Zheng
2026-02-11 0:38 ` Shakeel Butt
2026-02-05 9:01 ` [PATCH v4 30/31] mm: memcontrol: eliminate the problem of dying memory cgroup for LRU folios Qi Zheng
2026-02-07 19:59 ` Usama Arif
2026-02-07 22:25 ` Shakeel Butt
2026-02-09 3:49 ` Qi Zheng
2026-02-09 17:53 ` Shakeel Butt
2026-02-10 3:11 ` Qi Zheng
2026-02-05 9:01 ` [PATCH v4 31/31] mm: lru: add VM_WARN_ON_ONCE_FOLIO to lru maintenance helpers Qi Zheng
2026-02-07 22:26 ` Shakeel Butt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=679b1c28f5ee8f40911195d7984b287c5da39e05.1770279888.git.zhengqi.arch@bytedance.com \
--to=qi.zheng@linux.dev \
--cc=akpm@linux-foundation.org \
--cc=apais@linux.microsoft.com \
--cc=axelrasmussen@google.com \
--cc=bhe@redhat.com \
--cc=cgroups@vger.kernel.org \
--cc=chenridong@huaweicloud.com \
--cc=david@kernel.org \
--cc=hamzamahfooz@linux.microsoft.com \
--cc=hannes@cmpxchg.org \
--cc=harry.yoo@oracle.com \
--cc=hughd@google.com \
--cc=imran.f.khan@oracle.com \
--cc=kamalesh.babulal@oracle.com \
--cc=lance.yang@linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mhocko@suse.com \
--cc=mkoutny@suse.com \
--cc=muchun.song@linux.dev \
--cc=roman.gushchin@linux.dev \
--cc=shakeel.butt@linux.dev \
--cc=weixugc@google.com \
--cc=yosry.ahmed@linux.dev \
--cc=yuanchu@google.com \
--cc=zhengqi.arch@bytedance.com \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox