From: Qi Zheng <qi.zheng@linux.dev>
To: hannes@cmpxchg.org, hughd@google.com, mhocko@suse.com,
roman.gushchin@linux.dev, shakeel.butt@linux.dev,
muchun.song@linux.dev, david@kernel.org,
lorenzo.stoakes@oracle.com, ziy@nvidia.com, harry.yoo@oracle.com,
imran.f.khan@oracle.com, kamalesh.babulal@oracle.com,
axelrasmussen@google.com, yuanchu@google.com, weixugc@google.com,
chenridong@huaweicloud.com, mkoutny@suse.com,
akpm@linux-foundation.org, hamzamahfooz@linux.microsoft.com,
apais@linux.microsoft.com, lance.yang@linux.dev
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
cgroups@vger.kernel.org, Muchun Song <songmuchun@bytedance.com>,
Qi Zheng <zhengqi.arch@bytedance.com>,
Chen Ridong <chenridong@huawei.com>
Subject: [PATCH v2 03/28] mm: rename unlock_page_lruvec_irq and its variants
Date: Wed, 17 Dec 2025 15:27:27 +0800
Message-ID: <a11ca717ddd52fd83e1fff9942fc49c9c5c5b78c.1765956025.git.zhengqi.arch@bytedance.com>
In-Reply-To: <cover.1765956025.git.zhengqi.arch@bytedance.com>
From: Muchun Song <songmuchun@bytedance.com>
It is inappropriate to use the folio_lruvec_lock() variants in conjunction
with the unlock_page_lruvec() variants, as the names imply the inconsistent
operation of locking a folio while unlocking a page. To rectify this, rename
unlock_page_lruvec{,_irq,_irqrestore} to lruvec_unlock{,_irq,_irqrestore}.
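
For illustration, a caller pairing the lock and unlock helpers now reads
consistently (a minimal sketch of the intended pairing, assuming a folio
already in hand; not code taken from this patch):

	struct lruvec *lruvec;
	unsigned long flags;

	lruvec = folio_lruvec_lock_irqsave(folio, &flags);
	/* operate on the folio's LRU state under lru_lock */
	lruvec_unlock_irqrestore(lruvec, flags);

Both halves of the pair now name the lruvec being locked and unlocked.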
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Chen Ridong <chenridong@huawei.com>
---
 include/linux/memcontrol.h | 10 +++++-----
 mm/compaction.c            | 14 +++++++-------
 mm/huge_memory.c           |  2 +-
 mm/mlock.c                 |  2 +-
 mm/swap.c                  | 12 ++++++------
 mm/vmscan.c                |  4 ++--
 6 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6a48398a1f4e7..288dd6337f80f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1465,17 +1465,17 @@ static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
-static inline void unlock_page_lruvec(struct lruvec *lruvec)
+static inline void lruvec_unlock(struct lruvec *lruvec)
{
spin_unlock(&lruvec->lru_lock);
}
-static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
+static inline void lruvec_unlock_irq(struct lruvec *lruvec)
{
spin_unlock_irq(&lruvec->lru_lock);
}
-static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
+static inline void lruvec_unlock_irqrestore(struct lruvec *lruvec,
unsigned long flags)
{
spin_unlock_irqrestore(&lruvec->lru_lock, flags);
@@ -1497,7 +1497,7 @@ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
if (folio_matches_lruvec(folio, locked_lruvec))
return locked_lruvec;
- unlock_page_lruvec_irq(locked_lruvec);
+ lruvec_unlock_irq(locked_lruvec);
}
return folio_lruvec_lock_irq(folio);
@@ -1511,7 +1511,7 @@ static inline void folio_lruvec_relock_irqsave(struct folio *folio,
if (folio_matches_lruvec(folio, *lruvecp))
return;
- unlock_page_lruvec_irqrestore(*lruvecp, *flags);
+ lruvec_unlock_irqrestore(*lruvecp, *flags);
}
*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
diff --git a/mm/compaction.c b/mm/compaction.c
index 1e8f8eca318c6..c3e338aaa0ffb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -913,7 +913,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
*/
if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
if (locked) {
- unlock_page_lruvec_irqrestore(locked, flags);
+ lruvec_unlock_irqrestore(locked, flags);
locked = NULL;
}
@@ -964,7 +964,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
/* for alloc_contig case */
if (locked) {
- unlock_page_lruvec_irqrestore(locked, flags);
+ lruvec_unlock_irqrestore(locked, flags);
locked = NULL;
}
@@ -1053,7 +1053,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (unlikely(page_has_movable_ops(page)) &&
!PageMovableOpsIsolated(page)) {
if (locked) {
- unlock_page_lruvec_irqrestore(locked, flags);
+ lruvec_unlock_irqrestore(locked, flags);
locked = NULL;
}
@@ -1158,7 +1158,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* If we already hold the lock, we can skip some rechecking */
if (lruvec != locked) {
if (locked)
- unlock_page_lruvec_irqrestore(locked, flags);
+ lruvec_unlock_irqrestore(locked, flags);
compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
locked = lruvec;
@@ -1226,7 +1226,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
isolate_fail_put:
/* Avoid potential deadlock in freeing page under lru_lock */
if (locked) {
- unlock_page_lruvec_irqrestore(locked, flags);
+ lruvec_unlock_irqrestore(locked, flags);
locked = NULL;
}
folio_put(folio);
@@ -1242,7 +1242,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
*/
if (nr_isolated) {
if (locked) {
- unlock_page_lruvec_irqrestore(locked, flags);
+ lruvec_unlock_irqrestore(locked, flags);
locked = NULL;
}
putback_movable_pages(&cc->migratepages);
@@ -1274,7 +1274,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
isolate_abort:
if (locked)
- unlock_page_lruvec_irqrestore(locked, flags);
+ lruvec_unlock_irqrestore(locked, flags);
if (folio) {
folio_set_lru(folio);
folio_put(folio);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40cf59301c21a..12b46215b30c1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3899,7 +3899,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
if (do_lru)
- unlock_page_lruvec(lruvec);
+ lruvec_unlock(lruvec);
if (ci)
swap_cluster_unlock(ci);
diff --git a/mm/mlock.c b/mm/mlock.c
index 2f699c3497a57..66740e16679c3 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -205,7 +205,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
}
if (lruvec)
- unlock_page_lruvec_irq(lruvec);
+ lruvec_unlock_irq(lruvec);
folios_put(fbatch);
}
diff --git a/mm/swap.c b/mm/swap.c
index 2260dcd2775e7..ec0c654e128dc 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -91,7 +91,7 @@ static void page_cache_release(struct folio *folio)
__page_cache_release(folio, &lruvec, &flags);
if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
+ lruvec_unlock_irqrestore(lruvec, flags);
}
void __folio_put(struct folio *folio)
@@ -175,7 +175,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
}
if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
+ lruvec_unlock_irqrestore(lruvec, flags);
folios_put(fbatch);
}
@@ -349,7 +349,7 @@ void folio_activate(struct folio *folio)
lruvec = folio_lruvec_lock_irq(folio);
lru_activate(lruvec, folio);
- unlock_page_lruvec_irq(lruvec);
+ lruvec_unlock_irq(lruvec);
folio_set_lru(folio);
}
#endif
@@ -963,7 +963,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
if (folio_is_zone_device(folio)) {
if (lruvec) {
- unlock_page_lruvec_irqrestore(lruvec, flags);
+ lruvec_unlock_irqrestore(lruvec, flags);
lruvec = NULL;
}
if (folio_ref_sub_and_test(folio, nr_refs))
@@ -977,7 +977,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
/* hugetlb has its own memcg */
if (folio_test_hugetlb(folio)) {
if (lruvec) {
- unlock_page_lruvec_irqrestore(lruvec, flags);
+ lruvec_unlock_irqrestore(lruvec, flags);
lruvec = NULL;
}
free_huge_folio(folio);
@@ -991,7 +991,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
j++;
}
if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
+ lruvec_unlock_irqrestore(lruvec, flags);
if (!j) {
folio_batch_reinit(folios);
return;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 670fe9fae5baa..28d9b3af47130 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1829,7 +1829,7 @@ bool folio_isolate_lru(struct folio *folio)
folio_get(folio);
lruvec = folio_lruvec_lock_irq(folio);
lruvec_del_folio(lruvec, folio);
- unlock_page_lruvec_irq(lruvec);
+ lruvec_unlock_irq(lruvec);
ret = true;
}
@@ -7855,7 +7855,7 @@ void check_move_unevictable_folios(struct folio_batch *fbatch)
if (lruvec) {
__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
- unlock_page_lruvec_irq(lruvec);
+ lruvec_unlock_irq(lruvec);
} else if (pgscanned) {
count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
}
--
2.20.1