From: Huan Yang <link@vivo.com>
To: Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@redhat.com>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
Rik van Riel <riel@surriel.com>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Vlastimil Babka <vbabka@suse.cz>,
Harry Yoo <harry.yoo@oracle.com>, Xu Xin <xu.xin16@zte.com.cn>,
Chengming Zhou <chengming.zhou@linux.dev>,
Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>, Zi Yan <ziy@nvidia.com>,
Matthew Brost <matthew.brost@intel.com>,
Joshua Hahn <joshua.hahnjy@gmail.com>,
Rakie Kim <rakie.kim@sk.com>, Byungchul Park <byungchul@sk.com>,
Gregory Price <gourry@gourry.net>,
Ying Huang <ying.huang@linux.alibaba.com>,
Alistair Popple <apopple@nvidia.com>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Huan Yang <link@vivo.com>, Christian Brauner <brauner@kernel.org>,
Usama Arif <usamaarif642@gmail.com>, Yu Zhao <yuzhao@google.com>,
Baolin Wang <baolin.wang@linux.alibaba.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 3/9] mm/rmap: simplify rmap_walk invocation
Date: Thu, 24 Jul 2025 16:44:31 +0800
Message-ID: <20250724084441.380404-4-link@vivo.com>
In-Reply-To: <20250724084441.380404-1-link@vivo.com>
Currently the rmap walk is split across two entry points, rmap_walk() and
rmap_walk_locked(), whose implementations are almost identical: the only
difference is whether the caller already holds the relevant rmap lock.

Fold them into a single function by moving the locked state into struct
rmap_walk_control and dropping rmap_walk_locked(). Callers that used to
choose an entry point based on TTU_RMAP_LOCKED or RMP_LOCKED now simply
set rwc.locked. rmap_walk() asserts that a pre-locked caller neither
passes a KSM folio (still unsupported) nor sets try_lock.

No functional change intended.
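For instance, in try_to_unmap() the caller-side switch collapses into the
control-structure initializer (a sketch of the before/after pattern,
condensed from the hunks below):

	/* before: the caller picks an entry point */
	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);

	/* after: the lock state travels in the control structure */
	struct rmap_walk_control rwc = {
		.rmap_one  = try_to_unmap_one,
		.arg       = (void *)flags,
		.locked    = flags & TTU_RMAP_LOCKED,
		.done      = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};
	rmap_walk(folio, &rwc);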
Signed-off-by: Huan Yang <link@vivo.com>
---
include/linux/rmap.h | 3 ++-
mm/migrate.c | 6 ++----
mm/rmap.c | 43 ++++++++++++++++---------------------------
3 files changed, 20 insertions(+), 32 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 45904ff413ab..f0d17c971a20 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -996,6 +996,7 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
* arg: passed to rmap_one() and invalid_vma()
* try_lock: bail out if the rmap lock is contended
* contended: indicate the rmap traversal bailed out due to lock contention
+ * locked: caller already holds the relevant rmap lock
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
* anon_lock: for getting anon_lock by optimized way rather than default
@@ -1005,6 +1006,7 @@ struct rmap_walk_control {
void *arg;
bool try_lock;
bool contended;
+ bool locked;
/*
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
@@ -1018,7 +1020,6 @@ struct rmap_walk_control {
};
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
struct rmap_walk_control *rwc);
diff --git a/mm/migrate.c b/mm/migrate.c
index 8cf0f9c9599d..a5a49af7857a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -355,15 +355,13 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte,
+ .locked = flags & RMP_LOCKED,
.arg = &rmap_walk_arg,
};
VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
- if (flags & RMP_LOCKED)
- rmap_walk_locked(dst, &rwc);
- else
- rmap_walk(dst, &rwc);
+ rmap_walk(dst, &rwc);
}
/*
diff --git a/mm/rmap.c b/mm/rmap.c
index a312cae16bb5..bae9f79c7dc9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2253,14 +2253,12 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags)
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
.arg = (void *)flags,
+ .locked = flags & TTU_RMAP_LOCKED,
.done = folio_not_mapped,
.anon_lock = folio_lock_anon_vma_read,
};
- if (flags & TTU_RMAP_LOCKED)
- rmap_walk_locked(folio, &rwc);
- else
- rmap_walk(folio, &rwc);
+ rmap_walk(folio, &rwc);
}
/*
@@ -2581,6 +2579,7 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
.rmap_one = try_to_migrate_one,
.arg = (void *)flags,
.done = folio_not_mapped,
+ .locked = flags & TTU_RMAP_LOCKED,
.anon_lock = folio_lock_anon_vma_read,
};
@@ -2607,10 +2606,7 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
if (!folio_test_ksm(folio) && folio_test_anon(folio))
rwc.invalid_vma = invalid_migration_vma;
- if (flags & TTU_RMAP_LOCKED)
- rmap_walk_locked(folio, &rwc);
- else
- rmap_walk(folio, &rwc);
+ rmap_walk(folio, &rwc);
}
#ifdef CONFIG_DEVICE_PRIVATE
@@ -2795,17 +2791,16 @@ static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
* rmap method
* @folio: the folio to be handled
* @rwc: control variable according to each walk type
- * @locked: caller holds relevant rmap lock
*
* Find all the mappings of a folio using the mapping pointer and the vma
* chains contained in the anon_vma struct it points to.
*/
-static void rmap_walk_anon(struct folio *folio,
- struct rmap_walk_control *rwc, bool locked)
+static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
pgoff_t pgoff_start, pgoff_end;
struct anon_vma_chain *avc;
+ bool locked = rwc->locked;
if (locked) {
anon_vma = folio_anon_vma(folio);
@@ -2908,14 +2903,14 @@ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
* rmap_walk_file - do something to file page using the object-based rmap method
* @folio: the folio to be handled
* @rwc: control variable according to each walk type
- * @locked: caller holds relevant rmap lock
*
* Find all the mappings of a folio using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
*/
-static void rmap_walk_file(struct folio *folio,
- struct rmap_walk_control *rwc, bool locked)
+static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc)
{
+ bool locked = rwc->locked;
+
/*
* The folio lock not only makes sure that folio->mapping cannot
* suddenly be NULLified by truncation, it makes sure that the structure
@@ -2933,23 +2928,17 @@ static void rmap_walk_file(struct folio *folio,
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
+ /* KSM folios do not support pre-locked walks for now */
+ VM_BUG_ON_FOLIO(rwc->locked && folio_test_ksm(folio), folio);
+ /* a caller that already holds the lock must not also request try_lock */
+ VM_BUG_ON(rwc->locked && rwc->try_lock);
+
if (unlikely(folio_test_ksm(folio)))
rmap_walk_ksm(folio, rwc);
else if (folio_test_anon(folio))
- rmap_walk_anon(folio, rwc, false);
- else
- rmap_walk_file(folio, rwc, false);
-}
-
-/* Like rmap_walk, but caller holds relevant rmap lock */
-void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
-{
- /* no ksm support for now */
- VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
- if (folio_test_anon(folio))
- rmap_walk_anon(folio, rwc, true);
+ rmap_walk_anon(folio, rwc);
else
- rmap_walk_file(folio, rwc, true);
+ rmap_walk_file(folio, rwc);
}
#ifdef CONFIG_HUGETLB_PAGE
--
2.34.1