From: Ye Liu <ye.liu@linux.dev>
To: Trond Myklebust <trondmy@kernel.org>,
Anna Schumaker <anna@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
Vlastimil Babka <vbabka@kernel.org>,
David Hildenbrand <david@kernel.org>,
Lorenzo Stoakes <ljs@kernel.org>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Chris Li <chrisl@kernel.org>, Kairui Song <kasong@tencent.com>
Cc: Ye Liu <liuye@kylinos.cn>, Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>,
Brendan Jackman <jackmanb@google.com>,
Johannes Weiner <hannes@cmpxchg.org>, Zi Yan <ziy@nvidia.com>,
Jason Gunthorpe <jgg@ziepe.ca>,
John Hubbard <jhubbard@nvidia.com>, Peter Xu <peterx@redhat.com>,
Baolin Wang <baolin.wang@linux.alibaba.com>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Nico Pache <npache@redhat.com>,
Ryan Roberts <ryan.roberts@arm.com>, Dev Jain <dev.jain@arm.com>,
Barry Song <baohua@kernel.org>, Lance Yang <lance.yang@linux.dev>,
Matthew Brost <matthew.brost@intel.com>,
Joshua Hahn <joshua.hahnjy@gmail.com>,
Rakie Kim <rakie.kim@sk.com>, Byungchul Park <byungchul@sk.com>,
Gregory Price <gourry@gourry.net>,
Ying Huang <ying.huang@linux.alibaba.com>,
Alistair Popple <apopple@nvidia.com>,
Kemeng Shi <shikemeng@huaweicloud.com>,
Nhat Pham <nphamcs@gmail.com>, Baoquan He <bhe@redhat.com>,
Youngjun Park <youngjun.park@lge.com>,
linux-nfs@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, linux-fsdevel@vger.kernel.org
Subject: [PATCH 1/4] mm/vmstat: use node_stat_add_folio/sub_folio for folio_nr_pages operations
Date: Tue, 14 Apr 2026 17:15:18 +0800 [thread overview]
Message-ID: <20260414091527.2970844-2-ye.liu@linux.dev> (raw)
In-Reply-To: <20260414091527.2970844-1-ye.liu@linux.dev>
From: Ye Liu <liuye@kylinos.cn>
Replace node_stat_mod_folio() calls that pass folio_nr_pages(folio) or
-folio_nr_pages(folio) as the third argument with the more concise
node_stat_add_folio() and node_stat_sub_folio() functions, respectively.
This makes the code more readable and reduces the number of arguments
passed to these functions.
Signed-off-by: Ye Liu <liuye@kylinos.cn>
---
fs/nfs/internal.h | 2 +-
fs/nfs/write.c | 2 +-
mm/compaction.c | 5 ++---
mm/gup.c | 5 ++---
mm/khugepaged.c | 10 ++++------
mm/mempolicy.c | 5 ++---
mm/migrate.c | 12 +++++-------
mm/page-writeback.c | 4 ++--
mm/swap_state.c | 4 ++--
9 files changed, 21 insertions(+), 28 deletions(-)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index fc5456377160..f5c52a2d2a1f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -858,7 +858,7 @@ static inline void nfs_folio_mark_unstable(struct folio *folio,
/* This page is really still in write-back - just that the
* writeback is happening on the server now.
*/
- node_stat_mod_folio(folio, NR_WRITEBACK, nr);
+ node_stat_add_folio(folio, NR_WRITEBACK);
bdi_wb_stat_mod(inode, WB_WRITEBACK, nr);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index cc02b57de3c7..a8700824a61b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -871,7 +871,7 @@ static void nfs_folio_clear_commit(struct folio *folio)
if (folio) {
long nr = folio_nr_pages(folio);
- node_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+ node_stat_sub_folio(folio, NR_WRITEBACK);
bdi_wb_stat_mod(folio->mapping->host, WB_WRITEBACK, -nr);
}
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 3648ce22c807..d7ce622aeed1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1215,9 +1215,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* Successfully isolated */
lruvec_del_folio(lruvec, folio);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
isolate_success:
list_add(&folio->lru, &cc->migratepages);
diff --git a/mm/gup.c b/mm/gup.c
index ad9ded39609c..2cb2efa20bff 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2304,9 +2304,8 @@ static unsigned long collect_longterm_unpinnable_folios(
continue;
list_add_tail(&folio->lru, movable_folio_list);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
}
return collected;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b8452dbdb043..f662de753305 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -502,9 +502,8 @@ void __khugepaged_exit(struct mm_struct *mm)
static void release_pte_folio(struct folio *folio)
{
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- -folio_nr_pages(folio));
+ node_stat_sub_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
folio_unlock(folio);
folio_putback_lru(folio);
}
@@ -650,9 +649,8 @@ static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
result = SCAN_DEL_PAGE_LRU;
goto out;
}
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4e4421b22b59..1c413f66b35f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1259,9 +1259,8 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
if ((flags & MPOL_MF_MOVE_ALL) || !folio_maybe_mapped_shared(folio)) {
if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, foliolist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
} else {
/*
* Non-movable folio may reach here. And, there may be
diff --git a/mm/migrate.c b/mm/migrate.c
index 8a64291ab5b4..dc8cfee37a70 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -268,8 +268,8 @@ void putback_movable_pages(struct list_head *l)
if (unlikely(page_has_movable_ops(&folio->page))) {
putback_movable_ops_page(&folio->page);
} else {
- node_stat_mod_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio), -folio_nr_pages(folio));
+ node_stat_sub_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
folio_putback_lru(folio);
}
}
@@ -2272,9 +2272,8 @@ static int __add_folio_for_migration(struct folio *folio, int node,
return 1;
} else if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, pagelist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
return 1;
}
return -EBUSY;
@@ -2726,8 +2725,7 @@ int migrate_misplaced_folio_prepare(struct folio *folio,
if (!folio_isolate_lru(folio))
return -EAGAIN;
- node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
- nr_pages);
+ node_stat_add_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio));
return 0;
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 833f743f309f..87e9ea41313a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2750,7 +2750,7 @@ bool folio_redirty_for_writepage(struct writeback_control *wbc,
wb = unlocked_inode_to_wb_begin(inode, &cookie);
current->nr_dirtied -= nr;
- node_stat_mod_folio(folio, NR_DIRTIED, -nr);
+ node_stat_sub_folio(folio, NR_DIRTIED);
wb_stat_mod(wb, WB_DIRTIED, -nr);
unlocked_inode_to_wb_end(inode, &cookie);
}
@@ -2981,7 +2981,7 @@ bool __folio_end_writeback(struct folio *folio)
lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
- node_stat_mod_folio(folio, NR_WRITTEN, nr);
+ node_stat_add_folio(folio, NR_WRITTEN);
return ret;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 1415a5c54a43..d08e923c9979 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -160,7 +160,7 @@ void __swap_cache_add_folio(struct swap_cluster_info *ci,
folio_set_swapcache(folio);
folio->swap = entry;
- node_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+ node_stat_add_folio(folio, NR_FILE_PAGES);
lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr_pages);
}
@@ -265,7 +265,7 @@ void __swap_cache_del_folio(struct swap_cluster_info *ci, struct folio *folio,
folio->swap.val = 0;
folio_clear_swapcache(folio);
- node_stat_mod_folio(folio, NR_FILE_PAGES, -nr_pages);
+ node_stat_sub_folio(folio, NR_FILE_PAGES);
lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr_pages);
if (!folio_swapped) {
--
2.43.0
next prev parent reply other threads:[~2026-04-14 9:16 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-14 9:15 [PATCH 0/4] mm/vmstat: simplify folio stat APIs Ye Liu
2026-04-14 9:15 ` Ye Liu [this message]
2026-04-14 17:52 ` [PATCH 1/4] mm/vmstat: use node_stat_add_folio/sub_folio for folio_nr_pages operations David Hildenbrand (Arm)
2026-04-14 9:15 ` [PATCH 2/4] mm/vmstat: use zone_stat_add_folio/sub_folio " Ye Liu
2026-04-14 9:15 ` [PATCH 3/4] mm/vmstat: remove unused __node_stat_* wrappers Ye Liu
2026-04-14 14:59 ` Joshua Hahn
2026-04-14 9:15 ` [PATCH 4/4] mm/vmstat: remove unused __zone_stat_* wrappers Ye Liu
2026-04-14 13:18 ` [PATCH 0/4] mm/vmstat: simplify folio stat APIs Matthew Wilcox
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260414091527.2970844-2-ye.liu@linux.dev \
--to=ye.liu@linux.dev \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=anna@kernel.org \
--cc=apopple@nvidia.com \
--cc=baohua@kernel.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=bhe@redhat.com \
--cc=byungchul@sk.com \
--cc=chrisl@kernel.org \
--cc=david@kernel.org \
--cc=dev.jain@arm.com \
--cc=gourry@gourry.net \
--cc=hannes@cmpxchg.org \
--cc=jackmanb@google.com \
--cc=jgg@ziepe.ca \
--cc=jhubbard@nvidia.com \
--cc=joshua.hahnjy@gmail.com \
--cc=kasong@tencent.com \
--cc=lance.yang@linux.dev \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nfs@vger.kernel.org \
--cc=liuye@kylinos.cn \
--cc=ljs@kernel.org \
--cc=matthew.brost@intel.com \
--cc=mhocko@suse.com \
--cc=npache@redhat.com \
--cc=nphamcs@gmail.com \
--cc=peterx@redhat.com \
--cc=rakie.kim@sk.com \
--cc=ryan.roberts@arm.com \
--cc=shikemeng@huaweicloud.com \
--cc=surenb@google.com \
--cc=trondmy@kernel.org \
--cc=vbabka@kernel.org \
--cc=willy@infradead.org \
--cc=ying.huang@linux.alibaba.com \
--cc=youngjun.park@lge.com \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox