From: Kiryl Shutsemau <kas@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>,
Muchun Song <muchun.song@linux.dev>,
David Hildenbrand <david@redhat.com>,
Matthew Wilcox <willy@infradead.org>,
Usama Arif <usamaarif642@gmail.com>,
Frank van der Linden <fvdl@google.com>
Cc: Oscar Salvador <osalvador@suse.de>,
Mike Rapoport <rppt@kernel.org>, Vlastimil Babka <vbabka@suse.cz>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
Zi Yan <ziy@nvidia.com>, Baoquan He <bhe@redhat.com>,
Michal Hocko <mhocko@suse.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Jonathan Corbet <corbet@lwn.net>,
Huacai Chen <chenhuacai@kernel.org>,
WANG Xuerui <kernel@xen0n.name>,
Palmer Dabbelt <palmer@dabbelt.com>,
Paul Walmsley <paul.walmsley@sifive.com>,
Albert Ou <aou@eecs.berkeley.edu>,
Alexandre Ghiti <alex@ghiti.fr>,
kernel-team@meta.com, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
loongarch@lists.linux.dev, linux-riscv@lists.infradead.org,
Kiryl Shutsemau <kas@kernel.org>
Subject: [PATCHv5 12/17] mm: Drop fake head checks
Date: Wed, 28 Jan 2026 13:54:53 +0000
Message-ID: <20260128135500.22121-13-kas@kernel.org>
In-Reply-To: <20260128135500.22121-1-kas@kernel.org>

With fake head pages eliminated in the previous commit, remove the
supporting infrastructure:

- page_fixed_fake_head(): no longer needed to resolve a fake head to
  its real head page;
- page_is_fake_head(): no longer needed;
- page_count_writable(): no longer needed to guard against writes to
  the read-only vmemmap;
- rcu_read_lock() in page_ref_add_unless(): no longer needed.

This substantially simplifies compound_head() and page_ref_add_unless(),
removing the fake-head branches and the RCU overhead from these hot paths.
Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
---
include/linux/page-flags.h | 93 ++------------------------------------
include/linux/page_ref.h | 8 +---
2 files changed, 4 insertions(+), 97 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 8f2c7fbc739b..5a8f6fab2255 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -221,102 +221,15 @@ static __always_inline bool compound_info_has_mask(void)
return is_power_of_2(sizeof(struct page));
}
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
-/*
- * Return the real head page struct iff the @page is a fake head page, otherwise
- * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
- */
-static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
-{
- if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
- return page;
-
- /* Fake heads only exists if compound_info_has_mask() is true */
- if (!compound_info_has_mask())
- return page;
-
- /*
- * Only addresses aligned with PAGE_SIZE of struct page may be fake head
- * struct page. The alignment check aims to avoid access the fields (
- * e.g. compound_info) of the @page[1]. It can avoid touch a (possibly)
- * cold cacheline in some cases.
- */
- if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
- test_bit(PG_head, &page->flags.f)) {
- /*
- * We can safely access the field of the @page[1] with PG_head
- * because the @page is a compound page composed with at least
- * two contiguous pages.
- */
- unsigned long info = READ_ONCE(page[1].compound_info);
-
- /* See set_compound_head() */
- if (likely(info & 1)) {
- unsigned long p = (unsigned long)page;
-
- return (const struct page *)(p & info);
- }
- }
- return page;
-}
-
-static __always_inline bool page_count_writable(const struct page *page, int u)
-{
- if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
- return true;
-
- /*
- * The refcount check is ordered before the fake-head check to prevent
- * the following race:
- * CPU 1 (HVO) CPU 2 (speculative PFN walker)
- *
- * page_ref_freeze()
- * synchronize_rcu()
- * rcu_read_lock()
- * page_is_fake_head() is false
- * vmemmap_remap_pte()
- * XXX: struct page[] becomes r/o
- *
- * page_ref_unfreeze()
- * page_ref_count() is not zero
- *
- * atomic_add_unless(&page->_refcount)
- * XXX: try to modify r/o struct page[]
- *
- * The refcount check also prevents modification attempts to other (r/o)
- * tail pages that are not fake heads.
- */
- if (atomic_read_acquire(&page->_refcount) == u)
- return false;
-
- return page_fixed_fake_head(page) == page;
-}
-#else
-static inline const struct page *page_fixed_fake_head(const struct page *page)
-{
- return page;
-}
-
-static inline bool page_count_writable(const struct page *page, int u)
-{
- return true;
-}
-#endif
-
-static __always_inline int page_is_fake_head(const struct page *page)
-{
- return page_fixed_fake_head(page) != page;
-}
-
static __always_inline unsigned long _compound_head(const struct page *page)
{
unsigned long info = READ_ONCE(page->compound_info);
/* Bit 0 encodes PageTail() */
if (!(info & 1))
- return (unsigned long)page_fixed_fake_head(page);
+ return (unsigned long)page;
/*
* If compound_info_has_mask() is false, the rest of compound_info is
@@ -397,7 +310,7 @@ static __always_inline void clear_compound_head(struct page *page)
static __always_inline int PageTail(const struct page *page)
{
- return READ_ONCE(page->compound_info) & 1 || page_is_fake_head(page);
+ return READ_ONCE(page->compound_info) & 1;
}
static __always_inline int PageCompound(const struct page *page)
@@ -924,7 +837,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
static __always_inline int PageHead(const struct page *page)
{
PF_POISONED_CHECK(page);
- return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
+ return test_bit(PG_head, &page->flags.f);
}
__SETPAGEFLAG(Head, head, PF_ANY)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 544150d1d5fd..490d0ad6e56d 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -230,13 +230,7 @@ static inline int folio_ref_dec_return(struct folio *folio)
static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
- bool ret = false;
-
- rcu_read_lock();
- /* avoid writing to the vmemmap area being remapped */
- if (page_count_writable(page, u))
- ret = atomic_add_unless(&page->_refcount, nr, u);
- rcu_read_unlock();
+ bool ret = atomic_add_unless(&page->_refcount, nr, u);
if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
--
2.51.2
Thread overview: 26+ messages
2026-01-28 13:54 [PATCHv5 00/17] mm: Eliminate fake head pages from vmemmap optimization Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 01/17] mm: Move MAX_FOLIO_ORDER definition to mmzone.h Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 02/17] mm: Change the interface of prep_compound_tail() Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 03/17] mm: Rename the 'compound_head' field in the 'struct page' to 'compound_info' Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 04/17] mm: Move set/clear_compound_head() next to compound_head() Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 05/17] riscv/mm: Align vmemmap to maximal folio size Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 06/17] LoongArch/mm: " Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 07/17] mm: Rework compound_head() for power-of-2 sizeof(struct page) Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 08/17] mm: Make page_zonenum() use head page Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 09/17] mm/sparse: Check memmap alignment for compound_info_has_mask() Kiryl Shutsemau
2026-01-29 3:00 ` Muchun Song
2026-01-29 3:10 ` Zi Yan
2026-01-29 3:23 ` Muchun Song
2026-01-29 3:29 ` Zi Yan
2026-01-29 7:03 ` Muchun Song
2026-01-29 17:33 ` Zi Yan
2026-01-28 13:54 ` [PATCHv5 10/17] mm/hugetlb: Refactor code around vmemmap_walk Kiryl Shutsemau
2026-01-29 2:51 ` Muchun Song
2026-01-28 13:54 ` [PATCHv5 11/17] mm/hugetlb: Remove fake head pages Kiryl Shutsemau
2026-01-29 6:54 ` Muchun Song
2026-01-28 13:54 ` Kiryl Shutsemau [this message]
2026-01-28 13:54 ` [PATCHv5 13/17] hugetlb: Remove VMEMMAP_SYNCHRONIZE_RCU Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 14/17] mm/hugetlb: Remove hugetlb_optimize_vmemmap_key static key Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 15/17] mm: Remove the branch from compound_head() Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 16/17] hugetlb: Update vmemmap_dedup.rst Kiryl Shutsemau
2026-01-28 13:54 ` [PATCHv5 17/17] mm/slab: Use compound_head() in page_slab() Kiryl Shutsemau