From: Muchun Song <songmuchun@bytedance.com>
To: Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Muchun Song <muchun.song@linux.dev>,
Oscar Salvador <osalvador@suse.de>,
Michael Ellerman <mpe@ellerman.id.au>,
Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Lorenzo Stoakes <ljs@kernel.org>,
"Liam R . Howlett" <Liam.Howlett@oracle.com>,
Vlastimil Babka <vbabka@kernel.org>,
Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>,
Nicholas Piggin <npiggin@gmail.com>,
Christophe Leroy <chleroy@kernel.org>,
aneesh.kumar@linux.ibm.com, joao.m.martins@oracle.com,
linux-mm@kvack.org, linuxppc-dev@lists.ozlabs.org,
linux-kernel@vger.kernel.org,
Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v2 5/6] mm/sparse-vmemmap: Fix missing architecture-specific page table sync
Date: Wed, 15 Apr 2026 19:14:11 +0800
Message-ID: <20260415111412.1003526-6-songmuchun@bytedance.com>
In-Reply-To: <20260415111412.1003526-1-songmuchun@bytedance.com>

On x86-64, vmemmap_populate() normally calls sync_global_pgds() to
propagate newly created top-level vmemmap page table entries to every
PGD in the system. However, when vmemmap optimization for compound
devmaps is enabled, vmemmap_populate_compound_pages() is called
directly from __populate_section_memmap(), bypassing the architecture-
specific vmemmap_populate() entirely. This skips the sync on x86-64,
so page tables that never received the new entries can later trigger
vmemmap-access faults.
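
Before this fix, the optimized path looks roughly like this (a
simplified call sketch, not verbatim code):

	__populate_section_memmap()
	  vmemmap_can_optimize()              /* true for compound devmaps */
	  vmemmap_populate_compound_pages()   /* generic code only */
	  /* the arch vmemmap_populate() never runs here, so
	     sync_global_pgds() is never called on x86-64 */
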
Fix this by moving the vmemmap_can_optimize() dispatch from
__populate_section_memmap() into the generic helpers --
vmemmap_populate_basepages() and vmemmap_populate_hugepages(). This
way, the architecture's vmemmap_populate() is always the entry point,
ensuring any arch-specific post-population steps (e.g.
sync_global_pgds() on x86-64) are executed before returning.
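
Conceptually, each generic helper now begins with the dispatch; a
sketch of the pattern (the exact code is in the hunks below):

	if (vmemmap_can_optimize(altmap, pgmap))
		return vmemmap_populate_compound_pages(start, end, node, pgmap);
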
Architectures that override vmemmap_populate() without going through
these helpers (e.g. powerpc radix) handle the optimization dispatch in
their own implementation instead; on powerpc this also lets
vmemmap_populate_compound_pages() become static, dropping the override
hook from radix.h.
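
On powerpc, radix__vmemmap_populate() now performs the same check up
front before doing its own population (a sketch mirroring the radix
hunk below):

	if (vmemmap_can_optimize(altmap, pgmap))
		return vmemmap_populate_compound_pages(page_to_pfn((struct page *)start),
						       start, end, node, pgmap);
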
Fixes: 4917f55b4ef9 ("mm/sparse-vmemmap: improve memory savings for compound devmaps")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
arch/powerpc/include/asm/book3s/64/radix.h | 6 ------
arch/powerpc/mm/book3s64/radix_pgtable.c | 16 ++++++++++-----
mm/sparse-vmemmap.c | 24 +++++++++++-----------
3 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index bde07c6f900f..2600defa2dc2 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -357,11 +357,5 @@ int radix__remove_section_mapping(unsigned long start, unsigned long end);
#define vmemmap_can_optimize vmemmap_can_optimize
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap);
#endif
-
-#define vmemmap_populate_compound_pages vmemmap_populate_compound_pages
-int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
- unsigned long start,
- unsigned long end, int node,
- struct dev_pagemap *pgmap);
#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 568500343e5f..21fece355fbb 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1109,7 +1109,10 @@ static inline pte_t *vmemmap_pte_alloc(pmd_t *pmdp, int node,
return pte_offset_kernel(pmdp, address);
}
-
+static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
+ unsigned long start,
+ unsigned long end, int node,
+ struct dev_pagemap *pgmap);
int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
@@ -1122,6 +1125,9 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
pmd_t *pmd;
pte_t *pte;
+ if (vmemmap_can_optimize(altmap, pgmap))
+ return vmemmap_populate_compound_pages(page_to_pfn((struct page *)start),
+ start, end, node, pgmap);
/*
* If altmap is present, Make sure we align the start vmemmap addr
* to PAGE_SIZE so that we calculate the correct start_pfn in
@@ -1303,10 +1309,10 @@ static pte_t * __meminit vmemmap_compound_tail_page(unsigned long addr,
return pte;
}
-int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
- unsigned long start,
- unsigned long end, int node,
- struct dev_pagemap *pgmap)
+static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
+ unsigned long start,
+ unsigned long end, int node,
+ struct dev_pagemap *pgmap)
{
/*
* we want to map things as base page size mapping so that
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index f5245647afee..7f684ed3479e 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -296,10 +296,16 @@ static int __meminit vmemmap_populate_range(unsigned long start,
return 0;
}
+static int __meminit vmemmap_populate_compound_pages(unsigned long start,
+ unsigned long end, int node,
+ struct dev_pagemap *pgmap);
+
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
{
+ if (vmemmap_can_optimize(altmap, pgmap))
+ return vmemmap_populate_compound_pages(start, end, node, pgmap);
return vmemmap_populate_range(start, end, node, altmap, -1, 0);
}
@@ -411,6 +417,9 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
pud_t *pud;
pmd_t *pmd;
+ if (vmemmap_can_optimize(altmap, pgmap))
+ return vmemmap_populate_compound_pages(start, end, node, pgmap);
+
for (addr = start; addr < end; addr = next) {
next = pmd_addr_end(addr, end);
@@ -453,7 +462,6 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
return 0;
}
-#ifndef vmemmap_populate_compound_pages
/*
* For compound pages bigger than section size (e.g. x86 1G compound
* pages with 2M subsection size) fill the rest of sections as tail
@@ -491,14 +499,14 @@ static pte_t * __meminit compound_section_tail_page(unsigned long addr)
return pte;
}
-static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
- unsigned long start,
+static int __meminit vmemmap_populate_compound_pages(unsigned long start,
unsigned long end, int node,
struct dev_pagemap *pgmap)
{
unsigned long size, addr;
pte_t *pte;
int rc;
+ unsigned long start_pfn = page_to_pfn((struct page *)start);
if (reuse_compound_section(start_pfn, pgmap)) {
pte = compound_section_tail_page(start);
@@ -544,26 +552,18 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
return 0;
}
-#endif
-
struct page * __meminit __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
{
unsigned long start = (unsigned long) pfn_to_page(pfn);
unsigned long end = start + nr_pages * sizeof(struct page);
- int r;
if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
return NULL;
- if (vmemmap_can_optimize(altmap, pgmap))
- r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
- else
- r = vmemmap_populate(start, end, nid, altmap, pgmap);
-
- if (r < 0)
+ if (vmemmap_populate(start, end, nid, altmap, pgmap))
return NULL;
return pfn_to_page(pfn);
--
2.20.1