From: Muchun Song <songmuchun@bytedance.com>
To: Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Muchun Song <muchun.song@linux.dev>,
Oscar Salvador <osalvador@suse.de>,
Michael Ellerman <mpe@ellerman.id.au>,
Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Lorenzo Stoakes <ljs@kernel.org>,
"Liam R . Howlett" <Liam.Howlett@oracle.com>,
Vlastimil Babka <vbabka@kernel.org>,
Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>,
Nicholas Piggin <npiggin@gmail.com>,
Christophe Leroy <chleroy@kernel.org>,
aneesh.kumar@linux.ibm.com, joao.m.martins@oracle.com,
linux-mm@kvack.org, linuxppc-dev@lists.ozlabs.org,
linux-kernel@vger.kernel.org,
Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH 17/49] mm: remove sparse_vmemmap_init_nid_late()
Date: Sun, 5 Apr 2026 20:52:08 +0800
Message-ID: <20260405125240.2558577-18-songmuchun@bytedance.com>
In-Reply-To: <20260405125240.2558577-1-songmuchun@bytedance.com>
Now that hugetlb bootmem allocation is deferred until after free_area_init()
and cross-zone pages are checked at allocation time, the
hugetlb_vmemmap_init_late() function is no longer needed:
1. hugetlb_bootmem_alloc() is now called after free_area_init(), so zone
information is available during bootmem huge page allocation.
2. During alloc_bootmem(), pages are checked for zone crossings, and pages
that pass the check are marked with the HUGE_BOOTMEM_ZONES_VALID flag.
3. After allocation, hugetlb_free_cross_zone_pages() frees those pages that
intersect multiple zones.
Since cross-zone pages are already handled in the allocation path, the late-stage
validation in hugetlb_vmemmap_init_late() is redundant and can be removed.
Also, sparse_vmemmap_init_nid_late() did nothing but call the removed
hugetlb_vmemmap_init_late(), leaving it empty and unused. Remove it as well
to clean up the code.
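For reference, the boot-time ordering this relies on is roughly the
following. This is an illustrative sketch only, not the actual mm init
code: boot_order_sketch() is a made-up name, while free_area_init(),
hugetlb_bootmem_alloc(), hugetlb_free_cross_zone_pages() and
HUGE_BOOTMEM_ZONES_VALID refer to the functions and flag mentioned above.

	/* Illustrative sketch of the intended ordering, not real code. */
	static void __init boot_order_sketch(void)
	{
		/* Zones are fully set up first. */
		free_area_init();

		/*
		 * Gigantic bootmem pages are allocated afterwards, so zone
		 * information is already available; pages that fit within a
		 * single zone are flagged HUGE_BOOTMEM_ZONES_VALID here.
		 */
		hugetlb_bootmem_alloc();

		/* Pages still spanning multiple zones are freed back. */
		hugetlb_free_cross_zone_pages();

		/*
		 * Nothing zone-related is left to fix up later, so the
		 * hugetlb_vmemmap_init_late() /
		 * sparse_vmemmap_init_nid_late() stage can be dropped.
		 */
	}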
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
include/linux/hugetlb.h | 2 --
include/linux/mmzone.h | 7 -----
mm/hugetlb.c | 70 -----------------------------------------
mm/hugetlb_vmemmap.c | 58 ----------------------------------
mm/hugetlb_vmemmap.h | 5 ---
mm/sparse-vmemmap.c | 11 -------
mm/sparse.c | 2 --
7 files changed, 155 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 9c098a02a09e..23d95ed6121f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -699,8 +699,6 @@ struct huge_bootmem_page {
#define HUGE_BOOTMEM_ZONES_VALID 0x0002
#define HUGE_BOOTMEM_CMA 0x0004
-bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);
-
int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
void wait_for_freed_hugetlb_folios(void);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a071f1a0e242..8ee9dc60120a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -2153,8 +2153,6 @@ static inline int preinited_vmemmap_section(const struct mem_section *section)
}
void sparse_vmemmap_init_nid_early(int nid);
-void sparse_vmemmap_init_nid_late(int nid);
-
#else
static inline int preinited_vmemmap_section(const struct mem_section *section)
{
@@ -2163,10 +2161,6 @@ static inline int preinited_vmemmap_section(const struct mem_section *section)
static inline void sparse_vmemmap_init_nid_early(int nid)
{
}
-
-static inline void sparse_vmemmap_init_nid_late(int nid)
-{
-}
#endif
static inline int online_section_nr(unsigned long nr)
@@ -2371,7 +2365,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#else
#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
-#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#endif /* CONFIG_SPARSEMEM */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 238495fd04e4..a00c9f3672b7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -58,7 +58,6 @@ struct hstate hstates[HUGE_MAX_HSTATE];
__initdata nodemask_t hugetlb_bootmem_nodes;
__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
-static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;
/*
* Due to ordering constraints across the init code for various
@@ -3254,57 +3253,6 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
}
}
-bool __init hugetlb_bootmem_page_zones_valid(int nid,
- struct huge_bootmem_page *m)
-{
- unsigned long start_pfn;
- bool valid;
-
- if (m->flags & HUGE_BOOTMEM_ZONES_VALID) {
- /*
- * Already validated, skip check.
- */
- return true;
- }
-
- if (hugetlb_bootmem_page_earlycma(m)) {
- valid = cma_validate_zones(m->cma);
- goto out;
- }
-
- start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
-
- valid = !pfn_range_intersects_zones(nid, start_pfn,
- pages_per_huge_page(m->hstate));
-out:
- if (!valid)
- hstate_boot_nrinvalid[hstate_index(m->hstate)]++;
-
- return valid;
-}
-
-/*
- * Free a bootmem page that was found to be invalid (intersecting with
- * multiple zones).
- *
- * Since it intersects with multiple zones, we can't just do a free
- * operation on all pages at once, but instead have to walk all
- * pages, freeing them one by one.
- */
-static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
- struct hstate *h)
-{
- unsigned long npages = pages_per_huge_page(h);
- unsigned long pfn;
-
- while (npages--) {
- pfn = page_to_pfn(page);
- __init_page_from_nid(pfn, nid);
- free_reserved_page(page);
- page++;
- }
-}
-
/*
* Put bootmem huge pages into the standard lists after mem_map is up.
* Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
@@ -3320,17 +3268,6 @@ static void __init gather_bootmem_prealloc_node(unsigned long nid)
struct folio *folio = (void *)page;
h = m->hstate;
- if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
- /*
- * Can't use this page. Initialize the
- * page structures if that hasn't already
- * been done, and give them to the page
- * allocator.
- */
- hugetlb_bootmem_free_invalid_page(nid, page, h);
- continue;
- }
-
/*
* It is possible to have multiple huge page sizes (hstates)
* in this list. If so, process each size separately.
@@ -3700,20 +3637,13 @@ static void __init hugetlb_init_hstates(void)
static void __init report_hugepages(void)
{
struct hstate *h;
- unsigned long nrinvalid;
for_each_hstate(h) {
char buf[32];
- nrinvalid = hstate_boot_nrinvalid[hstate_index(h)];
- h->max_huge_pages -= nrinvalid;
-
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
buf, h->nr_huge_pages);
- if (nrinvalid)
- pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n",
- buf, nrinvalid, str_plural(nrinvalid));
pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
}
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index e25c70453928..535f0369a496 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -807,64 +807,6 @@ void __init hugetlb_vmemmap_init_early(int nid)
m->flags |= HUGE_BOOTMEM_HVO;
}
}
-
-void __init hugetlb_vmemmap_init_late(int nid)
-{
- struct huge_bootmem_page *m, *tm;
- unsigned long phys, nr_pages, start, end;
- unsigned long pfn, nr_mmap;
- struct zone *zone = NULL;
- struct hstate *h;
- void *map;
-
- if (!READ_ONCE(vmemmap_optimize_enabled))
- return;
-
- list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
- if (!(m->flags & HUGE_BOOTMEM_HVO))
- continue;
-
- phys = virt_to_phys(m);
- h = m->hstate;
- pfn = PHYS_PFN(phys);
- nr_pages = pages_per_huge_page(h);
- map = pfn_to_page(pfn);
- start = (unsigned long)map;
- end = start + nr_pages * sizeof(struct page);
-
- if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
- /*
- * Oops, the hugetlb page spans multiple zones.
- * Remove it from the list, and populate it normally.
- */
- list_del(&m->list);
-
- vmemmap_populate(start, end, nid, NULL, NULL);
- nr_mmap = end - start;
- memmap_boot_pages_add(DIV_ROUND_UP(nr_mmap, PAGE_SIZE));
-
- memblock_phys_free(phys, huge_page_size(h));
- continue;
- }
-
- if (!zone || !zone_spans_pfn(zone, pfn))
- zone = pfn_to_zone(nid, pfn);
- if (WARN_ON_ONCE(!zone))
- continue;
-
- if (vmemmap_populate_hvo(start, end, huge_page_order(h), zone,
- HUGETLB_VMEMMAP_RESERVE_SIZE) < 0) {
- /* Fallback if HVO population fails */
- vmemmap_populate(start, end, nid, NULL, NULL);
- nr_mmap = end - start;
- } else {
- m->flags |= HUGE_BOOTMEM_ZONES_VALID;
- nr_mmap = HUGETLB_VMEMMAP_RESERVE_SIZE;
- }
-
- memmap_boot_pages_add(DIV_ROUND_UP(nr_mmap, PAGE_SIZE));
- }
-}
#endif
static const struct ctl_table hugetlb_vmemmap_sysctls[] = {
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 18b490825215..7ac49c52457d 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -29,7 +29,6 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list);
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
void hugetlb_vmemmap_init_early(int nid);
-void hugetlb_vmemmap_init_late(int nid);
#endif
@@ -81,10 +80,6 @@ static inline void hugetlb_vmemmap_init_early(int nid)
{
}
-static inline void hugetlb_vmemmap_init_late(int nid)
-{
-}
-
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
return 0;
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index b7201c235419..26cb55c12a83 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -581,17 +581,6 @@ void __init sparse_vmemmap_init_nid_early(int nid)
{
hugetlb_vmemmap_init_early(nid);
}
-
-/*
- * This is called just before the initialization of page structures
- * through memmap_init. Zones are now initialized, so any work that
- * needs to be done that needs zone information can be done from
- * here.
- */
-void __init sparse_vmemmap_init_nid_late(int nid)
-{
- hugetlb_vmemmap_init_late(nid);
-}
#endif
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
diff --git a/mm/sparse.c b/mm/sparse.c
index d940b973df66..5fe0a7e66775 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -383,8 +383,6 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
}
sparse_usage_fini();
sparse_buffer_fini();
-
- sparse_vmemmap_init_nid_late(nid);
}
/*
--
2.20.1
Thread overview: 53+ messages
2026-04-05 12:51 [PATCH 00/49] mm: Generalize vmemmap optimization for DAX and HugeTLB Muchun Song
2026-04-05 12:51 ` [PATCH 01/49] mm/sparse: fix vmemmap accounting imbalance on memory hotplug error Muchun Song
2026-04-05 12:51 ` [PATCH 02/49] mm/sparse: add a @pgmap argument to memory deactivation paths Muchun Song
2026-04-05 12:51 ` [PATCH 03/49] mm/sparse: fix vmemmap page accounting for HVOed DAX Muchun Song
2026-04-05 12:51 ` [PATCH 04/49] mm/sparse: add a @pgmap parameter to arch vmemmap_populate() Muchun Song
2026-04-05 12:51 ` [PATCH 05/49] mm/sparse: fix missing architecture-specific page table sync for HVO DAX Muchun Song
2026-04-05 12:51 ` [PATCH 06/49] mm/mm_init: fix uninitialized pageblock migratetype for ZONE_DEVICE compound pages Muchun Song
2026-04-05 12:51 ` [PATCH 07/49] mm/mm_init: use pageblock_migratetype_init_range() in deferred_free_pages() Muchun Song
2026-04-05 12:51 ` [PATCH 08/49] mm: Convert vmemmap_p?d_populate() to static functions Muchun Song
2026-04-05 12:52 ` [PATCH 09/49] mm: panic on memory allocation failure in sparse_init_nid() Muchun Song
2026-04-05 12:52 ` [PATCH 10/49] mm: move subsection_map_init() into sparse_init() Muchun Song
2026-04-05 12:52 ` [PATCH 11/49] mm: defer sparse_init() until after zone initialization Muchun Song
2026-04-05 12:52 ` [PATCH 12/49] mm: make set_pageblock_order() static Muchun Song
2026-04-05 12:52 ` [PATCH 13/49] mm: integrate sparse_vmemmap_init_nid_late() into sparse_init_nid() Muchun Song
2026-04-05 12:52 ` [PATCH 14/49] mm/cma: validate hugetlb CMA range by zone at reserve time Muchun Song
2026-04-05 12:52 ` [PATCH 15/49] mm/hugetlb: free cross-zone bootmem gigantic pages after allocation Muchun Song
2026-04-05 12:52 ` [PATCH 16/49] mm/hugetlb: initialize vmemmap optimization in early stage Muchun Song
2026-04-05 12:52 ` Muchun Song [this message]
2026-04-05 12:52 ` [PATCH 18/49] mm/mm_init: make __init_page_from_nid() static Muchun Song
2026-04-05 12:52 ` [PATCH 19/49] mm/sparse-vmemmap: remove the VMEMMAP_POPULATE_PAGEREF flag Muchun Song
2026-04-05 12:52 ` [PATCH 20/49] mm: rename vmemmap optimization macros to generic names Muchun Song
2026-04-05 12:52 ` [PATCH 21/49] mm/sparse: drop power-of-2 size requirement for struct mem_section Muchun Song
2026-04-05 12:52 ` [PATCH 22/49] mm/sparse: introduce compound page order to mem_section Muchun Song
2026-04-05 12:52 ` [PATCH 23/49] mm/mm_init: skip initializing shared tail pages for compound pages Muchun Song
2026-04-05 12:52 ` [PATCH 24/49] mm/sparse-vmemmap: initialize shared tail vmemmap page upon allocation Muchun Song
2026-04-05 12:52 ` [PATCH 25/49] mm/sparse-vmemmap: support vmemmap-optimizable compound page population Muchun Song
2026-04-05 12:52 ` [PATCH 26/49] mm/hugetlb: use generic vmemmap optimization macros Muchun Song
2026-04-05 12:52 ` [PATCH 27/49] mm: call memblocks_present() before HugeTLB initialization Muchun Song
2026-04-05 12:52 ` [PATCH 28/49] mm/hugetlb: switch HugeTLB to use generic vmemmap optimization Muchun Song
2026-04-05 12:52 ` [PATCH 29/49] mm: extract pfn_to_zone() helper Muchun Song
2026-04-05 12:52 ` [PATCH 30/49] mm/sparse-vmemmap: remove unused SPARSEMEM_VMEMMAP_PREINIT feature Muchun Song
2026-04-05 12:52 ` [PATCH 31/49] mm/hugetlb: remove HUGE_BOOTMEM_HVO flag and simplify pre-HVO logic Muchun Song
2026-04-05 12:52 ` [PATCH 32/49] mm/sparse-vmemmap: consolidate shared tail page allocation Muchun Song
2026-04-05 12:52 ` [PATCH 33/49] mm: introduce CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION Muchun Song
2026-04-05 12:52 ` [PATCH 34/49] mm/sparse-vmemmap: switch DAX to use generic vmemmap optimization Muchun Song
2026-04-05 12:52 ` [PATCH 35/49] mm/sparse-vmemmap: introduce section zone to struct mem_section Muchun Song
2026-04-05 12:52 ` [PATCH 36/49] powerpc/mm: use generic vmemmap_shared_tail_page() in compound vmemmap Muchun Song
2026-04-05 12:52 ` [PATCH 37/49] mm/sparse-vmemmap: unify DAX and HugeTLB vmemmap optimization Muchun Song
2026-04-05 12:52 ` [PATCH 38/49] mm/sparse-vmemmap: remap the shared tail pages as read-only Muchun Song
2026-04-05 12:52 ` [PATCH 39/49] mm/sparse-vmemmap: remove unused ptpfn argument Muchun Song
2026-04-05 12:52 ` [PATCH 40/49] mm/hugetlb_vmemmap: remove vmemmap_wrprotect_hvo() and related code Muchun Song
2026-04-05 12:52 ` [PATCH 41/49] mm/sparse: simplify section_vmemmap_pages() Muchun Song
2026-04-05 12:52 ` [PATCH 42/49] mm/sparse-vmemmap: introduce section_vmemmap_page_structs() Muchun Song
2026-04-05 12:52 ` [PATCH 43/49] powerpc/mm: rely on generic vmemmap_can_optimize() to simplify code Muchun Song
2026-04-05 12:52 ` [PATCH 44/49] mm/sparse-vmemmap: drop ARCH_WANT_OPTIMIZE_DAX_VMEMMAP and simplify checks Muchun Song
2026-04-05 12:52 ` [PATCH 45/49] mm/sparse-vmemmap: drop @pgmap parameter from vmemmap populate APIs Muchun Song
2026-04-05 12:52 ` [PATCH 46/49] mm/sparse: replace pgmap with order and zone in sparse_add_section() Muchun Song
2026-04-05 12:52 ` [PATCH 47/49] mm: redefine HVO as Hugepage Vmemmap Optimization Muchun Song
2026-04-05 12:52 ` [PATCH 48/49] Documentation/mm: restructure vmemmap_dedup.rst to reflect generalized HVO Muchun Song
2026-04-05 12:52 ` [PATCH 49/49] mm: consolidate struct page power-of-2 size checks for HVO Muchun Song
2026-04-05 13:34 ` [PATCH 00/49] mm: Generalize vmemmap optimization for DAX and HugeTLB Mike Rapoport
2026-04-06 19:59 ` David Hildenbrand (arm)
2026-04-08 15:29 ` Frank van der Linden