From: Muchun Song <songmuchun@bytedance.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	David Hildenbrand <david@kernel.org>,
	Muchun Song <muchun.song@linux.dev>,
	Oscar Salvador <osalvador@suse.de>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Lorenzo Stoakes <ljs@kernel.org>,
	"Liam R . Howlett" <Liam.Howlett@oracle.com>,
	Vlastimil Babka <vbabka@kernel.org>,
	Mike Rapoport <rppt@kernel.org>,
	Suren Baghdasaryan <surenb@google.com>,
	Michal Hocko <mhocko@suse.com>,
	Nicholas Piggin <npiggin@gmail.com>,
	Christophe Leroy <chleroy@kernel.org>,
	aneesh.kumar@linux.ibm.com, joao.m.martins@oracle.com,
	linux-mm@kvack.org, linuxppc-dev@lists.ozlabs.org,
	linux-kernel@vger.kernel.org,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH 49/49] mm: consolidate struct page power-of-2 size checks for HVO
Date: Sun,  5 Apr 2026 20:52:40 +0800
Message-ID: <20260405125240.2558577-50-songmuchun@bytedance.com>
In-Reply-To: <20260405125240.2558577-1-songmuchun@bytedance.com>

The Hugepage Vmemmap Optimization (HVO) requires the size of struct
page to be a power of two. That size is only known to the C compiler
and currently cannot be evaluated natively by Kconfig, so the
condition is_power_of_2(sizeof(struct page)) is scattered across
several macros and static inline functions.

Extract the check into a single preprocessor macro,
STRUCT_PAGE_SIZE_IS_POWER_OF_2, generated via kernel/bounds.c during
the Kbuild process.
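
As a rough sketch of the mechanism (the exact formatting of the
generated header may differ), the DEFINE() in kernel/bounds.c makes
Kbuild emit something like the following into
include/generated/bounds.h on a configuration where
sizeof(struct page) is a power of 2:

	#define STRUCT_PAGE_SIZE_IS_POWER_OF_2 1 /* is_power_of_2(sizeof(struct page)) */

Unlike the original C-level check, this constant can be tested by the
preprocessor with #if.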

Define SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED as a master toggle that
is defined (to 1) only if both CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION
is set and the power-of-2 size check passes.
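
Since the toggle follows the CONFIG_ convention of being either
defined to 1 or left undefined, both #ifdef and IS_ENABLED() work on
it. Its definition in mmzone.h boils down to:

	#if defined(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION) && STRUCT_PAGE_SIZE_IS_POWER_OF_2
	#define SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED	1
	#endif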

This allows all the scattered sizeof(struct page) checks to be
removed, making the code cleaner and eliminating redundant logic.

Additionally, mm/hugetlb_vmemmap.c and its corresponding header are
now guarded by SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED. This brings an
added benefit: when the struct page size is not a power of 2, the
unused code in mm/hugetlb_vmemmap.c is compiled out entirely,
reducing the kernel image size.
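
Note that kernel/bounds.c now includes <linux/mm_types.h>, which is
compiled before any generated headers exist. The new
__GENERATING_BOUNDS_H guards in mm_types.h and mm_types_task.h break
the resulting circular dependency by skipping includes and constants
that come from generated headers. A minimal sketch of the pattern, as
applied to ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE itself comes from a
generated header):

	#ifndef __GENERATING_BOUNDS_H
	#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
	#else
	/* The value is irrelevant while generating bounds.h. */
	#define ALLOC_SPLIT_PTLOCKS	0
	#endif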

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/mm_types.h      |  2 ++
 include/linux/mm_types_task.h |  4 ++++
 include/linux/mmzone.h        | 32 +++++++++++++++-----------------
 include/linux/page-flags.h    | 28 ++++------------------------
 kernel/bounds.c               |  2 ++
 mm/hugetlb_vmemmap.c          |  2 ++
 mm/hugetlb_vmemmap.h          |  4 +---
 mm/internal.h                 |  3 ---
 mm/sparse.c                   |  6 ++----
 mm/util.c                     |  2 +-
 10 files changed, 33 insertions(+), 52 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index a308e2c23b82..6de6c0c20f8b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -15,7 +15,9 @@
 #include <linux/cpumask.h>
 #include <linux/uprobes.h>
 #include <linux/rcupdate.h>
+#ifndef __GENERATING_BOUNDS_H
 #include <linux/page-flags-layout.h>
+#endif
 #include <linux/workqueue.h>
 #include <linux/seqlock.h>
 #include <linux/percpu_counter.h>
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 11bf319d78ec..09e5039fff97 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -17,7 +17,11 @@
 #include <asm/tlbbatch.h>
 #endif
 
+#ifndef __GENERATING_BOUNDS_H
 #define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
+#else
+#define ALLOC_SPLIT_PTLOCKS	0
+#endif
 
 /*
  * When updating this, please also update struct resident_page_types[] in
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a6900f585f9b..3a46cb0bfaaa 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -96,27 +96,26 @@
 
 #define MAX_FOLIO_NR_PAGES	(1UL << MAX_FOLIO_ORDER)
 
-/*
- * Hugepage Vmemmap Optimization (HVO) requires struct pages of the head page to
- * be naturally aligned with regard to the folio size.
- *
- * HVO which is only active if the size of struct page is a power of 2.
- */
-#define MAX_FOLIO_VMEMMAP_ALIGN					\
-	(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION) &&	\
-	 is_power_of_2(sizeof(struct page)) ?			\
-	 MAX_FOLIO_NR_PAGES * sizeof(struct page) : 0)
-
 /* The number of vmemmap pages required by a vmemmap-optimized folio. */
 #define OPTIMIZED_FOLIO_VMEMMAP_PAGES		1
 #define OPTIMIZED_FOLIO_VMEMMAP_SIZE		(OPTIMIZED_FOLIO_VMEMMAP_PAGES * PAGE_SIZE)
 #define OPTIMIZED_FOLIO_VMEMMAP_PAGE_STRUCTS	(OPTIMIZED_FOLIO_VMEMMAP_SIZE / sizeof(struct page))
 #define OPTIMIZABLE_FOLIO_MIN_ORDER		(ilog2(OPTIMIZED_FOLIO_VMEMMAP_PAGE_STRUCTS) + 1)
 
+#if defined(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION) && STRUCT_PAGE_SIZE_IS_POWER_OF_2
+#define SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED	1
+/*
+ * Hugepage Vmemmap Optimization (HVO) requires the struct page of the head page
+ * to be naturally aligned with regard to the folio size.
+ */
+#define MAX_FOLIO_VMEMMAP_ALIGN			(MAX_FOLIO_NR_PAGES * sizeof(struct page))
 #define __NR_OPTIMIZABLE_FOLIO_SIZES		(MAX_FOLIO_ORDER - OPTIMIZABLE_FOLIO_MIN_ORDER + 1)
 #define NR_OPTIMIZABLE_FOLIO_SIZES		\
-	((__NR_OPTIMIZABLE_FOLIO_SIZES > 0 &&	\
-	  IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION)) ? __NR_OPTIMIZABLE_FOLIO_SIZES : 0)
+	(__NR_OPTIMIZABLE_FOLIO_SIZES > 0 ? __NR_OPTIMIZABLE_FOLIO_SIZES : 0)
+#else
+#define MAX_FOLIO_VMEMMAP_ALIGN			0
+#define NR_OPTIMIZABLE_FOLIO_SIZES		0
+#endif
 
 enum migratetype {
 	MIGRATE_UNMOVABLE,
@@ -2015,7 +2014,7 @@ struct mem_section {
 	 */
 	struct page_ext *page_ext;
 #endif
-#ifdef CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION
+#ifdef SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED
 	/*
 	 * The order of compound pages in this section. Typically, the section
 	 * holds compound pages of this order; a larger compound page will span
@@ -2208,7 +2207,7 @@ static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long
 }
 #endif
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION
+#ifdef SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED
 static inline void section_set_order(struct mem_section *section, unsigned int order)
 {
 	VM_BUG_ON(section->order && order && section->order != order);
@@ -2267,8 +2266,7 @@ static inline void section_set_compound_range(unsigned long pfn,
 
 static inline bool section_vmemmap_optimizable(const struct mem_section *section)
 {
-	return is_power_of_2(sizeof(struct page)) &&
-	       section_order(section) >= OPTIMIZABLE_FOLIO_MIN_ORDER;
+	return section_order(section) >= OPTIMIZABLE_FOLIO_MIN_ORDER;
 }
 
 void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 12665b34586c..bea934d49750 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -198,32 +198,12 @@ enum pageflags {
 
 #ifndef __GENERATING_BOUNDS_H
 
-/*
- * For tail pages, if the size of struct page is power-of-2 ->compound_info
- * encodes the mask that converts the address of the tail page address to
- * the head page address.
- *
- * Otherwise, ->compound_info has direct pointer to head pages.
- */
-static __always_inline bool compound_info_has_mask(void)
-{
-	/*
-	 * The approach with mask would work in the wider set of conditions,
-	 * but it requires validating that struct pages are naturally aligned
-	 * for all orders up to the MAX_FOLIO_ORDER, which can be tricky.
-	 */
-	if (!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION))
-		return false;
-
-	return is_power_of_2(sizeof(struct page));
-}
-
 static __always_inline unsigned long _compound_head(const struct page *page)
 {
 	unsigned long info = READ_ONCE(page->compound_info);
 	unsigned long mask;
 
-	if (!compound_info_has_mask()) {
+	if (!IS_ENABLED(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED)) {
 		/* Bit 0 encodes PageTail() */
 		if (info & 1)
 			return info - 1;
@@ -232,8 +212,8 @@ static __always_inline unsigned long _compound_head(const struct page *page)
 	}
 
 	/*
-	 * If compound_info_has_mask() is true the rest of the info encodes
-	 * the mask that converts the address of the tail page to the head page.
+	 * If HVO is enabled the rest of the info encodes the mask that converts
+	 * the address of the tail page to the head page.
 	 *
 	 * No need to clear bit 0 in the mask as 'page' always has it clear.
 	 *
@@ -257,7 +237,7 @@ static __always_inline void set_compound_head(struct page *tail,
 	unsigned int shift;
 	unsigned long mask;
 
-	if (!compound_info_has_mask()) {
+	if (!IS_ENABLED(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED)) {
 		WRITE_ONCE(tail->compound_info, (unsigned long)head | 1);
 		return;
 	}
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 02b619eb6106..ff2ec3834d32 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -8,6 +8,7 @@
 #define __GENERATING_BOUNDS_H
 #define COMPILE_OFFSETS
 /* Include headers that define the enum constants of interest */
+#include <linux/mm_types.h>
 #include <linux/page-flags.h>
 #include <linux/mmzone.h>
 #include <linux/kbuild.h>
@@ -30,6 +31,7 @@ int main(void)
 	DEFINE(LRU_GEN_WIDTH, 0);
 	DEFINE(__LRU_REFS_WIDTH, 0);
 #endif
+	DEFINE(STRUCT_PAGE_SIZE_IS_POWER_OF_2, is_power_of_2(sizeof(struct page)));
 	/* End of constants */
 
 	return 0;
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index d595ef759bc2..0347341be156 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -21,6 +21,7 @@
 #include "hugetlb_vmemmap.h"
 #include "internal.h"
 
+#ifdef SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED
 /**
  * struct vmemmap_remap_walk - walk vmemmap page table
  *
@@ -693,3 +694,4 @@ static int __init hugetlb_vmemmap_init(void)
 	return 0;
 }
 late_initcall(hugetlb_vmemmap_init);
+#endif
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 0022f9c5a101..bd576ef41ee7 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -12,7 +12,7 @@
 #include <linux/io.h>
 #include <linux/memblock.h>
 
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+#if defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) && defined(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED)
 int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
 long hugetlb_vmemmap_restore_folios(const struct hstate *h,
 					struct list_head *folio_list,
@@ -34,8 +34,6 @@ static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate
 {
 	int size = hugetlb_vmemmap_size(h) - OPTIMIZED_FOLIO_VMEMMAP_SIZE;
 
-	if (!is_power_of_2(sizeof(struct page)))
-		return 0;
 	return size > 0 ? size : 0;
 }
 #else
diff --git a/mm/internal.h b/mm/internal.h
index 02064f21bfe1..121c9076f09a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1026,9 +1026,6 @@ static inline bool vmemmap_page_optimizable(const struct page *page)
 	unsigned long pfn = page_to_pfn(page);
 	unsigned int order = section_order(__pfn_to_section(pfn));
 
-	if (!is_power_of_2(sizeof(struct page)))
-		return false;
-
 	return (pfn & ((1L << order) - 1)) >= OPTIMIZED_FOLIO_VMEMMAP_PAGE_STRUCTS;
 }
 
diff --git a/mm/sparse.c b/mm/sparse.c
index 77bb0113bac5..7375f66a58d5 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -404,10 +404,8 @@ void __init sparse_init(void)
 	unsigned long pnum_end, pnum_begin, map_count = 1;
 	int nid_begin;
 
-	if (compound_info_has_mask()) {
-		VM_WARN_ON_ONCE(!IS_ALIGNED((unsigned long) pfn_to_page(0),
-				    MAX_FOLIO_VMEMMAP_ALIGN));
-	}
+	VM_WARN_ON_ONCE(IS_ENABLED(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED) &&
+			!IS_ALIGNED((unsigned long)pfn_to_page(0), MAX_FOLIO_VMEMMAP_ALIGN));
 
 	pnum_begin = first_present_section_nr();
 	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
diff --git a/mm/util.c b/mm/util.c
index f063fd4de1e8..783b2081ea74 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1348,7 +1348,7 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)
 		foliop = (struct folio *)page;
 	} else {
 		/* See compound_head() */
-		if (compound_info_has_mask()) {
+		if (IS_ENABLED(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED)) {
 			unsigned long p = (unsigned long)page;
 
 			foliop = (struct folio *)(p & info);
-- 
2.20.1



Thread overview: 53+ messages
2026-04-05 12:51 [PATCH 00/49] mm: Generalize vmemmap optimization for DAX and HugeTLB Muchun Song
2026-04-05 12:51 ` [PATCH 01/49] mm/sparse: fix vmemmap accounting imbalance on memory hotplug error Muchun Song
2026-04-05 12:51 ` [PATCH 02/49] mm/sparse: add a @pgmap argument to memory deactivation paths Muchun Song
2026-04-05 12:51 ` [PATCH 03/49] mm/sparse: fix vmemmap page accounting for HVOed DAX Muchun Song
2026-04-05 12:51 ` [PATCH 04/49] mm/sparse: add a @pgmap parameter to arch vmemmap_populate() Muchun Song
2026-04-05 12:51 ` [PATCH 05/49] mm/sparse: fix missing architecture-specific page table sync for HVO DAX Muchun Song
2026-04-05 12:51 ` [PATCH 06/49] mm/mm_init: fix uninitialized pageblock migratetype for ZONE_DEVICE compound pages Muchun Song
2026-04-05 12:51 ` [PATCH 07/49] mm/mm_init: use pageblock_migratetype_init_range() in deferred_free_pages() Muchun Song
2026-04-05 12:51 ` [PATCH 08/49] mm: Convert vmemmap_p?d_populate() to static functions Muchun Song
2026-04-05 12:52 ` [PATCH 09/49] mm: panic on memory allocation failure in sparse_init_nid() Muchun Song
2026-04-05 12:52 ` [PATCH 10/49] mm: move subsection_map_init() into sparse_init() Muchun Song
2026-04-05 12:52 ` [PATCH 11/49] mm: defer sparse_init() until after zone initialization Muchun Song
2026-04-05 12:52 ` [PATCH 12/49] mm: make set_pageblock_order() static Muchun Song
2026-04-05 12:52 ` [PATCH 13/49] mm: integrate sparse_vmemmap_init_nid_late() into sparse_init_nid() Muchun Song
2026-04-05 12:52 ` [PATCH 14/49] mm/cma: validate hugetlb CMA range by zone at reserve time Muchun Song
2026-04-05 12:52 ` [PATCH 15/49] mm/hugetlb: free cross-zone bootmem gigantic pages after allocation Muchun Song
2026-04-05 12:52 ` [PATCH 16/49] mm/hugetlb: initialize vmemmap optimization in early stage Muchun Song
2026-04-05 12:52 ` [PATCH 17/49] mm: remove sparse_vmemmap_init_nid_late() Muchun Song
2026-04-05 12:52 ` [PATCH 18/49] mm/mm_init: make __init_page_from_nid() static Muchun Song
2026-04-05 12:52 ` [PATCH 19/49] mm/sparse-vmemmap: remove the VMEMMAP_POPULATE_PAGEREF flag Muchun Song
2026-04-05 12:52 ` [PATCH 20/49] mm: rename vmemmap optimization macros to generic names Muchun Song
2026-04-05 12:52 ` [PATCH 21/49] mm/sparse: drop power-of-2 size requirement for struct mem_section Muchun Song
2026-04-05 12:52 ` [PATCH 22/49] mm/sparse: introduce compound page order to mem_section Muchun Song
2026-04-05 12:52 ` [PATCH 23/49] mm/mm_init: skip initializing shared tail pages for compound pages Muchun Song
2026-04-05 12:52 ` [PATCH 24/49] mm/sparse-vmemmap: initialize shared tail vmemmap page upon allocation Muchun Song
2026-04-05 12:52 ` [PATCH 25/49] mm/sparse-vmemmap: support vmemmap-optimizable compound page population Muchun Song
2026-04-05 12:52 ` [PATCH 26/49] mm/hugetlb: use generic vmemmap optimization macros Muchun Song
2026-04-05 12:52 ` [PATCH 27/49] mm: call memblocks_present() before HugeTLB initialization Muchun Song
2026-04-05 12:52 ` [PATCH 28/49] mm/hugetlb: switch HugeTLB to use generic vmemmap optimization Muchun Song
2026-04-05 12:52 ` [PATCH 29/49] mm: extract pfn_to_zone() helper Muchun Song
2026-04-05 12:52 ` [PATCH 30/49] mm/sparse-vmemmap: remove unused SPARSEMEM_VMEMMAP_PREINIT feature Muchun Song
2026-04-05 12:52 ` [PATCH 31/49] mm/hugetlb: remove HUGE_BOOTMEM_HVO flag and simplify pre-HVO logic Muchun Song
2026-04-05 12:52 ` [PATCH 32/49] mm/sparse-vmemmap: consolidate shared tail page allocation Muchun Song
2026-04-05 12:52 ` [PATCH 33/49] mm: introduce CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION Muchun Song
2026-04-05 12:52 ` [PATCH 34/49] mm/sparse-vmemmap: switch DAX to use generic vmemmap optimization Muchun Song
2026-04-05 12:52 ` [PATCH 35/49] mm/sparse-vmemmap: introduce section zone to struct mem_section Muchun Song
2026-04-05 12:52 ` [PATCH 36/49] powerpc/mm: use generic vmemmap_shared_tail_page() in compound vmemmap Muchun Song
2026-04-05 12:52 ` [PATCH 37/49] mm/sparse-vmemmap: unify DAX and HugeTLB vmemmap optimization Muchun Song
2026-04-05 12:52 ` [PATCH 38/49] mm/sparse-vmemmap: remap the shared tail pages as read-only Muchun Song
2026-04-05 12:52 ` [PATCH 39/49] mm/sparse-vmemmap: remove unused ptpfn argument Muchun Song
2026-04-05 12:52 ` [PATCH 40/49] mm/hugetlb_vmemmap: remove vmemmap_wrprotect_hvo() and related code Muchun Song
2026-04-05 12:52 ` [PATCH 41/49] mm/sparse: simplify section_vmemmap_pages() Muchun Song
2026-04-05 12:52 ` [PATCH 42/49] mm/sparse-vmemmap: introduce section_vmemmap_page_structs() Muchun Song
2026-04-05 12:52 ` [PATCH 43/49] powerpc/mm: rely on generic vmemmap_can_optimize() to simplify code Muchun Song
2026-04-05 12:52 ` [PATCH 44/49] mm/sparse-vmemmap: drop ARCH_WANT_OPTIMIZE_DAX_VMEMMAP and simplify checks Muchun Song
2026-04-05 12:52 ` [PATCH 45/49] mm/sparse-vmemmap: drop @pgmap parameter from vmemmap populate APIs Muchun Song
2026-04-05 12:52 ` [PATCH 46/49] mm/sparse: replace pgmap with order and zone in sparse_add_section() Muchun Song
2026-04-05 12:52 ` [PATCH 47/49] mm: redefine HVO as Hugepage Vmemmap Optimization Muchun Song
2026-04-05 12:52 ` [PATCH 48/49] Documentation/mm: restructure vmemmap_dedup.rst to reflect generalized HVO Muchun Song
2026-04-05 12:52 ` Muchun Song [this message]
2026-04-05 13:34 ` [PATCH 00/49] mm: Generalize vmemmap optimization for DAX and HugeTLB Mike Rapoport
2026-04-06 19:59 ` David Hildenbrand (arm)
2026-04-08 15:29 ` Frank van der Linden
