From: Changyuan Lyu <changyuanl@google.com>
To: linux-kernel@vger.kernel.org
Cc: graf@amazon.com, akpm@linux-foundation.org, luto@kernel.org,
anthony.yznaga@oracle.com, arnd@arndb.de, ashish.kalra@amd.com,
benh@kernel.crashing.org, bp@alien8.de, catalin.marinas@arm.com,
dave.hansen@linux.intel.com, dwmw2@infradead.org,
ebiederm@xmission.com, mingo@redhat.com, jgowans@amazon.com,
corbet@lwn.net, krzk@kernel.org, rppt@kernel.org,
mark.rutland@arm.com, pbonzini@redhat.com,
pasha.tatashin@soleen.com, hpa@zytor.com, peterz@infradead.org,
ptyadav@amazon.de, robh+dt@kernel.org, robh@kernel.org,
saravanak@google.com, skinsburskii@linux.microsoft.com,
rostedt@goodmis.org, tglx@linutronix.de,
thomas.lendacky@amd.com, usama.arif@bytedance.com,
will@kernel.org, devicetree@vger.kernel.org,
kexec@lists.infradead.org, linux-arm-kernel@lists.infradead.org,
linux-doc@vger.kernel.org, linux-mm@kvack.org, x86@kernel.org,
Changyuan Lyu <changyuanl@google.com>
Subject: [PATCH v5 03/16] memblock: add MEMBLOCK_RSRV_KERN flag
Date: Wed, 19 Mar 2025 18:55:38 -0700
Message-ID: <20250320015551.2157511-4-changyuanl@google.com>
In-Reply-To: <20250320015551.2157511-1-changyuanl@google.com>
From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
to denote areas that were reserved for kernel use either directly with
memblock_reserve_kern() or via memblock allocations.
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Co-developed-by: Changyuan Lyu <changyuanl@google.com>
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
---
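A minimal usage sketch of the new interfaces (illustrative only, not part
of the patch; base, size, limit and kern_size below are placeholder names):

    phys_addr_t base = SZ_1M, size = SZ_4K, limit = SZ_1G;
    phys_addr_t kern_size;
    void *p;

    /* Reserve a range for kernel use; the region gets MEMBLOCK_RSRV_KERN. */
    memblock_reserve_kern(base, size);

    /* Plain memblock_reserve() keeps its old behaviour (no flag is set). */
    memblock_reserve(base + SZ_2M, size);

    /* Allocations set MEMBLOCK_RSRV_KERN implicitly, so they count too. */
    p = memblock_alloc(size, SMP_CACHE_BYTES);

    /* Total kernel-reserved memory below 'limit', summed over all nodes. */
    kern_size = memblock_reserved_kern_size(limit, NUMA_NO_NODE);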
 include/linux/memblock.h                     | 19 ++++++++-
 mm/memblock.c                                | 40 +++++++++++++++----
 tools/testing/memblock/tests/alloc_api.c     | 22 +++++-----
 .../memblock/tests/alloc_helpers_api.c       |  4 +-
 tools/testing/memblock/tests/alloc_nid_api.c | 20 +++++-----
 5 files changed, 73 insertions(+), 32 deletions(-)
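The memblock test updates below follow from the new flag: memblock
allocations now reserve their ranges with MEMBLOCK_RSRV_KERN, and adjacent
regions are only merged when their flags match, so tests that expect an
allocation to coalesce with a pre-reserved neighbour have to reserve that
neighbour with memblock_reserve_kern() as well. A rough sketch of the
expectation the tests rely on (illustrative only; r1 and r2_size stand in
for the values each test sets up):

    void *ptr;

    memblock_reserve_kern(r1.base, r1.size);        /* MEMBLOCK_RSRV_KERN      */
    ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES); /* allocation also sets it */
    /* matching flags: the two adjacent ranges merge into one reserved region */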
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e79eb6ac516f..1037fd7aabf4 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -42,6 +42,9 @@ extern unsigned long long max_possible_pfn;
* kernel resource tree.
* @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
* not initialized (only for reserved regions).
+ * @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
+ * either explicitly with memblock_reserve_kern() or via memblock
+ * allocation APIs. All memblock allocations set this flag.
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
@@ -50,6 +53,7 @@ enum memblock_flags {
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */
+ MEMBLOCK_RSRV_KERN = 0x20, /* memory reserved for kernel use */
};
/**
@@ -116,7 +120,19 @@ int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
-int memblock_reserve(phys_addr_t base, phys_addr_t size);
+int __memblock_reserve(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
+
+static __always_inline int memblock_reserve(phys_addr_t base, phys_addr_t size)
+{
+ return __memblock_reserve(base, size, NUMA_NO_NODE, 0);
+}
+
+static __always_inline int memblock_reserve_kern(phys_addr_t base, phys_addr_t size)
+{
+ return __memblock_reserve(base, size, NUMA_NO_NODE, MEMBLOCK_RSRV_KERN);
+}
+
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
@@ -477,6 +493,7 @@ static inline __init_memblock bool memblock_bottom_up(void)
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
+phys_addr_t memblock_reserved_kern_size(phys_addr_t limit, int nid);
unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
diff --git a/mm/memblock.c b/mm/memblock.c
index 95af35fd1389..e704e3270b32 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -491,7 +491,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
* needn't do it
*/
if (!use_slab)
- BUG_ON(memblock_reserve(addr, new_alloc_size));
+ BUG_ON(memblock_reserve_kern(addr, new_alloc_size));
/* Update slab flag */
*in_slab = use_slab;
@@ -641,7 +641,7 @@ static int __init_memblock memblock_add_range(struct memblock_type *type,
#ifdef CONFIG_NUMA
WARN_ON(nid != memblock_get_region_node(rgn));
#endif
- WARN_ON(flags != rgn->flags);
+ WARN_ON(flags != MEMBLOCK_NONE && flags != rgn->flags);
nr_new++;
if (insert) {
if (start_rgn == -1)
@@ -901,14 +901,15 @@ int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
return memblock_remove_range(&memblock.reserved, base, size);
}
-int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
+ int nid, enum memblock_flags flags)
{
phys_addr_t end = base + size - 1;
- memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
- &base, &end, (void *)_RET_IP_);
+ memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
+ &base, &end, nid, flags, (void *)_RET_IP_);
- return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
+ return memblock_add_range(&memblock.reserved, base, size, nid, flags);
}
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -1459,14 +1460,14 @@ phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
again:
found = memblock_find_in_range_node(size, align, start, end, nid,
flags);
- if (found && !memblock_reserve(found, size))
+ if (found && !__memblock_reserve(found, size, nid, MEMBLOCK_RSRV_KERN))
goto done;
if (numa_valid_node(nid) && !exact_nid) {
found = memblock_find_in_range_node(size, align, start,
end, NUMA_NO_NODE,
flags);
- if (found && !memblock_reserve(found, size))
+ if (found && !memblock_reserve_kern(found, size))
goto done;
}
@@ -1751,6 +1752,28 @@ phys_addr_t __init_memblock memblock_reserved_size(void)
return memblock.reserved.total_size;
}
+phys_addr_t __init_memblock memblock_reserved_kern_size(phys_addr_t limit, int nid)
+{
+ struct memblock_region *r;
+ phys_addr_t total = 0;
+
+ for_each_reserved_mem_region(r) {
+ phys_addr_t size = r->size;
+
+ if (r->base > limit)
+ break;
+
+ if (r->base + r->size > limit)
+ size = limit - r->base;
+
+ if (nid == memblock_get_region_node(r) || !numa_valid_node(nid))
+ if (r->flags & MEMBLOCK_RSRV_KERN)
+ total += size;
+ }
+
+ return total;
+}
+
/**
* memblock_estimated_nr_free_pages - return estimated number of free pages
* from memblock point of view
@@ -2397,6 +2420,7 @@ static const char * const flagname[] = {
[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
[ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
+ [ilog2(MEMBLOCK_RSRV_KERN)] = "RSV_KERN",
};
static int memblock_debug_show(struct seq_file *m, void *private)
diff --git a/tools/testing/memblock/tests/alloc_api.c b/tools/testing/memblock/tests/alloc_api.c
index 68f1a75cd72c..c55f67dd367d 100644
--- a/tools/testing/memblock/tests/alloc_api.c
+++ b/tools/testing/memblock/tests/alloc_api.c
@@ -134,7 +134,7 @@ static int alloc_top_down_before_check(void)
PREFIX_PUSH();
setup_memblock();
- memblock_reserve(memblock_end_of_DRAM() - total_size, r1_size);
+ memblock_reserve_kern(memblock_end_of_DRAM() - total_size, r1_size);
allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
@@ -182,7 +182,7 @@ static int alloc_top_down_after_check(void)
total_size = r1.size + r2_size;
- memblock_reserve(r1.base, r1.size);
+ memblock_reserve_kern(r1.base, r1.size);
allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
@@ -231,8 +231,8 @@ static int alloc_top_down_second_fit_check(void)
total_size = r1.size + r2.size + r3_size;
- memblock_reserve(r1.base, r1.size);
- memblock_reserve(r2.base, r2.size);
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
@@ -285,8 +285,8 @@ static int alloc_in_between_generic_check(void)
total_size = r1.size + r2.size + r3_size;
- memblock_reserve(r1.base, r1.size);
- memblock_reserve(r2.base, r2.size);
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
@@ -422,7 +422,7 @@ static int alloc_limited_space_generic_check(void)
setup_memblock();
/* Simulate almost-full memory */
- memblock_reserve(memblock_start_of_DRAM(), reserved_size);
+ memblock_reserve_kern(memblock_start_of_DRAM(), reserved_size);
allocated_ptr = run_memblock_alloc(available_size, SMP_CACHE_BYTES);
@@ -608,7 +608,7 @@ static int alloc_bottom_up_before_check(void)
PREFIX_PUSH();
setup_memblock();
- memblock_reserve(memblock_start_of_DRAM() + r1_size, r2_size);
+ memblock_reserve_kern(memblock_start_of_DRAM() + r1_size, r2_size);
allocated_ptr = run_memblock_alloc(r1_size, SMP_CACHE_BYTES);
@@ -655,7 +655,7 @@ static int alloc_bottom_up_after_check(void)
total_size = r1.size + r2_size;
- memblock_reserve(r1.base, r1.size);
+ memblock_reserve_kern(r1.base, r1.size);
allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
@@ -705,8 +705,8 @@ static int alloc_bottom_up_second_fit_check(void)
total_size = r1.size + r2.size + r3_size;
- memblock_reserve(r1.base, r1.size);
- memblock_reserve(r2.base, r2.size);
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
diff --git a/tools/testing/memblock/tests/alloc_helpers_api.c b/tools/testing/memblock/tests/alloc_helpers_api.c
index 3ef9486da8a0..e5362cfd2ff3 100644
--- a/tools/testing/memblock/tests/alloc_helpers_api.c
+++ b/tools/testing/memblock/tests/alloc_helpers_api.c
@@ -163,7 +163,7 @@ static int alloc_from_top_down_no_space_above_check(void)
min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
/* No space above this address */
- memblock_reserve(min_addr, r2_size);
+ memblock_reserve_kern(min_addr, r2_size);
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
@@ -199,7 +199,7 @@ static int alloc_from_top_down_min_addr_cap_check(void)
start_addr = (phys_addr_t)memblock_start_of_DRAM();
min_addr = start_addr - SMP_CACHE_BYTES * 3;
- memblock_reserve(start_addr + r1_size, MEM_SIZE - r1_size);
+ memblock_reserve_kern(start_addr + r1_size, MEM_SIZE - r1_size);
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
diff --git a/tools/testing/memblock/tests/alloc_nid_api.c b/tools/testing/memblock/tests/alloc_nid_api.c
index 49bb416d34ff..562e4701b0e0 100644
--- a/tools/testing/memblock/tests/alloc_nid_api.c
+++ b/tools/testing/memblock/tests/alloc_nid_api.c
@@ -324,7 +324,7 @@ static int alloc_nid_min_reserved_generic_check(void)
min_addr = max_addr - r2_size;
reserved_base = min_addr - r1_size;
- memblock_reserve(reserved_base, r1_size);
+ memblock_reserve_kern(reserved_base, r1_size);
allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
min_addr, max_addr,
@@ -374,7 +374,7 @@ static int alloc_nid_max_reserved_generic_check(void)
max_addr = memblock_end_of_DRAM() - r1_size;
min_addr = max_addr - r2_size;
- memblock_reserve(max_addr, r1_size);
+ memblock_reserve_kern(max_addr, r1_size);
allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
min_addr, max_addr,
@@ -436,8 +436,8 @@ static int alloc_nid_top_down_reserved_with_space_check(void)
min_addr = r2.base + r2.size;
max_addr = r1.base;
- memblock_reserve(r1.base, r1.size);
- memblock_reserve(r2.base, r2.size);
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
min_addr, max_addr,
@@ -499,8 +499,8 @@ static int alloc_nid_reserved_full_merge_generic_check(void)
min_addr = r2.base + r2.size;
max_addr = r1.base;
- memblock_reserve(r1.base, r1.size);
- memblock_reserve(r2.base, r2.size);
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
min_addr, max_addr,
@@ -563,8 +563,8 @@ static int alloc_nid_top_down_reserved_no_space_check(void)
min_addr = r2.base + r2.size;
max_addr = r1.base;
- memblock_reserve(r1.base, r1.size);
- memblock_reserve(r2.base, r2.size);
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
min_addr, max_addr,
@@ -909,8 +909,8 @@ static int alloc_nid_bottom_up_reserved_with_space_check(void)
min_addr = r2.base + r2.size;
max_addr = r1.base;
- memblock_reserve(r1.base, r1.size);
- memblock_reserve(r2.base, r2.size);
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
min_addr, max_addr,
--
2.48.1.711.g2feabab25a-goog