From: Ackerley Tng <ackerleytng@google.com>
To: tabba@google.com, quic_eberman@quicinc.com, roypat@amazon.co.uk,
jgg@nvidia.com, peterx@redhat.com, david@redhat.com,
rientjes@google.com, fvdl@google.com, jthoughton@google.com,
seanjc@google.com, pbonzini@redhat.com, zhiquan1.li@intel.com,
fan.du@intel.com, jun.miao@intel.com, isaku.yamahata@intel.com,
muchun.song@linux.dev, mike.kravetz@oracle.com
Cc: erdemaktas@google.com, vannapurve@google.com,
ackerleytng@google.com, qperret@google.com, jhubbard@nvidia.com,
willy@infradead.org, shuah@kernel.org, brauner@kernel.org,
bfoster@redhat.com, kent.overstreet@linux.dev, pvorel@suse.cz,
rppt@kernel.org, richard.weiyang@gmail.com, anup@brainfault.org,
haibo1.xu@intel.com, ajones@ventanamicro.com,
vkuznets@redhat.com, maciej.wieczor-retman@intel.com,
pgonda@google.com, oliver.upton@linux.dev,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
kvm@vger.kernel.org, linux-kselftest@vger.kernel.org,
linux-fsdevel@kvack.org
Subject: [RFC PATCH 21/39] KVM: selftests: Test that guest_memfd usage is reported via hugetlb
Date: Tue, 10 Sep 2024 23:43:52 +0000 [thread overview]
Message-ID: <405825c1c3924ca534da3016dda812df17d6c233.1726009989.git.ackerleytng@google.com> (raw)
In-Reply-To: <cover.1726009989.git.ackerleytng@google.com>
Using HugeTLB as the huge page allocator for guest_memfd allows reuse
of HugeTLB's reporting mechanism.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
tools/testing/selftests/kvm/Makefile | 1 +
.../kvm/guest_memfd_hugetlb_reporting_test.c | 222 ++++++++++++++++++
2 files changed, 223 insertions(+)
create mode 100644 tools/testing/selftests/kvm/guest_memfd_hugetlb_reporting_test.c
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 48d32c5aa3eb..b3b7e83f39fc 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -134,6 +134,7 @@ TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
TEST_GEN_PROGS_x86_64 += guest_memfd_test
+TEST_GEN_PROGS_x86_64 += guest_memfd_hugetlb_reporting_test
TEST_GEN_PROGS_x86_64 += guest_print_test
TEST_GEN_PROGS_x86_64 += hardware_disable_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/guest_memfd_hugetlb_reporting_test.c b/tools/testing/selftests/kvm/guest_memfd_hugetlb_reporting_test.c
new file mode 100644
index 000000000000..cb9fdf0d4ec8
--- /dev/null
+++ b/tools/testing/selftests/kvm/guest_memfd_hugetlb_reporting_test.c
@@ -0,0 +1,222 @@
+#include <fcntl.h>
+#include <linux/falloc.h>
+#include <linux/kvm.h>
+#include <linux/limits.h>
+#include <linux/memfd.h>
+#include <string.h>
+#include <sys/mman.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+/*
+ * Reads a single base-10 integer from the file at @file_name (used for
+ * hugetlb sysfs statistic files).  Fails the test via TEST_ASSERT if the
+ * file cannot be opened or does not begin with a parseable integer.
+ */
+static int read_int(const char *file_name)
+{
+ FILE *fp;
+ int num;
+
+ fp = fopen(file_name, "r");
+ TEST_ASSERT(fp != NULL, "Error opening file %s!\n", file_name);
+
+ /* fscanf() returns the number of items matched; exactly 1 expected. */
+ TEST_ASSERT_EQ(fscanf(fp, "%d", &num), 1);
+
+ fclose(fp);
+
+ return num;
+}
+
+/* Hugetlb statistics exercised by this test; indexes hugetlb_statistics[]. */
+enum hugetlb_statistic {
+ FREE_HUGEPAGES,
+ NR_HUGEPAGES,
+ NR_OVERCOMMIT_HUGEPAGES,
+ RESV_HUGEPAGES,
+ SURPLUS_HUGEPAGES,
+ NR_TESTED_HUGETLB_STATISTICS,
+};
+
+/* sysfs file names under /sys/kernel/mm/hugepages/hugepages-<size>/. */
+static const char *hugetlb_statistics[NR_TESTED_HUGETLB_STATISTICS] = {
+ [FREE_HUGEPAGES] = "free_hugepages",
+ [NR_HUGEPAGES] = "nr_hugepages",
+ [NR_OVERCOMMIT_HUGEPAGES] = "nr_overcommit_hugepages",
+ [RESV_HUGEPAGES] = "resv_hugepages",
+ [SURPLUS_HUGEPAGES] = "surplus_hugepages",
+};
+
+/* Hugetlb page sizes the test is run against. */
+enum test_page_size {
+ TEST_SZ_2M,
+ TEST_SZ_1G,
+ NR_TEST_SIZES,
+};
+
+/* Per-page-size parameters for both the memfd baseline and guest_memfd. */
+struct test_param {
+ size_t page_size;
+ int memfd_create_flags; /* flags for the memfd_create() hugetlb baseline */
+ int guest_memfd_flags; /* flags for vm_create_guest_memfd() */
+ char *path_suffix; /* sysfs directory suffix, e.g. "2048kB" */
+};
+
+/*
+ * Returns the test parameters (page size, memfd_create() flags,
+ * guest_memfd flags, sysfs path suffix) for the requested page size.
+ */
+const struct test_param *test_params(enum test_page_size size)
+{
+	static const struct test_param params[] = {
+		[TEST_SZ_2M] = {
+			.page_size = PG_SIZE_2M,
+			.memfd_create_flags = MFD_HUGETLB | MFD_HUGE_2MB,
+			.guest_memfd_flags = KVM_GUEST_MEMFD_HUGETLB | KVM_GUEST_MEMFD_HUGE_2MB,
+			.path_suffix = "2048kB",
+		},
+		[TEST_SZ_1G] = {
+			.page_size = PG_SIZE_1G,
+			.memfd_create_flags = MFD_HUGETLB | MFD_HUGE_1GB,
+			.guest_memfd_flags = KVM_GUEST_MEMFD_HUGETLB | KVM_GUEST_MEMFD_HUGE_1GB,
+			.path_suffix = "1048576kB",
+		},
+	};
+
+	return &params[size];
+}
+
+/*
+ * Reads one hugetlb statistic for @size from
+ * /sys/kernel/mm/hugepages/hugepages-<size>/<statistic>.
+ */
+static int read_statistic(enum test_page_size size, enum hugetlb_statistic statistic)
+{
+	char path[PATH_MAX];
+	int r;
+
+	/* Single bounded snprintf() instead of repeated strcat(). */
+	r = snprintf(path, sizeof(path), "/sys/kernel/mm/hugepages/hugepages-%s/%s",
+		     test_params(size)->path_suffix,
+		     hugetlb_statistics[statistic]);
+	TEST_ASSERT(r > 0 && r < (int)sizeof(path),
+		    "Error building statistic path!\n");
+
+	return read_int(path);
+}
+
+/* Pre-test snapshot of every statistic; assert_stats*() compare against it. */
+static int baseline[NR_TEST_SIZES][NR_TESTED_HUGETLB_STATISTICS];
+
+/* Records the current value of every tested statistic for every page size. */
+static void establish_baseline(void)
+{
+ int i, j;
+
+ for (i = 0; i < NR_TEST_SIZES; ++i)
+ for (j = 0; j < NR_TESTED_HUGETLB_STATISTICS; ++j)
+ baseline[i][j] = read_statistic(i, j);
+}
+
+/*
+ * Asserts that every tested hugetlb statistic, for every tested page
+ * size, matches the values recorded by establish_baseline().  Iterates
+ * in the same order as the original unrolled checks (all statistics for
+ * 2M, then all for 1G).
+ */
+static void assert_stats_at_baseline(void)
+{
+	int i, j;
+
+	for (i = 0; i < NR_TEST_SIZES; ++i)
+		for (j = 0; j < NR_TESTED_HUGETLB_STATISTICS; ++j)
+			TEST_ASSERT_EQ(read_statistic(i, j), baseline[i][j]);
+}
+
+/*
+ * Asserts the expected statistics after @num_reserved pages have been
+ * reserved and @num_faulted of them actually faulted in: faulting
+ * consumes free pages, while reservations not yet faulted remain in
+ * resv_hugepages.  Pool size (nr_hugepages) and the overcommit/surplus
+ * counts must not change.
+ */
+static void assert_stats(enum test_page_size size, int num_reserved, int num_faulted)
+{
+ TEST_ASSERT_EQ(read_statistic(size, FREE_HUGEPAGES),
+ baseline[size][FREE_HUGEPAGES] - num_faulted);
+ TEST_ASSERT_EQ(read_statistic(size, NR_HUGEPAGES),
+ baseline[size][NR_HUGEPAGES]);
+ TEST_ASSERT_EQ(read_statistic(size, NR_OVERCOMMIT_HUGEPAGES),
+ baseline[size][NR_OVERCOMMIT_HUGEPAGES]);
+ TEST_ASSERT_EQ(read_statistic(size, RESV_HUGEPAGES),
+ baseline[size][RESV_HUGEPAGES] + num_reserved - num_faulted);
+ TEST_ASSERT_EQ(read_statistic(size, SURPLUS_HUGEPAGES),
+ baseline[size][SURPLUS_HUGEPAGES]);
+}
+
+/* Use hugetlb behavior as a baseline. guest_memfd should have comparable behavior. */
+/* Use hugetlb behavior as a baseline. guest_memfd should have comparable behavior. */
+static void test_hugetlb_behavior(enum test_page_size test_size)
+{
+	const struct test_param *param;
+	char *mem;
+	int memfd;
+
+	param = test_params(test_size);
+
+	assert_stats_at_baseline();
+
+	memfd = memfd_create("guest_memfd_hugetlb_reporting_test",
+			     param->memfd_create_flags);
+	TEST_ASSERT(memfd != -1, "Couldn't memfd_create()");
+
+	/* Mapping reserves one huge page but does not fault it in yet. */
+	mem = mmap(NULL, param->page_size, PROT_READ | PROT_WRITE,
+		   MAP_SHARED | MAP_HUGETLB, memfd, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "Couldn't mmap()");
+
+	assert_stats(test_size, 1, 0);
+
+	/* First write faults the page in: free_hugepages drops by 1. */
+	*mem = 'A';
+
+	assert_stats(test_size, 1, 1);
+
+	/* The memfd still holds the page, so stats are unchanged. */
+	munmap(mem, param->page_size);
+
+	assert_stats(test_size, 1, 1);
+
+	/*
+	 * NOTE(review): mem was just munmap()ed, so these madvise() calls
+	 * target an unmapped range and presumably fail with ENOMEM;
+	 * the page stays allocated to the memfd either way -- confirm
+	 * whether they were meant to run before munmap().
+	 */
+	madvise(mem, param->page_size, MADV_DONTNEED);
+
+	assert_stats(test_size, 1, 1);
+
+	madvise(mem, param->page_size, MADV_REMOVE);
+
+	assert_stats(test_size, 1, 1);
+
+	/* Dropping the last reference returns the page to the pool. */
+	close(memfd);
+
+	assert_stats_at_baseline();
+}
+
+/*
+ * guest_memfd backed by hugetlb should report allocations through the
+ * same sysfs statistics as regular hugetlb usage (mirrors
+ * test_hugetlb_behavior() above).
+ */
+static void test_guest_memfd_behavior(enum test_page_size test_size)
+{
+	const struct test_param *param;
+	struct kvm_vm *vm;
+	int guest_memfd;
+	int ret;
+
+	param = test_params(test_size);
+
+	assert_stats_at_baseline();
+
+	vm = vm_create_barebones_type(KVM_X86_SW_PROTECTED_VM);
+
+	/* Creating the guest_memfd reserves a page without faulting it in. */
+	guest_memfd = vm_create_guest_memfd(vm, param->page_size,
+					    param->guest_memfd_flags);
+
+	assert_stats(test_size, 1, 0);
+
+	/* Allocating via fallocate() faults the page in. */
+	ret = fallocate(guest_memfd, FALLOC_FL_KEEP_SIZE, 0, param->page_size);
+	TEST_ASSERT(ret == 0, "fallocate() failed");
+
+	assert_stats(test_size, 1, 1);
+
+	/* Punching a hole frees the page but keeps the reservation. */
+	ret = fallocate(guest_memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			0, param->page_size);
+	TEST_ASSERT(ret == 0, "fallocate(PUNCH_HOLE) failed");
+
+	assert_stats(test_size, 1, 0);
+
+	close(guest_memfd);
+
+	assert_stats_at_baseline();
+
+	kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ establish_baseline();
+
+ /*
+  * hugetlb-via-memfd baseline first, then guest_memfd; each test
+  * asserts all statistics are back at baseline before the next runs.
+  */
+ test_hugetlb_behavior(TEST_SZ_2M);
+ test_hugetlb_behavior(TEST_SZ_1G);
+
+ test_guest_memfd_behavior(TEST_SZ_2M);
+ test_guest_memfd_behavior(TEST_SZ_1G);
+}
--
2.46.0.598.g6f2099f65c-goog
next prev parent reply other threads:[~2024-09-10 23:45 UTC|newest]
Thread overview: 130+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-10 23:43 [RFC PATCH 00/39] 1G page support for guest_memfd Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 01/39] mm: hugetlb: Simplify logic in dequeue_hugetlb_folio_vma() Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 02/39] mm: hugetlb: Refactor vma_has_reserves() to should_use_hstate_resv() Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 03/39] mm: hugetlb: Remove unnecessary check for avoid_reserve Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 04/39] mm: mempolicy: Refactor out policy_node_nodemask() Ackerley Tng
2024-09-11 16:46 ` Gregory Price
2024-09-10 23:43 ` [RFC PATCH 05/39] mm: hugetlb: Refactor alloc_buddy_hugetlb_folio_with_mpol() to interpret mempolicy instead of vma Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 06/39] mm: hugetlb: Refactor dequeue_hugetlb_folio_vma() to use mpol Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 07/39] mm: hugetlb: Refactor out hugetlb_alloc_folio Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 08/39] mm: truncate: Expose preparation steps for truncate_inode_pages_final Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 09/39] mm: hugetlb: Expose hugetlb_subpool_{get,put}_pages() Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 10/39] mm: hugetlb: Add option to create new subpool without using surplus Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 11/39] mm: hugetlb: Expose hugetlb_acct_memory() Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 12/39] mm: hugetlb: Move and expose hugetlb_zero_partial_page() Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 13/39] KVM: guest_memfd: Make guest mem use guest mem inodes instead of anonymous inodes Ackerley Tng
2025-04-02 4:01 ` Yan Zhao
2025-04-23 20:22 ` Ackerley Tng
2025-04-24 3:53 ` Yan Zhao
2024-09-10 23:43 ` [RFC PATCH 14/39] KVM: guest_memfd: hugetlb: initialization and cleanup Ackerley Tng
2024-09-20 9:17 ` Vishal Annapurve
2024-10-01 23:00 ` Ackerley Tng
2024-12-01 17:59 ` Peter Xu
2025-02-13 9:47 ` Ackerley Tng
2025-02-26 18:55 ` Ackerley Tng
2025-03-06 17:33 ` Peter Xu
2024-09-10 23:43 ` [RFC PATCH 15/39] KVM: guest_memfd: hugetlb: allocate and truncate from hugetlb Ackerley Tng
2024-09-13 22:26 ` Elliot Berman
2024-10-03 20:23 ` Ackerley Tng
2024-10-30 9:01 ` Jun Miao
2025-02-11 1:21 ` Ackerley Tng
2024-12-01 17:55 ` Peter Xu
2025-02-13 7:52 ` Ackerley Tng
2025-02-13 16:48 ` Peter Xu
2024-09-10 23:43 ` [RFC PATCH 16/39] KVM: guest_memfd: Add page alignment check for hugetlb guest_memfd Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 17/39] KVM: selftests: Add basic selftests for hugetlb-backed guest_memfd Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 18/39] KVM: selftests: Support various types of backing sources for private memory Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 19/39] KVM: selftests: Update test for various private memory backing source types Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 20/39] KVM: selftests: Add private_mem_conversions_test.sh Ackerley Tng
2024-09-10 23:43 ` Ackerley Tng [this message]
2024-09-10 23:43 ` [RFC PATCH 22/39] mm: hugetlb: Expose vmemmap optimization functions Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 23/39] mm: hugetlb: Expose HugeTLB functions for promoting/demoting pages Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 24/39] mm: hugetlb: Add functions to add/move/remove from hugetlb lists Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 25/39] KVM: guest_memfd: Split HugeTLB pages for guest_memfd use Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 26/39] KVM: guest_memfd: Track faultability within a struct kvm_gmem_private Ackerley Tng
2024-10-10 16:06 ` Peter Xu
2024-10-11 23:32 ` Ackerley Tng
2024-10-15 21:34 ` Peter Xu
2024-10-15 23:42 ` Ackerley Tng
2024-10-16 8:45 ` David Hildenbrand
2024-10-16 20:16 ` Peter Xu
2024-10-16 22:51 ` Jason Gunthorpe
2024-10-16 23:49 ` Peter Xu
2024-10-16 23:54 ` Jason Gunthorpe
2024-10-17 14:58 ` Peter Xu
2024-10-17 16:47 ` Jason Gunthorpe
2024-10-17 17:05 ` Peter Xu
2024-10-17 17:10 ` Jason Gunthorpe
2024-10-17 19:11 ` Peter Xu
2024-10-17 19:18 ` Jason Gunthorpe
2024-10-17 19:29 ` David Hildenbrand
2024-10-18 7:15 ` Patrick Roy
2024-10-18 7:50 ` David Hildenbrand
2024-10-18 9:34 ` Patrick Roy
2024-10-17 17:11 ` David Hildenbrand
2024-10-17 17:16 ` Jason Gunthorpe
2024-10-17 17:55 ` David Hildenbrand
2024-10-17 18:26 ` Vishal Annapurve
2024-10-17 14:56 ` David Hildenbrand
2024-10-17 15:02 ` David Hildenbrand
2024-10-16 8:50 ` David Hildenbrand
2024-10-16 10:48 ` Vishal Annapurve
2024-10-16 11:54 ` David Hildenbrand
2024-10-16 11:57 ` Jason Gunthorpe
2025-02-25 20:37 ` Peter Xu
2025-04-23 22:07 ` Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 27/39] KVM: guest_memfd: Allow mmapping guest_memfd files Ackerley Tng
2025-01-20 22:42 ` Peter Xu
2025-04-23 20:25 ` Ackerley Tng
2025-03-04 23:24 ` Peter Xu
2025-04-02 4:07 ` Yan Zhao
2025-04-23 20:28 ` Ackerley Tng
2024-09-10 23:43 ` [RFC PATCH 28/39] KVM: guest_memfd: Use vm_type to determine default faultability Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 29/39] KVM: Handle conversions in the SET_MEMORY_ATTRIBUTES ioctl Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 30/39] KVM: guest_memfd: Handle folio preparation for guest_memfd mmap Ackerley Tng
2024-09-16 20:00 ` Elliot Berman
2024-10-03 21:32 ` Ackerley Tng
2024-10-03 23:43 ` Ackerley Tng
2024-10-08 19:30 ` Sean Christopherson
2024-10-07 15:56 ` Patrick Roy
2024-10-08 18:07 ` Ackerley Tng
2024-10-08 19:56 ` Sean Christopherson
2024-10-09 3:51 ` Manwaring, Derek
2024-10-09 13:52 ` Andrew Cooper
2024-10-10 16:21 ` Patrick Roy
2024-10-10 19:27 ` Manwaring, Derek
2024-10-17 23:16 ` Ackerley Tng
2024-10-18 7:10 ` Patrick Roy
2024-09-10 23:44 ` [RFC PATCH 31/39] KVM: selftests: Allow vm_set_memory_attributes to be used without asserting return value of 0 Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 32/39] KVM: selftests: Test using guest_memfd memory from userspace Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 33/39] KVM: selftests: Test guest_memfd memory sharing between guest and host Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 34/39] KVM: selftests: Add notes in private_mem_kvm_exits_test for mmap-able guest_memfd Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 35/39] KVM: selftests: Test that pinned pages block KVM from setting memory attributes to PRIVATE Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 36/39] KVM: selftests: Refactor vm_mem_add to be more flexible Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 37/39] KVM: selftests: Add helper to perform madvise by memslots Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 38/39] KVM: selftests: Update private_mem_conversions_test for mmap()able guest_memfd Ackerley Tng
2024-09-10 23:44 ` [RFC PATCH 39/39] KVM: guest_memfd: Dynamically split/reconstruct HugeTLB page Ackerley Tng
2025-04-03 12:33 ` Yan Zhao
2025-04-23 22:02 ` Ackerley Tng
2025-04-24 1:09 ` Yan Zhao
2025-04-24 4:25 ` Yan Zhao
2025-04-24 5:55 ` Chenyi Qiang
2025-04-24 8:13 ` Yan Zhao
2025-04-24 14:10 ` Vishal Annapurve
2025-04-24 18:15 ` Ackerley Tng
2025-04-25 4:02 ` Yan Zhao
2025-04-25 22:45 ` Ackerley Tng
2025-04-28 1:05 ` Yan Zhao
2025-04-28 19:02 ` Vishal Annapurve
2025-04-30 20:09 ` Ackerley Tng
2025-05-06 1:23 ` Yan Zhao
2025-05-06 19:22 ` Ackerley Tng
2025-05-07 3:15 ` Yan Zhao
2025-05-13 17:33 ` Ackerley Tng
2024-09-11 6:56 ` [RFC PATCH 00/39] 1G page support for guest_memfd Michal Hocko
2024-09-14 1:08 ` Du, Fan
2024-09-14 13:34 ` Vishal Annapurve
2025-01-28 9:42 ` Amit Shah
2025-02-03 8:35 ` Ackerley Tng
2025-02-06 11:07 ` Amit Shah
2025-02-07 6:25 ` Ackerley Tng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=405825c1c3924ca534da3016dda812df17d6c233.1726009989.git.ackerleytng@google.com \
--to=ackerleytng@google.com \
--cc=ajones@ventanamicro.com \
--cc=anup@brainfault.org \
--cc=bfoster@redhat.com \
--cc=brauner@kernel.org \
--cc=david@redhat.com \
--cc=erdemaktas@google.com \
--cc=fan.du@intel.com \
--cc=fvdl@google.com \
--cc=haibo1.xu@intel.com \
--cc=isaku.yamahata@intel.com \
--cc=jgg@nvidia.com \
--cc=jhubbard@nvidia.com \
--cc=jthoughton@google.com \
--cc=jun.miao@intel.com \
--cc=kent.overstreet@linux.dev \
--cc=kvm@vger.kernel.org \
--cc=linux-fsdevel@kvack.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=maciej.wieczor-retman@intel.com \
--cc=mike.kravetz@oracle.com \
--cc=muchun.song@linux.dev \
--cc=oliver.upton@linux.dev \
--cc=pbonzini@redhat.com \
--cc=peterx@redhat.com \
--cc=pgonda@google.com \
--cc=pvorel@suse.cz \
--cc=qperret@google.com \
--cc=quic_eberman@quicinc.com \
--cc=richard.weiyang@gmail.com \
--cc=rientjes@google.com \
--cc=roypat@amazon.co.uk \
--cc=rppt@kernel.org \
--cc=seanjc@google.com \
--cc=shuah@kernel.org \
--cc=tabba@google.com \
--cc=vannapurve@google.com \
--cc=vkuznets@redhat.com \
--cc=willy@infradead.org \
--cc=zhiquan1.li@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox