From: Brendan Jackman <jackmanb@google.com>
To: Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
Vlastimil Babka <vbabka@kernel.org>, Wei Xu <weixugc@google.com>,
Johannes Weiner <hannes@cmpxchg.org>, Zi Yan <ziy@nvidia.com>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, x86@kernel.org,
rppt@kernel.org, Sumit Garg <sumit.garg@oss.qualcomm.com>,
derkling@google.com, reijiw@google.com,
Will Deacon <will@kernel.org>,
rientjes@google.com, "Kalyazin, Nikita" <kalyazin@amazon.co.uk>,
patrick.roy@linux.dev, "Itazuri, Takahiro" <itazur@amazon.co.uk>,
Andy Lutomirski <luto@kernel.org>,
David Kaplan <david.kaplan@amd.com>,
Thomas Gleixner <tglx@kernel.org>,
Brendan Jackman <jackmanb@google.com>,
Yosry Ahmed <yosry.ahmed@linux.dev>
Subject: [PATCH RFC 05/19] mm: KUnit tests for the mermap
Date: Wed, 25 Feb 2026 16:34:30 +0000 [thread overview]
Message-ID: <20260225-page_alloc-unmapped-v1-5-e8808a03cd66@google.com> (raw)
In-Reply-To: <20260225-page_alloc-unmapped-v1-0-e8808a03cd66@google.com>
Some simple smoke-tests for the mermap. Mainly aiming to test:
1. That there aren't any silly off-by-ones.
2. That the pagetables are not completely broken.
3. That the TLB appears to get flushed basically when expected.
This last point requires a bit of ifdeffery to detect when the flushing
has been performed.
Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
include/linux/mermap_types.h | 2 +-
mm/Kconfig | 11 +++
mm/Makefile | 1 +
mm/mermap.c | 14 ++-
mm/tests/mermap_kunit.c | 231 +++++++++++++++++++++++++++++++++++++++++++
5 files changed, 253 insertions(+), 6 deletions(-)
diff --git a/include/linux/mermap_types.h b/include/linux/mermap_types.h
index 08e43100b790e..6b295251b7b01 100644
--- a/include/linux/mermap_types.h
+++ b/include/linux/mermap_types.h
@@ -23,7 +23,7 @@ struct mermap_cpu {
/* Next address immediately available for alloc (no TLB flush needed). */
unsigned long next_addr;
struct mermap_alloc allocs[4];
-#ifdef CONFIG_MERMAP_KUNIT_TEST
+#if IS_ENABLED(CONFIG_MERMAP_KUNIT_TEST)
u64 tlb_flushes;
#endif
};
diff --git a/mm/Kconfig b/mm/Kconfig
index 06c1c125e9636..bd49eb9ef2165 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1491,4 +1491,15 @@ config MERMAP
help
Support for epheMERal mappings within the kernel.
+config MERMAP_KUNIT_TEST
+ tristate "KUnit tests for the mermap" if !KUNIT_ALL_TESTS
+ depends on ARCH_SUPPORTS_MERMAP
+ depends on KUNIT
+ select MERMAP
+ default KUNIT_ALL_TESTS
+ help
+ KUnit test for the mermap.
+
+ If unsure, say N.
+
endmenu
diff --git a/mm/Makefile b/mm/Makefile
index b1ac133fe603e..42c8ca32359ae 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -151,3 +151,4 @@ obj-$(CONFIG_EXECMEM) += execmem.o
obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
obj-$(CONFIG_LAZY_MMU_MODE_KUNIT_TEST) += tests/lazy_mmu_mode_kunit.o
obj-$(CONFIG_MERMAP) += mermap.o
+obj-$(CONFIG_MERMAP_KUNIT_TEST) += tests/mermap_kunit.o
diff --git a/mm/mermap.c b/mm/mermap.c
index d65ecfc06b58e..d840d27cae14c 100644
--- a/mm/mermap.c
+++ b/mm/mermap.c
@@ -24,7 +24,7 @@ static inline int set_unmapped_pte(pte_t *ptep, unsigned long addr, void *data)
return 0;
}
-static void __mermap_put(struct mm_struct *mm, struct mermap_alloc *alloc)
+VISIBLE_IF_KUNIT void __mermap_put(struct mm_struct *mm, struct mermap_alloc *alloc)
{
unsigned long size = PAGE_ALIGN(alloc->end - alloc->base);
@@ -37,6 +37,7 @@ static void __mermap_put(struct mm_struct *mm, struct mermap_alloc *alloc)
migrate_enable();
}
+EXPORT_SYMBOL_IF_KUNIT(__mermap_put);
/* Return a region allocated by mermap_get(). */
void mermap_put(struct mermap_alloc *alloc)
@@ -45,22 +46,24 @@ void mermap_put(struct mermap_alloc *alloc)
}
EXPORT_SYMBOL(mermap_put);
-static inline unsigned long mermap_cpu_base(int cpu)
+VISIBLE_IF_KUNIT inline unsigned long mermap_cpu_base(int cpu)
{
return MERMAP_BASE_ADDR + (cpu * MERMAP_CPU_REGION_SIZE);
}
+EXPORT_SYMBOL_IF_KUNIT(mermap_cpu_base);
/* Non-inclusive */
-static inline unsigned long mermap_cpu_end(int cpu)
+VISIBLE_IF_KUNIT inline unsigned long mermap_cpu_end(int cpu)
{
return MERMAP_BASE_ADDR + ((cpu + 1) * MERMAP_CPU_REGION_SIZE);
}
+EXPORT_SYMBOL_IF_KUNIT(mermap_cpu_end);
static inline void mermap_flush_tlb(int cpu, struct mermap_cpu *mc)
{
-#ifdef CONFIG_MERMAP_KUNIT_TEST
+#if IS_ENABLED(CONFIG_MERMAP_KUNIT_TEST)
mc->tlb_flushes++;
#endif
arch_mermap_flush_tlb();
@@ -173,7 +176,7 @@ static inline int do_set_pte(pte_t *pte, unsigned long addr, void *data)
return 0;
}
-static struct mermap_alloc *
+VISIBLE_IF_KUNIT struct mermap_alloc *
__mermap_get(struct mm_struct *mm, struct page *page,
unsigned long size, pgprot_t prot, bool use_reserve)
{
@@ -207,6 +210,7 @@ __mermap_get(struct mm_struct *mm, struct page *page,
return alloc;
}
+EXPORT_SYMBOL_IF_KUNIT(__mermap_get);
/*
* Allocate a region of virtual memory, and map the page into it. This tries
diff --git a/mm/tests/mermap_kunit.c b/mm/tests/mermap_kunit.c
new file mode 100644
index 0000000000000..ec035b50b8250
--- /dev/null
+++ b/mm/tests/mermap_kunit.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/cacheflush.h>
+#include <linux/kthread.h>
+#include <linux/mermap.h>
+#include <linux/pgtable.h>
+
+#include <kunit/test.h>
+
+#define MERMAP_NR_ALLOCS ARRAY_SIZE(((struct mm_struct *)NULL)->mermap.cpu->allocs)
+
+KUNIT_DEFINE_ACTION_WRAPPER(__free_page_wrapper, __free_page, struct page *);
+
+/* alloc_page() plus KUnit-managed __free_page() cleanup; asserts on failure. */
+static inline struct page *alloc_page_wrapper(struct kunit *test, gfp_t gfp)
+{
+ struct page *page = alloc_page(gfp);
+
+ KUNIT_ASSERT_NOT_NULL(test, page);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, __free_page_wrapper, page), 0);
+ return page;
+}
+
+KUNIT_DEFINE_ACTION_WRAPPER(mmput_wrapper, mmput, struct mm_struct *);
+
+/* mm_alloc() plus KUnit-managed mmput() cleanup; asserts on failure. */
+static inline struct mm_struct *mm_alloc_wrapper(struct kunit *test)
+{
+ struct mm_struct *mm = mm_alloc();
+
+ KUNIT_ASSERT_NOT_NULL(test, mm);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, mmput_wrapper, mm), 0);
+ return mm;
+}
+
+/* Allocate a cleanup-managed mm and prepare its mermap state; asserts on failure. */
+static inline struct mm_struct *get_mm(struct kunit *test)
+{
+ struct mm_struct *mm = mm_alloc_wrapper(test);
+
+ KUNIT_ASSERT_EQ(test, mermap_mm_prepare(mm), 0);
+ return mm;
+}
+
+/* Bundles __mermap_put() arguments so it can run as a KUnit deferred action. */
+struct __mermap_put_args {
+ struct mm_struct *mm;
+ struct mermap_alloc *alloc;
+ unsigned long size;
+};
+
+static inline void __mermap_put_wrapper(void *ctx)
+{
+ struct __mermap_put_args *args = (struct __mermap_put_args *)ctx;
+
+ __mermap_put(args->mm, args->alloc);
+}
+
+/* Call __mermap_get() with use_reserve=false; register cleanup only on success. */
+static inline struct __mermap_put_args *
+__mermap_get_wrapper(struct kunit *test, struct mm_struct *mm,
+ struct page *page, unsigned long size, pgprot_t prot)
+{
+ struct __mermap_put_args *args =
+ kunit_kmalloc(test, sizeof(struct __mermap_put_args), GFP_KERNEL);
+
+ KUNIT_ASSERT_NOT_NULL(test, args);
+ args->mm = mm;
+ args->alloc = __mermap_get(mm, page, size, prot, false);
+ args->size = size;
+
+ if (args->alloc) {
+ int err = kunit_add_action_or_reset(test, __mermap_put_wrapper, args);
+
+ KUNIT_ASSERT_EQ(test, err, 0);
+ }
+
+ return args;
+}
+
+/* Run the cleanup registered by __mermap_get_wrapper() immediately. */
+static inline void __mermap_put_early(struct kunit *test, struct __mermap_put_args *args)
+{
+ kunit_release_action(test, __mermap_put_wrapper, args);
+}
+
+/* Smoke test: a single page-sized allocation from a fresh mm succeeds. */
+static void test_basic_alloc(struct kunit *test)
+{
+ struct page *page = alloc_page_wrapper(test, GFP_KERNEL);
+ struct mm_struct *mm = get_mm(test);
+ struct __mermap_put_args *args;
+
+ args = __mermap_get_wrapper(test, mm, page, PAGE_SIZE, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, args->alloc);
+}
+
+/* Dumb check for off-by-ones at the edges of the per-CPU region. */
+static void test_size(struct kunit *test)
+{
+ struct page *page = alloc_page_wrapper(test, GFP_KERNEL);
+ struct __mermap_put_args *full, *large, *small, *fail;
+ struct mm_struct *mm = get_mm(test);
+ unsigned long region_size, large_size;
+ struct mermap_alloc *alloc;
+ int cpu;
+
+ migrate_disable();
+ cpu = raw_smp_processor_id();
+ region_size = mermap_cpu_end(cpu) - mermap_cpu_base(cpu) - PAGE_SIZE;
+ large_size = region_size - PAGE_SIZE;
+
+ /* Allocate whole region at once. */
+ full = __mermap_get_wrapper(test, mm, page, region_size, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, full->alloc);
+ __mermap_put_early(test, full);
+
+ /* Allocate larger than region size. */
+ fail = __mermap_get_wrapper(test, mm, page, region_size + PAGE_SIZE, PAGE_KERNEL);
+ KUNIT_ASSERT_NULL(test, fail->alloc);
+
+ /* Tiptoe up to the edge then past it. */
+ large = __mermap_get_wrapper(test, mm, page, large_size, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, large->alloc);
+ small = __mermap_get_wrapper(test, mm, page, PAGE_SIZE, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, small->alloc);
+ fail = __mermap_get_wrapper(test, mm, page, PAGE_SIZE, PAGE_KERNEL);
+ KUNIT_ASSERT_NULL(test, fail->alloc);
+
+ /* Can still allocate the reserved page. */
+ local_irq_disable();
+ alloc = __mermap_get(mm, page, PAGE_SIZE, PAGE_KERNEL, true);
+ local_irq_enable();
+ KUNIT_ASSERT_NOT_NULL(test, alloc);
+ __mermap_put(mm, alloc);
+
+ /* Balance the migrate_disable() above; this was missing. */
+ migrate_enable();
+}
+
+static void test_multiple_allocs(struct kunit *test)
+{
+ struct mm_struct *mm = get_mm(test);
+ struct __mermap_put_args *argss[MERMAP_NR_ALLOCS] = { };
+ struct page *pages[MERMAP_NR_ALLOCS];
+ int magic = 0xE4A4;
+
+ for (int i = 0; i < ARRAY_SIZE(pages); i++) {
+ pages[i] = alloc_page_wrapper(test, GFP_KERNEL);
+ WRITE_ONCE(*(int *)page_to_virt(pages[i]), magic + i);
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(argss); i++) {
+ unsigned long base = mermap_cpu_base(raw_smp_processor_id());
+ unsigned long end = mermap_cpu_end(raw_smp_processor_id());
+ unsigned long addr;
+
+ argss[i] = __mermap_get_wrapper(test, mm, pages[i], PAGE_SIZE, PAGE_KERNEL);
+ /* The wrapper asserts argss[i] != NULL itself; check the alloc. */
+ KUNIT_ASSERT_NOT_NULL_MSG(test, argss[i]->alloc, "alloc %d failed", i);
+
+ addr = (unsigned long) mermap_addr(argss[i]->alloc);
+ KUNIT_EXPECT_GE_MSG(test, addr, base, "alloc %d out of range", i);
+ KUNIT_EXPECT_LT_MSG(test, addr, end, "alloc %d out of range", i);
+ }
+
+ /*
+ * Read through the mappings to try and detect if they point to the
+ * pages we wrote earlier.
+ */
+ kthread_use_mm(mm);
+ for (int i = 0; i < ARRAY_SIZE(pages); i++) {
+ int *ptr = (int *)mermap_addr(argss[i]->alloc);
+
+ KUNIT_EXPECT_EQ(test, *ptr, magic + i);
+ }
+ kthread_unuse_mm(mm);
+}
+
+static void test_tlb_flushed(struct kunit *test)
+{
+ struct page *page = alloc_page_wrapper(test, GFP_KERNEL);
+ struct mm_struct *mm = get_mm(test);
+ unsigned long addr, prev_addr = 0;
+ /* Avoid running forever in the failure case. */
+ unsigned long max_iters = 1000000;
+ struct mermap_cpu *mc;
+
+ migrate_disable();
+ mc = this_cpu_ptr(mm->mermap.cpu);
+
+ /*
+ * Allocate until we see an address less than what we had before - assume
+ * that means a reuse.
+ */
+ for (int i = 0; i < max_iters; i++) {
+ struct mermap_alloc *alloc;
+
+ /*
+ * Obviously flushing the TLB already is not wrong per se, but
+ * it's unexpected and probably means there's some bug.
+ * Use ASSERT to avoid spamming the log in the failure case.
+ */
+ KUNIT_ASSERT_EQ_MSG(test, mc->tlb_flushes, 0,
+ "unexpected flush before alloc %d", i);
+
+ alloc = __mermap_get(mm, page, PAGE_SIZE, PAGE_KERNEL, false);
+ KUNIT_ASSERT_NOT_NULL_MSG(test, alloc, "alloc %d failed", i);
+
+ addr = (unsigned long)mermap_addr(alloc);
+ __mermap_put(mm, alloc);
+ if (addr < prev_addr)
+ break;
+
+ prev_addr = addr;
+ cond_resched();
+ }
+ KUNIT_ASSERT_TRUE_MSG(test, addr < prev_addr, "no address reuse");
+ /* Again, more than one flush isn't wrong per se, but probably a bug. */
+ KUNIT_ASSERT_EQ(test, mc->tlb_flushes, 1);
+
+ migrate_enable();
+}
+
+static struct kunit_case mermap_test_cases[] = {
+ KUNIT_CASE(test_basic_alloc),
+ KUNIT_CASE(test_size),
+ KUNIT_CASE(test_multiple_allocs),
+ KUNIT_CASE(test_tlb_flushed),
+ {}
+};
+
+static struct kunit_suite mermap_test_suite = {
+ .name = "mermap",
+ .test_cases = mermap_test_cases,
+};
+kunit_test_suite(mermap_test_suite);
+
+MODULE_DESCRIPTION("Mermap unit tests");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
--
2.51.2
next prev parent reply other threads:[~2026-02-25 16:34 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-25 16:34 [PATCH RFC 00/19] mm: Add __GFP_UNMAPPED Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 01/19] x86/mm: split out preallocate_sub_pgd() Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 02/19] x86/mm: Generalize LDT remap into "mm-local region" Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 03/19] x86/tlb: Expose some flush function declarations to modules Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 04/19] x86/mm: introduce the mermap Brendan Jackman
2026-02-25 16:34 ` Brendan Jackman [this message]
2026-02-25 16:34 ` [PATCH RFC 06/19] mm: introduce for_each_free_list() Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 07/19] mm/page_alloc: don't overload migratetype in find_suitable_fallback() Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 08/19] mm: introduce freetype_t Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 09/19] mm: move migratetype definitions to freetype.h Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 10/19] mm: add definitions for allocating unmapped pages Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 11/19] mm: rejig pageblock mask definitions Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 12/19] mm: encode freetype flags in pageblock flags Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 13/19] mm/page_alloc: remove ifdefs from pindex helpers Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 14/19] mm/page_alloc: separate pcplists by freetype flags Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 15/19] mm/page_alloc: rename ALLOC_NON_BLOCK back to _HARDER Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 16/19] mm/page_alloc: introduce ALLOC_NOBLOCK Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 17/19] mm/page_alloc: implement __GFP_UNMAPPED allocations Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 18/19] mm/page_alloc: implement __GFP_UNMAPPED|__GFP_ZERO allocations Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 19/19] mm: Minimal KUnit tests for some new page_alloc logic Brendan Jackman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260225-page_alloc-unmapped-v1-5-e8808a03cd66@google.com \
--to=jackmanb@google.com \
--cc=akpm@linux-foundation.org \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=david.kaplan@amd.com \
--cc=david@kernel.org \
--cc=derkling@google.com \
--cc=hannes@cmpxchg.org \
--cc=itazur@amazon.co.uk \
--cc=kalyazin@amazon.co.uk \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=luto@kernel.org \
--cc=patrick.roy@linux.dev \
--cc=peterz@infradead.org \
--cc=reijiw@google.com \
--cc=rientjes@google.com \
--cc=rppt@kernel.org \
--cc=sumit.garg@oss.qualcomm.com \
--cc=tglx@kernel.org \
--cc=vbabka@kernel.org \
--cc=weixugc@google.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yosry.ahmed@linux.dev \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox