From: Yafang Shao <laoar.shao@gmail.com>
To: akpm@linux-foundation.org, david@redhat.com, ziy@nvidia.com,
baolin.wang@linux.alibaba.com, lorenzo.stoakes@oracle.com,
Liam.Howlett@oracle.com, npache@redhat.com, ryan.roberts@arm.com,
dev.jain@arm.com, hannes@cmpxchg.org, usamaarif642@gmail.com,
gutierrez.asier@huawei-partners.com, willy@infradead.org,
ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
ameryhung@gmail.com, rientjes@google.com
Cc: bpf@vger.kernel.org, linux-mm@kvack.org,
Yafang Shao <laoar.shao@gmail.com>
Subject: [RFC PATCH v5 mm-new 5/5] selftest/bpf: add selftest for BPF based THP order selection
Date: Mon, 18 Aug 2025 13:55:10 +0800
Message-ID: <20250818055510.968-6-laoar.shao@gmail.com>
In-Reply-To: <20250818055510.968-1-laoar.shao@gmail.com>
This selftest verifies that PMD-mapped THP allocation is restricted during
page faults for tasks in a specific cgroup, while still permitting THP
allocation via khugepaged.
Since THP allocation depends on various factors (e.g., system memory
pressure), validating against the actually allocated THP size is
unreliable. Instead, the test checks the return value of
get_suggested_order(), which indicates whether the kernel intends to
allocate a THP, regardless of whether the allocation ultimately succeeds.
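The test can be exercised with the standard BPF selftest runner, for
example (assuming the selftests have been built and the command is run
from tools/testing/selftests/bpf):

  $ ./test_progs -t thp_adjust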
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
tools/testing/selftests/bpf/config | 3 +
.../selftests/bpf/prog_tests/thp_adjust.c | 224 ++++++++++++++++++
.../selftests/bpf/progs/test_thp_adjust.c | 76 ++++++
.../bpf/progs/test_thp_adjust_failure.c | 25 ++
4 files changed, 328 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/thp_adjust.c
create mode 100644 tools/testing/selftests/bpf/progs/test_thp_adjust.c
create mode 100644 tools/testing/selftests/bpf/progs/test_thp_adjust_failure.c
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 8916ab814a3e..27f0249c7600 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -26,6 +26,7 @@ CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_HEAPS_SYSTEM=y
CONFIG_DUMMY=y
CONFIG_DYNAMIC_FTRACE=y
+CONFIG_EXPERIMENTAL_BPF_ORDER_SELECTION=y
CONFIG_FPROBE=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_FUNCTION_ERROR_INJECTION=y
@@ -51,6 +52,7 @@ CONFIG_IPV6_TUNNEL=y
CONFIG_KEYS=y
CONFIG_LIRC=y
CONFIG_LWTUNNEL=y
+CONFIG_MEMCG=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_UNLOAD=y
@@ -114,6 +116,7 @@ CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SYN_COOKIES=y
CONFIG_TEST_BPF=m
+CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_UDMABUF=y
CONFIG_USERFAULTFD=y
CONFIG_VSOCKETS=y
diff --git a/tools/testing/selftests/bpf/prog_tests/thp_adjust.c b/tools/testing/selftests/bpf/prog_tests/thp_adjust.c
new file mode 100644
index 000000000000..959ea920b0ef
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/thp_adjust.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <math.h>
+#include <sys/mman.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "test_thp_adjust.skel.h"
+#include "test_thp_adjust_failure.skel.h"
+
+#define LEN (16 * 1024 * 1024) /* 16MB */
+#define THP_ENABLED_FILE "/sys/kernel/mm/transparent_hugepage/enabled"
+#define PMD_SIZE_FILE "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
+
+static char *thp_addr;
+static char old_mode[32];
+
+static int thp_mode_save(void)
+{
+ const char *start, *end;
+ char buf[128];
+ int fd, err;
+ size_t len;
+
+ fd = open(THP_ENABLED_FILE, O_RDONLY);
+ if (fd == -1)
+ return -1;
+
+ err = read(fd, buf, sizeof(buf) - 1);
+ if (err <= 0)
+ goto close;
+ buf[err] = '\0';
+ start = strchr(buf, '[');
+ end = start ? strchr(start, ']') : NULL;
+ if (!start || !end || end <= start) {
+ err = -1;
+ goto close;
+ }
+
+ len = end - start - 1;
+ if (len >= sizeof(old_mode))
+ len = sizeof(old_mode) - 1;
+ strncpy(old_mode, start + 1, len);
+ old_mode[len] = '\0';
+
+close:
+ close(fd);
+ return err;
+}
+
+static int thp_mode_set(const char *desired_mode)
+{
+ int fd, err;
+
+ fd = open(THP_ENABLED_FILE, O_RDWR);
+ if (fd == -1)
+ return -1;
+
+ err = write(fd, desired_mode, strlen(desired_mode));
+ close(fd);
+ return err;
+}
+
+static int thp_mode_reset(void)
+{
+ int fd, err;
+
+ fd = open(THP_ENABLED_FILE, O_WRONLY);
+ if (fd == -1)
+ return -1;
+
+ err = write(fd, old_mode, strlen(old_mode));
+ close(fd);
+ return err;
+}
+
+static int thp_alloc(long pagesize)
+{
+ int err, i;
+
+ thp_addr = mmap(NULL, LEN, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (thp_addr == MAP_FAILED)
+ return -1;
+
+ err = madvise(thp_addr, LEN, MADV_HUGEPAGE);
+ if (err == -1)
+ goto unmap;
+
+ /* Accessing a single byte within a page is sufficient to trigger a page fault. */
+ for (i = 0; i < LEN; i += pagesize)
+ thp_addr[i] = 1;
+ return 0;
+
+unmap:
+ munmap(thp_addr, LEN);
+ return -1;
+}
+
+static void thp_free(void)
+{
+ if (!thp_addr)
+ return;
+ munmap(thp_addr, LEN);
+}
+
+static int get_pmd_order(long pagesize)
+{
+ ssize_t bytes_read, size;
+ char buf[64], *endptr;
+ int fd, ret = -1;
+
+ fd = open(PMD_SIZE_FILE, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ bytes_read = read(fd, buf, sizeof(buf) - 1);
+ if (bytes_read <= 0)
+ goto close_fd;
+ buf[bytes_read] = '\0';
+ /* Remove a potential trailing newline */
+ if (buf[bytes_read - 1] == '\n')
+ buf[bytes_read - 1] = '\0';
+
+ size = strtoul(buf, &endptr, 10);
+ if (endptr == buf || *endptr != '\0')
+ goto close_fd;
+ if (size % pagesize != 0)
+ goto close_fd;
+ ret = size / pagesize;
+ if ((ret & (ret - 1)) == 0)
+ ret = log2(ret);
+
+close_fd:
+ close(fd);
+ return ret;
+}
+
+static void subtest_thp_adjust(void)
+{
+ struct bpf_link *fentry_link, *ops_link;
+ int err, cgrp_fd, cgrp_id, pmd_order;
+ struct test_thp_adjust *skel;
+ long pagesize;
+
+ pagesize = sysconf(_SC_PAGESIZE);
+ pmd_order = get_pmd_order(pagesize);
+ if (!ASSERT_NEQ(pmd_order, -1, "get_pmd_order"))
+ return;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "cgrp_env_setup"))
+ return;
+
+ cgrp_fd = create_and_get_cgroup("thp_adjust");
+ if (!ASSERT_GE(cgrp_fd, 0, "create_and_get_cgroup"))
+ goto cleanup;
+
+ err = join_cgroup("thp_adjust");
+ if (!ASSERT_OK(err, "join_cgroup"))
+ goto close_fd;
+
+ cgrp_id = get_cgroup_id("thp_adjust");
+ if (!ASSERT_GE(cgrp_id, 0, "get_cgroup_id"))
+ goto join_root;
+
+ if (!ASSERT_NEQ(thp_mode_save(), -1, "THP mode save"))
+ goto join_root;
+ if (!ASSERT_GE(thp_mode_set("madvise"), 0, "THP mode set"))
+ goto join_root;
+
+ skel = test_thp_adjust__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ goto thp_reset;
+
+ skel->bss->cgrp_id = cgrp_id;
+ skel->bss->pmd_order = pmd_order;
+
+ err = test_thp_adjust__load(skel);
+ if (!ASSERT_OK(err, "load"))
+ goto destroy;
+
+ fentry_link = bpf_program__attach_trace(skel->progs.thp_run);
+ if (!ASSERT_OK_PTR(fentry_link, "attach fentry"))
+ goto destroy;
+
+ ops_link = bpf_map__attach_struct_ops(skel->maps.thp);
+ if (!ASSERT_OK_PTR(ops_link, "attach struct_ops"))
+ goto destroy;
+
+ if (!ASSERT_NEQ(thp_alloc(pagesize), -1, "THP alloc"))
+ goto destroy;
+
+ /* After attaching struct_ops, THP will be allocated only in khugepaged. */
+ if (!ASSERT_EQ(skel->bss->pf_alloc, 0, "alloc_in_pf"))
+ goto thp_free;
+ if (!ASSERT_GT(skel->bss->pf_disallow, 0, "disallow_in_pf"))
+ goto thp_free;
+
+ if (!ASSERT_GT(skel->bss->khugepaged_alloc, 0, "alloc_in_khugepaged"))
+ goto thp_free;
+ ASSERT_EQ(skel->bss->khugepaged_disallow, 0, "disallow_in_khugepaged");
+
+thp_free:
+ thp_free();
+destroy:
+ test_thp_adjust__destroy(skel);
+thp_reset:
+ ASSERT_GE(thp_mode_reset(), 0, "THP mode reset");
+join_root:
+ /* We must join the root cgroup before removing the created cgroup. */
+ err = join_root_cgroup();
+ ASSERT_OK(err, "join_cgroup to root");
+close_fd:
+ close(cgrp_fd);
+ remove_cgroup("thp_adjust");
+cleanup:
+ cleanup_cgroup_environment();
+}
+
+void test_thp_adjust(void)
+{
+ if (test__start_subtest("thp_adjust"))
+ subtest_thp_adjust();
+ RUN_TESTS(test_thp_adjust_failure);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_thp_adjust.c b/tools/testing/selftests/bpf/progs/test_thp_adjust.c
new file mode 100644
index 000000000000..97908ef29852
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_thp_adjust.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define TVA_IN_PF (1 << 1)
+
+int pf_alloc, pf_disallow, khugepaged_alloc, khugepaged_disallow;
+struct mm_struct *target_mm;
+int pmd_order, cgrp_id;
+
+/* Detecting whether a task can successfully allocate THP is unreliable because
+ * it may be influenced by system memory pressure. Instead of making the result
+ * dependent on unpredictable factors, we should simply check
+ * get_suggested_order()'s return value, which is deterministic.
+ */
+SEC("fexit/get_suggested_order")
+int BPF_PROG(thp_run, struct mm_struct *mm, struct vm_area_struct *vma__nullable,
+ u64 vma_flags, u64 tva_flags, int orders, int retval)
+{
+ if (mm != target_mm)
+ return 0;
+
+ if (orders != (1 << pmd_order))
+ return 0;
+
+ if (tva_flags == TVA_PAGEFAULT) {
+ if (retval == (1 << pmd_order))
+ pf_alloc++;
+ else if (!retval)
+ pf_disallow++;
+ } else if (tva_flags == TVA_KHUGEPAGED || tva_flags == -1) {
+ if (retval == (1 << pmd_order))
+ khugepaged_alloc++;
+ else if (!retval)
+ khugepaged_disallow++;
+ }
+ return 0;
+}
+
+SEC("struct_ops/get_suggested_order")
+int BPF_PROG(bpf_suggested_order, struct mm_struct *mm, struct vm_area_struct *vma__nullable,
+ u64 vma_flags, enum tva_type tva_flags, int orders)
+{
+ struct mem_cgroup *memcg = bpf_mm_get_mem_cgroup(mm);
+ int suggested_orders = 0;
+
+ /* Only works when CONFIG_MEMCG is enabled. */
+ if (!memcg)
+ return suggested_orders;
+
+ if (memcg->css.cgroup->kn->id == cgrp_id) {
+ if (!target_mm)
+ target_mm = mm;
+ /* BPF THP allocation policy:
+ * - Allow PMD allocation in khugepaged only
+ */
+ if ((tva_flags == TVA_KHUGEPAGED || tva_flags == -1) &&
+ orders == (1 << pmd_order)) {
+ suggested_orders = orders;
+ goto out;
+ }
+ }
+
+out:
+ bpf_put_mem_cgroup(memcg);
+ return suggested_orders;
+}
+
+SEC(".struct_ops.link")
+struct bpf_thp_ops thp = {
+ .get_suggested_order = (void *)bpf_suggested_order,
+};
diff --git a/tools/testing/selftests/bpf/progs/test_thp_adjust_failure.c b/tools/testing/selftests/bpf/progs/test_thp_adjust_failure.c
new file mode 100644
index 000000000000..0742886eeddd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_thp_adjust_failure.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/get_suggested_order")
+__failure __msg("Unreleased reference")
+int BPF_PROG(unreleased_task, struct mm_struct *mm, struct vm_area_struct *vma__nullable,
+ u64 vma_flags, u64 tva_flags, int orders, int retval)
+{
+ struct task_struct *p = bpf_mm_get_task(mm);
+
+ /* The task should be released with bpf_task_release() */
+ return p ? 0 : 1;
+}
+
+SEC(".struct_ops.link")
+struct bpf_thp_ops thp = {
+ .get_suggested_order = (void *)unreleased_task,
+};
--
2.47.3