linux-mm.kvack.org archive mirror
* [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator
@ 2024-10-17  8:06 Namhyung Kim
  2024-10-17  8:06 ` [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter Namhyung Kim
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Namhyung Kim @ 2024-10-17  8:06 UTC (permalink / raw)
  To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
  Cc: Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	LKML, bpf, Andrew Morton, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Vlastimil Babka, Roman Gushchin,
	Hyeonggon Yoo, linux-mm, Arnaldo Carvalho de Melo, Kees Cook

Add a new open-coded iterator for kmem_cache that can be used from a
BPF program as shown below.  It takes no arguments and traverses all
kmem_cache entries.

  struct kmem_cache *pos;

  bpf_for_each(kmem_cache, pos) {
      ...
  }

As it needs to grab slab_mutex, it should be called from sleepable BPF
programs only.
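
For example, a minimal sleepable program could count all the entries
with a loop like the sketch below.  This is an illustration only: the
attach point is arbitrary, and bpf_for_each()/SYS_PREFIX are assumed to
come from the selftests' bpf_experimental.h and bpf_misc.h headers.

  SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
  int count_kmem_caches(const void *ctx)
  {
  	struct kmem_cache *pos;
  	int nr = 0;

  	/* pos is a valid kmem_cache pointer on each iteration */
  	bpf_for_each(kmem_cache, pos) {
  		nr++;
  	}

  	bpf_printk("%d kmem_cache entries", nr);
  	return 0;
  }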

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 kernel/bpf/helpers.c         |  3 ++
 kernel/bpf/kmem_cache_iter.c | 87 ++++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 073e6f04f4d765ff..d1dfa4f335577914 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -3111,6 +3111,9 @@ BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_get_kmem_cache)
+BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
 BTF_KFUNCS_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
diff --git a/kernel/bpf/kmem_cache_iter.c b/kernel/bpf/kmem_cache_iter.c
index ebc101d7da51b57c..31ddaf452b20a458 100644
--- a/kernel/bpf/kmem_cache_iter.c
+++ b/kernel/bpf/kmem_cache_iter.c
@@ -145,6 +145,93 @@ static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
 	.seq_ops		= &kmem_cache_iter_seq_ops,
 };
 
+/* open-coded version */
+struct bpf_iter_kmem_cache {
+	__u64 __opaque[1];
+} __attribute__((aligned(8)));
+
+struct bpf_iter_kmem_cache_kern {
+	struct kmem_cache *pos;
+} __attribute__((aligned(8)));
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
+{
+	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
+
+	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
+	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));
+
+	kit->pos = NULL;
+	return 0;
+}
+
+__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
+{
+	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
+	struct kmem_cache *prev = kit->pos;
+	struct kmem_cache *next;
+	bool destroy = false;
+
+	mutex_lock(&slab_mutex);
+
+	if (list_empty(&slab_caches)) {
+		mutex_unlock(&slab_mutex);
+		return NULL;
+	}
+
+	if (prev == NULL)
+		next = list_first_entry(&slab_caches, struct kmem_cache, list);
+	else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
+		next = NULL;
+	else
+		next = list_next_entry(prev, list);
+
+	/* boot_caches have negative refcount, don't touch them */
+	if (next && next->refcount > 0)
+		next->refcount++;
+
+	/* Skip kmem_cache_destroy() for active entries */
+	if (prev && prev->refcount > 1)
+		prev->refcount--;
+	else if (prev && prev->refcount == 1)
+		destroy = true;
+
+	mutex_unlock(&slab_mutex);
+
+	if (destroy)
+		kmem_cache_destroy(prev);
+
+	kit->pos = next;
+	return next;
+}
+
+__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
+{
+	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
+	struct kmem_cache *s = kit->pos;
+	bool destroy = false;
+
+	if (s == NULL)
+		return;
+
+	mutex_lock(&slab_mutex);
+
+	/* Skip kmem_cache_destroy() for active entries */
+	if (s->refcount > 1)
+		s->refcount--;
+	else if (s->refcount == 1)
+		destroy = true;
+
+	mutex_unlock(&slab_mutex);
+
+	if (destroy)
+		kmem_cache_destroy(s);
+}
+
+__bpf_kfunc_end_defs();
+
 static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
 					    struct seq_file *seq)
 {
-- 
2.47.0.rc1.288.g06298d1525-goog




* [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter
  2024-10-17  8:06 [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator Namhyung Kim
@ 2024-10-17  8:06 ` Namhyung Kim
  2024-10-18 18:46   ` Martin KaFai Lau
  2024-10-21 23:36   ` Andrii Nakryiko
  2024-10-18 18:22 ` [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator Martin KaFai Lau
  2024-10-21 23:32 ` Andrii Nakryiko
  2 siblings, 2 replies; 11+ messages in thread
From: Namhyung Kim @ 2024-10-17  8:06 UTC (permalink / raw)
  To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
  Cc: Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	LKML, bpf, Andrew Morton, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Vlastimil Babka, Roman Gushchin,
	Hyeonggon Yoo, linux-mm, Arnaldo Carvalho de Melo, Kees Cook

The new subtest is attached to a sleepable fentry on the syncfs() syscall.
It iterates over the kmem_cache entries using a bpf_for_each loop and
counts them.  Finally it compares the count with the number of entries
seen by the regular iterator.

  $ ./vmtest.sh -- ./test_progs -t kmem_cache_iter
  ...
  #130/1   kmem_cache_iter/check_task_struct:OK
  #130/2   kmem_cache_iter/check_slabinfo:OK
  #130/3   kmem_cache_iter/open_coded_iter:OK
  #130     kmem_cache_iter:OK
  Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED

Also simplify the code by using the skeleton's attach routine.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 .../testing/selftests/bpf/bpf_experimental.h  |  6 ++++
 .../bpf/prog_tests/kmem_cache_iter.c          | 28 +++++++++++--------
 .../selftests/bpf/progs/kmem_cache_iter.c     | 24 ++++++++++++++++
 3 files changed, 46 insertions(+), 12 deletions(-)

diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index b0668f29f7b394eb..cd8ecd39c3f3c68d 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
 		unsigned int flags__k, void *aux__ign) __ksym;
 #define bpf_wq_set_callback(timer, cb, flags) \
 	bpf_wq_set_callback_impl(timer, cb, flags, NULL)
+
+struct bpf_iter_kmem_cache;
+extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
+extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
+extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
+
 #endif
diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
index 848d8fc9171fae45..a1fd3bc57c0b21bb 100644
--- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
@@ -68,12 +68,18 @@ static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
 	fclose(fp);
 }
 
+static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
+{
+	/* To trigger the open coded iterator attached to the syscall */
+	syncfs(0);
+
 +	/* It should be the same as we've seen from the explicit iterator */
+	ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
+}
+
 void test_kmem_cache_iter(void)
 {
-	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	struct kmem_cache_iter *skel = NULL;
-	union bpf_iter_link_info linfo = {};
-	struct bpf_link *link;
 	char buf[256];
 	int iter_fd;
 
@@ -81,16 +87,12 @@ void test_kmem_cache_iter(void)
 	if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
 		return;
 
-	opts.link_info = &linfo;
-	opts.link_info_len = sizeof(linfo);
-
-	link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
-	if (!ASSERT_OK_PTR(link, "attach_iter"))
+	if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))
 		goto destroy;
 
-	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
 	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
-		goto free_link;
+		goto detach;
 
 	memset(buf, 0, sizeof(buf));
 	while (read(iter_fd, buf, sizeof(buf)) > 0) {
@@ -105,11 +107,13 @@ void test_kmem_cache_iter(void)
 		subtest_kmem_cache_iter_check_task_struct(skel);
 	if (test__start_subtest("check_slabinfo"))
 		subtest_kmem_cache_iter_check_slabinfo(skel);
+	if (test__start_subtest("open_coded_iter"))
+		subtest_kmem_cache_iter_open_coded(skel);
 
 	close(iter_fd);
 
-free_link:
-	bpf_link__destroy(link);
+detach:
+	kmem_cache_iter__detach(skel);
 destroy:
 	kmem_cache_iter__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
index 72c9dafecd98406b..4c44aa279a5328fe 100644
--- a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
+++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
@@ -2,6 +2,8 @@
 /* Copyright (c) 2024 Google */
 
 #include "bpf_iter.h"
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
@@ -33,6 +35,7 @@ extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
 /* Result, will be checked by userspace */
 int task_struct_found;
 int kmem_cache_seen;
+int open_coded_seen;
 
 SEC("iter/kmem_cache")
 int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
@@ -85,3 +88,24 @@ int BPF_PROG(check_task_struct)
 		task_struct_found = -2;
 	return 0;
 }
+
+SEC("fentry.s/" SYS_PREFIX "sys_syncfs")
+int open_coded_iter(const void *ctx)
+{
+	struct kmem_cache *s;
+
+	bpf_for_each(kmem_cache, s) {
+		struct kmem_cache_result *r;
+		int idx = open_coded_seen;
+
+		r = bpf_map_lookup_elem(&slab_result, &idx);
+		if (r == NULL)
+			break;
+
+		open_coded_seen++;
+
+		if (r->obj_size != s->size)
+			break;
+	}
+	return 0;
+}
-- 
2.47.0.rc1.288.g06298d1525-goog




* Re: [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator
  2024-10-17  8:06 [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator Namhyung Kim
  2024-10-17  8:06 ` [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter Namhyung Kim
@ 2024-10-18 18:22 ` Martin KaFai Lau
  2024-10-22 17:47   ` Namhyung Kim
  2024-10-21 23:32 ` Andrii Nakryiko
  2 siblings, 1 reply; 11+ messages in thread
From: Martin KaFai Lau @ 2024-10-18 18:22 UTC (permalink / raw)
  To: Namhyung Kim
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Eduard Zingerman, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, LKML, bpf,
	Andrew Morton, Christoph Lameter, Pekka Enberg, David Rientjes,
	Joonsoo Kim, Vlastimil Babka, Roman Gushchin, Hyeonggon Yoo,
	linux-mm, Arnaldo Carvalho de Melo, Kees Cook

On 10/17/24 1:06 AM, Namhyung Kim wrote:
> Add a new open-coded iterator for kmem_cache that can be used from a
> BPF program as shown below.  It takes no arguments and traverses all
> kmem_cache entries.
> 
>    struct kmem_cache *pos;
> 
>    bpf_for_each(kmem_cache, pos) {
>        ...
>    }
> 
> As it needs to grab slab_mutex, it should be called from sleepable BPF
> programs only.
> 
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> ---
>   kernel/bpf/helpers.c         |  3 ++
>   kernel/bpf/kmem_cache_iter.c | 87 ++++++++++++++++++++++++++++++++++++
>   2 files changed, 90 insertions(+)
> 
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 073e6f04f4d765ff..d1dfa4f335577914 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -3111,6 +3111,9 @@ BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
>   BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
>   BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
>   BTF_ID_FLAGS(func, bpf_get_kmem_cache)
> +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
> +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
> +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
>   BTF_KFUNCS_END(common_btf_ids)
>   
>   static const struct btf_kfunc_id_set common_kfunc_set = {
> diff --git a/kernel/bpf/kmem_cache_iter.c b/kernel/bpf/kmem_cache_iter.c
> index ebc101d7da51b57c..31ddaf452b20a458 100644
> --- a/kernel/bpf/kmem_cache_iter.c
> +++ b/kernel/bpf/kmem_cache_iter.c
> @@ -145,6 +145,93 @@ static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
>   	.seq_ops		= &kmem_cache_iter_seq_ops,
>   };
>   
> +/* open-coded version */
> +struct bpf_iter_kmem_cache {
> +	__u64 __opaque[1];
> +} __attribute__((aligned(8)));
> +
> +struct bpf_iter_kmem_cache_kern {
> +	struct kmem_cache *pos;
> +} __attribute__((aligned(8)));
> +
> +__bpf_kfunc_start_defs();
> +
> +__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
> +{
> +	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
> +
> +	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
> +	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));
> +
> +	kit->pos = NULL;
> +	return 0;
> +}
> +
> +__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
> +{
> +	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
> +	struct kmem_cache *prev = kit->pos;
> +	struct kmem_cache *next;
> +	bool destroy = false;
> +
> +	mutex_lock(&slab_mutex);

I think taking mutex_lock here should be fine since sleepable tracing progs
should be limited to the error injection whitelist. Those functions should
not be holding the mutex, afaict.

> +
> +	if (list_empty(&slab_caches)) {
> +		mutex_unlock(&slab_mutex);
> +		return NULL;
> +	}
> +
> +	if (prev == NULL)
> +		next = list_first_entry(&slab_caches, struct kmem_cache, list);
> +	else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
> +		next = NULL;

At the last entry, next is NULL.

> +	else
> +		next = list_next_entry(prev, list);
> +
> +	/* boot_caches have negative refcount, don't touch them */
> +	if (next && next->refcount > 0)
> +		next->refcount++;
> +
> +	/* Skip kmem_cache_destroy() for active entries */
> +	if (prev && prev->refcount > 1)
> +		prev->refcount--;
> +	else if (prev && prev->refcount == 1)
> +		destroy = true;
> +
> +	mutex_unlock(&slab_mutex);
> +
> +	if (destroy)
> +		kmem_cache_destroy(prev);
> +
> +	kit->pos = next;

so kit->pos will also be NULL. Does that mean the bpf prog will be able to
call bpf_iter_kmem_cache_next() again and re-loop from the beginning of the
slab_caches list?

> +	return next;
> +}
> +
> +__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
> +{
> +	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
> +	struct kmem_cache *s = kit->pos;
> +	bool destroy = false;
> +
> +	if (s == NULL)
> +		return;
> +
> +	mutex_lock(&slab_mutex);
> +
> +	/* Skip kmem_cache_destroy() for active entries */
> +	if (s->refcount > 1)
> +		s->refcount--;
> +	else if (s->refcount == 1)
> +		destroy = true;
> +
> +	mutex_unlock(&slab_mutex);
> +
> +	if (destroy)
> +		kmem_cache_destroy(s);
> +}
> +
> +__bpf_kfunc_end_defs();
> +
>   static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
>   					    struct seq_file *seq)
>   {




* Re: [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter
  2024-10-17  8:06 ` [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter Namhyung Kim
@ 2024-10-18 18:46   ` Martin KaFai Lau
  2024-10-22 17:51     ` Namhyung Kim
  2024-10-21 23:36   ` Andrii Nakryiko
  1 sibling, 1 reply; 11+ messages in thread
From: Martin KaFai Lau @ 2024-10-18 18:46 UTC (permalink / raw)
  To: Namhyung Kim
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Eduard Zingerman, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, LKML, bpf,
	Andrew Morton, Christoph Lameter, Pekka Enberg, David Rientjes,
	Joonsoo Kim, Vlastimil Babka, Roman Gushchin, Hyeonggon Yoo,
	linux-mm, Arnaldo Carvalho de Melo, Kees Cook

On 10/17/24 1:06 AM, Namhyung Kim wrote:
> The new subtest is attached to a sleepable fentry on the syncfs() syscall.
> It iterates over the kmem_cache entries using a bpf_for_each loop and
> counts them.  Finally it compares the count with the number of entries
> seen by the regular iterator.
> 
>    $ ./vmtest.sh -- ./test_progs -t kmem_cache_iter
>    ...
>    #130/1   kmem_cache_iter/check_task_struct:OK
>    #130/2   kmem_cache_iter/check_slabinfo:OK
>    #130/3   kmem_cache_iter/open_coded_iter:OK
>    #130     kmem_cache_iter:OK
>    Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED
> 
> Also simplify the code by using the skeleton's attach routine.
> 
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> ---
>   .../testing/selftests/bpf/bpf_experimental.h  |  6 ++++
>   .../bpf/prog_tests/kmem_cache_iter.c          | 28 +++++++++++--------
>   .../selftests/bpf/progs/kmem_cache_iter.c     | 24 ++++++++++++++++
>   3 files changed, 46 insertions(+), 12 deletions(-)
> 
> diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
> index b0668f29f7b394eb..cd8ecd39c3f3c68d 100644
> --- a/tools/testing/selftests/bpf/bpf_experimental.h
> +++ b/tools/testing/selftests/bpf/bpf_experimental.h
> @@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
>   		unsigned int flags__k, void *aux__ign) __ksym;
>   #define bpf_wq_set_callback(timer, cb, flags) \
>   	bpf_wq_set_callback_impl(timer, cb, flags, NULL)
> +
> +struct bpf_iter_kmem_cache;
> +extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
> +extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
> +extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
> +
>   #endif
> diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> index 848d8fc9171fae45..a1fd3bc57c0b21bb 100644
> --- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> +++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> @@ -68,12 +68,18 @@ static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
>   	fclose(fp);
>   }
>   
> +static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
> +{
> +	/* To trigger the open coded iterator attached to the syscall */
> +	syncfs(0);
> +
> +	/* It should be the same as we've seen from the explicit iterator */
> +	ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
> +}
> +
>   void test_kmem_cache_iter(void)
>   {
> -	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
>   	struct kmem_cache_iter *skel = NULL;
> -	union bpf_iter_link_info linfo = {};
> -	struct bpf_link *link;
>   	char buf[256];
>   	int iter_fd;
>   
> @@ -81,16 +87,12 @@ void test_kmem_cache_iter(void)
>   	if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
>   		return;
>   
> -	opts.link_info = &linfo;
> -	opts.link_info_len = sizeof(linfo);
> -
> -	link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
> -	if (!ASSERT_OK_PTR(link, "attach_iter"))
> +	if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))

with this change.

>   		goto destroy;
>   
> -	iter_fd = bpf_iter_create(bpf_link__fd(link));
> +	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
>   	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
> -		goto free_link;
> +		goto detach;
>   
>   	memset(buf, 0, sizeof(buf));
>   	while (read(iter_fd, buf, sizeof(buf)) > 0) {
> @@ -105,11 +107,13 @@ void test_kmem_cache_iter(void)
>   		subtest_kmem_cache_iter_check_task_struct(skel);
>   	if (test__start_subtest("check_slabinfo"))
>   		subtest_kmem_cache_iter_check_slabinfo(skel);
> +	if (test__start_subtest("open_coded_iter"))
> +		subtest_kmem_cache_iter_open_coded(skel);
>   
>   	close(iter_fd);
>   
> -free_link:
> -	bpf_link__destroy(link);
> +detach:
> +	kmem_cache_iter__detach(skel);

nit: I think the kmem_cache_iter__destroy() below will also detach, so no
need for an explicit kmem_cache_iter__detach().

>   destroy:
>   	kmem_cache_iter__destroy(skel);
>   }
> diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> index 72c9dafecd98406b..4c44aa279a5328fe 100644
> --- a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> +++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> @@ -2,6 +2,8 @@
>   /* Copyright (c) 2024 Google */
>   
>   #include "bpf_iter.h"
> +#include "bpf_experimental.h"
> +#include "bpf_misc.h"
>   #include <bpf/bpf_helpers.h>
>   #include <bpf/bpf_tracing.h>
>   
> @@ -33,6 +35,7 @@ extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
>   /* Result, will be checked by userspace */
>   int task_struct_found;
>   int kmem_cache_seen;
> +int open_coded_seen;
>   
>   SEC("iter/kmem_cache")
>   int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
> @@ -85,3 +88,24 @@ int BPF_PROG(check_task_struct)
>   		task_struct_found = -2;
>   	return 0;
>   }
> +
> +SEC("fentry.s/" SYS_PREFIX "sys_syncfs")
> +int open_coded_iter(const void *ctx)
> +{
> +	struct kmem_cache *s;
> +
> +	bpf_for_each(kmem_cache, s) {
> +		struct kmem_cache_result *r;
> +		int idx = open_coded_seen;
> +
> +		r = bpf_map_lookup_elem(&slab_result, &idx);
> +		if (r == NULL)
> +			break;
> +
> +		open_coded_seen++;

I am not sure this will work well if the testing system somehow has another
process calling syncfs. It is probably a good idea to guard this by checking
the tid of test_progs at the beginning of this bpf prog.
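
Something along these lines (an untested sketch; "target_pid" is a made-up
global that test_progs would set, e.g. to getpid(), before attaching):

  int target_pid;

  SEC("fentry.s/" SYS_PREFIX "sys_syncfs")
  int open_coded_iter(const void *ctx)
  {
  	int pid = bpf_get_current_pid_tgid() >> 32;

  	/* ignore syncfs() calls from unrelated processes */
  	if (pid != target_pid)
  		return 0;

  	/* ... the existing bpf_for_each(kmem_cache, s) loop ... */
  	return 0;
  }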

> +
> +		if (r->obj_size != s->size)
> +			break;
> +	}
> +	return 0;
> +}




* Re: [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator
  2024-10-17  8:06 [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator Namhyung Kim
  2024-10-17  8:06 ` [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter Namhyung Kim
  2024-10-18 18:22 ` [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator Martin KaFai Lau
@ 2024-10-21 23:32 ` Andrii Nakryiko
  2024-10-22 17:50   ` Namhyung Kim
  2 siblings, 1 reply; 11+ messages in thread
From: Andrii Nakryiko @ 2024-10-21 23:32 UTC (permalink / raw)
  To: Namhyung Kim
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	LKML, bpf, Andrew Morton, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Vlastimil Babka, Roman Gushchin,
	Hyeonggon Yoo, linux-mm, Arnaldo Carvalho de Melo, Kees Cook

On Thu, Oct 17, 2024 at 1:06 AM Namhyung Kim <namhyung@kernel.org> wrote:
>
> Add a new open-coded iterator for kmem_cache that can be used from a
> BPF program as shown below.  It takes no arguments and traverses all
> kmem_cache entries.
>
>   struct kmem_cache *pos;
>
>   bpf_for_each(kmem_cache, pos) {
>       ...
>   }
>
> As it needs to grab slab_mutex, it should be called from sleepable BPF
> programs only.
>
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> ---
>  kernel/bpf/helpers.c         |  3 ++
>  kernel/bpf/kmem_cache_iter.c | 87 ++++++++++++++++++++++++++++++++++++
>  2 files changed, 90 insertions(+)
>
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 073e6f04f4d765ff..d1dfa4f335577914 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -3111,6 +3111,9 @@ BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
>  BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
>  BTF_ID_FLAGS(func, bpf_get_kmem_cache)
> +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
> +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
> +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)

I'm curious: having bpf_iter_kmem_cache_{new,next,destroy} functions,
can we rewrite kmem_cache_iter_seq_next in terms of them, so that we
have less duplication of iteration logic? Or are there some locking
concerns preventing this? (I haven't looked into the actual logic much,
sorry, lazy question.)

>  BTF_KFUNCS_END(common_btf_ids)
>
>  static const struct btf_kfunc_id_set common_kfunc_set = {
> diff --git a/kernel/bpf/kmem_cache_iter.c b/kernel/bpf/kmem_cache_iter.c
> index ebc101d7da51b57c..31ddaf452b20a458 100644
> --- a/kernel/bpf/kmem_cache_iter.c
> +++ b/kernel/bpf/kmem_cache_iter.c
> @@ -145,6 +145,93 @@ static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
>         .seq_ops                = &kmem_cache_iter_seq_ops,
>  };
>

[...]



* Re: [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter
  2024-10-17  8:06 ` [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter Namhyung Kim
  2024-10-18 18:46   ` Martin KaFai Lau
@ 2024-10-21 23:36   ` Andrii Nakryiko
  2024-10-22 17:52     ` Namhyung Kim
  2024-10-24  7:44     ` Namhyung Kim
  1 sibling, 2 replies; 11+ messages in thread
From: Andrii Nakryiko @ 2024-10-21 23:36 UTC (permalink / raw)
  To: Namhyung Kim
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	LKML, bpf, Andrew Morton, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Vlastimil Babka, Roman Gushchin,
	Hyeonggon Yoo, linux-mm, Arnaldo Carvalho de Melo, Kees Cook

On Thu, Oct 17, 2024 at 1:06 AM Namhyung Kim <namhyung@kernel.org> wrote:
>
> The new subtest is attached to a sleepable fentry on the syncfs() syscall.
> It iterates over the kmem_cache entries using a bpf_for_each loop and
> counts them.  Finally it compares the count with the number of entries
> seen by the regular iterator.
>
>   $ ./vmtest.sh -- ./test_progs -t kmem_cache_iter
>   ...
>   #130/1   kmem_cache_iter/check_task_struct:OK
>   #130/2   kmem_cache_iter/check_slabinfo:OK
>   #130/3   kmem_cache_iter/open_coded_iter:OK
>   #130     kmem_cache_iter:OK
>   Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED
>
> Also simplify the code by using the skeleton's attach routine.
>
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> ---
>  .../testing/selftests/bpf/bpf_experimental.h  |  6 ++++
>  .../bpf/prog_tests/kmem_cache_iter.c          | 28 +++++++++++--------
>  .../selftests/bpf/progs/kmem_cache_iter.c     | 24 ++++++++++++++++
>  3 files changed, 46 insertions(+), 12 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
> index b0668f29f7b394eb..cd8ecd39c3f3c68d 100644
> --- a/tools/testing/selftests/bpf/bpf_experimental.h
> +++ b/tools/testing/selftests/bpf/bpf_experimental.h
> @@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
>                 unsigned int flags__k, void *aux__ign) __ksym;
>  #define bpf_wq_set_callback(timer, cb, flags) \
>         bpf_wq_set_callback_impl(timer, cb, flags, NULL)
> +
> +struct bpf_iter_kmem_cache;
> +extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
> +extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
> +extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
> +

we should be getting this from vmlinux.h nowadays, so this is probably
unnecessary

>  #endif
> diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> index 848d8fc9171fae45..a1fd3bc57c0b21bb 100644
> --- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> +++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> @@ -68,12 +68,18 @@ static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
>         fclose(fp);
>  }
>
> +static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
> +{
> +       /* To trigger the open coded iterator attached to the syscall */
> +       syncfs(0);

what Martin said, you still need to filter by PID

> +
> +       /* It should be the same as we've seen from the explicit iterator */
> +       ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
> +}
> +
>  void test_kmem_cache_iter(void)
>  {
> -       DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
>         struct kmem_cache_iter *skel = NULL;
> -       union bpf_iter_link_info linfo = {};
> -       struct bpf_link *link;
>         char buf[256];
>         int iter_fd;
>
> @@ -81,16 +87,12 @@ void test_kmem_cache_iter(void)
>         if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
>                 return;
>
> -       opts.link_info = &linfo;
> -       opts.link_info_len = sizeof(linfo);
> -
> -       link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
> -       if (!ASSERT_OK_PTR(link, "attach_iter"))
> +       if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))
>                 goto destroy;
>
> -       iter_fd = bpf_iter_create(bpf_link__fd(link));
> +       iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
>         if (!ASSERT_GE(iter_fd, 0, "iter_create"))
> -               goto free_link;
> +               goto detach;
>
>         memset(buf, 0, sizeof(buf));
>         while (read(iter_fd, buf, sizeof(buf)) > 0) {
> @@ -105,11 +107,13 @@ void test_kmem_cache_iter(void)
>                 subtest_kmem_cache_iter_check_task_struct(skel);
>         if (test__start_subtest("check_slabinfo"))
>                 subtest_kmem_cache_iter_check_slabinfo(skel);
> +       if (test__start_subtest("open_coded_iter"))
> +               subtest_kmem_cache_iter_open_coded(skel);
>
>         close(iter_fd);
>
> -free_link:
> -       bpf_link__destroy(link);
> +detach:
> +       kmem_cache_iter__detach(skel);
>  destroy:
>         kmem_cache_iter__destroy(skel);
>  }
> diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> index 72c9dafecd98406b..4c44aa279a5328fe 100644
> --- a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> +++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> @@ -2,6 +2,8 @@
>  /* Copyright (c) 2024 Google */
>
>  #include "bpf_iter.h"
> +#include "bpf_experimental.h"
> +#include "bpf_misc.h"
>  #include <bpf/bpf_helpers.h>
>  #include <bpf/bpf_tracing.h>
>
> @@ -33,6 +35,7 @@ extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
>  /* Result, will be checked by userspace */
>  int task_struct_found;
>  int kmem_cache_seen;
> +int open_coded_seen;
>
>  SEC("iter/kmem_cache")
>  int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
> @@ -85,3 +88,24 @@ int BPF_PROG(check_task_struct)
>                 task_struct_found = -2;
>         return 0;
>  }
> +
> +SEC("fentry.s/" SYS_PREFIX "sys_syncfs")
> +int open_coded_iter(const void *ctx)
> +{
> +       struct kmem_cache *s;
> +
> +       bpf_for_each(kmem_cache, s) {
> +               struct kmem_cache_result *r;
> +               int idx = open_coded_seen;
> +
> +               r = bpf_map_lookup_elem(&slab_result, &idx);

nit: you don't need idx, just `&open_coded_seen` should be fine, I think

> +               if (r == NULL)

nit: !r

> +                       break;
> +
> +               open_coded_seen++;
> +
> +               if (r->obj_size != s->size)
> +                       break;
> +       }
> +       return 0;
> +}
> --
> 2.47.0.rc1.288.g06298d1525-goog
>



* Re: [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator
  2024-10-18 18:22 ` [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator Martin KaFai Lau
@ 2024-10-22 17:47   ` Namhyung Kim
  0 siblings, 0 replies; 11+ messages in thread
From: Namhyung Kim @ 2024-10-22 17:47 UTC (permalink / raw)
  To: Martin KaFai Lau
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Eduard Zingerman, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, LKML, bpf,
	Andrew Morton, Christoph Lameter, Pekka Enberg, David Rientjes,
	Joonsoo Kim, Vlastimil Babka, Roman Gushchin, Hyeonggon Yoo,
	linux-mm, Arnaldo Carvalho de Melo, Kees Cook

Hello,

On Fri, Oct 18, 2024 at 11:22:00AM -0700, Martin KaFai Lau wrote:
> On 10/17/24 1:06 AM, Namhyung Kim wrote:
> > Add a new open-coded iterator for kmem_cache that can be used from a
> > BPF program as shown below.  It takes no arguments and traverses all
> > kmem_cache entries.
> > 
> >    struct kmem_cache *pos;
> > 
> >    bpf_for_each(kmem_cache, pos) {
> >        ...
> >    }
> > 
> > As it needs to grab slab_mutex, it should be called from sleepable BPF
> > programs only.
> > 
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> > ---
> >   kernel/bpf/helpers.c         |  3 ++
> >   kernel/bpf/kmem_cache_iter.c | 87 ++++++++++++++++++++++++++++++++++++
> >   2 files changed, 90 insertions(+)
> > 
> > diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> > index 073e6f04f4d765ff..d1dfa4f335577914 100644
> > --- a/kernel/bpf/helpers.c
> > +++ b/kernel/bpf/helpers.c
> > @@ -3111,6 +3111,9 @@ BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
> >   BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
> >   BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
> >   BTF_ID_FLAGS(func, bpf_get_kmem_cache)
> > +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
> > +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
> > +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
> >   BTF_KFUNCS_END(common_btf_ids)
> >   static const struct btf_kfunc_id_set common_kfunc_set = {
> > diff --git a/kernel/bpf/kmem_cache_iter.c b/kernel/bpf/kmem_cache_iter.c
> > index ebc101d7da51b57c..31ddaf452b20a458 100644
> > --- a/kernel/bpf/kmem_cache_iter.c
> > +++ b/kernel/bpf/kmem_cache_iter.c
> > @@ -145,6 +145,93 @@ static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
> >   	.seq_ops		= &kmem_cache_iter_seq_ops,
> >   };
> > +/* open-coded version */
> > +struct bpf_iter_kmem_cache {
> > +	__u64 __opaque[1];
> > +} __attribute__((aligned(8)));
> > +
> > +struct bpf_iter_kmem_cache_kern {
> > +	struct kmem_cache *pos;
> > +} __attribute__((aligned(8)));
> > +
> > +__bpf_kfunc_start_defs();
> > +
> > +__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
> > +{
> > +	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
> > +
> > +	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
> > +	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));
> > +
> > +	kit->pos = NULL;
> > +	return 0;
> > +}
> > +
> > +__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
> > +{
> > +	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
> > +	struct kmem_cache *prev = kit->pos;
> > +	struct kmem_cache *next;
> > +	bool destroy = false;
> > +
> > +	mutex_lock(&slab_mutex);
> 
> I think taking mutex_lock here should be fine since sleepable tracing progs
> should be limited to the error injection whitelist. Those functions should
> not be holding the mutex, afaict.
> 
> > +
> > +	if (list_empty(&slab_caches)) {
> > +		mutex_unlock(&slab_mutex);
> > +		return NULL;
> > +	}
> > +
> > +	if (prev == NULL)
> > +		next = list_first_entry(&slab_caches, struct kmem_cache, list);
> > +	else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
> > +		next = NULL;
> 
> At the last entry, next is NULL.
> 
> > +	else
> > +		next = list_next_entry(prev, list);
> > +
> > +	/* boot_caches have negative refcount, don't touch them */
> > +	if (next && next->refcount > 0)
> > +		next->refcount++;
> > +
> > +	/* Skip kmem_cache_destroy() for active entries */
> > +	if (prev && prev->refcount > 1)
> > +		prev->refcount--;
> > +	else if (prev && prev->refcount == 1)
> > +		destroy = true;
> > +
> > +	mutex_unlock(&slab_mutex);
> > +
> > +	if (destroy)
> > +		kmem_cache_destroy(prev);
> > +
> > +	kit->pos = next;
> 
> so kit->pos will also be NULL. Does that mean the bpf prog will be able to
> call bpf_iter_kmem_cache_next() again and re-loop from the beginning of the
> slab_caches list?

Right, I'll mark the start pos differently to prevent that.
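
Maybe something like the below: a rough sketch only, not the final code.
It parks the cursor on the list head instead of NULL once the list is
exhausted, and destroy() would have to treat that marker like NULL:

  /* sketch: the slab_caches list head doubles as an "exhausted" marker */
  #define KMEM_CACHE_ITER_DONE	((struct kmem_cache *)&slab_caches)

  __bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
  {
  	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
  	struct kmem_cache *next;

  	if (kit->pos == KMEM_CACHE_ITER_DONE)
  		return NULL;

  	/* ... existing next lookup + refcount handling under slab_mutex ... */

  	/* park on the marker instead of going back to NULL */
  	kit->pos = next ?: KMEM_CACHE_ITER_DONE;
  	return next;
  }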

Thanks,
Namhyung

> 
> > +	return next;
> > +}
> > +
> > +__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
> > +{
> > +	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
> > +	struct kmem_cache *s = kit->pos;
> > +	bool destroy = false;
> > +
> > +	if (s == NULL)
> > +		return;
> > +
> > +	mutex_lock(&slab_mutex);
> > +
> > +	/* Skip kmem_cache_destroy() for active entries */
> > +	if (s->refcount > 1)
> > +		s->refcount--;
> > +	else if (s->refcount == 1)
> > +		destroy = true;
> > +
> > +	mutex_unlock(&slab_mutex);
> > +
> > +	if (destroy)
> > +		kmem_cache_destroy(s);
> > +}
> > +
> > +__bpf_kfunc_end_defs();
> > +
> >   static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
> >   					    struct seq_file *seq)
> >   {
> 



* Re: [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator
  2024-10-21 23:32 ` Andrii Nakryiko
@ 2024-10-22 17:50   ` Namhyung Kim
  0 siblings, 0 replies; 11+ messages in thread
From: Namhyung Kim @ 2024-10-22 17:50 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	LKML, bpf, Andrew Morton, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Vlastimil Babka, Roman Gushchin,
	Hyeonggon Yoo, linux-mm, Arnaldo Carvalho de Melo, Kees Cook

Hello,

On Mon, Oct 21, 2024 at 04:32:10PM -0700, Andrii Nakryiko wrote:
> On Thu, Oct 17, 2024 at 1:06 AM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > Add a new open-coded iterator for kmem_cache that can be used from a
> > BPF program as shown below.  It takes no arguments and traverses all
> > kmem_cache entries.
> >
> >   struct kmem_cache *pos;
> >
> >   bpf_for_each(kmem_cache, pos) {
> >       ...
> >   }
> >
> > As it needs to grab slab_mutex, it should be called from sleepable BPF
> > programs only.
> >
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> > ---
> >  kernel/bpf/helpers.c         |  3 ++
> >  kernel/bpf/kmem_cache_iter.c | 87 ++++++++++++++++++++++++++++++++++++
> >  2 files changed, 90 insertions(+)
> >
> > diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> > index 073e6f04f4d765ff..d1dfa4f335577914 100644
> > --- a/kernel/bpf/helpers.c
> > +++ b/kernel/bpf/helpers.c
> > @@ -3111,6 +3111,9 @@ BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
> >  BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
> >  BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
> >  BTF_ID_FLAGS(func, bpf_get_kmem_cache)
> > +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
> > +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
> > +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
> 
> I'm curious: having bpf_iter_kmem_cache_{new,next,destroy} functions,
> can we rewrite kmem_cache_iter_seq_next in terms of them, so that we
> have less duplication of iteration logic? Or are there some locking
> concerns preventing this? (I haven't looked into the actual logic much,
> sorry, lazy question.)

Locking should be fine.  I think there's a subtle difference between the
seq interface and the open-coded iterator, but I'll think about how to
reduce the duplication.
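
For example, both could funnel through a small common helper, roughly like
the below; just a sketch pulled out of the current next() logic, with a
made-up helper name:

  /* advance the cursor under slab_mutex, managing refcounts on the way */
  static struct kmem_cache *kmem_cache_iter_advance(struct kmem_cache *prev)
  {
  	struct kmem_cache *next = NULL;
  	bool destroy = false;

  	mutex_lock(&slab_mutex);

  	if (!list_empty(&slab_caches)) {
  		if (!prev)
  			next = list_first_entry(&slab_caches, struct kmem_cache, list);
  		else if (prev != list_last_entry(&slab_caches, struct kmem_cache, list))
  			next = list_next_entry(prev, list);
  	}

  	/* boot_caches have negative refcount, don't touch them */
  	if (next && next->refcount > 0)
  		next->refcount++;

  	/* skip kmem_cache_destroy() for active entries */
  	if (prev && prev->refcount > 1)
  		prev->refcount--;
  	else if (prev && prev->refcount == 1)
  		destroy = true;

  	mutex_unlock(&slab_mutex);

  	if (destroy)
  		kmem_cache_destroy(prev);

  	return next;
  }

Then bpf_iter_kmem_cache_next() and kmem_cache_iter_seq_next() could both
call it, modulo the seq file's own bookkeeping.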

Thanks for your review!
Namhyung




* Re: [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter
  2024-10-18 18:46   ` Martin KaFai Lau
@ 2024-10-22 17:51     ` Namhyung Kim
  0 siblings, 0 replies; 11+ messages in thread
From: Namhyung Kim @ 2024-10-22 17:51 UTC (permalink / raw)
  To: Martin KaFai Lau
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Eduard Zingerman, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, LKML, bpf,
	Andrew Morton, Christoph Lameter, Pekka Enberg, David Rientjes,
	Joonsoo Kim, Vlastimil Babka, Roman Gushchin, Hyeonggon Yoo,
	linux-mm, Arnaldo Carvalho de Melo, Kees Cook

On Fri, Oct 18, 2024 at 11:46:31AM -0700, Martin KaFai Lau wrote:
> On 10/17/24 1:06 AM, Namhyung Kim wrote:
> > The new subtest is attached to a sleepable fentry on the syncfs() syscall.
> > It iterates over the kmem_cache entries using a bpf_for_each loop and
> > counts them.  Finally it compares the count with the number of entries
> > seen by the regular iterator.
> > 
> >    $ ./vmtest.sh -- ./test_progs -t kmem_cache_iter
> >    ...
> >    #130/1   kmem_cache_iter/check_task_struct:OK
> >    #130/2   kmem_cache_iter/check_slabinfo:OK
> >    #130/3   kmem_cache_iter/open_coded_iter:OK
> >    #130     kmem_cache_iter:OK
> >    Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED
> > 
> > Also simplify the code by using the skeleton's attach routine.
> > 
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> > ---
> >   .../testing/selftests/bpf/bpf_experimental.h  |  6 ++++
> >   .../bpf/prog_tests/kmem_cache_iter.c          | 28 +++++++++++--------
> >   .../selftests/bpf/progs/kmem_cache_iter.c     | 24 ++++++++++++++++
> >   3 files changed, 46 insertions(+), 12 deletions(-)
> > 
> > diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
> > index b0668f29f7b394eb..cd8ecd39c3f3c68d 100644
> > --- a/tools/testing/selftests/bpf/bpf_experimental.h
> > +++ b/tools/testing/selftests/bpf/bpf_experimental.h
> > @@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
> >   		unsigned int flags__k, void *aux__ign) __ksym;
> >   #define bpf_wq_set_callback(timer, cb, flags) \
> >   	bpf_wq_set_callback_impl(timer, cb, flags, NULL)
> > +
> > +struct bpf_iter_kmem_cache;
> > +extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +
> >   #endif
> > diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> > index 848d8fc9171fae45..a1fd3bc57c0b21bb 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> > @@ -68,12 +68,18 @@ static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
> >   	fclose(fp);
> >   }
> > +static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
> > +{
> > +	/* To trigger the open coded iterator attached to the syscall */
> > +	syncfs(0);
> > +
> > +	/* It should be the same as we've seen from the explicit iterator */
> > +	ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
> > +}
> > +
> >   void test_kmem_cache_iter(void)
> >   {
> > -	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
> >   	struct kmem_cache_iter *skel = NULL;
> > -	union bpf_iter_link_info linfo = {};
> > -	struct bpf_link *link;
> >   	char buf[256];
> >   	int iter_fd;
> > @@ -81,16 +87,12 @@ void test_kmem_cache_iter(void)
> >   	if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
> >   		return;
> > -	opts.link_info = &linfo;
> > -	opts.link_info_len = sizeof(linfo);
> > -
> > -	link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
> > -	if (!ASSERT_OK_PTR(link, "attach_iter"))
> > +	if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))
> 
> with this change.
> 
> >   		goto destroy;
> > -	iter_fd = bpf_iter_create(bpf_link__fd(link));
> > +	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
> >   	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
> > -		goto free_link;
> > +		goto detach;
> >   	memset(buf, 0, sizeof(buf));
> >   	while (read(iter_fd, buf, sizeof(buf)) > 0) {
> > @@ -105,11 +107,13 @@ void test_kmem_cache_iter(void)
> >   		subtest_kmem_cache_iter_check_task_struct(skel);
> >   	if (test__start_subtest("check_slabinfo"))
> >   		subtest_kmem_cache_iter_check_slabinfo(skel);
> > +	if (test__start_subtest("open_coded_iter"))
> > +		subtest_kmem_cache_iter_open_coded(skel);
> >   	close(iter_fd);
> > -free_link:
> > -	bpf_link__destroy(link);
> > +detach:
> > +	kmem_cache_iter__detach(skel);
> 
> nit: I think the kmem_cache_iter__destroy() below will also detach, so no
> need for an explicit kmem_cache_iter__detach().

Ok, will remove.

> 
> >   destroy:
> >   	kmem_cache_iter__destroy(skel);
> >   }
> > diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> > index 72c9dafecd98406b..4c44aa279a5328fe 100644
> > --- a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> > +++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> > @@ -2,6 +2,8 @@
> >   /* Copyright (c) 2024 Google */
> >   #include "bpf_iter.h"
> > +#include "bpf_experimental.h"
> > +#include "bpf_misc.h"
> >   #include <bpf/bpf_helpers.h>
> >   #include <bpf/bpf_tracing.h>
> > @@ -33,6 +35,7 @@ extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
> >   /* Result, will be checked by userspace */
> >   int task_struct_found;
> >   int kmem_cache_seen;
> > +int open_coded_seen;
> >   SEC("iter/kmem_cache")
> >   int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
> > @@ -85,3 +88,24 @@ int BPF_PROG(check_task_struct)
> >   		task_struct_found = -2;
> >   	return 0;
> >   }
> > +
> > +SEC("fentry.s/" SYS_PREFIX "sys_syncfs")
> > +int open_coded_iter(const void *ctx)
> > +{
> > +	struct kmem_cache *s;
> > +
> > +	bpf_for_each(kmem_cache, s) {
> > +		struct kmem_cache_result *r;
> > +		int idx = open_coded_seen;
> > +
> > +		r = bpf_map_lookup_elem(&slab_result, &idx);
> > +		if (r == NULL)
> > +			break;
> > +
> > +		open_coded_seen++;
> 
> I am not sure this will work well if the testing system somehow has
> another process calling syncfs. It is probably a good idea to guard this
> by checking the tid of test_progs at the beginning of this bpf prog.

Right, I'll add the tid check.

Thanks for the review,
Namhyung




* Re: [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter
  2024-10-21 23:36   ` Andrii Nakryiko
@ 2024-10-22 17:52     ` Namhyung Kim
  2024-10-24  7:44     ` Namhyung Kim
  1 sibling, 0 replies; 11+ messages in thread
From: Namhyung Kim @ 2024-10-22 17:52 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	LKML, bpf, Andrew Morton, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Vlastimil Babka, Roman Gushchin,
	Hyeonggon Yoo, linux-mm, Arnaldo Carvalho de Melo, Kees Cook

On Mon, Oct 21, 2024 at 04:36:49PM -0700, Andrii Nakryiko wrote:
> On Thu, Oct 17, 2024 at 1:06 AM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > The new subtest is attached to a sleepable fentry on the syncfs() syscall.
> > It iterates over the kmem_cache entries using a bpf_for_each loop and
> > counts them.  Finally it compares the count with the number of entries
> > seen by the regular iterator.
> >
> >   $ ./vmtest.sh -- ./test_progs -t kmem_cache_iter
> >   ...
> >   #130/1   kmem_cache_iter/check_task_struct:OK
> >   #130/2   kmem_cache_iter/check_slabinfo:OK
> >   #130/3   kmem_cache_iter/open_coded_iter:OK
> >   #130     kmem_cache_iter:OK
> >   Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED
> >
> > Also simplify the code by using the skeleton's attach routine.
> >
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> > ---
> >  .../testing/selftests/bpf/bpf_experimental.h  |  6 ++++
> >  .../bpf/prog_tests/kmem_cache_iter.c          | 28 +++++++++++--------
> >  .../selftests/bpf/progs/kmem_cache_iter.c     | 24 ++++++++++++++++
> >  3 files changed, 46 insertions(+), 12 deletions(-)
> >
> > diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
> > index b0668f29f7b394eb..cd8ecd39c3f3c68d 100644
> > --- a/tools/testing/selftests/bpf/bpf_experimental.h
> > +++ b/tools/testing/selftests/bpf/bpf_experimental.h
> > @@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
> >                 unsigned int flags__k, void *aux__ign) __ksym;
> >  #define bpf_wq_set_callback(timer, cb, flags) \
> >         bpf_wq_set_callback_impl(timer, cb, flags, NULL)
> > +
> > +struct bpf_iter_kmem_cache;
> > +extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +
> 
> we should be getting this from vmlinux.h nowadays, so this is probably
> unnecessary

That'd be nice.  Will remove.

> 
> >  #endif
> > diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> > index 848d8fc9171fae45..a1fd3bc57c0b21bb 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
> > @@ -68,12 +68,18 @@ static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
> >         fclose(fp);
> >  }
> >
> > +static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
> > +{
> > +       /* To trigger the open coded iterator attached to the syscall */
> > +       syncfs(0);
> 
> what Martin said, you still need to filter by PID

Yep.

>
> > +
> > +       /* It should be the same as we've seen from the explicit iterator */
> > +       ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
> > +}
> > +
> >  void test_kmem_cache_iter(void)
> >  {
> > -       DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
> >         struct kmem_cache_iter *skel = NULL;
> > -       union bpf_iter_link_info linfo = {};
> > -       struct bpf_link *link;
> >         char buf[256];
> >         int iter_fd;
> >
> > @@ -81,16 +87,12 @@ void test_kmem_cache_iter(void)
> >         if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
> >                 return;
> >
> > -       opts.link_info = &linfo;
> > -       opts.link_info_len = sizeof(linfo);
> > -
> > -       link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
> > -       if (!ASSERT_OK_PTR(link, "attach_iter"))
> > +       if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))
> >                 goto destroy;
> >
> > -       iter_fd = bpf_iter_create(bpf_link__fd(link));
> > +       iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
> >         if (!ASSERT_GE(iter_fd, 0, "iter_create"))
> > -               goto free_link;
> > +               goto detach;
> >
> >         memset(buf, 0, sizeof(buf));
> >         while (read(iter_fd, buf, sizeof(buf)) > 0) {
> > @@ -105,11 +107,13 @@ void test_kmem_cache_iter(void)
> >                 subtest_kmem_cache_iter_check_task_struct(skel);
> >         if (test__start_subtest("check_slabinfo"))
> >                 subtest_kmem_cache_iter_check_slabinfo(skel);
> > +       if (test__start_subtest("open_coded_iter"))
> > +               subtest_kmem_cache_iter_open_coded(skel);
> >
> >         close(iter_fd);
> >
> > -free_link:
> > -       bpf_link__destroy(link);
> > +detach:
> > +       kmem_cache_iter__detach(skel);
> >  destroy:
> >         kmem_cache_iter__destroy(skel);
> >  }
> > diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> > index 72c9dafecd98406b..4c44aa279a5328fe 100644
> > --- a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> > +++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
> > @@ -2,6 +2,8 @@
> >  /* Copyright (c) 2024 Google */
> >
> >  #include "bpf_iter.h"
> > +#include "bpf_experimental.h"
> > +#include "bpf_misc.h"
> >  #include <bpf/bpf_helpers.h>
> >  #include <bpf/bpf_tracing.h>
> >
> > @@ -33,6 +35,7 @@ extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
> >  /* Result, will be checked by userspace */
> >  int task_struct_found;
> >  int kmem_cache_seen;
> > +int open_coded_seen;
> >
> >  SEC("iter/kmem_cache")
> >  int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
> > @@ -85,3 +88,24 @@ int BPF_PROG(check_task_struct)
> >                 task_struct_found = -2;
> >         return 0;
> >  }
> > +
> > +SEC("fentry.s/" SYS_PREFIX "sys_syncfs")
> > +int open_coded_iter(const void *ctx)
> > +{
> > +       struct kmem_cache *s;
> > +
> > +       bpf_for_each(kmem_cache, s) {
> > +               struct kmem_cache_result *r;
> > +               int idx = open_coded_seen;
> > +
> > +               r = bpf_map_lookup_elem(&slab_result, &idx);
> 
> nit: you don't need idx, just `&open_coded_seen` should be fine, I think

Ok.

> 
> > +               if (r == NULL)
> 
> nit: !r

Will change!

Thanks,
Namhyung

> 
> > +                       break;
> > +
> > +               open_coded_seen++;
> > +
> > +               if (r->obj_size != s->size)
> > +                       break;
> > +       }
> > +       return 0;
> > +}
> > --
> > 2.47.0.rc1.288.g06298d1525-goog
> >



* Re: [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter
  2024-10-21 23:36   ` Andrii Nakryiko
  2024-10-22 17:52     ` Namhyung Kim
@ 2024-10-24  7:44     ` Namhyung Kim
  1 sibling, 0 replies; 11+ messages in thread
From: Namhyung Kim @ 2024-10-24  7:44 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	LKML, bpf, Andrew Morton, Christoph Lameter, Pekka Enberg,
	David Rientjes, Joonsoo Kim, Vlastimil Babka, Roman Gushchin,
	Hyeonggon Yoo, linux-mm, Arnaldo Carvalho de Melo, Kees Cook

On Mon, Oct 21, 2024 at 04:36:49PM -0700, Andrii Nakryiko wrote:
> On Thu, Oct 17, 2024 at 1:06 AM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > The new subtest is attached to a sleepable fentry on the syncfs() syscall.
> > It iterates over the kmem_cache entries using a bpf_for_each loop and
> > counts them.  Finally it compares the count with the number of entries
> > seen by the regular iterator.
> >
> >   $ ./vmtest.sh -- ./test_progs -t kmem_cache_iter
> >   ...
> >   #130/1   kmem_cache_iter/check_task_struct:OK
> >   #130/2   kmem_cache_iter/check_slabinfo:OK
> >   #130/3   kmem_cache_iter/open_coded_iter:OK
> >   #130     kmem_cache_iter:OK
> >   Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED
> >
> > Also simplify the code by using the skeleton's attach routine.
> >
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> > ---
> >  .../testing/selftests/bpf/bpf_experimental.h  |  6 ++++
> >  .../bpf/prog_tests/kmem_cache_iter.c          | 28 +++++++++++--------
> >  .../selftests/bpf/progs/kmem_cache_iter.c     | 24 ++++++++++++++++
> >  3 files changed, 46 insertions(+), 12 deletions(-)
> >
> > diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
> > index b0668f29f7b394eb..cd8ecd39c3f3c68d 100644
> > --- a/tools/testing/selftests/bpf/bpf_experimental.h
> > +++ b/tools/testing/selftests/bpf/bpf_experimental.h
> > @@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
> >                 unsigned int flags__k, void *aux__ign) __ksym;
> >  #define bpf_wq_set_callback(timer, cb, flags) \
> >         bpf_wq_set_callback_impl(timer, cb, flags, NULL)
> > +
> > +struct bpf_iter_kmem_cache;
> > +extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
> > +
> 
> we should be getting this from vmlinux.h nowadays, so this is probably
> unnecessary

I got some build errors without this, so I'll keep it for v2.

Thanks,
Namhyung



end of thread

Thread overview: 11+ messages
2024-10-17  8:06 [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator Namhyung Kim
2024-10-17  8:06 ` [PATCH bpf-next 2/2] selftests/bpf: Add a test for open coded kmem_cache iter Namhyung Kim
2024-10-18 18:46   ` Martin KaFai Lau
2024-10-22 17:51     ` Namhyung Kim
2024-10-21 23:36   ` Andrii Nakryiko
2024-10-22 17:52     ` Namhyung Kim
2024-10-24  7:44     ` Namhyung Kim
2024-10-18 18:22 ` [PATCH bpf-next 1/2] bpf: Add open coded version of kmem_cache iterator Martin KaFai Lau
2024-10-22 17:47   ` Namhyung Kim
2024-10-21 23:32 ` Andrii Nakryiko
2024-10-22 17:50   ` Namhyung Kim
