From: Yafang Shao <laoar.shao@gmail.com>
To: 42.hyeyoo@gmail.com, vbabka@suse.cz, ast@kernel.org,
daniel@iogearbox.net, andrii@kernel.org, kafai@fb.com,
songliubraving@fb.com, yhs@fb.com, john.fastabend@gmail.com,
kpsingh@kernel.org, sdf@google.com, haoluo@google.com,
jolsa@kernel.org, tj@kernel.org, dennis@kernel.org, cl@linux.com,
akpm@linux-foundation.org, penberg@kernel.org,
rientjes@google.com, iamjoonsoo.kim@lge.com,
roman.gushchin@linux.dev
Cc: linux-mm@kvack.org, bpf@vger.kernel.org,
Yafang Shao <laoar.shao@gmail.com>
Subject: [RFC PATCH bpf-next v2 07/11] bpf: introduce new helpers bpf_ringbuf_pages_{alloc,free}
Date: Thu, 12 Jan 2023 15:53:22 +0000
Message-ID: <20230112155326.26902-8-laoar.shao@gmail.com>
In-Reply-To: <20230112155326.26902-1-laoar.shao@gmail.com>

Move the allocation of the ring buffer's page array and its pages into a
new helper, bpf_ringbuf_pages_alloc(), paired with bpf_ringbuf_pages_free(),
so that this pages-related memory can be allocated and freed as a single
unit.

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
---
kernel/bpf/ringbuf.c | 72 ++++++++++++++++++++++++++++++++++------------------
1 file changed, 48 insertions(+), 24 deletions(-)
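
For a concrete picture of what bpf_ringbuf_pages_alloc() builds, here is a
small userspace sketch of the index math (not kernel code; the page counts
are made-up example values). Every data page is stored in the array twice,
so that the later vmap() call maps the data area back to back and a sample
wrapping past the end of the ring buffer can still be read as one
contiguous range:

  /*
   * Userspace illustration of the page-array layout produced by the
   * allocation loop in bpf_ringbuf_pages_alloc(). Integers stand in
   * for struct page pointers; 2 meta pages and 4 data pages are
   * arbitrary example counts.
   */
  #include <stdio.h>

  int main(void)
  {
      int nr_meta_pages = 2, nr_data_pages = 4;   /* example values */
      int nr_pages = nr_meta_pages + nr_data_pages;
      int array_size = nr_meta_pages + 2 * nr_data_pages;
      int pages[32];
      int i;

      for (i = 0; i < nr_pages; i++) {
          pages[i] = i;
          if (i >= nr_meta_pages)
              pages[nr_data_pages + i] = i;       /* duplicate data slot */
      }

      for (i = 0; i < array_size; i++)
          printf("slot %d -> page %d\n", i, pages[i]);
      /* slots 0-1: meta pages; slots 2-5 and 6-9: the same four data pages */
      return 0;
  }

Because the duplicate slots only alias pages already present in the first
nr_pages entries, bpf_ringbuf_pages_free() walks just nr_pages entries and
never frees a page twice; for the same reason the partial-failure path in
the allocator clamps nr_pages to the number of pages actually allocated
before jumping to the free path.
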
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 80f4b4d..3264bf5 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -92,6 +92,48 @@ struct bpf_ringbuf_hdr {
u32 pg_off;
};
+static void bpf_ringbuf_pages_free(struct page **pages, int nr_pages)
+{
+ int i;
+
+ for (i = 0; i < nr_pages; i++)
+ __free_page(pages[i]);
+ bpf_map_area_free(pages);
+}
+
+static struct page **bpf_ringbuf_pages_alloc(int nr_meta_pages,
+ int nr_data_pages, int numa_node,
+ const gfp_t flags)
+{
+ int nr_pages = nr_meta_pages + nr_data_pages;
+ struct page **pages, *page;
+ int array_size;
+ int i;
+
+ array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
+ pages = bpf_map_area_alloc(array_size, numa_node);
+ if (!pages)
+ goto err;
+
+ for (i = 0; i < nr_pages; i++) {
+ page = alloc_pages_node(numa_node, flags, 0);
+ if (!page) {
+ nr_pages = i;
+ goto err_free_pages;
+ }
+ pages[i] = page;
+ if (i >= nr_meta_pages)
+ pages[nr_data_pages + i] = page;
+ }
+
+ return pages;
+
+err_free_pages:
+ bpf_ringbuf_pages_free(pages, nr_pages);
+err:
+ return NULL;
+}
+
static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
@@ -99,10 +141,8 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
int nr_data_pages = data_sz >> PAGE_SHIFT;
int nr_pages = nr_meta_pages + nr_data_pages;
- struct page **pages, *page;
struct bpf_ringbuf *rb;
- size_t array_size;
- int i;
+ struct page **pages;
/* Each data page is mapped twice to allow "virtual"
* continuous read of samples wrapping around the end of ring
@@ -121,22 +161,11 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
* when mmap()'ed in user-space, simplifying both kernel and
* user-space implementations significantly.
*/
- array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
- pages = bpf_map_area_alloc(array_size, numa_node);
+ pages = bpf_ringbuf_pages_alloc(nr_meta_pages, nr_data_pages,
+ numa_node, flags);
if (!pages)
return NULL;
- for (i = 0; i < nr_pages; i++) {
- page = alloc_pages_node(numa_node, flags, 0);
- if (!page) {
- nr_pages = i;
- goto err_free_pages;
- }
- pages[i] = page;
- if (i >= nr_meta_pages)
- pages[nr_data_pages + i] = page;
- }
-
rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
VM_MAP | VM_USERMAP, PAGE_KERNEL);
if (rb) {
@@ -146,10 +175,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
return rb;
}
-err_free_pages:
- for (i = 0; i < nr_pages; i++)
- __free_page(pages[i]);
- bpf_map_area_free(pages);
+ bpf_ringbuf_pages_free(pages, nr_pages);
return NULL;
}
@@ -219,12 +245,10 @@ static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
* to unmap rb itself with vunmap() below
*/
struct page **pages = rb->pages;
- int i, nr_pages = rb->nr_pages;
+ int nr_pages = rb->nr_pages;
vunmap(rb);
- for (i = 0; i < nr_pages; i++)
- __free_page(pages[i]);
- bpf_map_area_free(pages);
+ bpf_ringbuf_pages_free(pages, nr_pages);
}
static void ringbuf_map_free(struct bpf_map *map)
--
1.8.3.1