From: alexei.starovoitov@gmail.com
To: bpf@vger.kernel.org
Cc: andrii@kernel.org, memxor@gmail.com, akpm@linux-foundation.org,
peterz@infradead.org, vbabka@suse.cz, bigeasy@linutronix.de,
rostedt@goodmis.org, houtao1@huawei.com, hannes@cmpxchg.org,
shakeel.butt@linux.dev, mhocko@suse.com, willy@infradead.org,
tglx@linutronix.de, jannh@google.com, tj@kernel.org,
linux-mm@kvack.org, kernel-team@fb.com
Subject: [PATCH bpf-next v3 1/6] mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation
Date: Tue, 17 Dec 2024 19:07:14 -0800
Message-ID: <20241218030720.1602449-2-alexei.starovoitov@gmail.com>
In-Reply-To: <20241218030720.1602449-1-alexei.starovoitov@gmail.com>
From: Alexei Starovoitov <ast@kernel.org>
Tracing BPF programs execute from tracepoints and kprobes where the
running context is unknown, but they need to request additional
memory. The prior workarounds used pre-allocated memory and
BPF-specific freelists to satisfy such allocation requests.
Instead, introduce an internal __GFP_TRYLOCK flag that makes the page
allocator accessible from any context. It relies on the percpu free
list of pages that rmqueue_pcplist() should be able to pop a
page from. If that fails (due to IRQ re-entrancy or the list being
empty), try_alloc_pages() attempts to spin_trylock zone->lock
and refill the percpu freelist as usual.
A BPF program may execute with IRQs disabled, and zone->lock is a
sleeping lock on RT, so trylock is the only option. In theory we
could introduce a percpu reentrance counter and increment it every
time spin_lock_irqsave(&zone->lock, flags) is taken, but we cannot
rely on it: even if this CPU is not in the page_alloc path,
spin_lock_irqsave() is not safe, since the BPF program might be
called from a tracepoint where preemption is disabled. So trylock only.
Note that free_page and memcg are not taught about __GFP_TRYLOCK yet;
that support comes in the next patches.
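For illustration, a minimal sketch of a hypothetical caller (the real
BPF user arrives later in this series):

	struct page *page;

	/* Usable from any context: fails fast instead of blocking. */
	page = try_alloc_pages(NUMA_NO_NODE, 0);
	if (!page)
		return -ENOMEM;	/* expected under contention; caller must cope */

	/* The page comes back zeroed, since __GFP_ZERO is implied. */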
This is a first step towards supporting BPF requirements in SLUB
and getting rid of bpf_mem_alloc.
That goal was discussed at LSFMM: https://lwn.net/Articles/974138/
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
include/linux/gfp.h | 3 ++
include/linux/gfp_types.h | 1 +
mm/internal.h | 2 ++
mm/page_alloc.c | 69 ++++++++++++++++++++++++++++++++++++---
4 files changed, 71 insertions(+), 4 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b0fe9f62d15b..65b8df1db26a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -347,6 +347,9 @@ static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
}
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
+struct page *try_alloc_pages_noprof(int nid, unsigned int order);
+#define try_alloc_pages(...) alloc_hooks(try_alloc_pages_noprof(__VA_ARGS__))
+
extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 65db9349f905..65b148ec86eb 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -48,6 +48,7 @@ enum {
___GFP_THISNODE_BIT,
___GFP_ACCOUNT_BIT,
___GFP_ZEROTAGS_BIT,
+ ___GFP_TRYLOCK_BIT,
#ifdef CONFIG_KASAN_HW_TAGS
___GFP_SKIP_ZERO_BIT,
___GFP_SKIP_KASAN_BIT,
diff --git a/mm/internal.h b/mm/internal.h
index cb8d8e8e3ffa..122fce7e1a9e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1175,6 +1175,8 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
#endif
#define ALLOC_HIGHATOMIC 0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD 0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
+#define __GFP_TRYLOCK ((__force gfp_t)BIT(___GFP_TRYLOCK_BIT))
+#define ALLOC_TRYLOCK 0x1000000 /* Only use spin_trylock in allocation path */
/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1cb4b8c8886d..d23545057b6e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2304,7 +2304,11 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long flags;
int i;
- spin_lock_irqsave(&zone->lock, flags);
+ if (!spin_trylock_irqsave(&zone->lock, flags)) {
+ if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+ return 0;
+ spin_lock_irqsave(&zone->lock, flags);
+ }
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype,
alloc_flags);
@@ -2904,7 +2908,11 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
do {
page = NULL;
- spin_lock_irqsave(&zone->lock, flags);
+ if (!spin_trylock_irqsave(&zone->lock, flags)) {
+ if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+ return NULL;
+ spin_lock_irqsave(&zone->lock, flags);
+ }
if (alloc_flags & ALLOC_HIGHATOMIC)
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
if (!page) {
@@ -4001,6 +4009,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
*/
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
+ BUILD_BUG_ON(__GFP_TRYLOCK != (__force gfp_t) ALLOC_TRYLOCK);
/*
* The caller may dip into page reserves a bit more if the caller
@@ -4009,7 +4018,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
* set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
*/
alloc_flags |= (__force int)
- (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
+ (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM | __GFP_TRYLOCK));
if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
/*
@@ -4509,7 +4518,8 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
might_alloc(gfp_mask);
- if (should_fail_alloc_page(gfp_mask, order))
+ if (!(*alloc_flags & ALLOC_TRYLOCK) &&
+ should_fail_alloc_page(gfp_mask, order))
return false;
*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
@@ -7023,3 +7033,54 @@ static bool __free_unaccepted(struct page *page)
}
#endif /* CONFIG_UNACCEPTED_MEMORY */
+
+struct page *try_alloc_pages_noprof(int nid, unsigned int order)
+{
+ gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO |
+ __GFP_NOMEMALLOC | __GFP_TRYLOCK;
+ unsigned int alloc_flags = ALLOC_TRYLOCK;
+ struct alloc_context ac = { };
+ struct page *page;
+
+ /*
+ * On RT spin_trylock() may call raw_spin_lock() which is unsafe in NMI.
+ * If spin_trylock() is called from hard IRQ, the current task may be
+ * waiting for one rt_spin_lock, but rt_spin_trylock() will mark the
+ * task as the owner of another rt_spin_lock, which will confuse PI
+ * logic, so return immediately if called from hard IRQ or NMI.
+ *
+ * Note, the irqs_disabled() case is ok. This function can be called
+ * from a raw_spin_lock_irqsave region.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
+ return NULL;
+ if (!pcp_allowed_order(order))
+ return NULL;
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ if (has_unaccepted_memory())
+ return NULL;
+#endif
+
+ if (nid == NUMA_NO_NODE)
+ nid = numa_node_id();
+
+ prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
+ &alloc_gfp, &alloc_flags);
+
+ /*
+ * Best-effort allocation from the percpu free list.
+ * If it's empty, attempt to spin_trylock zone->lock.
+ * Do not specify __GFP_KSWAPD_RECLAIM to avoid wakeup_kswapd
+ * that may need to grab a lock.
+ * Do not specify __GFP_ACCOUNT to avoid local_lock.
+ * Do not warn either.
+ */
+ page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
+
+ /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
+
+ trace_mm_page_alloc(page, order, alloc_gfp & ~__GFP_TRYLOCK, ac.migratetype);
+ kmsan_alloc_page(page, order, alloc_gfp);
+ return page;
+}
--
2.43.5
Thread overview: 36+ messages
2024-12-18 3:07 [PATCH bpf-next v3 0/6] bpf, mm: Introduce try_alloc_pages() alexei.starovoitov
2024-12-18 3:07 ` alexei.starovoitov [this message]
2024-12-18 11:32 ` [PATCH bpf-next v3 1/6] mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation Michal Hocko
2024-12-19 0:05 ` Shakeel Butt
2024-12-19 7:18 ` Michal Hocko
2024-12-19 1:18 ` Alexei Starovoitov
2024-12-19 7:13 ` Michal Hocko
2024-12-20 0:41 ` Alexei Starovoitov
2024-12-19 0:10 ` Shakeel Butt
2024-12-19 1:39 ` Alexei Starovoitov
2024-12-18 3:07 ` [PATCH bpf-next v3 2/6] mm, bpf: Introduce free_pages_nolock() alexei.starovoitov
2024-12-18 4:58 ` Yosry Ahmed
2024-12-18 5:33 ` Alexei Starovoitov
2024-12-18 5:57 ` Yosry Ahmed
2024-12-18 6:37 ` Alexei Starovoitov
2024-12-18 6:49 ` Yosry Ahmed
2024-12-18 7:25 ` Alexei Starovoitov
2024-12-18 7:40 ` Yosry Ahmed
2024-12-18 11:32 ` Michal Hocko
2024-12-19 1:45 ` Alexei Starovoitov
2024-12-19 7:03 ` Michal Hocko
2024-12-20 0:42 ` Alexei Starovoitov
2024-12-18 3:07 ` [PATCH bpf-next v3 3/6] locking/local_lock: Introduce local_trylock_irqsave() alexei.starovoitov
2024-12-18 3:07 ` [PATCH bpf-next v3 4/6] memcg: Use trylock to access memcg stock_lock alexei.starovoitov
2024-12-18 11:32 ` Michal Hocko
2024-12-19 1:53 ` Alexei Starovoitov
2024-12-19 7:08 ` Michal Hocko
2024-12-19 7:27 ` Michal Hocko
2024-12-19 7:52 ` Michal Hocko
2024-12-20 0:39 ` Alexei Starovoitov
2024-12-20 8:24 ` Michal Hocko
2024-12-20 16:10 ` Alexei Starovoitov
2024-12-20 19:45 ` Shakeel Butt
2024-12-21 7:20 ` Michal Hocko
2024-12-18 3:07 ` [PATCH bpf-next v3 5/6] mm, bpf: Use memcg in try_alloc_pages() alexei.starovoitov
2024-12-18 3:07 ` [PATCH bpf-next v3 6/6] bpf: Use try_alloc_pages() to allocate pages for bpf needs alexei.starovoitov