From: Brendan Jackman <jackmanb@google.com>
To: Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
Vlastimil Babka <vbabka@kernel.org>, Wei Xu <weixugc@google.com>,
Johannes Weiner <hannes@cmpxchg.org>, Zi Yan <ziy@nvidia.com>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, x86@kernel.org,
rppt@kernel.org, Sumit Garg <sumit.garg@oss.qualcomm.com>,
derkling@google.com, reijiw@google.com,
Will Deacon <will@kernel.org>,
rientjes@google.com, "Kalyazin, Nikita" <kalyazin@amazon.co.uk>,
patrick.roy@linux.dev, "Itazuri, Takahiro" <itazur@amazon.co.uk>,
Andy Lutomirski <luto@kernel.org>,
David Kaplan <david.kaplan@amd.com>,
Thomas Gleixner <tglx@kernel.org>,
Brendan Jackman <jackmanb@google.com>,
Yosry Ahmed <yosry.ahmed@linux.dev>
Subject: [PATCH RFC 16/19] mm/page_alloc: introduce ALLOC_NOBLOCK
Date: Wed, 25 Feb 2026 16:34:41 +0000 [thread overview]
Message-ID: <20260225-page_alloc-unmapped-v1-16-e8808a03cd66@google.com> (raw)
In-Reply-To: <20260225-page_alloc-unmapped-v1-0-e8808a03cd66@google.com>
This flag is set unless the caller is known not to be in an atomic
context.
The allocator will soon start needing to call set_direct_map_* APIs
which cannot be called with IRQs off. It will need to do this even
before direct reclaim is possible.
Despite the fact that, in principle, ALLOC_NOBLOCK is distinct from
__GFP_DIRECT_RECLAIM, in order to avoid introducing a GFP flag, just
infer the former based on whether the caller set the latter. This means
that, in practice, ALLOC_NOBLOCK is just !__GFP_DIRECT_RECLAIM, except
that it is not influenced by gfp_allowed_mask. This could change later,
though.
Call it ALLOC_NOBLOCK in order to try to mitigate confusion with the
recently-removed ALLOC_NON_BLOCK, which meant something different.
Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
mm/internal.h | 1 +
mm/page_alloc.c | 29 ++++++++++++++++++++++-------
2 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 5be53d25c89b7..6f2eacf3d8f2c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1390,6 +1390,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
#define ALLOC_HIGHATOMIC 0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_TRYLOCK 0x400 /* Only use spin_trylock in allocation path */
#define ALLOC_KSWAPD 0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
+#define ALLOC_NOBLOCK 0x1000 /* Caller may be atomic */
/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_HARDER|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 42b807faca5fe..5576bd6a26b7b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4646,6 +4646,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
+ alloc_flags |= ALLOC_NOBLOCK;
+
/*
* Not worth trying to allocate harder for __GFP_NOMEMALLOC even
* if it can't schedule.
@@ -4839,14 +4841,13 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
- struct alloc_context *ac)
+ struct alloc_context *ac, unsigned int alloc_flags)
{
bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
bool can_compact = can_direct_reclaim && gfp_compaction_allowed(gfp_mask);
bool nofail = gfp_mask & __GFP_NOFAIL;
const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
struct page *page = NULL;
- unsigned int alloc_flags;
unsigned long did_some_progress;
enum compact_priority compact_priority;
enum compact_result compact_result;
@@ -4898,7 +4899,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
* kswapd needs to be woken up, and to avoid the cost of setting up
* alloc_flags precisely. So we do that now.
*/
- alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
+ alloc_flags |= gfp_to_alloc_flags(gfp_mask, order);
/*
* We need to recalculate the starting point for the zonelist iterator
@@ -5124,6 +5125,18 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
return page;
}
+static inline unsigned int init_alloc_flags(gfp_t gfp_mask, unsigned int flags)
+{
+ /*
+ * If the caller allowed __GFP_DIRECT_RECLAIM, they can't be atomic.
+ * Note this is a separate determination from whether direct reclaim is
+ * actually allowed, it must happen before applying gfp_allowed_mask.
+ */
+ if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
+ flags |= ALLOC_NOBLOCK;
+ return flags;
+}
+
static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
int preferred_nid, nodemask_t *nodemask,
struct alloc_context *ac, gfp_t *alloc_gfp,
@@ -5205,7 +5218,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
struct list_head *pcp_list;
struct alloc_context ac;
gfp_t alloc_gfp;
- unsigned int alloc_flags = ALLOC_WMARK_LOW;
+ unsigned int alloc_flags = init_alloc_flags(gfp, ALLOC_WMARK_LOW);
int nr_populated = 0, nr_account = 0;
/*
@@ -5346,7 +5359,7 @@ struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
int preferred_nid, nodemask_t *nodemask)
{
struct page *page;
- unsigned int alloc_flags = ALLOC_WMARK_LOW;
+ unsigned int alloc_flags = init_alloc_flags(gfp, ALLOC_WMARK_LOW);
gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = { };
@@ -5391,7 +5404,7 @@ struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
*/
ac.nodemask = nodemask;
- page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
+ page = __alloc_pages_slowpath(alloc_gfp, order, &ac, alloc_flags);
out:
if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
@@ -7911,11 +7924,13 @@ struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned
*/
gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP
| gfp_flags;
- unsigned int alloc_flags = ALLOC_TRYLOCK;
+ unsigned int alloc_flags = init_alloc_flags(alloc_gfp, ALLOC_TRYLOCK);
struct alloc_context ac = { };
struct page *page;
VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT);
+ VM_WARN_ON_ONCE(!(alloc_flags & ALLOC_NOBLOCK));
+
/*
* In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is
* unsafe in NMI. If spin_trylock() is called from hard IRQ the current
--
2.51.2
next prev parent reply other threads:[~2026-02-25 16:35 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-25 16:34 [PATCH RFC 00/19] mm: Add __GFP_UNMAPPED Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 01/19] x86/mm: split out preallocate_sub_pgd() Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 02/19] x86/mm: Generalize LDT remap into "mm-local region" Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 03/19] x86/tlb: Expose some flush function declarations to modules Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 04/19] x86/mm: introduce the mermap Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 05/19] mm: KUnit tests for " Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 06/19] mm: introduce for_each_free_list() Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 07/19] mm/page_alloc: don't overload migratetype in find_suitable_fallback() Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 08/19] mm: introduce freetype_t Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 09/19] mm: move migratetype definitions to freetype.h Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 10/19] mm: add definitions for allocating unmapped pages Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 11/19] mm: rejig pageblock mask definitions Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 12/19] mm: encode freetype flags in pageblock flags Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 13/19] mm/page_alloc: remove ifdefs from pindex helpers Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 14/19] mm/page_alloc: separate pcplists by freetype flags Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 15/19] mm/page_alloc: rename ALLOC_NON_BLOCK back to _HARDER Brendan Jackman
2026-02-25 16:34 ` Brendan Jackman [this message]
2026-02-25 16:34 ` [PATCH RFC 17/19] mm/page_alloc: implement __GFP_UNMAPPED allocations Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 18/19] mm/page_alloc: implement __GFP_UNMAPPED|__GFP_ZERO allocations Brendan Jackman
2026-02-25 16:34 ` [PATCH RFC 19/19] mm: Minimal KUnit tests for some new page_alloc logic Brendan Jackman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260225-page_alloc-unmapped-v1-16-e8808a03cd66@google.com \
--to=jackmanb@google.com \
--cc=akpm@linux-foundation.org \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=david.kaplan@amd.com \
--cc=david@kernel.org \
--cc=derkling@google.com \
--cc=hannes@cmpxchg.org \
--cc=itazur@amazon.co.uk \
--cc=kalyazin@amazon.co.uk \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=luto@kernel.org \
--cc=patrick.roy@linux.dev \
--cc=peterz@infradead.org \
--cc=reijiw@google.com \
--cc=rientjes@google.com \
--cc=rppt@kernel.org \
--cc=sumit.garg@oss.qualcomm.com \
--cc=tglx@kernel.org \
--cc=vbabka@kernel.org \
--cc=weixugc@google.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yosry.ahmed@linux.dev \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox