From: Brendan Jackman <jackmanb@google.com>
To: Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Vlastimil Babka <vbabka@kernel.org>, Wei Xu <weixugc@google.com>,
Johannes Weiner <hannes@cmpxchg.org>, Zi Yan <ziy@nvidia.com>,
Lorenzo Stoakes <ljs@kernel.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, x86@kernel.org,
rppt@kernel.org, Sumit Garg <sumit.garg@oss.qualcomm.com>,
derkling@google.com, reijiw@google.com,
Will Deacon <will@kernel.org>,
rientjes@google.com, "Kalyazin, Nikita" <kalyazin@amazon.co.uk>,
patrick.roy@linux.dev, "Itazuri, Takahiro" <itazur@amazon.co.uk>,
Andy Lutomirski <luto@kernel.org>,
David Kaplan <david.kaplan@amd.com>,
Thomas Gleixner <tglx@kernel.org>,
Brendan Jackman <jackmanb@google.com>,
Yosry Ahmed <yosry@kernel.org>
Subject: [PATCH v2 16/22] mm/page_alloc: separate pcplists by freetype flags
Date: Fri, 20 Mar 2026 18:23:40 +0000 [thread overview]
Message-ID: <20260320-page_alloc-unmapped-v2-16-28bf1bd54f41@google.com> (raw)
In-Reply-To: <20260320-page_alloc-unmapped-v2-0-28bf1bd54f41@google.com>
The normal freelists are already separated by this flag, so now update
the pcplists accordingly. This follows the most "obvious" design where
__GFP_UNMAPPED is supported at arbitrary orders.
If necessary, it would be possible to avoid the proliferation of
pcplists by restricting orders that can be allocated from them with this
FREETYPE_UNMAPPED.
On the other hand, there's currently no use case for movable/reclaimable
unmapped memory, and constraining the migratetype doesn't have any
tricky plumbing implications. So, take advantage of that and assume that
FREETYPE_UNMAPPED implies MIGRATE_UNMOVABLE.
Overall, this just takes the existing space of pindices and tacks
another bank on the end. For !THP this is just 4 more lists, with THP
there is a single additional list for hugepages.
Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
include/linux/mmzone.h | 11 ++++++++++-
mm/page_alloc.c | 44 +++++++++++++++++++++++++++++++++-----------
2 files changed, 43 insertions(+), 12 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index af662e4912591..65efc08152b0c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -778,8 +778,17 @@ enum zone_watermarks {
#else
#define NR_PCP_THP 0
#endif
+/*
+ * FREETYPE_UNMAPPED can currently only be used with MIGRATE_UNMOVABLE, so for
+ * those there's no need to encode the migratetype in the pindex.
+ */
+#ifdef CONFIG_PAGE_ALLOC_UNMAPPED
+#define NR_UNMAPPED_PCP_LISTS (PAGE_ALLOC_COSTLY_ORDER + 1 + !!NR_PCP_THP)
+#else
+#define NR_UNMAPPED_PCP_LISTS 0
+#endif
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
-#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
+#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP + NR_UNMAPPED_PCP_LISTS)
/*
* Flags used in pcp->flags field.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f125eae790f73..53848312a0c21 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -694,18 +694,30 @@ static void bad_page(struct page *page, const char *reason)
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
-static inline unsigned int order_to_pindex(int migratetype, int order)
+static inline unsigned int order_to_pindex(freetype_t freetype, int order)
{
+ int migratetype = free_to_migratetype(freetype);
+
+ VM_BUG_ON(migratetype >= MIGRATE_PCPTYPES);
+ VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER &&
+ (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) || order != HPAGE_PMD_ORDER));
+
+ /* FREETYPE_UNMAPPED currently always means MIGRATE_UNMOVABLE. */
+ if (freetype_flags(freetype) & FREETYPE_UNMAPPED) {
+ int order_offset = order;
+
+ VM_BUG_ON(migratetype != MIGRATE_UNMOVABLE);
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ order_offset = PAGE_ALLOC_COSTLY_ORDER + 1;
+
+ return NR_LOWORDER_PCP_LISTS + NR_PCP_THP + order_offset;
+ }
+
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
bool movable = migratetype == MIGRATE_MOVABLE;
- if (order > PAGE_ALLOC_COSTLY_ORDER) {
- VM_BUG_ON(!is_pmd_order(order));
-
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
return NR_LOWORDER_PCP_LISTS + movable;
- }
- } else {
- VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
}
return (MIGRATE_PCPTYPES * order) + migratetype;
@@ -713,8 +725,18 @@ static inline unsigned int order_to_pindex(int migratetype, int order)
static inline int pindex_to_order(unsigned int pindex)
{
- int order = pindex / MIGRATE_PCPTYPES;
+ unsigned int unmapped_base = NR_LOWORDER_PCP_LISTS + NR_PCP_THP;
+ int order;
+ if (pindex >= unmapped_base) {
+ order = pindex - unmapped_base;
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ order > PAGE_ALLOC_COSTLY_ORDER)
+ return HPAGE_PMD_ORDER;
+ return order;
+ }
+
+ order = pindex / MIGRATE_PCPTYPES;
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
if (pindex >= NR_LOWORDER_PCP_LISTS)
order = HPAGE_PMD_ORDER;
@@ -2935,7 +2957,7 @@ static bool free_frozen_page_commit(struct zone *zone,
*/
pcp->alloc_factor >>= 1;
__count_vm_events(PGFREE, 1 << order);
- pindex = order_to_pindex(free_to_migratetype(freetype), order);
+ pindex = order_to_pindex(freetype, order);
list_add(&page->pcp_list, &pcp->lists[pindex]);
pcp->count += 1 << order;
@@ -3452,7 +3474,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
* frees.
*/
pcp->free_count >>= 1;
- list = &pcp->lists[order_to_pindex(free_to_migratetype(freetype), order)];
+ list = &pcp->lists[order_to_pindex(freetype, order)];
page = __rmqueue_pcplist(zone, order, freetype, alloc_flags, pcp, list);
pcp_spin_unlock(pcp);
if (page) {
@@ -5236,7 +5258,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
goto failed;
/* Attempt the batch allocation */
- pcp_list = &pcp->lists[order_to_pindex(free_to_migratetype(ac.freetype), 0)];
+ pcp_list = &pcp->lists[order_to_pindex(ac.freetype, 0)];
while (nr_populated < nr_pages) {
/* Skip existing pages */
--
2.51.2
next prev parent reply other threads:[~2026-03-20 18:24 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-20 18:23 [PATCH v2 00/22] mm: Add __GFP_UNMAPPED Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 01/22] x86/mm: split out preallocate_sub_pgd() Brendan Jackman
2026-03-20 19:42 ` Dave Hansen
2026-03-23 11:01 ` Brendan Jackman
2026-03-24 15:27 ` Borislav Petkov
2026-03-25 13:28 ` Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 02/22] x86/mm: Generalize LDT remap into "mm-local region" Brendan Jackman
2026-03-20 19:47 ` Dave Hansen
2026-03-23 12:01 ` Brendan Jackman
2026-03-23 12:57 ` Brendan Jackman
2026-03-25 14:23 ` Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 03/22] x86/tlb: Expose some flush function declarations to modules Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 04/22] mm: Create flags arg for __apply_to_page_range() Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 05/22] mm: Add more flags " Brendan Jackman
2026-03-26 16:14 ` Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 06/22] x86/mm: introduce the mermap Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 07/22] mm: KUnit tests for " Brendan Jackman
2026-03-24 8:00 ` kernel test robot
2026-03-20 18:23 ` [PATCH v2 08/22] mm: introduce for_each_free_list() Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 09/22] mm/page_alloc: don't overload migratetype in find_suitable_fallback() Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 10/22] mm: introduce freetype_t Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 11/22] mm: move migratetype definitions to freetype.h Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 12/22] mm: add definitions for allocating unmapped pages Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 13/22] mm: rejig pageblock mask definitions Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 14/22] mm: encode freetype flags in pageblock flags Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 15/22] mm/page_alloc: remove ifdefs from pindex helpers Brendan Jackman
2026-03-20 18:23 ` Brendan Jackman [this message]
2026-03-20 18:23 ` [PATCH v2 17/22] mm/page_alloc: rename ALLOC_NON_BLOCK back to _HARDER Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 18/22] mm/page_alloc: introduce ALLOC_NOBLOCK Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 19/22] mm/page_alloc: implement __GFP_UNMAPPED allocations Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 20/22] mm/page_alloc: implement __GFP_UNMAPPED|__GFP_ZERO allocations Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 21/22] mm: Minimal KUnit tests for some new page_alloc logic Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 22/22] mm/secretmem: Use __GFP_UNMAPPED when available Brendan Jackman
2026-03-31 14:40 ` Brendan Jackman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260320-page_alloc-unmapped-v2-16-28bf1bd54f41@google.com \
--to=jackmanb@google.com \
--cc=akpm@linux-foundation.org \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=david.kaplan@amd.com \
--cc=david@kernel.org \
--cc=derkling@google.com \
--cc=hannes@cmpxchg.org \
--cc=itazur@amazon.co.uk \
--cc=kalyazin@amazon.co.uk \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ljs@kernel.org \
--cc=luto@kernel.org \
--cc=patrick.roy@linux.dev \
--cc=peterz@infradead.org \
--cc=reijiw@google.com \
--cc=rientjes@google.com \
--cc=rppt@kernel.org \
--cc=sumit.garg@oss.qualcomm.com \
--cc=tglx@kernel.org \
--cc=vbabka@kernel.org \
--cc=weixugc@google.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yosry@kernel.org \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox