From: "Michael S. Tsirkin" <mst@redhat.com>
To: linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Vlastimil Babka <vbabka@kernel.org>,
Brendan Jackman <jackmanb@google.com>,
Michal Hocko <mhocko@suse.com>,
Suren Baghdasaryan <surenb@google.com>,
Jason Wang <jasowang@redhat.com>,
Andrea Arcangeli <aarcange@redhat.com>,
linux-mm@kvack.org, virtualization@lists.linux.dev,
Muchun Song <muchun.song@linux.dev>,
Oscar Salvador <osalvador@suse.de>,
Hugh Dickins <hughd@google.com>,
Baolin Wang <baolin.wang@linux.alibaba.com>
Subject: [PATCH RFC v2 08/18] mm: hugetlb: thread pghint_t through buddy allocation chain
Date: Mon, 20 Apr 2026 08:50:41 -0400 [thread overview]
Message-ID: <ad75b8d2d13bc92e1ee369a4a3fff646d90018d9.1776689093.git.mst@redhat.com> (raw)
In-Reply-To: <cover.1776689093.git.mst@redhat.com>
Thread pghint_t *hints through the hugetlb buddy allocation path,
passing it from only_alloc_fresh_hugetlb_folio() down into
alloc_buddy_frozen_folio(). alloc_buddy_frozen_folio() now calls
__alloc_frozen_pages_hints(), so the reported-page zeroed hint can
propagate back up to callers.
Add pghint_t *hints to alloc_hugetlb_folio_reserve() for the
memfd path. Callers that do not need hints pass NULL.
No functional change yet: hints are threaded but not acted upon.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Assisted-by: Claude:claude-opus-4-6
Assisted-by: cursor-agent:GPT-5.4-xhigh
---
include/linux/hugetlb.h | 6 ++++--
mm/hugetlb.c | 29 ++++++++++++++++++++---------
mm/memfd.c | 2 +-
3 files changed, 25 insertions(+), 12 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 65910437be1c..7311ad87add4 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -710,7 +710,8 @@ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback);
struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
- nodemask_t *nmask, gfp_t gfp_mask);
+ nodemask_t *nmask, gfp_t gfp_mask,
+ pghint_t *hints);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
@@ -1124,7 +1125,8 @@ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
static inline struct folio *
alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
- nodemask_t *nmask, gfp_t gfp_mask)
+ nodemask_t *nmask, gfp_t gfp_mask,
+ pghint_t *hints)
{
return NULL;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 327eaa4074d3..faa94a114fd4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1842,7 +1842,8 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
}
static struct folio *alloc_buddy_frozen_folio(int order, gfp_t gfp_mask,
- int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
+ int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry,
+ pghint_t *hints)
{
struct folio *folio;
bool alloc_try_hard = true;
@@ -1859,7 +1860,8 @@ static struct folio *alloc_buddy_frozen_folio(int order, gfp_t gfp_mask,
if (alloc_try_hard)
gfp_mask |= __GFP_RETRY_MAYFAIL;
- folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
+ folio = (struct folio *)__alloc_frozen_pages_hints(gfp_mask, order,
+ nid, nmask, hints);
/*
* If we did not specify __GFP_RETRY_MAYFAIL, but still got a
@@ -1888,11 +1890,14 @@ static struct folio *alloc_buddy_frozen_folio(int order, gfp_t gfp_mask,
static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask,
- nodemask_t *node_alloc_noretry)
+ nodemask_t *node_alloc_noretry, pghint_t *hints)
{
struct folio *folio;
int order = huge_page_order(h);
+ if (hints)
+ *hints = 0;
+
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
@@ -1900,7 +1905,7 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
folio = alloc_gigantic_frozen_folio(order, gfp_mask, nid, nmask);
else
folio = alloc_buddy_frozen_folio(order, gfp_mask, nid, nmask,
- node_alloc_noretry);
+ node_alloc_noretry, hints);
if (folio)
init_new_hugetlb_folio(folio);
return folio;
@@ -1918,7 +1923,8 @@ static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
{
struct folio *folio;
- folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+ folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
+ NULL, NULL);
if (folio)
hugetlb_vmemmap_optimize_folio(h, folio);
return folio;
@@ -1958,7 +1964,8 @@ static struct folio *alloc_pool_huge_folio(struct hstate *h,
struct folio *folio;
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
- nodes_allowed, node_alloc_noretry);
+ nodes_allowed, node_alloc_noretry,
+ NULL);
if (folio)
return folio;
}
@@ -2231,10 +2238,13 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
}
struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
- nodemask_t *nmask, gfp_t gfp_mask)
+ nodemask_t *nmask, gfp_t gfp_mask, pghint_t *hints)
{
struct folio *folio;
+ if (hints)
+ *hints = (pghint_t)0;
+
spin_lock_irq(&hugetlb_lock);
if (!h->resv_huge_pages) {
spin_unlock_irq(&hugetlb_lock);
@@ -3434,13 +3444,14 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
- &node_states[N_MEMORY], NULL);
+ &node_states[N_MEMORY], NULL, NULL);
if (!folio && !list_empty(&folio_list) &&
hugetlb_vmemmap_optimizable_size(h)) {
prep_and_add_allocated_folios(h, &folio_list);
INIT_LIST_HEAD(&folio_list);
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
- &node_states[N_MEMORY], NULL);
+ &node_states[N_MEMORY], NULL,
+ NULL);
}
if (!folio)
break;
diff --git a/mm/memfd.c b/mm/memfd.c
index 919c2a53eb96..f1c00600e19a 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -93,7 +93,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
folio = alloc_hugetlb_folio_reserve(h,
numa_node_id(),
NULL,
- gfp_mask);
+ gfp_mask, NULL);
if (folio) {
u32 hash;
--
MST
next prev parent reply other threads:[~2026-04-20 12:50 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-20 12:51 [PATCH RFC v2 00/18] mm/virtio: skip redundant zeroing of host-zeroed reported pages Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 01/18] mm: page_alloc: propagate PageReported flag across buddy splits Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 02/18] mm: add pghint_t type and vma_alloc_folio_hints API Michael S. Tsirkin
2026-04-21 0:58 ` Huang, Ying
2026-04-20 12:50 ` [PATCH RFC v2 03/18] mm: add PG_zeroed page flag for known-zero pages Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 04/18] mm: page_alloc: track PG_zeroed across buddy merges Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 05/18] mm: page_alloc: preserve PG_zeroed in try_to_claim_block Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 06/18] mm: page_alloc: thread pghint_t through get_page_from_freelist Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 07/18] mm: post_alloc_hook: use PG_zeroed to skip zeroing, return pghint_t Michael S. Tsirkin
2026-04-20 12:50 ` Michael S. Tsirkin [this message]
2026-04-20 12:50 ` [PATCH RFC v2 09/18] mm: hugetlb: use PG_zeroed for pool pages, skip redundant zeroing Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 10/18] mm: page_reporting: support host-zeroed reported pages Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 11/18] mm: skip zeroing in vma_alloc_zeroed_movable_folio for pre-zeroed pages Michael S. Tsirkin
2026-04-21 10:58 ` David Hildenbrand (Arm)
2026-04-20 12:50 ` [PATCH RFC v2 12/18] mm: skip zeroing in alloc_anon_folio " Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 13/18] mm: skip zeroing in vma_alloc_anon_folio_pmd " Michael S. Tsirkin
2026-04-20 12:50 ` [PATCH RFC v2 14/18] mm: memfd: skip zeroing for pre-zeroed hugetlb pages Michael S. Tsirkin
2026-04-20 12:51 ` [PATCH RFC v2 15/18] virtio_balloon: add host_zeroes_pages module parameter Michael S. Tsirkin
2026-04-20 12:51 ` [PATCH RFC v2 16/18] mm: page_reporting: add flush parameter with page budget Michael S. Tsirkin
2026-04-20 12:51 ` [PATCH RFC v2 17/18] mm: add free_frozen_pages_hint and put_page_hint APIs Michael S. Tsirkin
2026-04-21 10:56 ` David Hildenbrand (Arm)
2026-04-20 12:51 ` [PATCH RFC v2 18/18] virtio_balloon: mark deflated pages as pre-zeroed Michael S. Tsirkin
2026-04-20 18:09 ` [syzbot ci] Re: mm/virtio: skip redundant zeroing of host-zeroed reported pages syzbot ci
2026-04-20 18:20 ` [PATCH RFC v2 00/18] " David Hildenbrand (Arm)
2026-04-20 23:33 ` Michael S. Tsirkin
2026-04-21 2:38 ` Gregory Price
2026-04-21 10:04 ` David Hildenbrand (Arm)
2026-04-21 10:50 ` David Hildenbrand (Arm)
2026-04-21 2:21 ` Gregory Price
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=ad75b8d2d13bc92e1ee369a4a3fff646d90018d9.1776689093.git.mst@redhat.com \
--to=mst@redhat.com \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=david@kernel.org \
--cc=hughd@google.com \
--cc=jackmanb@google.com \
--cc=jasowang@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mhocko@suse.com \
--cc=muchun.song@linux.dev \
--cc=osalvador@suse.de \
--cc=surenb@google.com \
--cc=vbabka@kernel.org \
--cc=virtualization@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox