From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Muchun Song <muchun.song@linux.dev>,
Oscar Salvador <osalvador@suse.de>,
David Hildenbrand <david@redhat.com>
Cc: <linux-mm@kvack.org>, Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: [PATCH 4/7] mm: hugetlb: directly pass order when allocating a hugetlb folio
Date: Sat, 2 Aug 2025 15:31:04 +0800
Message-ID: <20250802073107.2787975-5-wangkefeng.wang@huawei.com>
In-Reply-To: <20250802073107.2787975-1-wangkefeng.wang@huawei.com>
Pass the order directly instead of struct hstate, which removes the
huge_page_order() calls from the hugetlb folio allocation paths. Since
hstate_is_gigantic() is simply huge_page_order(h) > MAX_PAGE_ORDER,
only_alloc_fresh_hugetlb_folio() can compare the order against
MAX_PAGE_ORDER directly.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
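A minimal sketch (not part of the patch) of the resulting calling
convention, assuming the current definitions of huge_page_order() and
hstate_is_gigantic(); the function name example_alloc is hypothetical:

	static struct folio *example_alloc(struct hstate *h, gfp_t gfp_mask,
					   int nid, nodemask_t *nmask)
	{
		/* Convert hstate to order once, at the boundary. */
		int order = huge_page_order(h);

		/*
		 * hstate_is_gigantic(h) is defined as
		 * huge_page_order(h) > MAX_PAGE_ORDER, so the gigantic
		 * check can be done on the plain order.
		 */
		if (order > MAX_PAGE_ORDER)
			return alloc_gigantic_folio(order, gfp_mask, nid, nmask);

		return alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
						 NULL);
	}

The helpers below then take the plain order and never touch the hstate.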
mm/hugetlb.c | 27 +++++++++++++--------------
mm/hugetlb_cma.c | 3 +--
mm/hugetlb_cma.h | 6 +++---
3 files changed, 17 insertions(+), 19 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 436403fb0bed..e174a9269f52 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1473,17 +1473,16 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
#ifdef CONFIG_CONTIG_ALLOC
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
struct folio *folio;
- int order = huge_page_order(h);
bool retried = false;
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
retry:
- folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
+ folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
if (!folio) {
if (hugetlb_cma_exclusive_alloc())
return NULL;
@@ -1506,16 +1505,16 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
}
#else /* !CONFIG_CONTIG_ALLOC */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
- int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+ nodemask_t *nodemask)
{
return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */
#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
- int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+ nodemask_t *nodemask)
{
return NULL;
}
@@ -1926,11 +1925,9 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
return NULL;
}
-static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
- gfp_t gfp_mask, int nid, nodemask_t *nmask,
- nodemask_t *node_alloc_noretry)
+static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
+ int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
{
- int order = huge_page_order(h);
struct folio *folio;
bool alloc_try_hard = true;
@@ -1977,11 +1974,13 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
nodemask_t *node_alloc_noretry)
{
struct folio *folio;
+ int order = huge_page_order(h);
- if (hstate_is_gigantic(h))
- folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+ if (order > MAX_PAGE_ORDER)
+ folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
else
- folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry);
+ folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
+ node_alloc_noretry);
if (folio)
init_new_hugetlb_folio(h, folio);
return folio;
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index f58ef4969e7a..e8e4dc7182d5 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -26,11 +26,10 @@ void hugetlb_cma_free_folio(struct folio *folio)
}
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
int node;
- int order = huge_page_order(h);
struct folio *folio = NULL;
if (hugetlb_cma[nid])
diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h
index f7d7fb9880a2..2c2ec8a7e134 100644
--- a/mm/hugetlb_cma.h
+++ b/mm/hugetlb_cma.h
@@ -4,7 +4,7 @@
#ifdef CONFIG_CMA
void hugetlb_cma_free_folio(struct folio *folio);
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
bool node_exact);
@@ -18,8 +18,8 @@ static inline void hugetlb_cma_free_folio(struct folio *folio)
{
}
-static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
- gfp_t gfp_mask, int nid, nodemask_t *nodemask)
+static inline struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask)
{
return NULL;
}
--
2.27.0