From: Zi Yan <ziy@nvidia.com>
To: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	David Hildenbrand <david@redhat.com>,
	Oscar Salvador <osalvador@suse.de>,
	Muchun Song <muchun.song@linux.dev>,
	sidhartha.kumar@oracle.com, jane.chu@oracle.com,
	Vlastimil Babka <vbabka@suse.cz>,
	Brendan Jackman <jackmanb@google.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	linux-mm@kvack.org
Subject: Re: [PATCH v2 3/9] mm: hugetlb: directly pass order when allocating a hugetlb folio
Date: Mon, 08 Sep 2025 21:11:23 -0400	[thread overview]
Message-ID: <64DE9265-7B31-4128-9949-84AF050CBFF4@nvidia.com> (raw)
In-Reply-To: <20250902124820.3081488-4-wangkefeng.wang@huawei.com>

On 2 Sep 2025, at 8:48, Kefeng Wang wrote:

> Pass the order directly instead of struct hstate, which removes the
> huge_page_order() call from all hugetlb folio allocation paths.
>
> Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> Reviewed-by: Jane Chu <jane.chu@oracle.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  mm/hugetlb.c     | 27 +++++++++++++--------------
>  mm/hugetlb_cma.c |  3 +--
>  mm/hugetlb_cma.h |  6 +++---
>  3 files changed, 17 insertions(+), 19 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 4131467fc1cd..5c93faf82674 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1473,17 +1473,16 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
>
>  #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
>  #ifdef CONFIG_CONTIG_ALLOC
> -static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
> +static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
>  		int nid, nodemask_t *nodemask)
>  {
>  	struct folio *folio;
> -	int order = huge_page_order(h);
>  	bool retried = false;
>
>  	if (nid == NUMA_NO_NODE)
>  		nid = numa_mem_id();
>  retry:
> -	folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
> +	folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
>  	if (!folio) {
>  		if (hugetlb_cma_exclusive_alloc())
>  			return NULL;
> @@ -1506,16 +1505,16 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
>  }
>
>  #else /* !CONFIG_CONTIG_ALLOC */
> -static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
> -					int nid, nodemask_t *nodemask)
> +static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
> +					  nodemask_t *nodemask)
>  {
>  	return NULL;
>  }
>  #endif /* CONFIG_CONTIG_ALLOC */
>
>  #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
> -static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
> -					int nid, nodemask_t *nodemask)
> +static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
> +					  nodemask_t *nodemask)
>  {
>  	return NULL;
>  }
> @@ -1926,11 +1925,9 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
>  	return NULL;
>  }
>
> -static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
> -		gfp_t gfp_mask, int nid, nodemask_t *nmask,
> -		nodemask_t *node_alloc_noretry)
> +static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
> +		int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
>  {
> -	int order = huge_page_order(h);
>  	struct folio *folio;
>  	bool alloc_try_hard = true;
>
> @@ -1980,11 +1977,13 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
>  		nodemask_t *node_alloc_noretry)
>  {
>  	struct folio *folio;
> +	int order = huge_page_order(h);
>
> -	if (hstate_is_gigantic(h))
> -		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
> +	if (order > MAX_PAGE_ORDER)

Would it be better to add

bool order_is_gigantic(unsigned int order)
{
	return order > MAX_PAGE_ORDER;
}

for this check, and to change hstate_is_gigantic() to

	return order_is_gigantic(huge_page_order(h));

so that the _is_gigantic() checks stay consistent?
BTW, isolate_or_dissolve_huge_folio() could use order_is_gigantic() too.
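
For reference, a minimal sketch of the combined change (assuming the
helpers live in include/linux/hugetlb.h next to the existing
hstate_is_gigantic()):

static inline bool order_is_gigantic(unsigned int order)
{
	/* Orders above MAX_PAGE_ORDER are beyond what the buddy allocator handles. */
	return order > MAX_PAGE_ORDER;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return order_is_gigantic(huge_page_order(h));
}

and the call site in only_alloc_fresh_hugetlb_folio() would then read:

	if (order_is_gigantic(order))
		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);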

Otherwise, Reviewed-by: Zi Yan <ziy@nvidia.com>


> +		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
>  	else
> -		folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry);
> +		folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
> +						  node_alloc_noretry);
>  	if (folio)
>  		init_new_hugetlb_folio(h, folio);
>  	return folio;
> diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
> index f58ef4969e7a..e8e4dc7182d5 100644
> --- a/mm/hugetlb_cma.c
> +++ b/mm/hugetlb_cma.c
> @@ -26,11 +26,10 @@ void hugetlb_cma_free_folio(struct folio *folio)
>  }
>
>
> -struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
> +struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
>  				      int nid, nodemask_t *nodemask)
>  {
>  	int node;
> -	int order = huge_page_order(h);
>  	struct folio *folio = NULL;
>
>  	if (hugetlb_cma[nid])
> diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h
> index f7d7fb9880a2..2c2ec8a7e134 100644
> --- a/mm/hugetlb_cma.h
> +++ b/mm/hugetlb_cma.h
> @@ -4,7 +4,7 @@
>
>  #ifdef CONFIG_CMA
>  void hugetlb_cma_free_folio(struct folio *folio);
> -struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
> +struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
>  				      int nid, nodemask_t *nodemask);
>  struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
>  						    bool node_exact);
> @@ -18,8 +18,8 @@ static inline void hugetlb_cma_free_folio(struct folio *folio)
>  {
>  }
>
> -static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
> -	    gfp_t gfp_mask, int nid, nodemask_t *nodemask)
> +static inline struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
> +		int nid, nodemask_t *nodemask)
>  {
>  	return NULL;
>  }
> -- 
> 2.27.0


Best Regards,
Yan, Zi

