From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Muchun Song <muchun.song@linux.dev>,
	Oscar Salvador <osalvador@suse.de>,
	David Hildenbrand <david@redhat.com>
Cc: <linux-mm@kvack.org>, Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: [PATCH 1/7] mm: hugetlb: convert to alloc_fresh_hugetlb_hvo_folio()
Date: Sat, 2 Aug 2025 15:31:01 +0800
Message-ID: <20250802073107.2787975-2-wangkefeng.wang@huawei.com>
In-Reply-To: <20250802073107.2787975-1-wangkefeng.wang@huawei.com>

Now alloc_fresh_hugetlb_folio() is only called by
alloc_migrate_hugetlb_folio(). Clean it up by converting it to
alloc_fresh_hugetlb_hvo_folio(), which allocates the folio via
only_alloc_fresh_hugetlb_folio() and applies the vmemmap optimization
in one place. Switch alloc_and_dissolve_hugetlb_folio() and
alloc_surplus_hugetlb_folio() over to the new helper as well, and let
alloc_migrate_hugetlb_folio() do its per-node accounting directly.
This allows us to remove prep_new_hugetlb_folio() and
__prep_new_hugetlb_folio().

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/hugetlb.c | 48 +++++++++++++++---------------------------------
 1 file changed, 15 insertions(+), 33 deletions(-)
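
For review convenience, a rough sketch of the allocation path after this
patch, stitched together from the hunks below (all names are taken from
the diff; this is not a standalone buildable unit, and locking/error
handling is shown only where the patch touches it):

static struct folio *alloc_fresh_hugetlb_hvo_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio;

	/* only_alloc_fresh_hugetlb_folio() picks the gigantic or buddy path */
	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (folio)
		hugetlb_vmemmap_optimize_folio(h, folio);
	return folio;	/* still frozen: ref count of all pages is zero */
}

Callers that need per-node accounting, e.g. alloc_migrate_hugetlb_folio(),
now take hugetlb_lock and call __prep_account_new_huge_page() themselves:

	folio = alloc_fresh_hugetlb_hvo_folio(h, gfp_mask, nid, nmask);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, folio_nid(folio));
	spin_unlock_irq(&hugetlb_lock);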

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 753f99b4c718..5b4c19e7a5f7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1906,20 +1906,6 @@ static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 	set_hugetlb_cgroup_rsvd(folio, NULL);
 }
 
-static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
-{
-	init_new_hugetlb_folio(h, folio);
-	hugetlb_vmemmap_optimize_folio(h, folio);
-}
-
-static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
-{
-	__prep_new_hugetlb_folio(h, folio);
-	spin_lock_irq(&hugetlb_lock);
-	__prep_account_new_huge_page(h, nid);
-	spin_unlock_irq(&hugetlb_lock);
-}
-
 /*
  * Find and lock address space (mapping) in write mode.
  *
@@ -2005,25 +1991,20 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
 }
 
 /*
- * Common helper to allocate a fresh hugetlb page. All specific allocators
- * should use this function to get new hugetlb pages
+ * Common helper to allocate a fresh hugetlb folio. All specific allocators
+ * should use this function to get a new hugetlb folio
  *
- * Note that returned page is 'frozen':  ref count of head page and all tail
+ * Note that returned folio is 'frozen':  ref count of head page and all tail
  * pages is zero.
  */
-static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
+static struct folio *alloc_fresh_hugetlb_hvo_folio(struct hstate *h,
 		gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
 	struct folio *folio;
 
-	if (hstate_is_gigantic(h))
-		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
-	else
-		folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
-	if (!folio)
-		return NULL;
-
-	prep_new_hugetlb_folio(h, folio, folio_nid(folio));
+	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+	if (folio)
+		hugetlb_vmemmap_optimize_folio(h, folio);
 	return folio;
 }
 
@@ -2241,12 +2222,10 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 		goto out_unlock;
 	spin_unlock_irq(&hugetlb_lock);
 
-	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+	folio = alloc_fresh_hugetlb_hvo_folio(h, gfp_mask, nid, nmask);
 	if (!folio)
 		return NULL;
 
-	hugetlb_vmemmap_optimize_folio(h, folio);
-
 	spin_lock_irq(&hugetlb_lock);
 	/*
 	 * nr_huge_pages needs to be adjusted within the same lock cycle
@@ -2286,10 +2265,14 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
 	if (hstate_is_gigantic(h))
 		return NULL;
 
-	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
+	folio = alloc_fresh_hugetlb_hvo_folio(h, gfp_mask, nid, nmask);
 	if (!folio)
 		return NULL;
 
+	spin_lock_irq(&hugetlb_lock);
+	__prep_account_new_huge_page(h, folio_nid(folio));
+	spin_unlock_irq(&hugetlb_lock);
+
 	/* fresh huge pages are frozen */
 	folio_ref_unfreeze(folio, 1);
 	/*
@@ -2836,11 +2819,10 @@ static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
 		if (!new_folio) {
 			spin_unlock_irq(&hugetlb_lock);
 			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
-			new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
-							      NULL, NULL);
+			new_folio = alloc_fresh_hugetlb_hvo_folio(h, gfp_mask,
+								  nid, NULL);
 			if (!new_folio)
 				return -ENOMEM;
-			__prep_new_hugetlb_folio(h, new_folio);
 			goto retry;
 		}
 
-- 
2.27.0




Thread overview: 33+ messages
2025-08-02  7:31 [PATCH 0/7] mm: hugetlb: cleanup and allocate frozen hugetlb folio Kefeng Wang
2025-08-02  7:31 ` Kefeng Wang [this message]
2025-08-04 15:41   ` [PATCH 1/7] mm: hugetlb: convert to alloc_fresh_hugetlb_hvo_folio() Sidhartha Kumar
2025-08-04 20:27   ` Vishal Moola (Oracle)
2025-08-05 14:21     ` Kefeng Wang
2025-08-05 17:56       ` Vishal Moola (Oracle)
2025-08-05 22:18   ` jane.chu
2025-08-06  0:33     ` jane.chu
2025-08-06  0:56       ` Kefeng Wang
2025-08-06 18:16         ` jane.chu
2025-08-02  7:31 ` [PATCH 2/7] mm: hugetlb: convert to prep_account_new_hugetlb_folio() Kefeng Wang
2025-08-04 15:54   ` Sidhartha Kumar
2025-08-04 20:36   ` Vishal Moola (Oracle)
2025-08-05 14:21     ` Kefeng Wang
2025-08-06  0:53     ` jane.chu
2025-08-02  7:31 ` [PATCH 3/7] mm; hugetlb: simpify alloc_buddy_hugetlb_folio() Kefeng Wang
2025-08-04 15:57   ` Sidhartha Kumar
2025-08-04 20:56   ` Vishal Moola (Oracle)
2025-08-05 14:22     ` Kefeng Wang
2025-08-02  7:31 ` [PATCH 4/7] mm: hugetlb: directly pass order when allocate a hugetlb folio Kefeng Wang
2025-08-04 16:22   ` Sidhartha Kumar
2025-08-06 20:05   ` jane.chu
2025-08-06 20:17     ` jane.chu
2025-08-02  7:31 ` [PATCH 5/7] mm: hugetlb: remove struct hstate from init_new_hugetlb_folio() Kefeng Wang
2025-08-04 16:13   ` Sidhartha Kumar
2025-08-06 20:08   ` jane.chu
2025-08-02  7:31 ` [PATCH 6/7] mm: hugeltb: check NUMA_NO_NODE in only_alloc_fresh_hugetlb_folio() Kefeng Wang
2025-08-04 19:09   ` Sidhartha Kumar
2025-08-06 20:43   ` jane.chu
2025-08-12 12:13     ` Kefeng Wang
2025-08-02  7:31 ` [PATCH 7/7] mm: hugetlb: allocate frozen pages in alloc_gigantic_folio() Kefeng Wang
2025-08-07  1:22   ` jane.chu
2025-08-12 12:11     ` Kefeng Wang
