From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@redhat.com>,
Oscar Salvador <osalvador@suse.de>,
Muchun Song <muchun.song@linux.dev>, <linux-mm@kvack.org>
Cc: <sidhartha.kumar@oracle.com>, <jane.chu@oracle.com>,
Zi Yan <ziy@nvidia.com>, Vlastimil Babka <vbabka@suse.cz>,
Brendan Jackman <jackmanb@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Matthew Wilcox <willy@infradead.org>,
Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: [PATCH v4 5/6] mm: cma: add cma_alloc_frozen{_compound}()
Date: Thu, 23 Oct 2025 19:59:39 +0800
Message-ID: <20251023115940.3573158-6-wangkefeng.wang@huawei.com>
In-Reply-To: <20251023115940.3573158-1-wangkefeng.wang@huawei.com>

Introduce the cma_alloc_frozen{_compound}() helpers to allocate pages
without incrementing their refcount, then convert hugetlb CMA to use
cma_alloc_frozen_compound() and cma_release_frozen() and remove the
now-unused cma_{alloc,free}_folio(). Also move cma_validate_zones()
into mm/internal.h, since it has no users outside of mm/.

After these changes, set_pages_refcounted() is only called on
non-compound pages, so drop its PageHead() special case.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
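A minimal usage sketch of the new helpers, for illustration only (not
part of the diff below). It assumes an mm-internal caller, since
set_page_refcounted() lives in mm/internal.h; demo_alloc()/demo_free()
are hypothetical names, not functions from this series:

/* Hypothetical mm-internal caller pairing the new frozen helpers. */
static struct folio *demo_alloc(struct cma *cma, unsigned int order)
{
	/* The compound page comes back with a refcount of zero ("frozen"). */
	struct page *page = cma_alloc_frozen_compound(cma, order);

	if (!page)
		return NULL;

	/* A caller that wants a normal refcounted folio takes the first ref. */
	set_page_refcounted(page);
	return page_folio(page);
}

static void demo_free(struct cma *cma, struct folio *folio)
{
	/* Drop the last reference, then hand the frozen range back to CMA. */
	folio_ref_dec(folio);
	WARN_ON_ONCE(!cma_release_frozen(cma, &folio->page,
					 folio_nr_pages(folio)));
}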
include/linux/cma.h | 26 ++++++------------------
mm/cma.c | 48 +++++++++++++++++++++++++--------------------
mm/hugetlb_cma.c | 24 +++++++++++++----------
mm/internal.h | 12 +++++-------
4 files changed, 52 insertions(+), 58 deletions(-)
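For reference, after this patch set_pages_refcounted() reduces to a
plain loop over the page range; note that the pfn declaration must stay,
since the loop still uses it:

static inline void set_pages_refcounted(struct page *page, unsigned long nr_pages)
{
	unsigned long pfn = page_to_pfn(page);

	for (; nr_pages--; pfn++)
		set_page_refcounted(pfn_to_page(pfn));
}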
diff --git a/include/linux/cma.h b/include/linux/cma.h
index e5745d2aec55..e2a690f7e77e 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -51,29 +51,15 @@ extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int
bool no_warn);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
+struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
+ unsigned int align, bool no_warn);
+struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order);
+bool cma_release_frozen(struct cma *cma, const struct page *pages,
+ unsigned long count);
+
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
extern void cma_reserve_pages_on_error(struct cma *cma);
-#ifdef CONFIG_CMA
-struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
-bool cma_free_folio(struct cma *cma, const struct folio *folio);
-bool cma_validate_zones(struct cma *cma);
-#else
-static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
-{
- return NULL;
-}
-
-static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
-{
- return false;
-}
-static inline bool cma_validate_zones(struct cma *cma)
-{
- return false;
-}
-#endif
-
#endif
diff --git a/mm/cma.c b/mm/cma.c
index 7f050cf24383..1aa1d821fbe9 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -856,8 +856,8 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
return ret;
}
-static struct page *__cma_alloc(struct cma *cma, unsigned long count,
- unsigned int align, gfp_t gfp)
+static struct page *__cma_alloc_frozen(struct cma *cma,
+ unsigned long count, unsigned int align, gfp_t gfp)
{
struct page *page = NULL;
int ret = -ENOMEM, r;
@@ -904,7 +904,6 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
page, count, align, ret);
if (page) {
- set_pages_refcounted(page, count);
count_vm_event(CMA_ALLOC_SUCCESS);
cma_sysfs_account_success_pages(cma, count);
} else {
@@ -915,6 +914,21 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
return page;
}
+struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
+ unsigned int align, bool no_warn)
+{
+ gfp_t gfp = GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0);
+
+ return __cma_alloc_frozen(cma, count, align, gfp);
+}
+
+struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order)
+{
+ gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN;
+
+ return __cma_alloc_frozen(cma, 1 << order, order, gfp);
+}
+
/**
* cma_alloc() - allocate pages from contiguous area
* @cma: Contiguous memory region for which the allocation is performed.
@@ -927,24 +941,18 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
*/
struct page *cma_alloc(struct cma *cma, unsigned long count,
unsigned int align, bool no_warn)
-{
- return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-}
-
-struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
{
struct page *page;
- if (WARN_ON(!order || !(gfp & __GFP_COMP)))
- return NULL;
-
- page = __cma_alloc(cma, 1 << order, order, gfp);
+ page = cma_alloc_frozen(cma, count, align, no_warn);
+ if (page)
+ set_pages_refcounted(page, count);
- return page ? page_folio(page) : NULL;
+ return page;
}
static bool __cma_release(struct cma *cma, const struct page *pages,
- unsigned long count, bool compound)
+ unsigned long count, bool frozen)
{
unsigned long pfn, end;
int r;
@@ -974,8 +982,8 @@ static bool __cma_release(struct cma *cma, const struct page *pages,
return false;
}
- if (compound)
- __free_pages((struct page *)pages, compound_order(pages));
+ if (frozen)
+ free_contig_frozen_range(pfn, count);
else
free_contig_range(pfn, count);
@@ -1002,12 +1010,10 @@ bool cma_release(struct cma *cma, const struct page *pages,
return __cma_release(cma, pages, count, false);
}
-bool cma_free_folio(struct cma *cma, const struct folio *folio)
+bool cma_release_frozen(struct cma *cma, const struct page *pages,
+ unsigned long count)
{
- if (WARN_ON(!folio_test_large(folio)))
- return false;
-
- return __cma_release(cma, &folio->page, folio_nr_pages(folio), true);
+ return __cma_release(cma, pages, count, true);
}
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index e8e4dc7182d5..0a57d3776c8d 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -20,35 +20,39 @@ static unsigned long hugetlb_cma_size __initdata;
void hugetlb_cma_free_folio(struct folio *folio)
{
- int nid = folio_nid(folio);
+ folio_ref_dec(folio);
- WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
+ WARN_ON_ONCE(!cma_release_frozen(hugetlb_cma[folio_nid(folio)],
+ &folio->page, folio_nr_pages(folio)));
}
-
struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
int node;
- struct folio *folio = NULL;
+ struct folio *folio;
+ struct page *page = NULL;
if (hugetlb_cma[nid])
- folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
+ page = cma_alloc_frozen_compound(hugetlb_cma[nid], order);
- if (!folio && !(gfp_mask & __GFP_THISNODE)) {
+ if (!page && !(gfp_mask & __GFP_THISNODE)) {
for_each_node_mask(node, *nodemask) {
if (node == nid || !hugetlb_cma[node])
continue;
- folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
- if (folio)
+			page = cma_alloc_frozen_compound(hugetlb_cma[node], order);
+ if (page)
break;
}
}
- if (folio)
- folio_set_hugetlb_cma(folio);
+ if (!page)
+ return NULL;
+ set_page_refcounted(page);
+ folio = page_folio(page);
+ folio_set_hugetlb_cma(folio);
return folio;
}
diff --git a/mm/internal.h b/mm/internal.h
index ec1e47a9044f..dd22b1d8ff90 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -515,13 +515,8 @@ static inline void set_page_refcounted(struct page *page)
static inline void set_pages_refcounted(struct page *page, unsigned long nr_pages)
{
	unsigned long pfn = page_to_pfn(page);

-	if (PageHead(page)) {
-		set_page_refcounted(page);
-		return;
-	}
-
	for (; nr_pages--; pfn++)
		set_page_refcounted(pfn_to_page(pfn));
}
@@ -952,9 +947,14 @@ void init_cma_reserved_pageblock(struct page *page);
struct cma;
#ifdef CONFIG_CMA
+bool cma_validate_zones(struct cma *cma);
void *cma_reserve_early(struct cma *cma, unsigned long size);
void init_cma_pageblock(struct page *page);
#else
+static inline bool cma_validate_zones(struct cma *cma)
+{
+ return false;
+}
static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
{
return NULL;
--
2.27.0