From: Zi Yan <ziy@nvidia.com>
To: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@redhat.com>,
Oscar Salvador <osalvador@suse.de>,
Muchun Song <muchun.song@linux.dev>,
linux-mm@kvack.org, sidhartha.kumar@oracle.com,
jane.chu@oracle.com, Vlastimil Babka <vbabka@suse.cz>,
Brendan Jackman <jackmanb@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Matthew Wilcox <willy@infradead.org>,
David Hildenbrand <david@kernel.org>
Subject: Re: [PATCH v4 5/6] mm: cma: add cma_alloc_frozen{_compound}()
Date: Tue, 16 Dec 2025 13:40:18 -0500
Message-ID: <4B10600F-A837-4FCA-808D-6F8637B073F7@nvidia.com>
In-Reply-To: <20251216114844.2126250-6-wangkefeng.wang@huawei.com>

On 16 Dec 2025, at 6:48, Kefeng Wang wrote:
> Introduce the cma_alloc_frozen{_compound}() helpers to allocate pages
> without incrementing their refcount, then convert hugetlb cma to use
> cma_alloc_frozen_compound() and cma_release_frozen() and remove the
> now-unused cma_{alloc,free}_folio(). Also move cma_validate_zones()
> into mm/internal.h, since it has no users outside mm/.
>
> After the above changes, set_pages_refcounted() is only called on
> non-compound pages, so remove the PageHead handling.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  include/linux/cma.h | 26 ++++++------------------
>  mm/cma.c            | 48 +++++++++++++++++++++++++--------------------
>  mm/hugetlb_cma.c    | 24 +++++++++++++----------
>  mm/internal.h       | 10 +++++-----
>  4 files changed, 52 insertions(+), 56 deletions(-)
>
> diff --git a/include/linux/cma.h b/include/linux/cma.h
> index e5745d2aec55..e2a690f7e77e 100644
> --- a/include/linux/cma.h
> +++ b/include/linux/cma.h
> @@ -51,29 +51,15 @@ extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int
> bool no_warn);
> extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
>
> +struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
> + unsigned int align, bool no_warn);
> +struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order);
> +bool cma_release_frozen(struct cma *cma, const struct page *pages,
> + unsigned long count);
> +
> extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
> extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
>
> extern void cma_reserve_pages_on_error(struct cma *cma);
>
> -#ifdef CONFIG_CMA
> -struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
> -bool cma_free_folio(struct cma *cma, const struct folio *folio);
> -bool cma_validate_zones(struct cma *cma);
> -#else
> -static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
> -{
> - return NULL;
> -}
> -
> -static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
> -{
> - return false;
> -}
> -static inline bool cma_validate_zones(struct cma *cma)
> -{
> - return false;
> -}
> -#endif
> -
> #endif
> diff --git a/mm/cma.c b/mm/cma.c
> index 7f050cf24383..1aa1d821fbe9 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -856,8 +856,8 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
> return ret;
> }
>
> -static struct page *__cma_alloc(struct cma *cma, unsigned long count,
> - unsigned int align, gfp_t gfp)
> +static struct page *__cma_alloc_frozen(struct cma *cma,
> + unsigned long count, unsigned int align, gfp_t gfp)
> {
> struct page *page = NULL;
> int ret = -ENOMEM, r;
> @@ -904,7 +904,6 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
> trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
> page, count, align, ret);
> if (page) {
> - set_pages_refcounted(page, count);
> count_vm_event(CMA_ALLOC_SUCCESS);
> cma_sysfs_account_success_pages(cma, count);
> } else {
> @@ -915,6 +914,21 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
> return page;
> }
>
> +struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
> + unsigned int align, bool no_warn)
> +{
> + gfp_t gfp = GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0);
> +
> + return __cma_alloc_frozen(cma, count, align, gfp);
> +}
> +
> +struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order)
> +{
> + gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN;
> +
> + return __cma_alloc_frozen(cma, 1 << order, order, gfp);
> +}
> +
> /**
> * cma_alloc() - allocate pages from contiguous area
> * @cma: Contiguous memory region for which the allocation is performed.
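
An aside, not part of the patch: with this split, the refcount contract is
that pages come back from cma_alloc_frozen() frozen, i.e. with refcount
zero, and a caller that wants ordinary refcounted pages publishes them
explicitly. A hypothetical caller would look roughly like:

        struct page *page;

        page = cma_alloc_frozen(cma, count, align, false);
        if (!page)
                return NULL;
        /* ... initialize the pages while nobody else can grab a ref ... */
        set_pages_refcounted(page, count);

cma_alloc() below does exactly this internally.
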
> @@ -927,24 +941,18 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
> */
> struct page *cma_alloc(struct cma *cma, unsigned long count,
> unsigned int align, bool no_warn)
> -{
> - return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
> -}
> -
> -struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
> {
> struct page *page;
>
> - if (WARN_ON(!order || !(gfp & __GFP_COMP)))
> - return NULL;
> -
> - page = __cma_alloc(cma, 1 << order, order, gfp);
> + page = cma_alloc_frozen(cma, count, align, no_warn);
> + if (page)
> + set_pages_refcounted(page, count);
>
> - return page ? page_folio(page) : NULL;
> + return page;
> }
>
> static bool __cma_release(struct cma *cma, const struct page *pages,
> - unsigned long count, bool compound)
> + unsigned long count, bool frozen)
> {
> unsigned long pfn, end;
> int r;
> @@ -974,8 +982,8 @@ static bool __cma_release(struct cma *cma, const struct page *pages,
> return false;
> }
>
> - if (compound)
> - __free_pages((struct page *)pages, compound_order(pages));
> + if (frozen)
> + free_contig_frozen_range(pfn, count);
> else
> free_contig_range(pfn, count);

Can we get rid of the free_contig_range() branch by making cma_release()
put each page's refcount itself? Then __cma_release() becomes
cma_release_frozen(), and the release pattern mirrors the allocation
pattern:

1. cma_alloc() calls cma_alloc_frozen(), then manipulates the page refcounts.
2. cma_release() manipulates the page refcounts, then calls cma_release_frozen().
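
Something like the following untested sketch, where cma_release_frozen()
keeps the current __cma_release() body and always takes the
free_contig_frozen_range() path:

        bool cma_release(struct cma *cma, const struct page *pages,
                         unsigned long count)
        {
                unsigned long i;

                /* Drop the refs cma_alloc() took via set_pages_refcounted(). */
                for (i = 0; i < count; i++)
                        VM_WARN_ON_ONCE(!put_page_testzero((struct page *)&pages[i]));

                return cma_release_frozen(cma, pages, count);
        }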
The rest of the changes looks good to me.

Best Regards,
Yan, Zi