From: Frank van der Linden <fvdl@google.com>
To: akpm@linux-foundation.org, muchun.song@linux.dev,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: yuzhao@google.com, usamaarif642@gmail.com,
joao.m.martins@oracle.com, roman.gushchin@linux.dev,
ziy@nvidia.com, david@redhat.com,
Frank van der Linden <fvdl@google.com>
Subject: [PATCH v5 23/27] mm/cma: introduce a cma validate function
Date: Fri, 28 Feb 2025 18:29:24 +0000
Message-ID: <20250228182928.2645936-24-fvdl@google.com>
In-Reply-To: <20250228182928.2645936-1-fvdl@google.com>

Define a function to check if a CMA area is valid, meaning that
none of its ranges cross a zone boundary. Store the result in the
newly created flags field of each CMA area, so that repeated calls
don't have to redo the check.
This allows the validity of a CMA area to be checked early, which
is needed later in this series to allocate hugetlb bootmem pages
from it with pre-HVO.

Signed-off-by: Frank van der Linden <fvdl@google.com>
---
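
(Reviewer note, not part of the commit message: the sketch below shows
how a later patch in this series might consume the new function from an
early hugetlb bootmem path. The function name
hugetlb_bootmem_cma_usable and its placement are illustrative
assumptions only, not code from this series.)

/*
 * Hypothetical early caller: before handing a CMA area to the
 * pre-HVO bootmem allocator, confirm that none of its ranges
 * straddle a zone boundary. Because cma_validate_zones() caches
 * its verdict in cma->flags, the later call from
 * cma_activate_area() reuses the stored result instead of
 * rescanning the ranges.
 */
static bool __init hugetlb_bootmem_cma_usable(struct cma *cma)
{
	return cma_validate_zones(cma);
}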
include/linux/cma.h | 5 ++++
mm/cma.c | 60 ++++++++++++++++++++++++++++++++++++---------
mm/cma.h | 8 +++++-
3 files changed, 60 insertions(+), 13 deletions(-)
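
(Also as a reviewer note: the check is memoized with two bits rather
than one because a single "valid" bit cannot distinguish "checked and
found invalid" from "not checked yet". A minimal standalone sketch of
that pattern follows, using the standard test_bit()/set_bit() helpers
from linux/bitops.h; do_zone_scan() is a hypothetical stand-in for the
real range/zone intersection scan, not an existing function.)

enum { CHECK_VALID, CHECK_INVALID };

static bool memoized_check(unsigned long *flags)
{
	bool valid = test_bit(CHECK_VALID, flags);

	/* Either bit set means the scan already ran; reuse its result. */
	if (valid || test_bit(CHECK_INVALID, flags))
		return valid;

	/* First call: run the scan once and record the outcome. */
	if (do_zone_scan())
		set_bit(CHECK_VALID, flags);
	else
		set_bit(CHECK_INVALID, flags);

	return test_bit(CHECK_VALID, flags);
}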
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 03d85c100dcc..62d9c1cf6326 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -60,6 +60,7 @@ extern void cma_reserve_pages_on_error(struct cma *cma);
#ifdef CONFIG_CMA
struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
bool cma_free_folio(struct cma *cma, const struct folio *folio);
+bool cma_validate_zones(struct cma *cma);
#else
static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
{
@@ -70,6 +71,10 @@ static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
{
return false;
}
+static inline bool cma_validate_zones(struct cma *cma)
+{
+ return false;
+}
#endif
#endif
diff --git a/mm/cma.c b/mm/cma.c
index 61ad4fd2f62d..5e1d169e24fa 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -99,6 +99,49 @@ static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
spin_unlock_irqrestore(&cma->lock, flags);
}
+/*
+ * Check if a CMA area contains no ranges that intersect with
+ * multiple zones. Store the result in the flags in case
+ * this gets called more than once.
+ */
+bool cma_validate_zones(struct cma *cma)
+{
+ int r;
+ unsigned long base_pfn;
+ struct cma_memrange *cmr;
+ bool valid_bit_set;
+
+ /*
+ * If already validated, return result of previous check.
+ * Either the valid or invalid bit will be set if this
+ * check has already been done. If neither is set, the
+ * check has not been performed yet.
+ */
+ valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
+ if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
+ return valid_bit_set;
+
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ base_pfn = cmr->base_pfn;
+
+ /*
+ * alloc_contig_range() requires the pfn range specified
+ * to be in the same zone. Simplify by forcing the entire
+ * CMA resv range to be in the same zone.
+ */
+ WARN_ON_ONCE(!pfn_valid(base_pfn));
+ if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
+ set_bit(CMA_ZONES_INVALID, &cma->flags);
+ return false;
+ }
+ }
+
+ set_bit(CMA_ZONES_VALID, &cma->flags);
+
+ return true;
+}
+
static void __init cma_activate_area(struct cma *cma)
{
unsigned long pfn, base_pfn;
@@ -113,19 +156,12 @@ static void __init cma_activate_area(struct cma *cma)
goto cleanup;
}
+ if (!cma_validate_zones(cma))
+ goto cleanup;
+
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
base_pfn = cmr->base_pfn;
-
- /*
- * alloc_contig_range() requires the pfn range specified
- * to be in the same zone. Simplify by forcing the entire
- * CMA resv range to be in the same zone.
- */
- WARN_ON_ONCE(!pfn_valid(base_pfn));
- if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count))
- goto cleanup;
-
for (pfn = base_pfn; pfn < base_pfn + cmr->count;
pfn += pageblock_nr_pages)
init_cma_reserved_pageblock(pfn_to_page(pfn));
@@ -145,7 +181,7 @@ static void __init cma_activate_area(struct cma *cma)
bitmap_free(cma->ranges[r].bitmap);
/* Expose all pages to the buddy, they are useless for CMA. */
- if (!cma->reserve_pages_on_error) {
+ if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
for (r = 0; r < allocrange; r++) {
cmr = &cma->ranges[r];
for (pfn = cmr->base_pfn;
@@ -172,7 +208,7 @@ core_initcall(cma_init_reserved_areas);
void __init cma_reserve_pages_on_error(struct cma *cma)
{
- cma->reserve_pages_on_error = true;
+ set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
}
static int __init cma_new_area(const char *name, phys_addr_t size,
diff --git a/mm/cma.h b/mm/cma.h
index ff79dba5508c..bddc84b3cd96 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -49,11 +49,17 @@ struct cma {
/* kobject requires dynamic object */
struct cma_kobject *cma_kobj;
#endif
- bool reserve_pages_on_error;
+ unsigned long flags;
/* NUMA node (NUMA_NO_NODE if unspecified) */
int nid;
};
+enum cma_flags {
+ CMA_RESERVE_PAGES_ON_ERROR,
+ CMA_ZONES_VALID,
+ CMA_ZONES_INVALID,
+};
+
extern struct cma cma_areas[MAX_CMA_AREAS];
extern unsigned int cma_area_count;
--
2.48.1.711.g2feabab25a-goog