From: Frank van der Linden <fvdl@google.com>
To: akpm@linux-foundation.org, muchun.song@linux.dev,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: hannes@cmpxchg.org, david@redhat.com, roman.gushchin@linux.dev,
Frank van der Linden <fvdl@google.com>
Subject: [RFC PATCH 02/12] mm/cma: clean up flag handling a bit
Date: Mon, 15 Sep 2025 19:51:43 +0000
Message-ID: <20250915195153.462039-3-fvdl@google.com>
In-Reply-To: <20250915195153.462039-1-fvdl@google.com>

Atomic bit operations aren't needed for the cma flags field, so
switch their manipulation over to normal AND/OR operations.

Also export the bit values in linux/cma.h, as we will be adding
publicly used values later.

No functional change.

Signed-off-by: Frank van der Linden <fvdl@google.com>
---
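To make the mechanical change easier to review, here is a minimal,
compilable sketch of the old and new flag-handling styles. The names
and the userspace BIT()/main() scaffolding are illustrative stand-ins
for the kernel helpers, not code from this patch:

  /* Illustrative only: userspace stand-ins for the kernel's BIT(),
   * set_bit() and test_bit(). Build with: cc -o flagdemo flagdemo.c
   */
  #include <stdio.h>

  #define BIT(n)	(1UL << (n))

  /* New style: enum entries are bit numbers, public names are masks. */
  enum example_flags { __EXAMPLE_ACTIVATED };
  #define EXAMPLE_ACTIVATED	BIT(__EXAMPLE_ACTIVATED)

  int main(void)
  {
  	unsigned long flags = 0;

  	/* Old style: set_bit(EXAMPLE_ACTIVATED, &flags), with the enum
  	 * value used as a bit index, is an atomic read-modify-write.
  	 * The plain OR below is not atomic, which is fine when there
  	 * are no concurrent updaters.
  	 */
  	flags |= EXAMPLE_ACTIVATED;

  	if (flags & EXAMPLE_ACTIVATED)	/* was: test_bit(...) */
  		printf("activated\n");
  	return 0;
  }
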
 include/linux/cma.h | 12 ++++++++++++
 mm/cma.c            | 16 ++++++++--------
 mm/cma.h            |  7 -------
 3 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..5c3fdc5da908 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -20,6 +20,18 @@
 #define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
 #define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
 
+enum cma_flags {
+	__CMA_RESERVE_PAGES_ON_ERROR,
+	__CMA_ZONES_VALID,
+	__CMA_ZONES_INVALID,
+	__CMA_ACTIVATED,
+};
+
+#define CMA_RESERVE_PAGES_ON_ERROR	BIT(__CMA_RESERVE_PAGES_ON_ERROR)
+#define CMA_ZONES_VALID			BIT(__CMA_ZONES_VALID)
+#define CMA_ZONES_INVALID		BIT(__CMA_ZONES_INVALID)
+#define CMA_ACTIVATED			BIT(__CMA_ACTIVATED)
+
 struct cma;
 
 extern unsigned long totalcma_pages;

diff --git a/mm/cma.c b/mm/cma.c
index 2ffa4befb99a..549d85b2e3a3 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -111,8 +111,8 @@ bool cma_validate_zones(struct cma *cma)
 	 * check has already been done. If neither is set, the
 	 * check has not been performed yet.
 	 */
-	valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
-	if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
+	valid_bit_set = (cma->flags & CMA_ZONES_VALID);
+	if (valid_bit_set || (cma->flags & CMA_ZONES_INVALID))
 		return valid_bit_set;
 
 	for (r = 0; r < cma->nranges; r++) {
@@ -126,12 +126,12 @@ bool cma_validate_zones(struct cma *cma)
 		 */
 		WARN_ON_ONCE(!pfn_valid(base_pfn));
 		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
-			set_bit(CMA_ZONES_INVALID, &cma->flags);
+			cma->flags |= CMA_ZONES_INVALID;
 			return false;
 		}
 	}
 
-	set_bit(CMA_ZONES_VALID, &cma->flags);
+	cma->flags |= CMA_ZONES_VALID;
 
 	return true;
 }
@@ -176,7 +176,7 @@ static void __init cma_activate_area(struct cma *cma)
 	INIT_HLIST_HEAD(&cma->mem_head);
 	spin_lock_init(&cma->mem_head_lock);
 #endif
-	set_bit(CMA_ACTIVATED, &cma->flags);
+	cma->flags |= CMA_ACTIVATED;
 
 	return;
 
@@ -185,7 +185,7 @@ static void __init cma_activate_area(struct cma *cma)
 		bitmap_free(cma->ranges[r].bitmap);
 
 	/* Expose all pages to the buddy, they are useless for CMA. */
-	if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
+	if (!(cma->flags & CMA_RESERVE_PAGES_ON_ERROR)) {
 		for (r = 0; r < allocrange; r++) {
 			cmr = &cma->ranges[r];
 			end_pfn = cmr->base_pfn + cmr->count;
@@ -211,7 +211,7 @@ core_initcall(cma_init_reserved_areas);
 
 void __init cma_reserve_pages_on_error(struct cma *cma)
 {
-	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
+	cma->flags |= CMA_RESERVE_PAGES_ON_ERROR;
 }
 
 static int __init cma_new_area(const char *name, phys_addr_t size,
@@ -1085,7 +1085,7 @@ void __init *cma_reserve_early(struct cma *cma, unsigned long size)
 	/*
 	 * Can only be called early in init.
 	 */
-	if (test_bit(CMA_ACTIVATED, &cma->flags))
+	if (cma->flags & CMA_ACTIVATED)
 		return NULL;
 
 	if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES))

diff --git a/mm/cma.h b/mm/cma.h
index c70180c36559..25b696774c6a 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -64,13 +64,6 @@ struct cma {
 	int nid;
 };
 
-enum cma_flags {
-	CMA_RESERVE_PAGES_ON_ERROR,
-	CMA_ZONES_VALID,
-	CMA_ZONES_INVALID,
-	CMA_ACTIVATED,
-};
-
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned int cma_area_count;
--
2.51.0.384.g4c02a37b29-goog
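
A side note on the two zone bits this patch carries over:
CMA_ZONES_VALID and CMA_ZONES_INVALID are deliberately separate bits,
so that "neither set" can mean "not checked yet" and
cma_validate_zones() can cache its verdict. Below is a standalone
sketch of that tri-state pattern using the same mask values as the new
header; the stubbed-out check and the main() driver are assumptions
for illustration, not kernel code:

  #include <stdbool.h>
  #include <stdio.h>

  #define BIT(n)			(1UL << (n))
  #define CMA_ZONES_VALID		BIT(1)	/* matches __CMA_ZONES_VALID = 1 */
  #define CMA_ZONES_INVALID	BIT(2)	/* matches __CMA_ZONES_INVALID = 2 */

  /* Shaped like cma_validate_zones(): if either bit is already set,
   * return the cached verdict; otherwise run the check once and
   * record the result in the flags word.
   */
  static bool zones_valid(unsigned long *flags, bool check_passes)
  {
  	bool valid_bit_set = (*flags & CMA_ZONES_VALID);

  	if (valid_bit_set || (*flags & CMA_ZONES_INVALID))
  		return valid_bit_set;		/* cached result */

  	if (!check_passes) {
  		*flags |= CMA_ZONES_INVALID;
  		return false;
  	}
  	*flags |= CMA_ZONES_VALID;
  	return true;
  }

  int main(void)
  {
  	unsigned long flags = 0;

  	printf("%d\n", zones_valid(&flags, true));	/* runs the check: 1 */
  	printf("%d\n", zones_valid(&flags, false));	/* cached verdict: 1 */
  	return 0;
  }
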
Thread overview: 30+ messages
2025-09-15 19:51 [RFC PATCH 00/12] CMA balancing Frank van der Linden
2025-09-15 19:51 ` [RFC PATCH 01/12] mm/cma: add tunable for CMA fallback limit Frank van der Linden
2025-09-16 20:23 ` Rik van Riel
2025-09-15 19:51 ` Frank van der Linden [this message]
2025-09-16 20:25 ` [RFC PATCH 02/12] mm/cma: clean up flag handling a bit Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 03/12] mm/cma: add flags argument to init functions Frank van der Linden
2025-09-16 21:16 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 04/12] mm/cma: keep a global sorted list of CMA ranges Frank van der Linden
2025-09-16 22:25 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 05/12] mm/cma: add helper functions for CMA balancing Frank van der Linden
2025-09-16 22:57 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 06/12] mm/cma: define and act on CMA_BALANCE flag Frank van der Linden
2025-09-17 3:30 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 07/12] mm/compaction: optionally use a different isolate function Frank van der Linden
2025-09-17 12:53 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 08/12] mm/compaction: simplify isolation order checks a bit Frank van der Linden
2025-09-17 14:43 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 09/12] mm/cma: introduce CMA balancing Frank van der Linden
2025-09-17 15:17 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 10/12] mm/hugetlb: do explicit CMA balancing Frank van der Linden
2025-09-17 15:21 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 11/12] mm/cma: rebalance CMA when changing cma_first_limit Frank van der Linden
2025-09-17 15:22 ` Rik van Riel
2025-09-15 19:51 ` [RFC PATCH 12/12] mm/cma: add CMA balance VM event counter Frank van der Linden
2025-09-17 15:22 ` Rik van Riel
2025-09-17 0:50 ` [RFC PATCH 00/12] CMA balancing Roman Gushchin
2025-09-17 22:04 ` Frank van der Linden
2025-09-18 22:12 ` Roman Gushchin
2025-09-25 22:11 ` [RFC PATCH 13/12] mm,cma: add compaction cma balance helper for direct reclaim Rik van Riel
2025-09-25 22:11 ` [RFC PATCH 00/12] mm,cma: call CMA balancing from page reclaim code Rik van Riel