From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: david@redhat.com, lorenzo.stoakes@oracle.com,
Liam.Howlett@oracle.com, vbabka@suse.cz,
alexandru.elisei@arm.com, peterx@redhat.com, sj@kernel.org,
rppt@kernel.org, mhocko@suse.com, corbet@lwn.net,
axboe@kernel.dk, viro@zeniv.linux.org.uk, brauner@kernel.org,
hch@infradead.org, jack@suse.cz, willy@infradead.org,
m.szyprowski@samsung.com, robin.murphy@arm.com,
hannes@cmpxchg.org, zhengqi.arch@bytedance.com,
shakeel.butt@linux.dev, axelrasmussen@google.com,
yuanchu@google.com, weixugc@google.com, minchan@kernel.org,
surenb@google.com, linux-mm@kvack.org,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-block@vger.kernel.org, linux-fsdevel@vger.kernel.org,
iommu@lists.linux.dev, Minchan Kim <minchan@google.com>
Subject: [PATCH v2 8/8] mm: integrate GCMA with CMA using dt-bindings
Date: Sun, 26 Oct 2025 13:36:11 -0700
Message-ID: <20251026203611.1608903-9-surenb@google.com>
In-Reply-To: <20251026203611.1608903-1-surenb@google.com>
Introduce a new "guarantee" property for shared-dma-pool to enable
GCMA-backed memory pools. Memory allocations from such pools will
have low latency and will be guaranteed to succeed as long as there
is contiguous space inside the reservation.
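For illustration, a GCMA-backed pool could be declared like this (the
label, node name and addresses below are made up for the example; note
that shared-dma-pool CMA regions must also carry "reusable"):

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		gcma_pool: gcma@100000000 {
			compatible = "shared-dma-pool";
			reusable;
			guarantee;
			reg = <0x1 0x00000000 0x0 0x4000000>; /* 64 MiB */
		};
	};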
The dt-schema for shared-dma-pool [1] will need to be updated once this
patch is accepted.
[1] https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/reserved-memory/shared-dma-pool.yaml
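Since the flag is recorded per CMA area, it is also exposed through the
existing CMA sysfs interface, for example:

	$ cat /sys/kernel/mm/cma/<area>/gcma
	1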
Signed-off-by: Minchan Kim <minchan@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
include/linux/cma.h | 11 +++++++++--
kernel/dma/contiguous.c | 11 ++++++++++-
mm/Kconfig | 2 +-
mm/cma.c | 37 +++++++++++++++++++++++++++----------
mm/cma.h | 1 +
mm/cma_sysfs.c | 10 ++++++++++
mm/gcma.c | 2 +-
7 files changed, 59 insertions(+), 15 deletions(-)
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..3ec2e76a8666 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -43,10 +43,17 @@ static inline int __init cma_declare_contiguous(phys_addr_t base,
extern int __init cma_declare_contiguous_multi(phys_addr_t size,
phys_addr_t align, unsigned int order_per_bit,
const char *name, struct cma **res_cma, int nid);
-extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+extern int __cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
const char *name,
- struct cma **res_cma);
+ struct cma **res_cma, bool gcma);
+static inline int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ unsigned int order_per_bit,
+ const char *name,
+ struct cma **res_cma)
+{
+ return __cma_init_reserved_mem(base, size, order_per_bit, name, res_cma, false);
+}
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index d9b9dcba6ff7..73a699ef0377 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -461,6 +461,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
unsigned long node = rmem->fdt_node;
bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
struct cma *cma;
+ bool gcma;
int err;
if (size_cmdline != -1 && default_cma) {
@@ -478,7 +479,15 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
return -EINVAL;
}
- err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
+ gcma = !!of_get_flat_dt_prop(node, "guarantee", NULL);
+#ifndef CONFIG_GCMA
+ if (gcma) {
+ pr_err("Reserved memory: unable to setup GCMA region, GCMA is not enabled\n");
+ return -EINVAL;
+ }
+#endif
+ err = __cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name,
+ &cma, gcma);
if (err) {
pr_err("Reserved memory: unable to setup CMA region\n");
return err;
diff --git a/mm/Kconfig b/mm/Kconfig
index 3166fde83340..1c8b20d90790 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1099,7 +1099,7 @@ config CMA_AREAS
config GCMA
bool "GCMA (Guaranteed Contiguous Memory Allocator)"
- depends on CLEANCACHE
+ depends on CLEANCACHE && CMA
help
This enables the Guaranteed Contiguous Memory Allocator to allow
low latency guaranteed contiguous memory allocations. Memory
diff --git a/mm/cma.c b/mm/cma.c
index 813e6dc7b095..71fb494ef2a4 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -28,6 +28,7 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
+#include <linux/gcma.h>
#include <trace/events/cma.h>
#include "internal.h"
@@ -161,11 +162,18 @@ static void __init cma_activate_area(struct cma *cma)
count = early_pfn[r] - cmr->base_pfn;
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
bitmap_set(cmr->bitmap, 0, bitmap_count);
+ } else {
+ count = 0;
}
- for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count;
- pfn += pageblock_nr_pages)
- init_cma_reserved_pageblock(pfn_to_page(pfn));
+ if (cma->gcma) {
+ gcma_register_area(cma->name, early_pfn[r],
+ cma->count - count);
+ } else {
+ for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count;
+ pfn += pageblock_nr_pages)
+ init_cma_reserved_pageblock(pfn_to_page(pfn));
+ }
}
spin_lock_init(&cma->lock);
@@ -252,7 +260,7 @@ static void __init cma_drop_area(struct cma *cma)
}
/**
- * cma_init_reserved_mem() - create custom contiguous area from reserved memory
+ * __cma_init_reserved_mem() - create custom contiguous area from reserved memory
* @base: Base address of the reserved area
* @size: Size of the reserved area (in bytes),
* @order_per_bit: Order of pages represented by one bit on bitmap.
@@ -260,13 +268,14 @@ static void __init cma_drop_area(struct cma *cma)
* the area will be set to "cmaN", where N is a running counter of
* used areas.
* @res_cma: Pointer to store the created cma region.
+ * @gcma: Flag marking the area as GCMA-backed (guaranteed allocations).
*
* This function creates custom contiguous area from already reserved memory.
*/
-int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
- unsigned int order_per_bit,
- const char *name,
- struct cma **res_cma)
+int __init __cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ unsigned int order_per_bit,
+ const char *name,
+ struct cma **res_cma, bool gcma)
{
struct cma *cma;
int ret;
@@ -297,6 +306,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
cma->ranges[0].count = cma->count;
cma->nranges = 1;
cma->nid = NUMA_NO_NODE;
+ cma->gcma = gcma;
*res_cma = cma;
@@ -836,7 +846,11 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
spin_unlock_irq(&cma->lock);
mutex_lock(&cma->alloc_mutex);
- ret = alloc_contig_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
+ if (cma->gcma)
+ ret = gcma_alloc_range(pfn, count, gfp);
+ else
+ ret = alloc_contig_range(pfn, pfn + count,
+ ACR_FLAGS_CMA, gfp);
mutex_unlock(&cma->alloc_mutex);
if (!ret)
break;
@@ -1009,7 +1023,10 @@ bool cma_release(struct cma *cma, const struct page *pages,
if (r == cma->nranges)
return false;
- free_contig_range(pfn, count);
+ if (cma->gcma)
+ gcma_free_range(pfn, count);
+ else
+ free_contig_range(pfn, count);
cma_clear_bitmap(cma, cmr, pfn, count);
cma_sysfs_account_release_pages(cma, count);
trace_cma_release(cma->name, pfn, pages, count);
diff --git a/mm/cma.h b/mm/cma.h
index c70180c36559..3b09e8619082 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -49,6 +49,7 @@ struct cma {
char name[CMA_MAX_NAME];
int nranges;
struct cma_memrange ranges[CMA_MAX_RANGES];
+ bool gcma;
#ifdef CONFIG_CMA_SYSFS
/* the number of CMA page successful allocations */
atomic64_t nr_pages_succeeded;
diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c
index 97acd3e5a6a5..4ecc36270a4d 100644
--- a/mm/cma_sysfs.c
+++ b/mm/cma_sysfs.c
@@ -80,6 +80,15 @@ static ssize_t available_pages_show(struct kobject *kobj,
}
CMA_ATTR_RO(available_pages);
+static ssize_t gcma_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct cma *cma = cma_from_kobj(kobj);
+
+ return sysfs_emit(buf, "%d\n", cma->gcma);
+}
+CMA_ATTR_RO(gcma);
+
static void cma_kobj_release(struct kobject *kobj)
{
struct cma *cma = cma_from_kobj(kobj);
@@ -95,6 +104,7 @@ static struct attribute *cma_attrs[] = {
&release_pages_success_attr.attr,
&total_pages_attr.attr,
&available_pages_attr.attr,
+ &gcma_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(cma);
diff --git a/mm/gcma.c b/mm/gcma.c
index b86f82b8fe9d..fcf1d3c0283f 100644
--- a/mm/gcma.c
+++ b/mm/gcma.c
@@ -119,7 +119,7 @@ int gcma_register_area(const char *name,
folio_set_count(folio, 0);
list_add(&folio->lru, &folios);
}
-
+ folio_zone(pfn_folio(start_pfn))->cma_pages += count;
cleancache_backend_put_folios(pool_id, &folios);
spin_lock(&gcma_area_lock);
--
2.51.1.851.g4ebd6896fd-goog