* [PATCH 1/2] mm/damon: introduce damon_rand_fast() for per-ctx PRNG
@ 2026-04-23 12:23 Jiayuan Chen
2026-04-23 12:23 ` [PATCH 2/2] mm/damon/paddr: prefetch struct page of the next region Jiayuan Chen
0 siblings, 1 reply; 2+ messages in thread
From: Jiayuan Chen @ 2026-04-23 12:23 UTC (permalink / raw)
To: damon
Cc: Jiayuan Chen, Jiayuan Chen, SeongJae Park, Andrew Morton,
linux-mm, linux-kernel
From: Jiayuan Chen <jiayuan.chen@shopee.com>
damon_rand() on the sampling_addr hot path calls get_random_u32_below(),
which takes a local_lock_irqsave() around a per-CPU batched entropy pool
and periodically refills it with ChaCha20. On workloads with large
nr_regions (20k+), this shows up as a large fraction of kdamond CPU
time: the lock_acquire / local_lock pair plus __get_random_u32_below()
dominate perf profiles.
Introduce damon_rand_fast(), which uses a lockless lfsr113 generator
(struct rnd_state) held per damon_ctx and seeded from get_random_u64()
in damon_new_ctx(). kdamond is the sole consumer of a given ctx, so no
synchronization is required. Range mapping uses Lemire's
(u64)rnd * span >> 32 to avoid a 64-bit division; residual bias is
bounded by span / 2^32, negligible for statistical sampling. Note the
span is truncated to 32 bits, which matches the existing damon_rand()
behavior since get_random_u32_below() also takes a u32 bound.
The new helper is intended for the sampling-address hot path only.
damon_rand() is kept for call sites that run outside the kdamond loop
and/or have no ctx available (damon_split_regions_of(), kunit tests).
Convert the two hot callers:
- __damon_pa_prepare_access_check()
- __damon_va_prepare_access_check()
lfsr113 is a linear PRNG and MUST NOT be used for anything
security-sensitive. DAMON's sampling_addr is not exposed to userspace
and is only consumed as a probe point for PTE accessed-bit sampling, so
a non-cryptographic PRNG is appropriate here.
Tested with paddr monitoring and max_nr_regions=20000: kdamond CPU
usage reduced from ~72% to ~50% of one core.
Cc: Jiayuan Chen <jiayuan.chen@linux.dev>
Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
---
include/linux/damon.h | 28 ++++++++++++++++++++++++++++
mm/damon/core.c | 2 ++
mm/damon/paddr.c | 10 +++++-----
mm/damon/vaddr.c | 9 +++++----
4 files changed, 40 insertions(+), 9 deletions(-)
diff --git a/include/linux/damon.h b/include/linux/damon.h
index f2cdb7c3f5e6..0afdc08119c8 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -10,6 +10,7 @@
#include <linux/memcontrol.h>
#include <linux/mutex.h>
+#include <linux/prandom.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/random.h>
@@ -843,8 +844,35 @@ struct damon_ctx {
struct list_head adaptive_targets;
struct list_head schemes;
+
+ /*
+ * Per-ctx lockless PRNG state for damon_rand_fast(). Seeded from
+ * get_random_u64() in damon_new_ctx(). Owned exclusively by the
+ * kdamond thread of this ctx, so no locking is required.
+ */
+ struct rnd_state rnd_state;
};
+/*
+ * damon_rand_fast - per-ctx PRNG variant of damon_rand() for hot paths.
+ *
+ * Uses the lockless lfsr113 state kept in @ctx->rnd_state. Safe because
+ * kdamond is the single consumer of a given ctx, so no synchronization
+ * is required. Quality is sufficient for statistical sampling; do NOT
+ * use for any security-sensitive randomness.
+ *
+ * Range mapping uses Lemire's (u64)rnd * span >> 32 to avoid a division;
+ * bias is bounded by span / 2^32, negligible for DAMON.
+ */
+static inline unsigned long damon_rand_fast(struct damon_ctx *ctx,
+ unsigned long l, unsigned long r)
+{
+ u32 rnd = prandom_u32_state(&ctx->rnd_state);
+ u32 span = (u32)(r - l);
+
+ return l + (unsigned long)(((u64)rnd * span) >> 32);
+}
+
static inline struct damon_region *damon_next_region(struct damon_region *r)
{
return container_of(r->list.next, struct damon_region, list);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 3dbbbfdeff71..c3779c674601 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -607,6 +607,8 @@ struct damon_ctx *damon_new_ctx(void)
INIT_LIST_HEAD(&ctx->adaptive_targets);
INIT_LIST_HEAD(&ctx->schemes);
+ prandom_seed_state(&ctx->rnd_state, get_random_u64());
+
return ctx;
}
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 5cdcc5037cbc..b5e1197f2ba2 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -48,12 +48,12 @@ static void damon_pa_mkold(phys_addr_t paddr)
folio_put(folio);
}
-static void __damon_pa_prepare_access_check(struct damon_region *r,
- unsigned long addr_unit)
+static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
+ struct damon_region *r)
{
- r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
+ r->sampling_addr = damon_rand_fast(ctx, r->ar.start, r->ar.end);
- damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, addr_unit));
+ damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, ctx->addr_unit));
}
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
@@ -63,7 +63,7 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t)
- __damon_pa_prepare_access_check(r, ctx->addr_unit);
+ __damon_pa_prepare_access_check(ctx, r);
}
}
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index b069dbc7e3d2..6cf06ffdf880 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -332,10 +332,11 @@ static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
* Functions for the access checking of the regions
*/
-static void __damon_va_prepare_access_check(struct mm_struct *mm,
- struct damon_region *r)
+static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
+ struct mm_struct *mm,
+ struct damon_region *r)
{
- r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
+ r->sampling_addr = damon_rand_fast(ctx, r->ar.start, r->ar.end);
damon_va_mkold(mm, r->sampling_addr);
}
@@ -351,7 +352,7 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
if (!mm)
continue;
damon_for_each_region(r, t)
- __damon_va_prepare_access_check(mm, r);
+ __damon_va_prepare_access_check(ctx, mm, r);
mmput(mm);
}
}
--
2.43.0
^ permalink raw reply [flat|nested] 2+ messages in thread
* [PATCH 2/2] mm/damon/paddr: prefetch struct page of the next region
2026-04-23 12:23 [PATCH 1/2] mm/damon: introduce damon_rand_fast() for per-ctx PRNG Jiayuan Chen
@ 2026-04-23 12:23 ` Jiayuan Chen
0 siblings, 0 replies; 2+ messages in thread
From: Jiayuan Chen @ 2026-04-23 12:23 UTC (permalink / raw)
To: damon
Cc: Jiayuan Chen, Jiayuan Chen, SeongJae Park, Andrew Morton,
linux-mm, linux-kernel
From: Jiayuan Chen <jiayuan.chen@shopee.com>
In paddr mode with large nr_regions (20k+), damon_get_folio() dominates
kdamond CPU time. perf annotate shows ~98% of its samples on a single
load: reading page->compound_head for page_folio(). DAMON samples a
random PFN per region, so the access pattern scatters across the
vmemmap with no locality that hardware prefetchers can exploit; every
compound_head load misses to DRAM (~250 cycles).
Issue a software prefetch for the next region's struct page one
iteration before it is needed. In __damon_pa_prepare_access_check(),
the next region's sampling_addr is picked eagerly (one iteration ahead
of where its mkold will run) so the prefetched line is usable, and the
first region of each epoch is handled with a one-off branch since it
has no predecessor to have pre-picked its addr. damon_pa_check_accesses()
just prefetches based on sampling_addr already populated by the
preceding prepare pass.
Two details worth calling out:
- prefetchw() rather than prefetch(): compound_head (+0x8) and
_refcount (+0x34) share a 64B cacheline. The subsequent
folio_try_get() / folio_put() atomics write _refcount, so bringing
the line in exclusive state avoids a later S->M coherence upgrade.
- pfn_to_page() rather than pfn_to_online_page(): on
CONFIG_SPARSEMEM_VMEMMAP the former is pure arithmetic
(vmemmap + pfn), while the latter walks the mem_section table. The
mem_section lookup itself incurs a DRAM miss for random PFNs, which
would serialize what is supposed to be a non-blocking hint - an
earlier attempt that used pfn_to_online_page() saw the prefetch
path's internal stall dominate perf (~91% skid on the converge
point after the call). Prefetching an unmapped vmemmap entry is
safe: the hint is dropped without faulting.
Concurrency: list_next_entry() and &list == &head comparisons are safe
without locking because kdamond is the sole mutator of the region list
of its ctx; external threads must go through damon_call() which defers
execution to the same kdamond thread between sampling iterations
(see documentation on struct damon_ctx and damon_call()).
Tested with paddr monitoring, max_nr_regions=20000, and stress-ng-vm
consuming ~90% of memory across 8 workers: kdamond CPU further reduced
from ~50% to ~40% of one core on top of the earlier damon_rand_fast()
change.
Cc: Jiayuan Chen <jiayuan.chen@linux.dev>
Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
---
mm/damon/paddr.c | 31 ++++++++++++++++++++++++++++---
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index b5e1197f2ba2..99bdf2b88cf1 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -10,6 +10,7 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
+#include <linux/prefetch.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
@@ -48,10 +49,29 @@ static void damon_pa_mkold(phys_addr_t paddr)
folio_put(folio);
}
+static void damon_pa_prefetch_page(unsigned long sampling_addr,
+ unsigned long addr_unit)
+{
+ phys_addr_t paddr = damon_pa_phys_addr(sampling_addr, addr_unit);
+
+ prefetchw(pfn_to_page(PHYS_PFN(paddr)));
+}
+
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
+ struct damon_target *t,
struct damon_region *r)
{
- r->sampling_addr = damon_rand_fast(ctx, r->ar.start, r->ar.end);
+ struct damon_region *next = list_next_entry(r, list);
+
+ /* First region has no predecessor to have pre-picked its addr. */
+ if (r->list.prev == &t->regions_list)
+ r->sampling_addr = damon_rand_fast(ctx, r->ar.start, r->ar.end);
+
+ if (&next->list != &t->regions_list) {
+ next->sampling_addr = damon_rand_fast(ctx, next->ar.start,
+ next->ar.end);
+ damon_pa_prefetch_page(next->sampling_addr, ctx->addr_unit);
+ }
damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, ctx->addr_unit));
}
@@ -63,7 +83,7 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t)
- __damon_pa_prepare_access_check(ctx, r);
+ __damon_pa_prepare_access_check(ctx, t, r);
}
}
@@ -106,11 +126,16 @@ static void __damon_pa_check_access(struct damon_region *r,
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
struct damon_target *t;
- struct damon_region *r;
+ struct damon_region *r, *next;
unsigned int max_nr_accesses = 0;
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t) {
+ next = list_next_entry(r, list);
+ if (&next->list != &t->regions_list)
+ damon_pa_prefetch_page(next->sampling_addr,
+ ctx->addr_unit);
+
__damon_pa_check_access(
r, &ctx->attrs, ctx->addr_unit);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
--
2.43.0
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2026-04-23 12:24 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-23 12:23 [PATCH 1/2] mm/damon: introduce damon_rand_fast() for per-ctx PRNG Jiayuan Chen
2026-04-23 12:23 ` [PATCH 2/2] mm/damon/paddr: prefetch struct page of the next region Jiayuan Chen
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox