* [PATCH 7/8] mm/damon: rename DAMON_MIN_REGION to DAMON_MIN_REGION_SZ
From: SeongJae Park @ 2026-01-17 17:52 UTC
To: Andrew Morton; +Cc: SeongJae Park, damon, linux-kernel, linux-mm
The macro is for the default minimum size of each DAMON region. A
reader was once confused about whether it means the minimum number of
total DAMON regions, which is in fact set via
damon_attrs->min_nr_regions. Make the name more explicit.
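For illustration, a minimal sketch of the two distinct knobs the old
name conflated. This is not kernel code: PAGE_SIZE is stubbed so the
sketch stands alone, and the _sketch suffix marks hypothetical names.

	/* Stub so the sketch compiles outside the kernel tree. */
	#define PAGE_SIZE		4096UL

	/* Minimum size, in bytes, of each individual DAMON region. */
	#define DAMON_MIN_REGION_SZ	PAGE_SIZE

	struct damon_attrs_sketch {
		/* Minimum *number* of regions: a count, not a size. */
		unsigned long min_nr_regions;
	};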
Signed-off-by: SeongJae Park <sj@kernel.org>
---
 include/linux/damon.h        |  2 +-
 mm/damon/core.c              |  2 +-
 mm/damon/lru_sort.c          |  2 +-
 mm/damon/reclaim.c           |  2 +-
 mm/damon/sysfs.c             |  2 +-
 mm/damon/tests/vaddr-kunit.h |  2 +-
 mm/damon/vaddr.c             | 24 ++++++++++++------------
 7 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/include/linux/damon.h b/include/linux/damon.h
index bdca28e15e40..5bf8db1d78fe 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -15,7 +15,7 @@
#include <linux/random.h>
/* Minimal region size. Every damon_region is aligned by this. */
-#define DAMON_MIN_REGION PAGE_SIZE
+#define DAMON_MIN_REGION_SZ PAGE_SIZE
/* Max priority score for DAMON-based operation schemes */
#define DAMOS_MAX_SCORE (99)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index ae5b772ceffb..5508bc794172 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -553,7 +553,7 @@ struct damon_ctx *damon_new_ctx(void)
ctx->attrs.max_nr_regions = 1000;
ctx->addr_unit = 1;
- ctx->min_sz_region = DAMON_MIN_REGION;
+ ctx->min_sz_region = DAMON_MIN_REGION_SZ;
INIT_LIST_HEAD(&ctx->adaptive_targets);
INIT_LIST_HEAD(&ctx->schemes);
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index bedb9134d286..9dde096a9064 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -298,7 +298,7 @@ static int damon_lru_sort_apply_parameters(void)
if (!monitor_region_start && !monitor_region_end)
addr_unit = 1;
param_ctx->addr_unit = addr_unit;
- param_ctx->min_sz_region = max(DAMON_MIN_REGION / addr_unit, 1);
+ param_ctx->min_sz_region = max(DAMON_MIN_REGION_SZ / addr_unit, 1);
if (!damon_lru_sort_mon_attrs.sample_interval) {
err = -EINVAL;
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 55df43e241c5..c343622a2f52 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -208,7 +208,7 @@ static int damon_reclaim_apply_parameters(void)
if (!monitor_region_start && !monitor_region_end)
addr_unit = 1;
param_ctx->addr_unit = addr_unit;
- param_ctx->min_sz_region = max(DAMON_MIN_REGION / addr_unit, 1);
+ param_ctx->min_sz_region = max(DAMON_MIN_REGION_SZ / addr_unit, 1);
if (!damon_reclaim_mon_attrs.aggr_interval) {
err = -EINVAL;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 4de25708b05a..57d36d60f329 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1470,7 +1470,7 @@ static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
/* addr_unit is respected by only DAMON_OPS_PADDR */
if (sys_ctx->ops_id == DAMON_OPS_PADDR)
ctx->min_sz_region = max(
- DAMON_MIN_REGION / sys_ctx->addr_unit, 1);
+ DAMON_MIN_REGION_SZ / sys_ctx->addr_unit, 1);
err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
if (err)
return err;
diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h
index 30dc5459f1d2..cfae870178bf 100644
--- a/mm/damon/tests/vaddr-kunit.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -147,7 +147,7 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
damon_add_region(r, t);
}
- damon_set_regions(t, three_regions, 3, DAMON_MIN_REGION);
+ damon_set_regions(t, three_regions, 3, DAMON_MIN_REGION_SZ);
for (i = 0; i < nr_expected / 2; i++) {
r = __nth_region_of(t, i);
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 40c73adf1946..83ab3d8c3792 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -19,8 +19,8 @@
#include "ops-common.h"
#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
-#undef DAMON_MIN_REGION
-#define DAMON_MIN_REGION 1
+#undef DAMON_MIN_REGION_SZ
+#define DAMON_MIN_REGION_SZ 1
#endif
/*
@@ -78,7 +78,7 @@ static int damon_va_evenly_split_region(struct damon_target *t,
orig_end = r->ar.end;
sz_orig = damon_sz_region(r);
- sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);
+ sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION_SZ);
if (!sz_piece)
return -EINVAL;
@@ -161,12 +161,12 @@ static int __damon_va_three_regions(struct mm_struct *mm,
swap(first_gap, second_gap);
/* Store the result */
- regions[0].start = ALIGN(start, DAMON_MIN_REGION);
- regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
- regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
- regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
- regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
- regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);
+ regions[0].start = ALIGN(start, DAMON_MIN_REGION_SZ);
+ regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION_SZ);
+ regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION_SZ);
+ regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION_SZ);
+ regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION_SZ);
+ regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION_SZ);
return 0;
}
@@ -259,8 +259,8 @@ static void __damon_va_init_regions(struct damon_ctx *ctx,
sz += regions[i].end - regions[i].start;
if (ctx->attrs.min_nr_regions)
sz /= ctx->attrs.min_nr_regions;
- if (sz < DAMON_MIN_REGION)
- sz = DAMON_MIN_REGION;
+ if (sz < DAMON_MIN_REGION_SZ)
+ sz = DAMON_MIN_REGION_SZ;
/* Set the initial three regions of the target */
for (i = 0; i < 3; i++) {
@@ -299,7 +299,7 @@ static void damon_va_update(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
if (damon_va_three_regions(t, three_regions))
continue;
- damon_set_regions(t, three_regions, 3, DAMON_MIN_REGION);
+ damon_set_regions(t, three_regions, 3, DAMON_MIN_REGION_SZ);
}
}
--
2.47.3
* [PATCH 8/8] mm/damon: rename min_sz_region of damon_ctx to min_region_sz
From: SeongJae Park @ 2026-01-17 17:52 UTC
To: Andrew Morton; +Cc: SeongJae Park, damon, linux-kernel, linux-mm
The 'min_sz_region' field of 'struct damon_ctx' represents the minimum
size of each DAMON region for the context. 'struct damos_access_pattern'
has a field of the same name. This confuses readers and makes 'grep'
results noisier. Rename the damon_ctx field to 'min_region_sz'.
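For illustration, a trimmed-down sketch of the name collision this
patch resolves. This is not kernel code: struct bodies are trimmed and
the _sketch suffix marks hypothetical names.

	/* Scheme access pattern: keeps its 'min_sz_region' field. */
	struct damos_access_pattern_sketch {
		unsigned long min_sz_region;
		unsigned long max_sz_region;
	};

	/*
	 * Monitoring context: its field is renamed, so grepping for
	 * 'min_region_sz' now matches only the context's copy.
	 */
	struct damon_ctx_sketch {
		unsigned long min_region_sz;	/* was 'min_sz_region' */
	};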
Signed-off-by: SeongJae Park <sj@kernel.org>
---
 include/linux/damon.h |  8 ++---
 mm/damon/core.c       | 69 ++++++++++++++++++++++---------------------
 mm/damon/lru_sort.c   |  4 +--
 mm/damon/reclaim.c    |  4 +--
 mm/damon/stat.c       |  2 +-
 mm/damon/sysfs.c      |  9 +++---
 6 files changed, 49 insertions(+), 47 deletions(-)
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 5bf8db1d78fe..a4fea23da857 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -773,7 +773,7 @@ struct damon_attrs {
*
* @ops: Set of monitoring operations for given use cases.
* @addr_unit: Scale factor for core to ops address conversion.
- * @min_sz_region: Minimum region size.
+ * @min_region_sz: Minimum region size.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
*/
@@ -818,7 +818,7 @@ struct damon_ctx {
/* public: */
struct damon_operations ops;
unsigned long addr_unit;
- unsigned long min_sz_region;
+ unsigned long min_region_sz;
struct list_head adaptive_targets;
struct list_head schemes;
@@ -907,7 +907,7 @@ static inline void damon_insert_region(struct damon_region *r,
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
- unsigned int nr_ranges, unsigned long min_sz_region);
+ unsigned int nr_ranges, unsigned long min_region_sz);
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
struct damon_attrs *attrs);
@@ -975,7 +975,7 @@ int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
unsigned long *start, unsigned long *end,
- unsigned long min_sz_region);
+ unsigned long min_region_sz);
#endif /* CONFIG_DAMON */
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 5508bc794172..70efbf22a2b4 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -203,7 +203,7 @@ static int damon_fill_regions_holes(struct damon_region *first,
* @t: the given target.
* @ranges: array of new monitoring target ranges.
* @nr_ranges: length of @ranges.
- * @min_sz_region: minimum region size.
+ * @min_region_sz: minimum region size.
*
* This function adds new regions to, or modify existing regions of a
* monitoring target to fit in specific ranges.
@@ -211,7 +211,7 @@ static int damon_fill_regions_holes(struct damon_region *first,
* Return: 0 if success, or negative error code otherwise.
*/
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
- unsigned int nr_ranges, unsigned long min_sz_region)
+ unsigned int nr_ranges, unsigned long min_region_sz)
{
struct damon_region *r, *next;
unsigned int i;
@@ -248,16 +248,16 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
/* no region intersects with this range */
newr = damon_new_region(
ALIGN_DOWN(range->start,
- min_sz_region),
- ALIGN(range->end, min_sz_region));
+ min_region_sz),
+ ALIGN(range->end, min_region_sz));
if (!newr)
return -ENOMEM;
damon_insert_region(newr, damon_prev_region(r), r, t);
} else {
/* resize intersecting regions to fit in this range */
first->ar.start = ALIGN_DOWN(range->start,
- min_sz_region);
- last->ar.end = ALIGN(range->end, min_sz_region);
+ min_region_sz);
+ last->ar.end = ALIGN(range->end, min_region_sz);
/* fill possible holes in the range */
err = damon_fill_regions_holes(first, last, t);
@@ -553,7 +553,7 @@ struct damon_ctx *damon_new_ctx(void)
ctx->attrs.max_nr_regions = 1000;
ctx->addr_unit = 1;
- ctx->min_sz_region = DAMON_MIN_REGION_SZ;
+ ctx->min_region_sz = DAMON_MIN_REGION_SZ;
INIT_LIST_HEAD(&ctx->adaptive_targets);
INIT_LIST_HEAD(&ctx->schemes);
@@ -1142,7 +1142,7 @@ static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
* If @src has no region, @dst keeps current regions.
*/
static int damon_commit_target_regions(struct damon_target *dst,
- struct damon_target *src, unsigned long src_min_sz_region)
+ struct damon_target *src, unsigned long src_min_region_sz)
{
struct damon_region *src_region;
struct damon_addr_range *ranges;
@@ -1159,7 +1159,7 @@ static int damon_commit_target_regions(struct damon_target *dst,
i = 0;
damon_for_each_region(src_region, src)
ranges[i++] = src_region->ar;
- err = damon_set_regions(dst, ranges, i, src_min_sz_region);
+ err = damon_set_regions(dst, ranges, i, src_min_region_sz);
kfree(ranges);
return err;
}
@@ -1167,11 +1167,11 @@ static int damon_commit_target_regions(struct damon_target *dst,
static int damon_commit_target(
struct damon_target *dst, bool dst_has_pid,
struct damon_target *src, bool src_has_pid,
- unsigned long src_min_sz_region)
+ unsigned long src_min_region_sz)
{
int err;
- err = damon_commit_target_regions(dst, src, src_min_sz_region);
+ err = damon_commit_target_regions(dst, src, src_min_region_sz);
if (err)
return err;
if (dst_has_pid)
@@ -1198,7 +1198,7 @@ static int damon_commit_targets(
err = damon_commit_target(
dst_target, damon_target_has_pid(dst),
src_target, damon_target_has_pid(src),
- src->min_sz_region);
+ src->min_region_sz);
if (err)
return err;
} else {
@@ -1225,7 +1225,7 @@ static int damon_commit_targets(
return -ENOMEM;
err = damon_commit_target(new_target, false,
src_target, damon_target_has_pid(src),
- src->min_sz_region);
+ src->min_region_sz);
if (err) {
damon_destroy_target(new_target, NULL);
return err;
@@ -1272,7 +1272,7 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
}
dst->ops = src->ops;
dst->addr_unit = src->addr_unit;
- dst->min_sz_region = src->min_sz_region;
+ dst->min_region_sz = src->min_region_sz;
return 0;
}
@@ -1305,8 +1305,8 @@ static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
if (ctx->attrs.min_nr_regions)
sz /= ctx->attrs.min_nr_regions;
- if (sz < ctx->min_sz_region)
- sz = ctx->min_sz_region;
+ if (sz < ctx->min_region_sz)
+ sz = ctx->min_region_sz;
return sz;
}
@@ -1696,7 +1696,7 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
* @t: The target of the region.
* @rp: The pointer to the region.
* @s: The scheme to be applied.
- * @min_sz_region: minimum region size.
+ * @min_region_sz: minimum region size.
*
* If a quota of a scheme has exceeded in a quota charge window, the scheme's
* action would applied to only a part of the target access pattern fulfilling
@@ -1714,7 +1714,8 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
* Return: true if the region should be entirely skipped, false otherwise.
*/
static bool damos_skip_charged_region(struct damon_target *t,
- struct damon_region **rp, struct damos *s, unsigned long min_sz_region)
+ struct damon_region **rp, struct damos *s,
+ unsigned long min_region_sz)
{
struct damon_region *r = *rp;
struct damos_quota *quota = &s->quota;
@@ -1736,11 +1737,11 @@ static bool damos_skip_charged_region(struct damon_target *t,
if (quota->charge_addr_from && r->ar.start <
quota->charge_addr_from) {
sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
- r->ar.start, min_sz_region);
+ r->ar.start, min_region_sz);
if (!sz_to_skip) {
- if (damon_sz_region(r) <= min_sz_region)
+ if (damon_sz_region(r) <= min_region_sz)
return true;
- sz_to_skip = min_sz_region;
+ sz_to_skip = min_region_sz;
}
damon_split_region_at(t, r, sz_to_skip);
r = damon_next_region(r);
@@ -1766,7 +1767,7 @@ static void damos_update_stat(struct damos *s,
static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
struct damon_region *r, struct damos_filter *filter,
- unsigned long min_sz_region)
+ unsigned long min_region_sz)
{
bool matched = false;
struct damon_target *ti;
@@ -1783,8 +1784,8 @@ static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
matched = target_idx == filter->target_idx;
break;
case DAMOS_FILTER_TYPE_ADDR:
- start = ALIGN_DOWN(filter->addr_range.start, min_sz_region);
- end = ALIGN_DOWN(filter->addr_range.end, min_sz_region);
+ start = ALIGN_DOWN(filter->addr_range.start, min_region_sz);
+ end = ALIGN_DOWN(filter->addr_range.end, min_region_sz);
/* inside the range */
if (start <= r->ar.start && r->ar.end <= end) {
@@ -1820,7 +1821,7 @@ static bool damos_core_filter_out(struct damon_ctx *ctx, struct damon_target *t,
s->core_filters_allowed = false;
damos_for_each_core_filter(filter, s) {
- if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
+ if (damos_filter_match(ctx, t, r, filter, ctx->min_region_sz)) {
if (filter->allow)
s->core_filters_allowed = true;
return !filter->allow;
@@ -1955,7 +1956,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
if (c->ops.apply_scheme) {
if (quota->esz && quota->charged_sz + sz > quota->esz) {
sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
- c->min_sz_region);
+ c->min_region_sz);
if (!sz)
goto update_stat;
damon_split_region_at(t, r, sz);
@@ -2003,7 +2004,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
if (quota->esz && quota->charged_sz >= quota->esz)
continue;
- if (damos_skip_charged_region(t, &r, s, c->min_sz_region))
+ if (damos_skip_charged_region(t, &r, s, c->min_region_sz))
continue;
if (s->max_nr_snapshots &&
@@ -2496,7 +2497,7 @@ static void damon_split_region_at(struct damon_target *t,
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs,
- unsigned long min_sz_region)
+ unsigned long min_region_sz)
{
struct damon_region *r, *next;
unsigned long sz_region, sz_sub = 0;
@@ -2506,13 +2507,13 @@ static void damon_split_regions_of(struct damon_target *t, int nr_subs,
sz_region = damon_sz_region(r);
for (i = 0; i < nr_subs - 1 &&
- sz_region > 2 * min_sz_region; i++) {
+ sz_region > 2 * min_region_sz; i++) {
/*
* Randomly select size of left sub-region to be at
* least 10 percent and at most 90% of original region
*/
sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
- sz_region / 10, min_sz_region);
+ sz_region / 10, min_region_sz);
/* Do not allow blank region */
if (sz_sub == 0 || sz_sub >= sz_region)
continue;
@@ -2552,7 +2553,7 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
nr_subregions = 3;
damon_for_each_target(t, ctx)
- damon_split_regions_of(t, nr_subregions, ctx->min_sz_region);
+ damon_split_regions_of(t, nr_subregions, ctx->min_region_sz);
last_nr_regions = nr_regions;
}
@@ -2902,7 +2903,7 @@ static bool damon_find_biggest_system_ram(unsigned long *start,
* @t: The monitoring target to set the region.
* @start: The pointer to the start address of the region.
* @end: The pointer to the end address of the region.
- * @min_sz_region: Minimum region size.
+ * @min_region_sz: Minimum region size.
*
* This function sets the region of @t as requested by @start and @end. If the
* values of @start and @end are zero, however, this function finds the biggest
@@ -2914,7 +2915,7 @@ static bool damon_find_biggest_system_ram(unsigned long *start,
*/
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
unsigned long *start, unsigned long *end,
- unsigned long min_sz_region)
+ unsigned long min_region_sz)
{
struct damon_addr_range addr_range;
@@ -2927,7 +2928,7 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
addr_range.start = *start;
addr_range.end = *end;
- return damon_set_regions(t, &addr_range, 1, min_sz_region);
+ return damon_set_regions(t, &addr_range, 1, min_region_sz);
}
/*
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 9dde096a9064..7bc5c0b2aea3 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -298,7 +298,7 @@ static int damon_lru_sort_apply_parameters(void)
if (!monitor_region_start && !monitor_region_end)
addr_unit = 1;
param_ctx->addr_unit = addr_unit;
- param_ctx->min_sz_region = max(DAMON_MIN_REGION_SZ / addr_unit, 1);
+ param_ctx->min_region_sz = max(DAMON_MIN_REGION_SZ / addr_unit, 1);
if (!damon_lru_sort_mon_attrs.sample_interval) {
err = -EINVAL;
@@ -345,7 +345,7 @@ static int damon_lru_sort_apply_parameters(void)
err = damon_set_region_biggest_system_ram_default(param_target,
&monitor_region_start,
&monitor_region_end,
- param_ctx->min_sz_region);
+ param_ctx->min_region_sz);
if (err)
goto out;
err = damon_commit_ctx(ctx, param_ctx);
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index c343622a2f52..43d76f5bed44 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -208,7 +208,7 @@ static int damon_reclaim_apply_parameters(void)
if (!monitor_region_start && !monitor_region_end)
addr_unit = 1;
param_ctx->addr_unit = addr_unit;
- param_ctx->min_sz_region = max(DAMON_MIN_REGION_SZ / addr_unit, 1);
+ param_ctx->min_region_sz = max(DAMON_MIN_REGION_SZ / addr_unit, 1);
if (!damon_reclaim_mon_attrs.aggr_interval) {
err = -EINVAL;
@@ -251,7 +251,7 @@ static int damon_reclaim_apply_parameters(void)
err = damon_set_region_biggest_system_ram_default(param_target,
&monitor_region_start,
&monitor_region_end,
- param_ctx->min_sz_region);
+ param_ctx->min_region_sz);
if (err)
goto out;
err = damon_commit_ctx(ctx, param_ctx);
diff --git a/mm/damon/stat.c b/mm/damon/stat.c
index 5e18b164f6d8..536f02bd173e 100644
--- a/mm/damon/stat.c
+++ b/mm/damon/stat.c
@@ -181,7 +181,7 @@ static struct damon_ctx *damon_stat_build_ctx(void)
goto free_out;
damon_add_target(ctx, target);
if (damon_set_region_biggest_system_ram_default(target, &start, &end,
- ctx->min_sz_region))
+ ctx->min_region_sz))
goto free_out;
return ctx;
free_out:
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 57d36d60f329..b7f66196bec4 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1365,7 +1365,7 @@ static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
static int damon_sysfs_set_regions(struct damon_target *t,
struct damon_sysfs_regions *sysfs_regions,
- unsigned long min_sz_region)
+ unsigned long min_region_sz)
{
struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
@@ -1387,7 +1387,7 @@ static int damon_sysfs_set_regions(struct damon_target *t,
if (ranges[i - 1].end > ranges[i].start)
goto out;
}
- err = damon_set_regions(t, ranges, sysfs_regions->nr, min_sz_region);
+ err = damon_set_regions(t, ranges, sysfs_regions->nr, min_region_sz);
out:
kfree(ranges);
return err;
@@ -1409,7 +1409,8 @@ static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
return -EINVAL;
}
t->obsolete = sys_target->obsolete;
- return damon_sysfs_set_regions(t, sys_target->regions, ctx->min_sz_region);
+ return damon_sysfs_set_regions(t, sys_target->regions,
+ ctx->min_region_sz);
}
static int damon_sysfs_add_targets(struct damon_ctx *ctx,
@@ -1469,7 +1470,7 @@ static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
ctx->addr_unit = sys_ctx->addr_unit;
/* addr_unit is respected by only DAMON_OPS_PADDR */
if (sys_ctx->ops_id == DAMON_OPS_PADDR)
- ctx->min_sz_region = max(
+ ctx->min_region_sz = max(
DAMON_MIN_REGION_SZ / sys_ctx->addr_unit, 1);
err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
if (err)
--
2.47.3