From: Xiu Jianfeng <xiujianfeng@huaweicloud.com>
To: hannes@cmpxchg.org, mhocko@kernel.org, roman.gushchin@linux.dev,
shakeel.butt@linux.dev, muchun.song@linux.dev,
akpm@linux-foundation.org
Cc: cgroups@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, wangweiyang2@huawei.com
Subject: [PATCH -next] memcg: factor out mem_cgroup_stat_aggregate()
Date: Sat, 26 Oct 2024 09:34:07 +0000
Message-ID: <20241026093407.310955-1-xiujianfeng@huaweicloud.com>
From: Xiu Jianfeng <xiujianfeng@huawei.com>
Currently mem_cgroup_css_rstat_flush() flushes the per-CPU statistics
of a specified CPU into the global statistics of the memcg. It
processes three kinds of data in three for loops using exactly the
same method. Therefore, factor the common loop out into a helper,
mem_cgroup_stat_aggregate(), which makes the code cleaner.
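
For illustration, each of the former loops becomes a single call into
the helper, driven by a small control structure that describes which
counter arrays to aggregate. A simplified excerpt of the new calling
pattern for the memcg-level state counters (field names as introduced
by this patch, see the diff below):

	/* flush and propagate the memcg-level state counters for this CPU */
	struct aggregate_control ac = {
		.aggregate	= memcg->vmstats->state,
		.local		= memcg->vmstats->state_local,
		.pending	= memcg->vmstats->state_pending,
		.ppending	= parent ? parent->vmstats->state_pending : NULL,
		.cstat		= statc->state,
		.cstat_prev	= statc->state_prev,
		.size		= MEMCG_VMSTAT_SIZE,
	};
	mem_cgroup_stat_aggregate(&ac);

The events and per-node lruvec counters are flushed the same way with
their respective arrays and sizes.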
Signed-off-by: Xiu Jianfeng <xiujianfeng@huawei.com>
---
mm/memcontrol.c | 129 ++++++++++++++++++++++++++----------------------
1 file changed, 70 insertions(+), 59 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 17af08367c68..c3ae13c8f6fa 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3738,68 +3738,90 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
memcg_wb_domain_size_changed(memcg);
}
-static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
+struct aggregate_control {
+ /* pointer to the aggregated (CPU and subtree aggregated) counters */
+ long *aggregate;
+ /* pointer to the non-hierarchical (CPU aggregated) counters */
+ long *local;
+ /* pointer to the pending child counters during tree propagation */
+ long *pending;
+ /* pointer to the parent's pending counters, could be NULL */
+ long *ppending;
+ /* pointer to the percpu counters to be aggregated */
+ long *cstat;
+ /* pointer to the percpu counters of the last aggregation */
+ long *cstat_prev;
+ /* size of the above counters */
+ int size;
+};
+
+static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(css);
- struct mem_cgroup *parent = parent_mem_cgroup(memcg);
- struct memcg_vmstats_percpu *statc;
+ int i;
long delta, delta_cpu, v;
- int i, nid;
-
- statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
- for (i = 0; i < MEMCG_VMSTAT_SIZE; i++) {
+ for (i = 0; i < ac->size; i++) {
/*
* Collect the aggregated propagation counts of groups
* below us. We're in a per-cpu loop here and this is
* a global counter, so the first cycle will get them.
*/
- delta = memcg->vmstats->state_pending[i];
+ delta = ac->pending[i];
if (delta)
- memcg->vmstats->state_pending[i] = 0;
+ ac->pending[i] = 0;
/* Add CPU changes on this level since the last flush */
delta_cpu = 0;
- v = READ_ONCE(statc->state[i]);
- if (v != statc->state_prev[i]) {
- delta_cpu = v - statc->state_prev[i];
+ v = READ_ONCE(ac->cstat[i]);
+ if (v != ac->cstat_prev[i]) {
+ delta_cpu = v - ac->cstat_prev[i];
delta += delta_cpu;
- statc->state_prev[i] = v;
+ ac->cstat_prev[i] = v;
}
/* Aggregate counts on this level and propagate upwards */
if (delta_cpu)
- memcg->vmstats->state_local[i] += delta_cpu;
+ ac->local[i] += delta_cpu;
if (delta) {
- memcg->vmstats->state[i] += delta;
- if (parent)
- parent->vmstats->state_pending[i] += delta;
+ ac->aggregate[i] += delta;
+ if (ac->ppending)
+ ac->ppending[i] += delta;
}
}
+}
- for (i = 0; i < NR_MEMCG_EVENTS; i++) {
- delta = memcg->vmstats->events_pending[i];
- if (delta)
- memcg->vmstats->events_pending[i] = 0;
-
- delta_cpu = 0;
- v = READ_ONCE(statc->events[i]);
- if (v != statc->events_prev[i]) {
- delta_cpu = v - statc->events_prev[i];
- delta += delta_cpu;
- statc->events_prev[i] = v;
- }
+static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+ struct memcg_vmstats_percpu *statc;
+ struct aggregate_control ac;
+ int nid;
- if (delta_cpu)
- memcg->vmstats->events_local[i] += delta_cpu;
+ statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
- if (delta) {
- memcg->vmstats->events[i] += delta;
- if (parent)
- parent->vmstats->events_pending[i] += delta;
- }
- }
+ ac = (struct aggregate_control) {
+ .aggregate = memcg->vmstats->state,
+ .local = memcg->vmstats->state_local,
+ .pending = memcg->vmstats->state_pending,
+ .ppending = parent ? parent->vmstats->state_pending : NULL,
+ .cstat = statc->state,
+ .cstat_prev = statc->state_prev,
+ .size = MEMCG_VMSTAT_SIZE,
+ };
+ mem_cgroup_stat_aggregate(&ac);
+
+ ac = (struct aggregate_control) {
+ .aggregate = memcg->vmstats->events,
+ .local = memcg->vmstats->events_local,
+ .pending = memcg->vmstats->events_pending,
+ .ppending = parent ? parent->vmstats->events_pending : NULL,
+ .cstat = statc->events,
+ .cstat_prev = statc->events_prev,
+ .size = NR_MEMCG_EVENTS,
+ };
+ mem_cgroup_stat_aggregate(&ac);
for_each_node_state(nid, N_MEMORY) {
struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
@@ -3812,28 +3834,17 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
- for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; i++) {
- delta = lstats->state_pending[i];
- if (delta)
- lstats->state_pending[i] = 0;
-
- delta_cpu = 0;
- v = READ_ONCE(lstatc->state[i]);
- if (v != lstatc->state_prev[i]) {
- delta_cpu = v - lstatc->state_prev[i];
- delta += delta_cpu;
- lstatc->state_prev[i] = v;
- }
-
- if (delta_cpu)
- lstats->state_local[i] += delta_cpu;
+ ac = (struct aggregate_control) {
+ .aggregate = lstats->state,
+ .local = lstats->state_local,
+ .pending = lstats->state_pending,
+ .ppending = plstats ? plstats->state_pending : NULL,
+ .cstat = lstatc->state,
+ .cstat_prev = lstatc->state_prev,
+ .size = NR_MEMCG_NODE_STAT_ITEMS,
+ };
+ mem_cgroup_stat_aggregate(&ac);
- if (delta) {
- lstats->state[i] += delta;
- if (plstats)
- plstats->state_pending[i] += delta;
- }
- }
}
WRITE_ONCE(statc->stats_updates, 0);
/* We are in a per-cpu loop here, only do the atomic write once */
--
2.34.1