From: Dev Jain <dev.jain@arm.com>
To: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Michal Hocko <mhocko@kernel.org>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	Muchun Song <muchun.song@linux.dev>,
	Harry Yoo <harry.yoo@oracle.com>, Qi Zheng <qi.zheng@linux.dev>,
	Vlastimil Babka <vbabka@suse.cz>,
	linux-mm@kvack.org, cgroups@vger.kernel.org,
	linux-kernel@vger.kernel.org,
	Meta kernel team <kernel-team@meta.com>
Subject: Re: [PATCH 1/4] memcg: use mod_node_page_state to update stats
Date: Thu, 5 Feb 2026 10:50:06 +0530	[thread overview]
Message-ID: <4847c300-c7bb-4259-867c-4bbf4d760576@arm.com> (raw)
In-Reply-To: <aYOuCmjQ5lGm8Mup@linux.dev>


On 05/02/26 2:08 am, Shakeel Butt wrote:
> On Mon, Feb 02, 2026 at 02:23:54PM +0530, Dev Jain wrote:
>> On 02/02/26 10:24 am, Shakeel Butt wrote:
>>>>>> Hello Shakeel,
>>>>>>
>>>>>>  We are seeing a regression in the micromm/munmap benchmark with this patch
>>>>>>  on arm64 - the benchmark mmaps a lot of memory, memsets it, and measures the
>>>>>>  time taken to munmap. Please see below and check whether my understanding of
>>>>>>  this patch is correct.
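>>>>>>
>>>>>>  Roughly, the benchmark boils down to the following (a minimal sketch for
>>>>>>  illustration only - the size and exact flags here are assumptions; the
>>>>>>  real code is micromm.c, linked below):
>>>>>>
>>>>>>	#include <stdio.h>
>>>>>>	#include <string.h>
>>>>>>	#include <sys/mman.h>
>>>>>>	#include <time.h>
>>>>>>
>>>>>>	#define SZ	(1UL << 30)	/* 1G of anonymous memory */
>>>>>>
>>>>>>	int main(void)
>>>>>>	{
>>>>>>		struct timespec t0, t1;
>>>>>>		char *p = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
>>>>>>			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
>>>>>>
>>>>>>		if (p == MAP_FAILED)
>>>>>>			return 1;
>>>>>>
>>>>>>		memset(p, 1, SZ);		/* fault every page in */
>>>>>>
>>>>>>		clock_gettime(CLOCK_MONOTONIC, &t0);
>>>>>>		munmap(p, SZ);			/* the measured part */
>>>>>>		clock_gettime(CLOCK_MONOTONIC, &t1);
>>>>>>
>>>>>>		printf("munmap took %ld ns\n",
>>>>>>		       (t1.tv_sec - t0.tv_sec) * 1000000000L +
>>>>>>		       (t1.tv_nsec - t0.tv_nsec));
>>>>>>		return 0;
>>>>>>	}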
>>>>>>
>>>>>  Thanks for the report. Are you seeing the regression in just the benchmark,
>>>>>  or in some real workload as well? Also, how much regression are you seeing?
>>>>>  I have a kernel test robot regression report [1] for this patch as well,
>>>>>  which reports a 2.6% regression, so it was on the back-burner for now. I
>>>>>  will take a look at this again soon.
>>>>>
>>>> The munmap regression is ~24%. Haven't observed a regression in any other
>>>> benchmark yet.
>>> Please share the code/benchmark which shows this regression. Also, if you
>>> can share the perf profile, that would be awesome.
>> https://gitlab.arm.com/tooling/fastpath/-/blob/main/containers/microbench/micromm.c
>> You can run this with
>> ./micromm 0 munmap 10
>>
>> I don't have a perf profile; I measured the time taken by the above command,
>> with and without the patch.
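>>
>> If it helps, I can try grabbing one with something like the following
>> (assuming perf is available on the box):
>>
>>	perf record -g -- ./micromm 0 munmap 10
>>	perf report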
>>
> Hi Dev, can you please try the following patch?
>
>
> From 40155feca7e7bc846800ab8449735bdb03164d6d Mon Sep 17 00:00:00 2001
> From: Shakeel Butt <shakeel.butt@linux.dev>
> Date: Wed, 4 Feb 2026 08:46:08 -0800
> Subject: [PATCH] vmstat: use preempt disable instead of try_cmpxchg
>
> Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
> ---
>  include/linux/mmzone.h |  2 +-
>  mm/vmstat.c            | 58 ++++++++++++++++++------------------------
>  2 files changed, 26 insertions(+), 34 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 3e51190a55e4..499cd53efdd6 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -776,7 +776,7 @@ struct per_cpu_zonestat {
>  
>  struct per_cpu_nodestat {
>  	s8 stat_threshold;
> -	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
> +	long vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
>  };
>  
>  #endif /* !__GENERATING_BOUNDS.H */
> diff --git a/mm/vmstat.c b/mm/vmstat.c
> index 86b14b0f77b5..0930695597bb 100644
> --- a/mm/vmstat.c
> +++ b/mm/vmstat.c
> @@ -377,7 +377,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
>  				long delta)
>  {
>  	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
> -	s8 __percpu *p = pcp->vm_node_stat_diff + item;
> +	long __percpu *p = pcp->vm_node_stat_diff + item;
>  	long x;
>  	long t;
>  
> @@ -456,8 +456,8 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
>  void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
>  {
>  	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
> -	s8 __percpu *p = pcp->vm_node_stat_diff + item;
> -	s8 v, t;
> +	long __percpu *p = pcp->vm_node_stat_diff + item;
> +	long v, t;
>  
>  	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
>  
> @@ -467,7 +467,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
>  	v = __this_cpu_inc_return(*p);
>  	t = __this_cpu_read(pcp->stat_threshold);
>  	if (unlikely(v > t)) {
> -		s8 overstep = t >> 1;
> +		long overstep = t >> 1;
>  
>  		node_page_state_add(v + overstep, pgdat, item);
>  		__this_cpu_write(*p, -overstep);
> @@ -512,8 +512,8 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
>  void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
>  {
>  	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
> -	s8 __percpu *p = pcp->vm_node_stat_diff + item;
> -	s8 v, t;
> +	long __percpu *p = pcp->vm_node_stat_diff + item;
> +	long v, t;
>  
>  	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
>  
> @@ -523,7 +523,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
>  	v = __this_cpu_dec_return(*p);
>  	t = __this_cpu_read(pcp->stat_threshold);
>  	if (unlikely(v < - t)) {
> -		s8 overstep = t >> 1;
> +		long overstep = t >> 1;
>  
>  		node_page_state_add(v - overstep, pgdat, item);
>  		__this_cpu_write(*p, overstep);
> @@ -619,9 +619,8 @@ static inline void mod_node_state(struct pglist_data *pgdat,
>         enum node_stat_item item, int delta, int overstep_mode)
>  {
>  	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
> -	s8 __percpu *p = pcp->vm_node_stat_diff + item;
> -	long n, t, z;
> -	s8 o;
> +	long __percpu *p = pcp->vm_node_stat_diff + item;
> +	long o, n, t, z;
>  
>  	if (vmstat_item_in_bytes(item)) {
>  		/*
> @@ -634,32 +633,25 @@ static inline void mod_node_state(struct pglist_data *pgdat,
>  		delta >>= PAGE_SHIFT;
>  	}
>  
> +	preempt_disable();
> +
>  	o = this_cpu_read(*p);
> -	do {
> -		z = 0;  /* overflow to node counters */
> +	n = o + delta;
>  
> -		/*
> -		 * The fetching of the stat_threshold is racy. We may apply
> -		 * a counter threshold to the wrong the cpu if we get
> -		 * rescheduled while executing here. However, the next
> -		 * counter update will apply the threshold again and
> -		 * therefore bring the counter under the threshold again.
> -		 *
> -		 * Most of the time the thresholds are the same anyways
> -		 * for all cpus in a node.
> -		 */
> -		t = this_cpu_read(pcp->stat_threshold);
> +	t = this_cpu_read(pcp->stat_threshold);
> +	z = 0;
>  
> -		n = delta + (long)o;
> +	if (abs(n) > t) {
> +		int os = overstep_mode * (t >> 1);
>  
> -		if (abs(n) > t) {
> -			int os = overstep_mode * (t >> 1) ;
> +		/* Overflow must be added to node counters */
> +		z = n + os;
> +		n = -os;
> +	}
>  
> -			/* Overflow must be added to node counters */
> -			z = n + os;
> -			n = -os;
> -		}
> -	} while (!this_cpu_try_cmpxchg(*p, &o, n));
> +	this_cpu_add(*p, n - o);
> +
> +	preempt_enable();
>  
>  	if (z)
>  		node_page_state_add(z, pgdat, item);
> @@ -866,7 +858,7 @@ static bool refresh_cpu_vm_stats(bool do_pagesets)
>  		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
>  
>  		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
> -			int v;
> +			long v;
>  
>  			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
>  			if (v) {
> @@ -929,7 +921,7 @@ void cpu_vm_stats_fold(int cpu)
>  
>  		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
>  			if (p->vm_node_stat_diff[i]) {
> -				int v;
> +				long v;
>  
>  				v = p->vm_node_stat_diff[i];
>  				p->vm_node_stat_diff[i] = 0;

Thanks for looking into this.

But this doesn't solve it :( preempt_disable() includes a compiler barrier,
which is probably why.

Also, can you confirm whether my analysis of the regression was correct?
If it was, then this diff looks wrong - AFAIU preempt_disable() won't stop
an irq handler from interrupting the execution, so this will introduce a
bug for code paths that update these stats from irq context.
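To illustrate the window I mean, here is the new mod_node_state() flow in
sketch form (not the exact generated code):

	preempt_disable();
	o = this_cpu_read(*p);		/* read the old value */
	/* ... compute n and z from o ... */
					/* <-- an irq can fire here and run
					 *     mod_node_state() itself */
	this_cpu_add(*p, n - o);	/* update based on the now-stale o */
	preempt_enable();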




Thread overview: 49+ messages
2025-11-10 23:20 [PATCH 0/4] memcg: cleanup the memcg stats interfaces Shakeel Butt
2025-11-10 23:20 ` [PATCH 1/4] memcg: use mod_node_page_state to update stats Shakeel Butt
2025-11-11  1:39   ` Harry Yoo
2025-11-11 18:58   ` Roman Gushchin
2026-01-29 13:05   ` Dev Jain
2026-02-02  4:26     ` Shakeel Butt
2026-02-02  4:48       ` Dev Jain
2026-02-02  4:54         ` Shakeel Butt
2026-02-02  8:53           ` Dev Jain
2026-02-04 20:38             ` Shakeel Butt
2026-02-05  5:20               ` Dev Jain [this message]
2026-02-05  5:45                 ` Harry Yoo
2026-02-05  5:58                   ` Shakeel Butt
2026-02-10  7:38                     ` Dev Jain
2026-02-10 16:29                       ` Shakeel Butt
2026-02-11  7:37                         ` Dev Jain
2026-02-11  8:53                           ` Harry Yoo
2026-02-11  9:24                             ` Shakeel Butt
2026-02-11 10:14                               ` Harry Yoo
2026-02-12  5:16                               ` Dev Jain
2026-02-12  5:14                             ` Dev Jain
2026-02-12  1:31                     ` Shakeel Butt
2025-11-10 23:20 ` [PATCH 2/4] memcg: remove __mod_lruvec_kmem_state Shakeel Butt
2025-11-11  1:46   ` Harry Yoo
2025-11-11  8:23   ` Qi Zheng
2025-11-11 18:58   ` Roman Gushchin
2025-11-10 23:20 ` [PATCH 3/4] memcg: remove __mod_lruvec_state Shakeel Butt
2025-11-11  5:21   ` Harry Yoo
2025-11-11 18:58   ` Roman Gushchin
2025-11-10 23:20 ` [PATCH 4/4] memcg: remove __lruvec_stat_mod_folio Shakeel Butt
2025-11-11  5:41   ` Harry Yoo
2025-11-11 18:59   ` Roman Gushchin
2025-11-11  0:59 ` [PATCH 0/4] memcg: cleanup the memcg stats interfaces Harry Yoo
2025-11-11  2:23   ` Qi Zheng
2025-11-11  2:39     ` Shakeel Butt
2025-11-11  2:48       ` Qi Zheng
2025-11-11  3:00         ` Shakeel Butt
2025-11-11  3:07           ` Qi Zheng
2025-11-11  3:18             ` Harry Yoo
2025-11-11  3:29               ` Qi Zheng
2025-11-11  3:05         ` Harry Yoo
2025-11-11  8:01           ` Sebastian Andrzej Siewior
2025-11-11  8:36 ` Qi Zheng
2025-11-11 16:45   ` Shakeel Butt
2025-11-12  2:11     ` Qi Zheng
2025-11-11  9:54 ` Vlastimil Babka
2025-11-11 19:01 ` Roman Gushchin
2025-11-11 19:34   ` Shakeel Butt
2025-11-15 19:27 ` Shakeel Butt
