From: Shakeel Butt <shakeelb@google.com>
To: Ivan Babrou <ivan@cloudflare.com>
Cc: "Michal Koutný" <mkoutny@suse.com>,
	"Daniel Dao" <dqminh@cloudflare.com>,
	kernel-team <kernel-team@cloudflare.com>,
	"Linux MM" <linux-mm@kvack.org>,
	"Johannes Weiner" <hannes@cmpxchg.org>,
	"Roman Gushchin" <guro@fb.com>, "Feng Tang" <feng.tang@intel.com>,
	"Michal Hocko" <mhocko@kernel.org>,
	"Hillf Danton" <hdanton@sina.com>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Linus Torvalds" <torvalds@linux-foundation.org>
Subject: Re: Regression in workingset_refault latency on 5.15
Date: Tue, 1 Mar 2022 18:50:22 -0800	[thread overview]
Message-ID: <20220302025022.nnmpwxmkqed2icck@google.com> (raw)
In-Reply-To: <CABWYdi1qK8P2LT2HL8B9sM8oGP4dQnsCjtUgZP1xkrQbHDk_pA@mail.gmail.com>

On Tue, Mar 01, 2022 at 04:48:00PM -0800, Ivan Babrou wrote:

[...]

> Looks like you were right that targeted flush is not going to be as good.


Thanks a lot. Can you please also try the following patch (independent of the
other patches)?
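
To summarize the idea, in case it helps review: instead of sizing the
per-memcg event arrays by NR_VM_EVENT_ITEMS, the patch tracks only the
events memcg actually reports and keeps a reverse-lookup table where a
zero entry means "not tracked". A standalone userspace sketch of just
that mapping (the enum below is a stand-in for the kernel's
vm_event_item, not the real definition):

/* Sketch only, not kernel code: dense index mapping for the subset of
 * vmstat events tracked per-memcg. A zero entry in the lookup table
 * means "event not tracked", so lookups return -1 for those.
 */
#include <stdio.h>

enum vm_event { PGPGIN, PGPGOUT, PGFAULT, PGMAJFAULT, PGREFILL,
		NR_VM_EVENT_ITEMS };

static const unsigned int memcg_vm_events[] = { PGPGIN, PGPGOUT, PGFAULT };
#define NR_MEMCG_EVENTS (sizeof(memcg_vm_events) / sizeof(memcg_vm_events[0]))

static int memcg_events_index[NR_VM_EVENT_ITEMS];

static void init_memcg_events(void)
{
	unsigned int i;

	for (i = 0; i < NR_MEMCG_EVENTS; i++)
		memcg_events_index[memcg_vm_events[i]] = i + 1;
}

static int get_memcg_events_index(enum vm_event idx)
{
	return memcg_events_index[idx] - 1;
}

int main(void)
{
	init_memcg_events();
	printf("PGFAULT    -> %d\n", get_memcg_events_index(PGFAULT));    /* 2 */
	printf("PGMAJFAULT -> %d\n", get_memcg_events_index(PGMAJFAULT)); /* -1 */
	return 0;
}

With the arrays sized by NR_MEMCG_EVENTS instead of NR_VM_EVENT_ITEMS,
the percpu and aggregated event arrays carry only the entries memcg
actually exports, and moving struct memcg_vmstats behind a pointer
shrinks struct mem_cgroup itself.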


diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d9b8df5ef212..499f75e066f3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -75,29 +75,8 @@ enum mem_cgroup_events_target {
  	MEM_CGROUP_NTARGETS,
  };

-struct memcg_vmstats_percpu {
-	/* Local (CPU and cgroup) page state & events */
-	long			state[MEMCG_NR_STAT];
-	unsigned long		events[NR_VM_EVENT_ITEMS];
-
-	/* Delta calculation for lockless upward propagation */
-	long			state_prev[MEMCG_NR_STAT];
-	unsigned long		events_prev[NR_VM_EVENT_ITEMS];
-
-	/* Cgroup1: threshold notifications & softlimit tree updates */
-	unsigned long		nr_page_events;
-	unsigned long		targets[MEM_CGROUP_NTARGETS];
-};
-
-struct memcg_vmstats {
-	/* Aggregated (CPU and subtree) page state & events */
-	long			state[MEMCG_NR_STAT];
-	unsigned long		events[NR_VM_EVENT_ITEMS];
-
-	/* Pending child counts during tree propagation */
-	long			state_pending[MEMCG_NR_STAT];
-	unsigned long		events_pending[NR_VM_EVENT_ITEMS];
-};
+struct memcg_vmstats_percpu;
+struct memcg_vmstats;

  struct mem_cgroup_reclaim_iter {
  	struct mem_cgroup *position;
@@ -304,7 +283,7 @@ struct mem_cgroup {
  	MEMCG_PADDING(_pad1_);

  	/* memory.stat */
-	struct memcg_vmstats	vmstats;
+	struct memcg_vmstats	*vmstats;

  	/* memory.events */
  	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
@@ -964,11 +943,7 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
  	local_irq_restore(flags);
  }

-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
-{
-	return READ_ONCE(memcg->vmstats.state[idx]);
-}
-
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
  static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
  					      enum node_stat_item idx)
  {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 32ba963ebf2e..c65f155c2048 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -688,6 +688,71 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
  	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
  }

+static const unsigned int memcg_vm_events[] = {
+	PGPGIN,
+	PGPGOUT,
+	PGFAULT,
+	PGMAJFAULT,
+	PGREFILL,
+	PGSCAN_KSWAPD,
+	PGSCAN_DIRECT,
+	PGSTEAL_KSWAPD,
+	PGSTEAL_DIRECT,
+	PGACTIVATE,
+	PGDEACTIVATE,
+	PGLAZYFREE,
+	PGLAZYFREED,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	THP_FAULT_ALLOC,
+	THP_COLLAPSE_ALLOC,
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+};
+#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_events)
+
+static int memcg_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
+
+static void init_memcg_events(void)
+{
+	int i;
+
+	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
+		memcg_events_index[memcg_vm_events[i]] = i + 1;
+}
+
+static int get_memcg_events_index(enum vm_event_item idx)
+{
+	return memcg_events_index[idx] - 1;
+}
+
+struct memcg_vmstats_percpu {
+	/* Local (CPU and cgroup) page state & events */
+	long			state[MEMCG_NR_STAT];
+	unsigned long		events[NR_MEMCG_EVENTS];
+
+	/* Delta calculation for lockless upward propagation */
+	long			state_prev[MEMCG_NR_STAT];
+	unsigned long		events_prev[NR_MEMCG_EVENTS];
+
+	/* Cgroup1: threshold notifications & softlimit tree updates */
+	unsigned long		nr_page_events;
+	unsigned long		targets[MEM_CGROUP_NTARGETS];
+};
+
+struct memcg_vmstats {
+	/* Aggregated (CPU and subtree) page state & events */
+	long			state[MEMCG_NR_STAT];
+	unsigned long		events[NR_MEMCG_EVENTS];
+
+	/* Pending child counts during tree propagation */
+	long			state_pending[MEMCG_NR_STAT];
+	unsigned long		events_pending[NR_MEMCG_EVENTS];
+};
+
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
+{
+	return READ_ONCE(memcg->vmstats->state[idx]);
+}
+
  /**
   * __mod_memcg_state - update cgroup memory statistics
   * @memcg: the memory cgroup
@@ -831,25 +896,33 @@ static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
  void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
  			  unsigned long count)
  {
-	if (mem_cgroup_disabled())
+	int index = get_memcg_events_index(idx);
+
+	if (mem_cgroup_disabled() || index < 0)
  		return;

-	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
+	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
  	memcg_rstat_updated(memcg, count);
  }

  static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
  {
-	return READ_ONCE(memcg->vmstats.events[event]);
+	int index = get_memcg_events_index(event);
+
+	if (index < 0)
+		return 0;
+	return READ_ONCE(memcg->vmstats->events[index]);
  }

  static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
  {
  	long x = 0;
  	int cpu;
+	int index = get_memcg_events_index(event);

-	for_each_possible_cpu(cpu)
-		x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
+	if (index >= 0)
+		for_each_possible_cpu(cpu)
+			x += per_cpu(memcg->vmstats_percpu->events[index], cpu);
  	return x;
  }

@@ -5147,6 +5220,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)

  	for_each_node(node)
  		free_mem_cgroup_per_node_info(memcg, node);
+	kfree(memcg->vmstats);
  	free_percpu(memcg->vmstats_percpu);
  	kfree(memcg);
  }
@@ -5180,6 +5254,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
  		goto fail;
  	}

+	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
+	if (!memcg->vmstats)
+		goto fail;
+
  	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
  						 GFP_KERNEL_ACCOUNT);
  	if (!memcg->vmstats_percpu)
@@ -5248,6 +5326,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
  		page_counter_init(&memcg->kmem, &parent->kmem);
  		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
  	} else {
+		init_memcg_events();
  		page_counter_init(&memcg->memory, NULL);
  		page_counter_init(&memcg->swap, NULL);
  		page_counter_init(&memcg->kmem, NULL);
@@ -5400,9 +5479,9 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
  		 * below us. We're in a per-cpu loop here and this is
  		 * a global counter, so the first cycle will get them.
  		 */
-		delta = memcg->vmstats.state_pending[i];
+		delta = memcg->vmstats->state_pending[i];
  		if (delta)
-			memcg->vmstats.state_pending[i] = 0;
+			memcg->vmstats->state_pending[i] = 0;

  		/* Add CPU changes on this level since the last flush */
  		v = READ_ONCE(statc->state[i]);
@@ -5415,15 +5494,15 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
  			continue;

  		/* Aggregate counts on this level and propagate upwards */
-		memcg->vmstats.state[i] += delta;
+		memcg->vmstats->state[i] += delta;
  		if (parent)
-			parent->vmstats.state_pending[i] += delta;
+			parent->vmstats->state_pending[i] += delta;
  	}

-	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
-		delta = memcg->vmstats.events_pending[i];
+	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
+		delta = memcg->vmstats->events_pending[i];
  		if (delta)
-			memcg->vmstats.events_pending[i] = 0;
+			memcg->vmstats->events_pending[i] = 0;

  		v = READ_ONCE(statc->events[i]);
  		if (v != statc->events_prev[i]) {
@@ -5434,9 +5513,9 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
  		if (!delta)
  			continue;

-		memcg->vmstats.events[i] += delta;
+		memcg->vmstats->events[i] += delta;
  		if (parent)
-			parent->vmstats.events_pending[i] += delta;
+			parent->vmstats->events_pending[i] += delta;
  	}

  	for_each_node_state(nid, N_MEMORY) {
-- 
2.35.1.574.g5d30c73bfb-goog


