From: Nhat Pham <nphamcs@gmail.com>
To: akpm@linux-foundation.org
Cc: hannes@cmpxchg.org, cerasuolodomenico@gmail.com,
	yosryahmed@google.com, sjenning@redhat.com, ddstreet@ieee.org,
	vitaly.wool@konsulko.com, mhocko@kernel.org,
	roman.gushchin@linux.dev, shakeelb@google.com,
	muchun.song@linux.dev, chrisl@kernel.org, linux-mm@kvack.org,
	kernel-team@meta.com, linux-kernel@vger.kernel.org,
	cgroups@vger.kernel.org, linux-doc@vger.kernel.org,
	linux-kselftest@vger.kernel.org, shuah@kernel.org
Subject: [PATCH v5 2/6] memcontrol: allow mem_cgroup_iter() to check for onlineness
Date: Mon,  6 Nov 2023 10:31:55 -0800
Message-ID: <20231106183159.3562879-3-nphamcs@gmail.com>
In-Reply-To: <20231106183159.3562879-1-nphamcs@gmail.com>

The new zswap writeback scheme requires an online-only memcg hierarchy
traversal. Add a boolean @online parameter to mem_cgroup_iter() that
lets callers request only online memcgs: when it is set, the iterator
pins candidates with css_tryget_online() instead of css_tryget(), so
memcgs that have been taken offline are skipped. All existing callers
pass false and keep their current behavior.
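
For illustration, a minimal sketch of the intended caller pattern (the
zswap shrinker added later in this series is the real user; the loop
below and walk_one_memcg() are illustrative only, not code from this
patch):

	memcg = mem_cgroup_iter(root, NULL, NULL, true);
	do {
		/* with the flag set, only online memcgs are returned */
		walk_one_memcg(memcg);	/* hypothetical per-memcg work */
	} while ((memcg = mem_cgroup_iter(root, memcg, NULL, true)));

The first call returns @root itself (the iterator trusts the caller's
reference on it), so the do/while form mirrors the existing in-tree
callers.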

Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 include/linux/memcontrol.h |  4 ++--
 mm/memcontrol.c            | 17 ++++++++++-------
 mm/shrinker.c              |  4 ++--
 mm/vmscan.c                | 26 +++++++++++++-------------
 4 files changed, 27 insertions(+), 24 deletions(-)
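
A note on the reference-taking change in mem_cgroup_iter(): with
@online set, the candidate css is pinned with css_tryget_online()
rather than css_tryget(), so a css that has gone offline is simply
skipped and the walk advances to the next candidate. The new condition
can be read as the equivalent sketch below (assuming, as in the
current tree, that css_tryget_online() never succeeds where
css_tryget() fails, since it is the stricter check):

	if (css == &root->css ||
	    (online ? css_tryget_online(css) : css_tryget(css))) {
		memcg = mem_cgroup_from_css(css);
		break;
	}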

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6edd3ec4d8d5..55c85f952afd 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -832,7 +832,7 @@ static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 
 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
 				   struct mem_cgroup *,
-				   struct mem_cgroup_reclaim_cookie *);
+				   struct mem_cgroup_reclaim_cookie *, bool online);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 			   int (*)(struct task_struct *, void *), void *arg);
@@ -1381,7 +1381,7 @@ static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
 		struct mem_cgroup *prev,
-		struct mem_cgroup_reclaim_cookie *reclaim)
+		struct mem_cgroup_reclaim_cookie *reclaim, bool online)
 {
 	return NULL;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 61c0c46c2d62..6f7fc0101252 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -221,14 +221,14 @@ enum res_type {
  * be used for reference counting.
  */
 #define for_each_mem_cgroup_tree(iter, root)		\
-	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
+	for (iter = mem_cgroup_iter(root, NULL, NULL, false);	\
 	     iter != NULL;				\
-	     iter = mem_cgroup_iter(root, iter, NULL))
+	     iter = mem_cgroup_iter(root, iter, NULL, false))
 
 #define for_each_mem_cgroup(iter)			\
-	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
+	for (iter = mem_cgroup_iter(NULL, NULL, NULL, false);	\
 	     iter != NULL;				\
-	     iter = mem_cgroup_iter(NULL, iter, NULL))
+	     iter = mem_cgroup_iter(NULL, iter, NULL, false))
 
 static inline bool task_is_dying(void)
 {
@@ -1139,6 +1139,7 @@ struct mem_cgroup *get_mem_cgroup_from_current(void)
  * @root: hierarchy root
  * @prev: previously returned memcg, NULL on first invocation
  * @reclaim: cookie for shared reclaim walks, NULL for full walks
+ * @online: skip offline memcgs
  *
  * Returns references to children of the hierarchy below @root, or
  * @root itself, or %NULL after a full round-trip.
@@ -1153,7 +1154,8 @@ struct mem_cgroup *get_mem_cgroup_from_current(void)
  */
 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim)
+				   struct mem_cgroup_reclaim_cookie *reclaim,
+				   bool online)
 {
 	struct mem_cgroup_reclaim_iter *iter;
 	struct cgroup_subsys_state *css = NULL;
@@ -1223,7 +1225,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 		 * is provided by the caller, so we know it's alive
 		 * and kicking, and don't take an extra reference.
 		 */
-		if (css == &root->css || css_tryget(css)) {
+		if (css == &root->css || (!online && css_tryget(css)) ||
+				css_tryget_online(css)) {
 			memcg = mem_cgroup_from_css(css);
 			break;
 		}
@@ -1836,7 +1839,7 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
 	excess = soft_limit_excess(root_memcg);
 
 	while (1) {
-		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
+		victim = mem_cgroup_iter(root_memcg, victim, &reclaim, false);
 		if (!victim) {
 			loop++;
 			if (loop >= 2) {
diff --git a/mm/shrinker.c b/mm/shrinker.c
index dd91eab43ed3..54f5d3aa4f27 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -160,7 +160,7 @@ static int expand_shrinker_info(int new_id)
 	new_size = shrinker_unit_size(new_nr_max);
 	old_size = shrinker_unit_size(shrinker_nr_max);
 
-	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	memcg = mem_cgroup_iter(NULL, NULL, NULL, false);
 	do {
 		ret = expand_one_shrinker_info(memcg, new_size, old_size,
 					       new_nr_max);
@@ -168,7 +168,7 @@ static int expand_shrinker_info(int new_id)
 			mem_cgroup_iter_break(NULL, memcg);
 			goto out;
 		}
-	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL, false)) != NULL);
 out:
 	if (!ret)
 		shrinker_nr_max = new_nr_max;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cc0cb41fb32..065d29502580 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -397,10 +397,10 @@ static unsigned long drop_slab_node(int nid)
 	unsigned long freed = 0;
 	struct mem_cgroup *memcg = NULL;
 
-	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	memcg = mem_cgroup_iter(NULL, NULL, NULL, false);
 	do {
 		freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
-	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL, false)) != NULL);
 
 	return freed;
 }
@@ -3931,7 +3931,7 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 	if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
 		return;
 
-	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	memcg = mem_cgroup_iter(NULL, NULL, NULL, false);
 	do {
 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 
@@ -3941,7 +3941,7 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 		}
 
 		cond_resched();
-	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL, false)));
 
 	/*
 	 * The main goal is to OOM kill if every generation from all memcgs is
@@ -5033,7 +5033,7 @@ static void lru_gen_change_state(bool enabled)
 	else
 		static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
 
-	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	memcg = mem_cgroup_iter(NULL, NULL, NULL, false);
 	do {
 		int nid;
 
@@ -5057,7 +5057,7 @@ static void lru_gen_change_state(bool enabled)
 		}
 
 		cond_resched();
-	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL, false)));
 unlock:
 	mutex_unlock(&state_mutex);
 	put_online_mems();
@@ -5160,7 +5160,7 @@ static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
 	if (!m->private)
 		return ERR_PTR(-ENOMEM);
 
-	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	memcg = mem_cgroup_iter(NULL, NULL, NULL, false);
 	do {
 		int nid;
 
@@ -5168,7 +5168,7 @@ static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
 			if (!nr_to_skip--)
 				return get_lruvec(memcg, nid);
 		}
-	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL, false)));
 
 	return NULL;
 }
@@ -5191,7 +5191,7 @@ static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
 
 	nid = next_memory_node(nid);
 	if (nid == MAX_NUMNODES) {
-		memcg = mem_cgroup_iter(NULL, memcg, NULL);
+		memcg = mem_cgroup_iter(NULL, memcg, NULL, false);
 		if (!memcg)
 			return NULL;
 
@@ -5794,7 +5794,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 	struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
 	struct mem_cgroup *memcg;
 
-	memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
+	memcg = mem_cgroup_iter(target_memcg, NULL, NULL, false);
 	do {
 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 		unsigned long reclaimed;
@@ -5844,7 +5844,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 				   sc->nr_scanned - scanned,
 				   sc->nr_reclaimed - reclaimed);
 
-	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
+	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL, false)));
 }
 
 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
@@ -6511,12 +6511,12 @@ static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 	if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
 		return;
 
-	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	memcg = mem_cgroup_iter(NULL, NULL, NULL, false);
 	do {
 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
 				   sc, LRU_ACTIVE_ANON);
-		memcg = mem_cgroup_iter(NULL, memcg, NULL);
+		memcg = mem_cgroup_iter(NULL, memcg, NULL, false);
 	} while (memcg);
 }
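
For completeness, an online-only convenience macro in the style of the
existing for_each_mem_cgroup_tree() helper might look like the sketch
below (hypothetical; this series does not add it):

	#define for_each_online_mem_cgroup(iter, root)			\
		for (iter = mem_cgroup_iter(root, NULL, NULL, true);	\
		     iter != NULL;					\
		     iter = mem_cgroup_iter(root, iter, NULL, true))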
 
-- 
2.34.1

