From: Yu Zhao <yuzhao@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
Jonathan Corbet <corbet@lwn.net>,
Michael Larabel <michael@michaellarabel.com>,
Michal Hocko <mhocko@kernel.org>,
Mike Rapoport <rppt@kernel.org>,
Roman Gushchin <roman.gushchin@linux.dev>,
Suren Baghdasaryan <surenb@google.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
linux-mm@google.com, Yu Zhao <yuzhao@google.com>
Subject: [PATCH mm-unstable v2 5/8] mm: multi-gen LRU: shuffle should_run_aging()
Date: Tue, 20 Dec 2022 17:12:05 -0700
Message-ID: <20221221001207.1376119-6-yuzhao@google.com>
In-Reply-To: <20221221001207.1376119-1-yuzhao@google.com>

Move should_run_aging() next to its only remaining caller.

Signed-off-by: Yu Zhao <yuzhao@google.com>
Change-Id: I07e5372b98ca28c003861fdeddadde4304abcfe4
---
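No functional change is intended; the function body is moved verbatim. For reviewers reading the diff out of context, the sketch below shows how the remaining call site consumes the result. It assumes the caller is get_nr_to_scan(), as in mm-unstable at this point in the series, and it is abridged and illustrative rather than the exact kernel code: the memcg protection checks and the priority handling are omitted, and the trailing try_to_inc_max_seq() arguments follow the local definition.

static unsigned long get_nr_to_scan(struct lruvec *lruvec,
				    struct scan_control *sc, bool can_swap)
{
	unsigned long nr_to_scan;
	DEFINE_MAX_SEQ(lruvec);

	/* enough cold folios: evict nr_to_scan of them right away */
	if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
		return nr_to_scan;

	/* otherwise age first; skip this lruvec if no generation can be added */
	if (!try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
		return 0;

	return nr_to_scan;
}
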
mm/vmscan.c | 124 ++++++++++++++++++++++++++--------------------------
1 file changed, 62 insertions(+), 62 deletions(-)
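
Also for reference, and to make the in-code comments on the [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)] bounds easier to follow, here is a minimal standalone sketch of the two threshold checks with a small worked example. This is plain userspace C, not kernel code; MIN_NR_GENS is 2 in the current kernel, and the helper name is made up for illustration.

#include <stdbool.h>
#include <stdio.h>

#define MIN_NR_GENS 2	/* the kernel's minimum number of generations */

/*
 * young: pages in the max_seq generation; old: pages in the generation
 * MIN_NR_GENS steps below max_seq; total: pages in all generations.
 */
static bool aging_needed(unsigned long young, unsigned long old,
			 unsigned long total)
{
	/* hot pages exceed the 1/MIN_NR_GENS upper bound: run aging */
	if (young * MIN_NR_GENS > total)
		return true;
	/* cold pages fall below the 1/(MIN_NR_GENS + 2) lower bound: run aging */
	if (old * (MIN_NR_GENS + 2) < total)
		return true;
	return false;
}

int main(void)
{
	/* 600 of 1000 pages are in the youngest generation: 600 * 2 > 1000 */
	printf("%d\n", aging_needed(600, 100, 1000));	/* 1: run aging */
	/* 300 young, 300 old out of 1000: neither bound trips */
	printf("%d\n", aging_needed(300, 300, 1000));	/* 0: evict instead */
	return 0;
}
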
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a2f71400b8be..c424cc06f8c6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4470,68 +4470,6 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
return true;
}
-static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
- struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
-{
- int gen, type, zone;
- unsigned long old = 0;
- unsigned long young = 0;
- unsigned long total = 0;
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
- struct mem_cgroup *memcg = lruvec_memcg(lruvec);
- DEFINE_MIN_SEQ(lruvec);
-
- /* whether this lruvec is completely out of cold folios */
- if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
- *nr_to_scan = 0;
- return true;
- }
-
- for (type = !can_swap; type < ANON_AND_FILE; type++) {
- unsigned long seq;
-
- for (seq = min_seq[type]; seq <= max_seq; seq++) {
- unsigned long size = 0;
-
- gen = lru_gen_from_seq(seq);
-
- for (zone = 0; zone < MAX_NR_ZONES; zone++)
- size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
-
- total += size;
- if (seq == max_seq)
- young += size;
- else if (seq + MIN_NR_GENS == max_seq)
- old += size;
- }
- }
-
- /* try to scrape all its memory if this memcg was deleted */
- *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
-
- /*
- * The aging tries to be lazy to reduce the overhead, while the eviction
- * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
- * ideal number of generations is MIN_NR_GENS+1.
- */
- if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
- return false;
-
- /*
- * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
- * of the total number of pages for each generation. A reasonable range
- * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
- * aging cares about the upper bound of hot pages, while the eviction
- * cares about the lower bound of cold pages.
- */
- if (young * MIN_NR_GENS > total)
- return true;
- if (old * (MIN_NR_GENS + 2) < total)
- return true;
-
- return false;
-}
-
static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
{
int gen, type, zone;
@@ -5115,6 +5053,68 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
return scanned;
}
+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+ struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
+{
+ int gen, type, zone;
+ unsigned long old = 0;
+ unsigned long young = 0;
+ unsigned long total = 0;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ /* whether this lruvec is completely out of cold folios */
+ if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
+ *nr_to_scan = 0;
+ return true;
+ }
+
+ for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ unsigned long seq;
+
+ for (seq = min_seq[type]; seq <= max_seq; seq++) {
+ unsigned long size = 0;
+
+ gen = lru_gen_from_seq(seq);
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+ total += size;
+ if (seq == max_seq)
+ young += size;
+ else if (seq + MIN_NR_GENS == max_seq)
+ old += size;
+ }
+ }
+
+ /* try to scrape all its memory if this memcg was deleted */
+ *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+
+ /*
+ * The aging tries to be lazy to reduce the overhead, while the eviction
+ * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
+ * ideal number of generations is MIN_NR_GENS+1.
+ */
+ if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
+ return false;
+
+ /*
+ * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
+ * of the total number of pages for each generation. A reasonable range
+ * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
+ * aging cares about the upper bound of hot pages, while the eviction
+ * cares about the lower bound of cold pages.
+ */
+ if (young * MIN_NR_GENS > total)
+ return true;
+ if (old * (MIN_NR_GENS + 2) < total)
+ return true;
+
+ return false;
+}
+
/*
* For future optimizations:
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
--
2.39.0.314.g84b9a713c41-goog