From: Kinsey Ho <kinseyho@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Yu Zhao <yuzhao@google.com>,
Donet Tom <donettom@linux.vnet.ibm.com>,
"Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>,
Kinsey Ho <kinseyho@google.com>
Subject: [PATCH mm-unstable v1 4/5] mm/mglru: improve struct lru_gen_mm_walk
Date: Wed, 14 Feb 2024 06:05:37 +0000
Message-ID: <20240214060538.3524462-5-kinseyho@google.com>
In-Reply-To: <20240214060538.3524462-1-kinseyho@google.com>

Rename max_seq to seq in struct lru_gen_mm_walk for consistency with
struct lru_gen_mm_state. Note that seq is not always up to date with
max_seq from lru_gen_folio: the walker caches the value it was set up
with, and another thread may have called inc_max_seq() since then.
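
For reference, a condensed view of the two structs after the rename
(unrelated fields elided; see include/linux/mmzone.h for the full
definitions):

	struct lru_gen_mm_state {
		/* synced with max_seq after each iteration */
		unsigned long seq;
		...
	};

	struct lru_gen_mm_walk {
		/* max_seq from lru_gen_folio: can be out of date */
		unsigned long seq;
		...
	};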
No functional changes.
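
Since walk->seq can be stale, walk_pte_range() and
walk_pmd_range_locked() now take the current generation from
DEFINE_MAX_SEQ() rather than from the walker. For reference, the
macro expands to roughly the following (paraphrased from mm/vmscan.c):

	/* snapshot the current max_seq from the lruvec */
	#define DEFINE_MAX_SEQ(lruvec) \
		unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)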
Signed-off-by: Kinsey Ho <kinseyho@google.com>
---
 include/linux/mmzone.h |  6 +++---
 mm/vmscan.c            | 50 ++++++++++++++++++++++++++------------------------
 2 files changed, 29 insertions(+), 27 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 22fd46c77840..e51c75270e82 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -468,7 +468,7 @@ enum {
#define NR_BLOOM_FILTERS 2
struct lru_gen_mm_state {
- /* set to max_seq after each iteration */
+ /* synced with max_seq after each iteration */
unsigned long seq;
/* where the current iteration continues after */
struct list_head *head;
@@ -483,8 +483,8 @@ struct lru_gen_mm_state {
struct lru_gen_mm_walk {
/* the lruvec under reclaim */
struct lruvec *lruvec;
- /* unstable max_seq from lru_gen_folio */
- unsigned long max_seq;
+ /* max_seq from lru_gen_folio: can be out of date */
+ unsigned long seq;
/* the next address within an mm to scan */
unsigned long next_addr;
/* to batch promoted pages */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8c77c332a67a..f1019afa7813 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2906,7 +2906,7 @@ static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
- hist = lru_hist_from_seq(walk->max_seq);
+ hist = lru_hist_from_seq(walk->seq);
for (i = 0; i < NR_MM_STATS; i++) {
WRITE_ONCE(mm_state->stats[hist][i],
@@ -2915,7 +2915,7 @@ static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
}
if (NR_HIST_GENS > 1 && last) {
- hist = lru_hist_from_seq(walk->max_seq + 1);
+ hist = lru_hist_from_seq(walk->seq + 1);
for (i = 0; i < NR_MM_STATS; i++)
WRITE_ONCE(mm_state->stats[hist][i], 0);
@@ -2944,9 +2944,9 @@ static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **ite
*/
spin_lock(&mm_list->lock);
- VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq);
+ VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq);
- if (walk->max_seq <= mm_state->seq)
+ if (walk->seq <= mm_state->seq)
goto done;
if (!mm_state->head)
@@ -2976,7 +2976,7 @@ static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **ite
spin_unlock(&mm_list->lock);
if (mm && first)
- reset_bloom_filter(mm_state, walk->max_seq + 1);
+ reset_bloom_filter(mm_state, walk->seq + 1);
if (*iter)
mmput_async(*iter);
@@ -2986,7 +2986,7 @@ static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **ite
return last;
}
-static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
+static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq)
{
bool success = false;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
@@ -2995,9 +2995,9 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
spin_lock(&mm_list->lock);
- VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq);
+ VM_WARN_ON_ONCE(mm_state->seq + 1 < seq);
- if (max_seq > mm_state->seq) {
+ if (seq > mm_state->seq) {
mm_state->head = NULL;
mm_state->tail = NULL;
WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
@@ -3348,7 +3348,8 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
struct lru_gen_mm_walk *walk = args->private;
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
- int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
+ DEFINE_MAX_SEQ(walk->lruvec);
+ int old_gen, new_gen = lru_gen_from_seq(max_seq);
pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl);
if (!pte)
@@ -3415,7 +3416,8 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
struct lru_gen_mm_walk *walk = args->private;
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
- int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
+ DEFINE_MAX_SEQ(walk->lruvec);
+ int old_gen, new_gen = lru_gen_from_seq(max_seq);
VM_WARN_ON_ONCE(pud_leaf(*pud));
@@ -3546,7 +3548,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
}
- if (!walk->force_scan && !test_bloom_filter(mm_state, walk->max_seq, pmd + i))
+ if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i))
continue;
walk->mm_stats[MM_NONLEAF_FOUND]++;
@@ -3557,7 +3559,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
walk->mm_stats[MM_NONLEAF_ADDED]++;
/* carry over to the next generation */
- update_bloom_filter(mm_state, walk->max_seq + 1, pmd + i);
+ update_bloom_filter(mm_state, walk->seq + 1, pmd + i);
}
walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first);
@@ -3628,7 +3630,7 @@ static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
err = -EBUSY;
/* another thread might have called inc_max_seq() */
- if (walk->max_seq != max_seq)
+ if (walk->seq != max_seq)
break;
/* folio_update_gen() requires stable folio_memcg() */
@@ -3765,7 +3767,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
return success;
}
-static bool inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq,
bool can_swap, bool force_scan)
{
bool success;
@@ -3773,14 +3775,14 @@ static bool inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
int type, zone;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
restart:
- if (max_seq < READ_ONCE(lrugen->max_seq))
+ if (seq < READ_ONCE(lrugen->max_seq))
return false;
spin_lock_irq(&lruvec->lru_lock);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
- success = max_seq == lrugen->max_seq;
+ success = seq == lrugen->max_seq;
if (!success)
goto unlock;
@@ -3833,7 +3835,7 @@ static bool inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
return success;
}
-static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq,
bool can_swap, bool force_scan)
{
bool success;
@@ -3842,13 +3844,13 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
- VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
+ VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq));
if (!mm_state)
- return inc_max_seq(lruvec, max_seq, can_swap, force_scan);
+ return inc_max_seq(lruvec, seq, can_swap, force_scan);
/* see the comment in iterate_mm_list() */
- if (max_seq <= READ_ONCE(mm_state->seq))
+ if (seq <= READ_ONCE(mm_state->seq))
return false;
/*
@@ -3858,18 +3860,18 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
* is less efficient, but it avoids bursty page faults.
*/
if (!should_walk_mmu()) {
- success = iterate_mm_list_nowalk(lruvec, max_seq);
+ success = iterate_mm_list_nowalk(lruvec, seq);
goto done;
}
walk = set_mm_walk(NULL, true);
if (!walk) {
- success = iterate_mm_list_nowalk(lruvec, max_seq);
+ success = iterate_mm_list_nowalk(lruvec, seq);
goto done;
}
walk->lruvec = lruvec;
- walk->max_seq = max_seq;
+ walk->seq = seq;
walk->can_swap = can_swap;
walk->force_scan = force_scan;
@@ -3880,7 +3882,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
} while (mm);
done:
if (success) {
- success = inc_max_seq(lruvec, max_seq, can_swap, force_scan);
+ success = inc_max_seq(lruvec, seq, can_swap, force_scan);
WARN_ON_ONCE(!success);
}
--
2.43.0.687.g38aa6559b0-goog