* [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap
@ 2023-05-22 11:20 T.J. Alumbaugh
  2023-05-22 11:20 ` [PATCH mm-unstable 2/4] mm: multi-gen LRU: cleanup lru_gen_soft_reclaim() T.J. Alumbaugh
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: T.J. Alumbaugh @ 2023-05-22 11:20 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Yuanchu Xie, Yu Zhao, linux-mm, linux-kernel, T.J. Alumbaugh

Use DECLARE_BITMAP macro when possible.
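
For reference, DECLARE_BITMAP (from include/linux/types.h) expands to the
same open-coded array being replaced, so the change is purely cosmetic:

    #define DECLARE_BITMAP(name, bits) \
        unsigned long name[BITS_TO_LONGS(bits)]

    /* hence the new declaration is equivalent to the old one: */
    DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
    /* == unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)]; */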

Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
---
 mm/vmscan.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4637f6462e9c..cf18873a36b9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4144,7 +4144,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
 	unsigned long next;
 	unsigned long addr;
 	struct vm_area_struct *vma;
-	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)];
+	DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
 	unsigned long first = -1;
 	struct lru_gen_mm_walk *walk = args->private;
 
-- 
2.40.1.698.g37aff9b760-goog




* [PATCH mm-unstable 2/4] mm: multi-gen LRU: cleanup lru_gen_soft_reclaim()
  2023-05-22 11:20 [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap T.J. Alumbaugh
@ 2023-05-22 11:20 ` T.J. Alumbaugh
  2023-05-22 11:20 ` [PATCH mm-unstable 3/4] mm: multi-gen LRU: add helpers in page table walks T.J. Alumbaugh
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: T.J. Alumbaugh @ 2023-05-22 11:20 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Yuanchu Xie, Yu Zhao, linux-mm, linux-kernel, T.J. Alumbaugh

lru_gen_soft_reclaim() now takes the memcg and node ID and looks up the
lruvec itself, which keeps the interface cleaner on the caller side.
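
For context, get_lruvec() resolves the (memcg, nid) pair to the per-memcg,
per-node lruvec. A simplified sketch (the real helper in mm/vmscan.c also
lazily initializes lruvec->pgdat):

    static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
    {
        if (memcg)  /* per-memcg, per-node lruvec */
            return &memcg->nodeinfo[nid]->lruvec;

        /* memcg disabled: fall back to the node-wide lruvec */
        return &NODE_DATA(nid)->__lruvec;
    }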

Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
---
 include/linux/mmzone.h | 4 ++--
 mm/memcontrol.c        | 2 +-
 mm/vmscan.c            | 4 +++-
 3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3a68326c9989..5a7ada0413da 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -534,7 +534,7 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg);
 void lru_gen_online_memcg(struct mem_cgroup *memcg);
 void lru_gen_offline_memcg(struct mem_cgroup *memcg);
 void lru_gen_release_memcg(struct mem_cgroup *memcg);
-void lru_gen_soft_reclaim(struct lruvec *lruvec);
+void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
 
 #else /* !CONFIG_MEMCG */
 
@@ -585,7 +585,7 @@ static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
 {
 }
 
-static inline void lru_gen_soft_reclaim(struct lruvec *lruvec)
+static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d31fb1e2cb33..738ba18f3a0c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -485,7 +485,7 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
 
 	if (lru_gen_enabled()) {
 		if (soft_limit_excess(memcg))
-			lru_gen_soft_reclaim(&memcg->nodeinfo[nid]->lruvec);
+			lru_gen_soft_reclaim(memcg, nid);
 		return;
 	}
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cf18873a36b9..e088db138f5f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4846,8 +4846,10 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
 	}
 }
 
-void lru_gen_soft_reclaim(struct lruvec *lruvec)
+void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
 {
+	struct lruvec *lruvec = get_lruvec(memcg, nid);
+
 	/* see the comment on MEMCG_NR_GENS */
 	if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
 		lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
-- 
2.40.1.698.g37aff9b760-goog




* [PATCH mm-unstable 3/4] mm: multi-gen LRU: add helpers in page table walks
  2023-05-22 11:20 [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap T.J. Alumbaugh
  2023-05-22 11:20 ` [PATCH mm-unstable 2/4] mm: multi-gen LRU: cleanup lru_gen_soft_reclaim() T.J. Alumbaugh
@ 2023-05-22 11:20 ` T.J. Alumbaugh
  2023-05-22 11:20 ` [PATCH mm-unstable 4/4] mm: multi-gen LRU: cleanup lru_gen_test_recent() T.J. Alumbaugh
  2023-05-22 11:33 ` [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap David Hildenbrand
  3 siblings, 0 replies; 5+ messages in thread
From: T.J. Alumbaugh @ 2023-05-22 11:20 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Yuanchu Xie, Yu Zhao, linux-mm, linux-kernel, T.J. Alumbaugh

Add helpers to the page table walking code:
 - Clarify intent via the names "should_walk_mmu" and "should_clear_pmd_young"
 - Avoid repeating the same logic in multiple places
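
The try_to_inc_max_seq() hunk also flips a negated disjunction into a call
to the negated helper; the two forms are equivalent by De Morgan's law:

    /*
     *    !should_walk_mmu()
     * == !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
     * == !arch_has_hw_pte_young() || !get_cap(LRU_GEN_MM_WALK)
     */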

Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
---
 mm/vmscan.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index e088db138f5f..ad0f589d32e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3234,6 +3234,16 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
 #define get_cap(cap)	static_branch_unlikely(&lru_gen_caps[cap])
 #endif
 
+static bool should_walk_mmu(void)
+{
+	return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
+}
+
+static bool should_clear_pmd_young(void)
+{
+	return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
+}
+
 /******************************************************************************
  *                          shorthand helpers
  ******************************************************************************/
@@ -4098,7 +4108,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
 			goto next;
 
 		if (!pmd_trans_huge(pmd[i])) {
-			if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
+			if (should_clear_pmd_young())
 				pmdp_test_and_clear_young(vma, addr, pmd + i);
 			goto next;
 		}
@@ -4191,7 +4201,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
 #endif
 		walk->mm_stats[MM_NONLEAF_TOTAL]++;
 
-		if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) {
+		if (should_clear_pmd_young()) {
 			if (!pmd_young(val))
 				continue;
 
@@ -4493,7 +4503,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 	 * handful of PTEs. Spreading the work out over a period of time usually
 	 * is less efficient, but it avoids bursty page faults.
 	 */
-	if (!arch_has_hw_pte_young() || !get_cap(LRU_GEN_MM_WALK)) {
+	if (!should_walk_mmu()) {
 		success = iterate_mm_list_nowalk(lruvec, max_seq);
 		goto done;
 	}
@@ -5730,10 +5740,10 @@ static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, c
 	if (get_cap(LRU_GEN_CORE))
 		caps |= BIT(LRU_GEN_CORE);
 
-	if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
+	if (should_walk_mmu())
 		caps |= BIT(LRU_GEN_MM_WALK);
 
-	if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
+	if (should_clear_pmd_young())
 		caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
 
 	return sysfs_emit(buf, "0x%04x\n", caps);
-- 
2.40.1.698.g37aff9b760-goog




* [PATCH mm-unstable 4/4] mm: multi-gen LRU: cleanup lru_gen_test_recent()
  2023-05-22 11:20 [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap T.J. Alumbaugh
  2023-05-22 11:20 ` [PATCH mm-unstable 2/4] mm: multi-gen LRU: cleanup lru_gen_soft_reclaim() T.J. Alumbaugh
  2023-05-22 11:20 ` [PATCH mm-unstable 3/4] mm: multi-gen LRU: add helpers in page table walks T.J. Alumbaugh
@ 2023-05-22 11:20 ` T.J. Alumbaugh
  2023-05-22 11:33 ` [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap David Hildenbrand
  3 siblings, 0 replies; 5+ messages in thread
From: T.J. Alumbaugh @ 2023-05-22 11:20 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Yuanchu Xie, Yu Zhao, linux-mm, linux-kernel, T.J. Alumbaugh

Avoid passing memcg* and pglist_data* to lru_gen_test_recent()
since we only use the lruvec anyway.
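
A minimal sketch of the resulting caller pattern (mirroring
lru_gen_refault() below; the lruvec out-parameter is only meaningful under
the caller's RCU read lock):

    struct lruvec *lruvec;
    unsigned long token;
    bool workingset;

    rcu_read_lock();
    if (lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset)) {
        /* recent eviction: lruvec, token and workingset are filled in */
    }
    rcu_read_unlock();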

Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
---
 mm/workingset.c | 46 ++++++++++++++++------------------------------
 1 file changed, 16 insertions(+), 30 deletions(-)

diff --git a/mm/workingset.c b/mm/workingset.c
index 90ae785d4c9c..5796e927e6d7 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -257,59 +257,46 @@ static void *lru_gen_eviction(struct folio *folio)
 
 /*
  * Tests if the shadow entry is for a folio that was recently evicted.
- * Fills in @memcgid, @pglist_data, @token, @workingset with the values
- * unpacked from shadow.
+ * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
  */
-static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid,
-		struct pglist_data **pgdat, unsigned long *token, bool *workingset)
+static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
+				unsigned long *token, bool *workingset)
 {
-	struct mem_cgroup *eviction_memcg;
-	struct lruvec *lruvec;
-	struct lru_gen_folio *lrugen;
+	int memcg_id;
 	unsigned long min_seq;
+	struct mem_cgroup *memcg;
+	struct pglist_data *pgdat;
 
-	unpack_shadow(shadow, memcgid, pgdat, token, workingset);
-	eviction_memcg = mem_cgroup_from_id(*memcgid);
+	unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);
 
-	lruvec = mem_cgroup_lruvec(eviction_memcg, *pgdat);
-	lrugen = &lruvec->lrugen;
+	memcg = mem_cgroup_from_id(memcg_id);
+	*lruvec = mem_cgroup_lruvec(memcg, pgdat);
 
-	min_seq = READ_ONCE(lrugen->min_seq[file]);
+	min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);
 	return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH));
 }
 
 static void lru_gen_refault(struct folio *folio, void *shadow)
 {
 	int hist, tier, refs;
-	int memcg_id;
 	bool workingset;
 	unsigned long token;
-	unsigned long min_seq;
 	struct lruvec *lruvec;
 	struct lru_gen_folio *lrugen;
-	struct mem_cgroup *memcg;
-	struct pglist_data *pgdat;
 	int type = folio_is_file_lru(folio);
 	int delta = folio_nr_pages(folio);
 
 	rcu_read_lock();
 
-	if (!lru_gen_test_recent(shadow, type, &memcg_id, &pgdat, &token,
-			&workingset))
+	if (!lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset))
 		goto unlock;
 
-	memcg = folio_memcg_rcu(folio);
-	if (memcg_id != mem_cgroup_id(memcg))
+	if (lruvec != folio_lruvec(folio))
 		goto unlock;
 
-	if (pgdat != folio_pgdat(folio))
-		goto unlock;
-
-	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 	lrugen = &lruvec->lrugen;
-	min_seq = READ_ONCE(lrugen->min_seq[type]);
 
-	hist = lru_hist_from_seq(min_seq);
+	hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
 	/* see the comment in folio_lru_refs() */
 	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
 	tier = lru_tier_from_refs(refs);
@@ -339,8 +326,8 @@ static void *lru_gen_eviction(struct folio *folio)
 	return NULL;
 }
 
-static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid,
-		struct pglist_data **pgdat, unsigned long *token, bool *workingset)
+static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
+				unsigned long *token, bool *workingset)
 {
 	return false;
 }
@@ -435,8 +422,7 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
 	unsigned long eviction;
 
 	if (lru_gen_enabled())
-		return lru_gen_test_recent(shadow, file, &memcgid, &pgdat, &eviction,
-			workingset);
+		return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset);
 
 	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
 	eviction <<= bucket_order;
-- 
2.40.1.698.g37aff9b760-goog




* Re: [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap
  2023-05-22 11:20 [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap T.J. Alumbaugh
                   ` (2 preceding siblings ...)
  2023-05-22 11:20 ` [PATCH mm-unstable 4/4] mm: multi-gen LRU: cleanup lru_gen_test_recent() T.J. Alumbaugh
@ 2023-05-22 11:33 ` David Hildenbrand
  3 siblings, 0 replies; 5+ messages in thread
From: David Hildenbrand @ 2023-05-22 11:33 UTC (permalink / raw)
  To: T.J. Alumbaugh, Andrew Morton
  Cc: Yuanchu Xie, Yu Zhao, linux-mm, linux-kernel

On 22.05.23 13:20, T.J. Alumbaugh wrote:
> Use DECLARE_BITMAP macro when possible.
> 
> Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
> ---
>   mm/vmscan.c | 2 +-
>   1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 4637f6462e9c..cf18873a36b9 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4144,7 +4144,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
>   	unsigned long next;
>   	unsigned long addr;
>   	struct vm_area_struct *vma;
> -	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)];
> +	DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
>   	unsigned long first = -1;
>   	struct lru_gen_mm_walk *walk = args->private;
>   

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Thanks,

David / dhildenb



