* [PATCH 0/2] Support kshrinkd
@ 2024-02-19 14:17 lipeifeng
2024-02-19 14:17 ` [PATCH 2/2] mm: support kshrinkd lipeifeng
` (3 more replies)
0 siblings, 4 replies; 13+ messages in thread
From: lipeifeng @ 2024-02-19 14:17 UTC (permalink / raw)
To: lipeifeng, akpm, david, osalvador; +Cc: linux-mm, linux-kernel
From: lipeifeng <lipeifeng@oppo.com>
Commit 6d4675e60135 ("mm: don't be stuck to rmap lock on reclaim path")
keeps the reclaim path from blocking on a contended rmap lock. But it
can leave some folios on the LRU out of aging order: folios whose rmap
lock is contended during rmap_walk are put back at the head of the LRU
in shrink_folio_list, even if the folios are very cold.

This patchset adds a new kthread, kshrinkd, to reclaim the folios whose
rmap lock was contended in rmap_walk during shrink_folio_list, instead
of putting them back, so that the LRU ordering is preserved.
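In short, the series changes the contended-folio path in
shrink_folio_list roughly as follows; this is a condensed sketch pieced
together from the diffs in this series, not the literal code:

    /* folio_check_references(), with both patches applied */
    referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
                                       &vm_flags, sc->rw_try_lock);
    if (referenced_ptes == -1) {            /* rmap trylock contended */
        if (sc->need_kshrinkd && folio_pgdat(folio)->kshrinkd)
            return FOLIOREF_LOCK_CONTENDED; /* defer to kshrinkd */
        return FOLIOREF_KEEP;               /* old behaviour: rotate */
    }

    /*
     * shrink_folio_list(): contended folios are spliced onto a per-node
     * list and kshrinkd is woken; kshrinkd retries them with
     * rw_try_lock = 0, i.e. it may sleep on the rmap lock.
     */
    if (!list_empty(&contended_folios)) {
        spin_lock_irq(&pgdat->kf_lock);
        list_splice(&contended_folios, &pgdat->kshrinkd_folios);
        spin_unlock_irq(&pgdat->kf_lock);
        wakeup_kshrinkd(pgdat);
    }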
lipeifeng (2):
mm/rmap: support folio_referenced to control if try_lock in rmap_walk
mm: support kshrinkd
include/linux/mmzone.h | 6 ++
include/linux/rmap.h | 5 +-
include/linux/swap.h | 3 +
include/linux/vm_event_item.h | 2 +
mm/memory_hotplug.c | 2 +
mm/rmap.c | 5 +-
mm/vmscan.c | 205 ++++++++++++++++++++++++++++++++++++++++--
mm/vmstat.c | 2 +
8 files changed, 221 insertions(+), 9 deletions(-)
--
2.7.4
* [PATCH 2/2] mm: support kshrinkd
2024-02-19 14:17 [PATCH 0/2] Support kshrinkd lipeifeng
@ 2024-02-19 14:17 ` lipeifeng
2024-02-20 2:11 ` 李培锋
2024-02-19 16:51 ` [PATCH 0/2] Support kshrinkd Matthew Wilcox
` (2 subsequent siblings)
3 siblings, 1 reply; 13+ messages in thread
From: lipeifeng @ 2024-02-19 14:17 UTC (permalink / raw)
To: lipeifeng, akpm, david, osalvador; +Cc: linux-mm, linux-kernel
From: lipeifeng <lipeifeng@oppo.com>
Commit 6d4675e60135 ("mm: don't be stuck to rmap lock on reclaim path")
keeps the reclaim path from blocking on a contended rmap lock. But it
can leave some folios on the LRU out of aging order: a folio whose rmap
lock is contended during rmap_walk is put back at the head of the LRU
in shrink_folio_list, even if the folio is very cold.

A 300-hour monkey test on a phone shows that almost one-third of the
contended folios can be freed successfully on the next attempt, so
putting those folios back at the head of the LRU breaks the LRU
ordering:
- pgsteal_kshrinkd 262577
- pgscan_kshrinkd 795503

For the above reason, this patch adds a new kthread, kshrinkd, to
reclaim the folios whose rmap lock was contended in rmap_walk during
shrink_folio_list, instead of putting them back, so that the LRU
ordering is preserved.
Signed-off-by: lipeifeng <lipeifeng@oppo.com>
---
include/linux/mmzone.h | 6 ++
include/linux/swap.h | 3 +
include/linux/vm_event_item.h | 2 +
mm/memory_hotplug.c | 2 +
mm/vmscan.c | 189 +++++++++++++++++++++++++++++++++++++++++-
mm/vmstat.c | 2 +
6 files changed, 201 insertions(+), 3 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a497f18..83d7202 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1329,6 +1329,12 @@ typedef struct pglist_data {
int kswapd_failures; /* Number of 'reclaimed == 0' runs */
+ struct list_head kshrinkd_folios; /* rmap_walk contended folios list*/
+ spinlock_t kf_lock; /* Protect kshrinkd_folios list*/
+
+ struct task_struct *kshrinkd; /* reclaim kshrinkd_folios*/
+ wait_queue_head_t kshrinkd_wait;
+
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4db00dd..155fcb6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -435,6 +435,9 @@ void check_move_unevictable_folios(struct folio_batch *fbatch);
extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);
+extern void kshrinkd_run(int nid);
+extern void kshrinkd_stop(int nid);
+
#ifdef CONFIG_SWAP
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 747943b..ee95ab1 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -38,9 +38,11 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGLAZYFREED,
PGREFILL,
PGREUSE,
+ PGSTEAL_KSHRINKD,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSTEAL_KHUGEPAGED,
+ PGSCAN_KSHRINKD,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2189099..1b6c4c6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1209,6 +1209,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
kswapd_run(nid);
kcompactd_run(nid);
+ kshrinkd_run(nid);
writeback_set_ratelimit();
@@ -2092,6 +2093,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
}
if (arg.status_change_nid >= 0) {
+ kshrinkd_stop(node);
kcompactd_stop(node);
kswapd_stop(node);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0296d48..63e4fd4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -139,6 +139,9 @@ struct scan_control {
/* if try_lock in rmap_walk */
unsigned int rw_try_lock:1;
+ /* need kshrinkd to reclaim if rwc trylock contended*/
+ unsigned int need_kshrinkd:1;
+
/* Allocation order */
s8 order;
@@ -190,6 +193,17 @@ struct scan_control {
*/
int vm_swappiness = 60;
+/*
+ * Wake up kshrinkd to reclaim folios whose rmap lock was contended
+ * in rmap_walk during shrink_folio_list, instead of putting them
+ * back at the head of the LRU, to preserve LRU ordering.
+ */
+static void wakeup_kshrinkd(struct pglist_data *pgdat)
+{
+ if (likely(pgdat->kshrinkd))
+ wake_up_interruptible(&pgdat->kshrinkd_wait);
+}
+
#ifdef CONFIG_MEMCG
/* Returns true for reclaim through cgroup limits or cgroup interfaces. */
@@ -821,6 +835,7 @@ enum folio_references {
FOLIOREF_RECLAIM_CLEAN,
FOLIOREF_KEEP,
FOLIOREF_ACTIVATE,
+ FOLIOREF_LOCK_CONTENDED,
};
static enum folio_references folio_check_references(struct folio *folio,
@@ -841,8 +856,12 @@ static enum folio_references folio_check_references(struct folio *folio,
return FOLIOREF_ACTIVATE;
/* rmap lock contention: rotate */
- if (referenced_ptes == -1)
- return FOLIOREF_KEEP;
+ if (referenced_ptes == -1) {
+ if (sc->need_kshrinkd && folio_pgdat(folio)->kshrinkd)
+ return FOLIOREF_LOCK_CONTENDED;
+ else
+ return FOLIOREF_KEEP;
+ }
if (referenced_ptes) {
/*
@@ -1012,6 +1031,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
LIST_HEAD(ret_folios);
LIST_HEAD(free_folios);
LIST_HEAD(demote_folios);
+ LIST_HEAD(contended_folios);
unsigned int nr_reclaimed = 0;
unsigned int pgactivate = 0;
bool do_demote_pass;
@@ -1028,6 +1048,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
enum folio_references references = FOLIOREF_RECLAIM;
bool dirty, writeback;
unsigned int nr_pages;
+ bool lock_contended = false;
cond_resched();
@@ -1169,6 +1190,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
case FOLIOREF_KEEP:
stat->nr_ref_keep += nr_pages;
goto keep_locked;
+ case FOLIOREF_LOCK_CONTENDED:
+ lock_contended = true;
+ goto keep_locked;
case FOLIOREF_RECLAIM:
case FOLIOREF_RECLAIM_CLEAN:
; /* try to reclaim the folio below */
@@ -1449,7 +1473,10 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
keep_locked:
folio_unlock(folio);
keep:
- list_add(&folio->lru, &ret_folios);
+ if (unlikely(lock_contended))
+ list_add(&folio->lru, &contended_folios);
+ else
+ list_add(&folio->lru, &ret_folios);
VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
folio_test_unevictable(folio), folio);
}
@@ -1491,6 +1518,14 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
free_unref_page_list(&free_folios);
list_splice(&ret_folios, folio_list);
+
+ if (!list_empty(&contended_folios)) {
+ spin_lock_irq(&pgdat->kf_lock);
+ list_splice(&contended_folios, &pgdat->kshrinkd_folios);
+ spin_unlock_irq(&pgdat->kf_lock);
+ wakeup_kshrinkd(pgdat);
+ }
+
count_vm_events(PGACTIVATE, pgactivate);
if (plug)
@@ -1505,6 +1540,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
.rw_try_lock = 1,
+ .need_kshrinkd = 0,
};
struct reclaim_stat stat;
unsigned int nr_reclaimed;
@@ -2101,6 +2137,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
.may_swap = 1,
.no_demotion = 1,
.rw_try_lock = 1,
+ .need_kshrinkd = 0,
};
nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
@@ -5448,6 +5485,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
.reclaim_idx = MAX_NR_ZONES - 1,
.gfp_mask = GFP_KERNEL,
.rw_try_lock = 1,
+ .need_kshrinkd = 0,
};
buf = kvmalloc(len + 1, GFP_KERNEL);
@@ -6421,6 +6459,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.may_unmap = 1,
.may_swap = 1,
.rw_try_lock = 1,
+ .need_kshrinkd = 1,
};
/*
@@ -6467,6 +6506,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
.rw_try_lock = 1,
+ .need_kshrinkd = 0,
};
WARN_ON_ONCE(!current->reclaim_state);
@@ -6512,6 +6552,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
.proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
.rw_try_lock = 1,
+ .need_kshrinkd = 0,
};
/*
* Traverse the ZONELIST_FALLBACK zonelist of the current node to put
@@ -6774,6 +6815,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
.order = order,
.may_unmap = 1,
.rw_try_lock = 1,
+ .need_kshrinkd = 1,
};
set_task_reclaim_state(current, &sc.reclaim_state);
@@ -7234,6 +7276,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
.may_swap = 1,
.hibernation_mode = 1,
.rw_try_lock = 1,
+ .need_kshrinkd = 0,
};
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
unsigned long nr_reclaimed;
@@ -7304,6 +7347,145 @@ static int __init kswapd_init(void)
module_init(kswapd_init)
+static int kshrinkd_should_run(pg_data_t *pgdat)
+{
+ int should_run;
+
+ spin_lock_irq(&pgdat->kf_lock);
+ should_run = !list_empty(&pgdat->kshrinkd_folios);
+ spin_unlock_irq(&pgdat->kf_lock);
+
+ return should_run;
+}
+
+static unsigned long kshrinkd_reclaim_folios(struct list_head *folio_list,
+ struct pglist_data *pgdat)
+{
+ struct reclaim_stat dummy_stat;
+ unsigned int nr_reclaimed = 0;
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .may_writepage = 1,
+ .may_unmap = 1,
+ .may_swap = 1,
+ .no_demotion = 1,
+ .rw_try_lock = 0,
+ .need_kshrinkd = 0,
+ };
+
+ if (list_empty(folio_list))
+ return nr_reclaimed;
+
+ nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
+
+ return nr_reclaimed;
+}
+
+/*
+ * The background kshrinkd daemon, started as a kernel thread
+ * from the init process.
+ *
+ * kshrinkd reclaims folios whose rmap lock was contended in rmap_walk
+ * during shrink_folio_list, instead of putting them back at the head
+ * of the LRU directly, to preserve LRU ordering.
+ */
+
+static int kshrinkd(void *p)
+{
+ pg_data_t *pgdat;
+ LIST_HEAD(tmp_contended_folios);
+
+ pgdat = (pg_data_t *)p;
+
+ current->flags |= PF_MEMALLOC | PF_KSWAPD;
+ set_freezable();
+
+ while (!kthread_should_stop()) {
+ unsigned long nr_reclaimed = 0;
+ unsigned long nr_putback = 0;
+
+ wait_event_freezable(pgdat->kshrinkd_wait,
+ kshrinkd_should_run(pgdat));
+
+ /* splice rmap_walk contended folios to tmp-list */
+ spin_lock_irq(&pgdat->kf_lock);
+ list_splice(&pgdat->kshrinkd_folios, &tmp_contended_folios);
+ INIT_LIST_HEAD(&pgdat->kshrinkd_folios);
+ spin_unlock_irq(&pgdat->kf_lock);
+
+ /* reclaim rmap_walk contended folios */
+ nr_reclaimed = kshrinkd_reclaim_folios(&tmp_contended_folios, pgdat);
+ __count_vm_events(PGSTEAL_KSHRINKD, nr_reclaimed);
+
+ /* putback the folios which failed to reclaim to lru */
+ while (!list_empty(&tmp_contended_folios)) {
+ struct folio *folio = lru_to_folio(&tmp_contended_folios);
+
+ nr_putback += folio_nr_pages(folio);
+ list_del(&folio->lru);
+ folio_putback_lru(folio);
+ }
+
+ __count_vm_events(PGSCAN_KSHRINKD, nr_reclaimed + nr_putback);
+ }
+
+ current->flags &= ~(PF_MEMALLOC | PF_KSWAPD);
+
+ return 0;
+}
+
+/*
+ * This kshrinkd start function will be called by init and node-hot-add.
+ */
+void kshrinkd_run(int nid)
+{
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+ if (pgdat->kshrinkd)
+ return;
+
+ pgdat->kshrinkd = kthread_run(kshrinkd, pgdat, "kshrinkd%d", nid);
+ if (IS_ERR(pgdat->kshrinkd)) {
+ /* failure to start kshrinkd */
+ WARN_ON_ONCE(system_state < SYSTEM_RUNNING);
+ pr_err("Failed to start kshrinkd on node %d\n", nid);
+ pgdat->kshrinkd = NULL;
+ }
+}
+
+/*
+ * Called by memory hotplug when all memory in a node is offlined. Caller must
+ * be holding mem_hotplug_begin/done().
+ */
+void kshrinkd_stop(int nid)
+{
+ struct task_struct *kshrinkd = NODE_DATA(nid)->kshrinkd;
+
+ if (kshrinkd) {
+ kthread_stop(kshrinkd);
+ NODE_DATA(nid)->kshrinkd = NULL;
+ }
+}
+
+static int __init kshrinkd_init(void)
+{
+ int nid;
+
+ for_each_node_state(nid, N_MEMORY) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+ spin_lock_init(&pgdat->kf_lock);
+ init_waitqueue_head(&pgdat->kshrinkd_wait);
+ INIT_LIST_HEAD(&pgdat->kshrinkd_folios);
+
+ kshrinkd_run(nid);
+ }
+
+ return 0;
+}
+
+module_init(kshrinkd_init)
+
#ifdef CONFIG_NUMA
/*
* Node reclaim mode
@@ -7393,6 +7575,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
.may_swap = 1,
.reclaim_idx = gfp_zone(gfp_mask),
.rw_try_lock = 1,
+ .need_kshrinkd = 1,
};
unsigned long pflags;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index db79935..76d8a3b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1279,9 +1279,11 @@ const char * const vmstat_text[] = {
"pgrefill",
"pgreuse",
+ "pgsteal_kshrinkd",
"pgsteal_kswapd",
"pgsteal_direct",
"pgsteal_khugepaged",
+ "pgscan_kshrinkd",
"pgscan_kswapd",
"pgscan_direct",
"pgscan_khugepaged",
--
2.7.4
* Re: [PATCH 0/2] Support kshrinkd
2024-02-19 14:17 [PATCH 0/2] Support kshrinkd lipeifeng
2024-02-19 14:17 ` [PATCH 2/2] mm: support kshrinkd lipeifeng
@ 2024-02-19 16:51 ` Matthew Wilcox
2024-02-20 2:04 ` 李培锋
[not found] ` <20240219141703.3851-2-lipeifeng@oppo.com>
2024-02-20 2:09 ` [PATCH 0/2] Support kshrinkd 李培锋
3 siblings, 1 reply; 13+ messages in thread
From: Matthew Wilcox @ 2024-02-19 16:51 UTC (permalink / raw)
To: lipeifeng; +Cc: akpm, david, osalvador, linux-mm, linux-kernel
On Mon, Feb 19, 2024 at 10:17:01PM +0800, lipeifeng@oppo.com wrote:
> Commit 6d4675e60135 ("mm: don't be stuck to rmap lock on reclaim path")
> keeps the reclaim path from blocking on a contended rmap lock. But it
> can leave some folios on the LRU out of aging order: folios whose rmap
> lock is contended during rmap_walk are put back at the head of the LRU
> in shrink_folio_list, even if the folios are very cold.
>
> This patchset adds a new kthread, kshrinkd, to reclaim the folios whose
> rmap lock was contended in rmap_walk during shrink_folio_list, instead
> of putting them back, so that the LRU ordering is preserved.
Patch 1/2 didn't make it to my inbox or to lore. But you should talk
about the real world consequences of this in the cover letter. What do
we observe if this problem happens? How much extra performance will we
gain by applying this patch?
* Re: [PATCH 1/2] mm/rmap: support folio_referenced to control if try_lock in rmap_walk
[not found] ` <20240219141703.3851-2-lipeifeng@oppo.com>
@ 2024-02-20 1:42 ` 李培锋
2024-02-20 3:01 ` Barry Song
0 siblings, 1 reply; 13+ messages in thread
From: 李培锋 @ 2024-02-20 1:42 UTC (permalink / raw)
To: akpm, david, osalvador, willy
Cc: linux-mm, linux-kernel, tkjos, surenb, gregkh, v-songbaohua
add more experts from Linux and Google.
On 2024/2/19 22:17, lipeifeng@oppo.com wrote:
> From: lipeifeng <lipeifeng@oppo.com>
>
> This patch lets the callers of folio_referenced control the behavior
> of rmap_walk, so that some callers can block on the rmap lock in
> rmap_walk instead of only try-locking it when using folio_referenced.
>
> Signed-off-by: lipeifeng <lipeifeng@oppo.com>
> ---
> include/linux/rmap.h | 5 +++--
> mm/rmap.c | 5 +++--
> mm/vmscan.c | 16 ++++++++++++++--
> 3 files changed, 20 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index b7944a8..846b261 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -623,7 +623,8 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
> * Called from mm/vmscan.c to handle paging out
> */
> int folio_referenced(struct folio *, int is_locked,
> - struct mem_cgroup *memcg, unsigned long *vm_flags);
> + struct mem_cgroup *memcg, unsigned long *vm_flags,
> + unsigned int rw_try_lock);
>
> void try_to_migrate(struct folio *folio, enum ttu_flags flags);
> void try_to_unmap(struct folio *, enum ttu_flags flags);
> @@ -739,7 +740,7 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
>
> static inline int folio_referenced(struct folio *folio, int is_locked,
> struct mem_cgroup *memcg,
> - unsigned long *vm_flags)
> + unsigned long *vm_flags, unsigned int rw_try_lock)
> {
> *vm_flags = 0;
> return 0;
> diff --git a/mm/rmap.c b/mm/rmap.c
> index f5d43ed..15d1fba 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -952,6 +952,7 @@ static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
> * @is_locked: Caller holds lock on the folio.
> * @memcg: target memory cgroup
> * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
> + * @rw_try_lock: if try_lock in rmap_walk
> *
> * Quick test_and_clear_referenced for all mappings of a folio,
> *
> @@ -959,7 +960,7 @@ static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
> * the function bailed out due to rmap lock contention.
> */
> int folio_referenced(struct folio *folio, int is_locked,
> - struct mem_cgroup *memcg, unsigned long *vm_flags)
> + struct mem_cgroup *memcg, unsigned long *vm_flags, unsigned int rw_try_lock)
> {
> int we_locked = 0;
> struct folio_referenced_arg pra = {
> @@ -970,7 +971,7 @@ int folio_referenced(struct folio *folio, int is_locked,
> .rmap_one = folio_referenced_one,
> .arg = (void *)&pra,
> .anon_lock = folio_lock_anon_vma_read,
> - .try_lock = true,
> + .try_lock = rw_try_lock ? true : false,
> .invalid_vma = invalid_folio_referenced_vma,
> };
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 4f9c854..0296d48 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -136,6 +136,9 @@ struct scan_control {
> /* Always discard instead of demoting to lower tier memory */
> unsigned int no_demotion:1;
>
> + /* if try_lock in rmap_walk */
> + unsigned int rw_try_lock:1;
> +
> /* Allocation order */
> s8 order;
>
> @@ -827,7 +830,7 @@ static enum folio_references folio_check_references(struct folio *folio,
> unsigned long vm_flags;
>
> referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
> - &vm_flags);
> + &vm_flags, sc->rw_try_lock);
> referenced_folio = folio_test_clear_referenced(folio);
>
> /*
> @@ -1501,6 +1504,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
> struct scan_control sc = {
> .gfp_mask = GFP_KERNEL,
> .may_unmap = 1,
> + .rw_try_lock = 1,
> };
> struct reclaim_stat stat;
> unsigned int nr_reclaimed;
> @@ -2038,7 +2042,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
>
> /* Referenced or rmap lock contention: rotate */
> if (folio_referenced(folio, 0, sc->target_mem_cgroup,
> - &vm_flags) != 0) {
> + &vm_flags, sc->rw_try_lock) != 0) {
> /*
> * Identify referenced, file-backed active folios and
> * give them one more trip around the active list. So
> @@ -2096,6 +2100,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
> .may_unmap = 1,
> .may_swap = 1,
> .no_demotion = 1,
> + .rw_try_lock = 1,
> };
>
> nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
> @@ -5442,6 +5447,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
> .may_swap = true,
> .reclaim_idx = MAX_NR_ZONES - 1,
> .gfp_mask = GFP_KERNEL,
> + .rw_try_lock = 1,
> };
>
> buf = kvmalloc(len + 1, GFP_KERNEL);
> @@ -6414,6 +6420,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> .may_writepage = !laptop_mode,
> .may_unmap = 1,
> .may_swap = 1,
> + .rw_try_lock = 1,
> };
>
> /*
> @@ -6459,6 +6466,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
> .may_unmap = 1,
> .reclaim_idx = MAX_NR_ZONES - 1,
> .may_swap = !noswap,
> + .rw_try_lock = 1,
> };
>
> WARN_ON_ONCE(!current->reclaim_state);
> @@ -6503,6 +6511,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
> .may_unmap = 1,
> .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
> .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
> + .rw_try_lock = 1,
> };
> /*
> * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
> @@ -6764,6 +6773,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
> .gfp_mask = GFP_KERNEL,
> .order = order,
> .may_unmap = 1,
> + .rw_try_lock = 1,
> };
>
> set_task_reclaim_state(current, &sc.reclaim_state);
> @@ -7223,6 +7233,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
> .may_unmap = 1,
> .may_swap = 1,
> .hibernation_mode = 1,
> + .rw_try_lock = 1,
> };
> struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
> unsigned long nr_reclaimed;
> @@ -7381,6 +7392,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
> .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
> .may_swap = 1,
> .reclaim_idx = gfp_zone(gfp_mask),
> + .rw_try_lock = 1,
> };
> unsigned long pflags;
>
* Re: [PATCH 0/2] Support kshrinkd
2024-02-19 16:51 ` [PATCH 0/2] Support kshrinkd Matthew Wilcox
@ 2024-02-20 2:04 ` 李培锋
2024-02-20 2:55 ` Matthew Wilcox
0 siblings, 1 reply; 13+ messages in thread
From: 李培锋 @ 2024-02-20 2:04 UTC (permalink / raw)
To: Matthew Wilcox
Cc: akpm, david, osalvador, linux-mm, linux-kernel, v-songbaohua,
gregkh, tkjos
On 2024/2/20 0:51, Matthew Wilcox wrote:
> On Mon, Feb 19, 2024 at 10:17:01PM +0800, lipeifeng@oppo.com wrote:
>> Commit 6d4675e60135 ("mm: don't be stuck to rmap lock on reclaim path")
>> keeps the reclaim path from blocking on a contended rmap lock. But it
>> can leave some folios on the LRU out of aging order: folios whose rmap
>> lock is contended during rmap_walk are put back at the head of the LRU
>> in shrink_folio_list, even if the folios are very cold.
>>
>> This patchset adds a new kthread, kshrinkd, to reclaim the folios whose
>> rmap lock was contended in rmap_walk during shrink_folio_list, instead
>> of putting them back, so that the LRU ordering is preserved.
> Patch 1/2 didn't make it to my inbox or to lore.
Hi, I have resent it to you.
> But you should talk
> about the real world consequences of this in the cover letter. What do
> we observe if this problem happens? How much extra performance will we
> gain by applying this patch?
Hi:

A 300-hour monkey test on a phone with 16GB of RAM shows that almost
one-third of the contended folios can be freed successfully on the next
attempt; putting those folios back at the head of the LRU breaks the
ordering of the inactive LRU.
- pgsteal_kshrinkd 262577
- pgscan_kshrinkd 795503

"pgsteal_kshrinkd" is the number of contended folios that could be
freed successfully but, without this series, would have been put back
at the head of the inactive LRU: 262577 folios, i.e. more than 1GB at
4KB per page.

On a phone with 16GB of RAM the inactive LRUs total around 4.5GB, so
putting that many contended folios back at the head would noticeably
distort the inactive-LRU ordering.
- nr_inactive_anon 1020953
- nr_inactive_file 204801

The patchset has been merged in the Google kernel/common tree since
android12-5.10 and android13-5.15, and has shipped on more than 100
million Android phones for more than 1.5 years. Because of GKI, the
patches were implemented there in the form of hooks; the changes merged
in the Google tree are:
https://android-review.googlesource.com/c/kernel/common/+/2163904
https://android-review.googlesource.com/c/kernel/common/+/2191343
https://android-review.googlesource.com/c/kernel/common/+/2550490
https://android-review.googlesource.com/c/kernel/common/+/2318311
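For reference, with the series applied the two new counters show up in
/proc/vmstat; a minimal user-space check could look like the snippet
below (illustrative only, not part of the series):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            /* print only the counters added by this series */
            if (!strncmp(line, "pgsteal_kshrinkd", 16) ||
                !strncmp(line, "pgscan_kshrinkd", 15))
                fputs(line, stdout);
        }
        fclose(f);
        return 0;
    }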
* Re: [PATCH 0/2] Support kshrinkd
2024-02-19 14:17 [PATCH 0/2] Support kshrinkd lipeifeng
` (2 preceding siblings ...)
[not found] ` <20240219141703.3851-2-lipeifeng@oppo.com>
@ 2024-02-20 2:09 ` 李培锋
3 siblings, 0 replies; 13+ messages in thread
From: 李培锋 @ 2024-02-20 2:09 UTC (permalink / raw)
To: akpm, david, osalvador, Matthew Wilcox
Cc: linux-mm, linux-kernel, tkjos, gregkh, v-songbaohua, v-songbaohua
add experts from Linux and Google.
On 2024/2/19 22:17, lipeifeng@oppo.com wrote:
> From: lipeifeng <lipeifeng@oppo.com>
>
> Commit 6d4675e60135 ("mm: don't be stuck to rmap lock on reclaim path")
> keeps the reclaim path from blocking on a contended rmap lock. But it
> can leave some folios on the LRU out of aging order: folios whose rmap
> lock is contended during rmap_walk are put back at the head of the LRU
> in shrink_folio_list, even if the folios are very cold.
>
> This patchset adds a new kthread, kshrinkd, to reclaim the folios whose
> rmap lock was contended in rmap_walk during shrink_folio_list, instead
> of putting them back, so that the LRU ordering is preserved.
>
> lipeifeng (2):
> mm/rmap: support folio_referenced to control if try_lock in rmap_walk
> mm: support kshrinkd
>
> include/linux/mmzone.h | 6 ++
> include/linux/rmap.h | 5 +-
> include/linux/swap.h | 3 +
> include/linux/vm_event_item.h | 2 +
> mm/memory_hotplug.c | 2 +
> mm/rmap.c | 5 +-
> mm/vmscan.c | 205 ++++++++++++++++++++++++++++++++++++++++--
> mm/vmstat.c | 2 +
> 8 files changed, 221 insertions(+), 9 deletions(-)
>
* Re: [PATCH 2/2] mm: support kshrinkd
2024-02-19 14:17 ` [PATCH 2/2] mm: support kshrinkd lipeifeng
@ 2024-02-20 2:11 ` 李培锋
2024-02-20 3:19 ` Barry Song
0 siblings, 1 reply; 13+ messages in thread
From: 李培锋 @ 2024-02-20 2:11 UTC (permalink / raw)
To: akpm, david, osalvador, Matthew Wilcox
Cc: linux-mm, linux-kernel, tkjos, gregkh, v-songbaohua, surenb
add experts from Linux and Google.
On 2024/2/19 22:17, lipeifeng@oppo.com wrote:
> From: lipeifeng <lipeifeng@oppo.com>
>
> Commit 6d4675e60135 ("mm: don't be stuck to rmap lock on reclaim path")
> keeps the reclaim path from blocking on a contended rmap lock. But it
> can leave some folios on the LRU out of aging order: a folio whose rmap
> lock is contended during rmap_walk is put back at the head of the LRU
> in shrink_folio_list, even if the folio is very cold.
>
> A 300-hour monkey test on a phone shows that almost one-third of the
> contended folios can be freed successfully on the next attempt, so
> putting those folios back at the head of the LRU breaks the LRU
> ordering:
> - pgsteal_kshrinkd 262577
> - pgscan_kshrinkd 795503
>
> For the above reason, this patch adds a new kthread, kshrinkd, to
> reclaim the folios whose rmap lock was contended in rmap_walk during
> shrink_folio_list, instead of putting them back, so that the LRU
> ordering is preserved.
>
> Signed-off-by: lipeifeng <lipeifeng@oppo.com>
> ---
> include/linux/mmzone.h | 6 ++
> include/linux/swap.h | 3 +
> include/linux/vm_event_item.h | 2 +
> mm/memory_hotplug.c | 2 +
> mm/vmscan.c | 189 +++++++++++++++++++++++++++++++++++++++++-
> mm/vmstat.c | 2 +
> 6 files changed, 201 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index a497f18..83d7202 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1329,6 +1329,12 @@ typedef struct pglist_data {
>
> int kswapd_failures; /* Number of 'reclaimed == 0' runs */
>
> + struct list_head kshrinkd_folios; /* rmap_walk contended folios list*/
> + spinlock_t kf_lock; /* Protect kshrinkd_folios list*/
> +
> + struct task_struct *kshrinkd; /* reclaim kshrinkd_folios*/
> + wait_queue_head_t kshrinkd_wait;
> +
> #ifdef CONFIG_COMPACTION
> int kcompactd_max_order;
> enum zone_type kcompactd_highest_zoneidx;
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 4db00dd..155fcb6 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -435,6 +435,9 @@ void check_move_unevictable_folios(struct folio_batch *fbatch);
> extern void __meminit kswapd_run(int nid);
> extern void __meminit kswapd_stop(int nid);
>
> +extern void kshrinkd_run(int nid);
> +extern void kshrinkd_stop(int nid);
> +
> #ifdef CONFIG_SWAP
>
> int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
> diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
> index 747943b..ee95ab1 100644
> --- a/include/linux/vm_event_item.h
> +++ b/include/linux/vm_event_item.h
> @@ -38,9 +38,11 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
> PGLAZYFREED,
> PGREFILL,
> PGREUSE,
> + PGSTEAL_KSHRINKD,
> PGSTEAL_KSWAPD,
> PGSTEAL_DIRECT,
> PGSTEAL_KHUGEPAGED,
> + PGSCAN_KSHRINKD,
> PGSCAN_KSWAPD,
> PGSCAN_DIRECT,
> PGSCAN_KHUGEPAGED,
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 2189099..1b6c4c6 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1209,6 +1209,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
>
> kswapd_run(nid);
> kcompactd_run(nid);
> + kshrinkd_run(nid);
>
> writeback_set_ratelimit();
>
> @@ -2092,6 +2093,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
> }
>
> if (arg.status_change_nid >= 0) {
> + kshrinkd_stop(node);
> kcompactd_stop(node);
> kswapd_stop(node);
> }
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 0296d48..63e4fd4 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -139,6 +139,9 @@ struct scan_control {
> /* if try_lock in rmap_walk */
> unsigned int rw_try_lock:1;
>
> + /* need kshrinkd to reclaim if rwc trylock contended*/
> + unsigned int need_kshrinkd:1;
> +
> /* Allocation order */
> s8 order;
>
> @@ -190,6 +193,17 @@ struct scan_control {
> */
> int vm_swappiness = 60;
>
> +/*
> + * Wakeup kshrinkd those folios which lock-contended in ramp_walk
> + * during shrink_folio_list, instead of putting back to the head
> + * of LRU, to avoid to break the rules of LRU.
> + */
> +static void wakeup_kshrinkd(struct pglist_data *pgdat)
> +{
> + if (likely(pgdat->kshrinkd))
> + wake_up_interruptible(&pgdat->kshrinkd_wait);
> +}
> +
> #ifdef CONFIG_MEMCG
>
> /* Returns true for reclaim through cgroup limits or cgroup interfaces. */
> @@ -821,6 +835,7 @@ enum folio_references {
> FOLIOREF_RECLAIM_CLEAN,
> FOLIOREF_KEEP,
> FOLIOREF_ACTIVATE,
> + FOLIOREF_LOCK_CONTENDED,
> };
>
> static enum folio_references folio_check_references(struct folio *folio,
> @@ -841,8 +856,12 @@ static enum folio_references folio_check_references(struct folio *folio,
> return FOLIOREF_ACTIVATE;
>
> /* rmap lock contention: rotate */
> - if (referenced_ptes == -1)
> - return FOLIOREF_KEEP;
> + if (referenced_ptes == -1) {
> + if (sc->need_kshrinkd && folio_pgdat(folio)->kshrinkd)
> + return FOLIOREF_LOCK_CONTENDED;
> + else
> + return FOLIOREF_KEEP;
> + }
>
> if (referenced_ptes) {
> /*
> @@ -1012,6 +1031,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> LIST_HEAD(ret_folios);
> LIST_HEAD(free_folios);
> LIST_HEAD(demote_folios);
> + LIST_HEAD(contended_folios);
> unsigned int nr_reclaimed = 0;
> unsigned int pgactivate = 0;
> bool do_demote_pass;
> @@ -1028,6 +1048,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> enum folio_references references = FOLIOREF_RECLAIM;
> bool dirty, writeback;
> unsigned int nr_pages;
> + bool lock_contended = false;
>
> cond_resched();
>
> @@ -1169,6 +1190,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> case FOLIOREF_KEEP:
> stat->nr_ref_keep += nr_pages;
> goto keep_locked;
> + case FOLIOREF_LOCK_CONTENDED:
> + lock_contended = true;
> + goto keep_locked;
> case FOLIOREF_RECLAIM:
> case FOLIOREF_RECLAIM_CLEAN:
> ; /* try to reclaim the folio below */
> @@ -1449,7 +1473,10 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> keep_locked:
> folio_unlock(folio);
> keep:
> - list_add(&folio->lru, &ret_folios);
> + if (unlikely(lock_contended))
> + list_add(&folio->lru, &contended_folios);
> + else
> + list_add(&folio->lru, &ret_folios);
> VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
> folio_test_unevictable(folio), folio);
> }
> @@ -1491,6 +1518,14 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> free_unref_page_list(&free_folios);
>
> list_splice(&ret_folios, folio_list);
> +
> + if (!list_empty(&contended_folios)) {
> + spin_lock_irq(&pgdat->kf_lock);
> + list_splice(&contended_folios, &pgdat->kshrinkd_folios);
> + spin_unlock_irq(&pgdat->kf_lock);
> + wakeup_kshrinkd(pgdat);
> + }
> +
> count_vm_events(PGACTIVATE, pgactivate);
>
> if (plug)
> @@ -1505,6 +1540,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
> .gfp_mask = GFP_KERNEL,
> .may_unmap = 1,
> .rw_try_lock = 1,
> + .need_kshrinkd = 0,
> };
> struct reclaim_stat stat;
> unsigned int nr_reclaimed;
> @@ -2101,6 +2137,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
> .may_swap = 1,
> .no_demotion = 1,
> .rw_try_lock = 1,
> + .need_kshrinkd = 0,
> };
>
> nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
> @@ -5448,6 +5485,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
> .reclaim_idx = MAX_NR_ZONES - 1,
> .gfp_mask = GFP_KERNEL,
> .rw_try_lock = 1,
> + .need_kshrinkd = 0,
> };
>
> buf = kvmalloc(len + 1, GFP_KERNEL);
> @@ -6421,6 +6459,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> .may_unmap = 1,
> .may_swap = 1,
> .rw_try_lock = 1,
> + .need_kshrinkd = 1,
> };
>
> /*
> @@ -6467,6 +6506,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
> .reclaim_idx = MAX_NR_ZONES - 1,
> .may_swap = !noswap,
> .rw_try_lock = 1,
> + .need_kshrinkd = 0,
> };
>
> WARN_ON_ONCE(!current->reclaim_state);
> @@ -6512,6 +6552,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
> .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
> .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
> .rw_try_lock = 1,
> + .need_kshrinkd = 0,
> };
> /*
> * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
> @@ -6774,6 +6815,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
> .order = order,
> .may_unmap = 1,
> .rw_try_lock = 1,
> + .need_kshrinkd = 1,
> };
>
> set_task_reclaim_state(current, &sc.reclaim_state);
> @@ -7234,6 +7276,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
> .may_swap = 1,
> .hibernation_mode = 1,
> .rw_try_lock = 1,
> + .need_kshrinkd = 0,
> };
> struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
> unsigned long nr_reclaimed;
> @@ -7304,6 +7347,145 @@ static int __init kswapd_init(void)
>
> module_init(kswapd_init)
>
> +static int kshrinkd_should_run(pg_data_t *pgdat)
> +{
> + int should_run;
> +
> + spin_lock_irq(&pgdat->kf_lock);
> + should_run = !list_empty(&pgdat->kshrinkd_folios);
> + spin_unlock_irq(&pgdat->kf_lock);
> +
> + return should_run;
> +}
> +
> +static unsigned long kshrinkd_reclaim_folios(struct list_head *folio_list,
> + struct pglist_data *pgdat)
> +{
> + struct reclaim_stat dummy_stat;
> + unsigned int nr_reclaimed = 0;
> + struct scan_control sc = {
> + .gfp_mask = GFP_KERNEL,
> + .may_writepage = 1,
> + .may_unmap = 1,
> + .may_swap = 1,
> + .no_demotion = 1,
> + .rw_try_lock = 0,
> + .need_kshrinkd = 0,
> + };
> +
> + if (list_empty(folio_list))
> + return nr_reclaimed;
> +
> + nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
> +
> + return nr_reclaimed;
> +}
> +
> +/*
> + * The background kshrink daemon, started as a kernel thread
> + * from the init process.
> + *
> + * Kshrinkd is to reclaim the contended-folio in rmap_walk when
> + * shrink_folio_list instead of putting back into the head of LRU
> + * directly, to avoid to break the rules of LRU.
> + */
> +
> +static int kshrinkd(void *p)
> +{
> + pg_data_t *pgdat;
> + LIST_HEAD(tmp_contended_folios);
> +
> + pgdat = (pg_data_t *)p;
> +
> + current->flags |= PF_MEMALLOC | PF_KSWAPD;
> + set_freezable();
> +
> + while (!kthread_should_stop()) {
> + unsigned long nr_reclaimed = 0;
> + unsigned long nr_putback = 0;
> +
> + wait_event_freezable(pgdat->kshrinkd_wait,
> + kshrinkd_should_run(pgdat));
> +
> + /* splice rmap_walk contended folios to tmp-list */
> + spin_lock_irq(&pgdat->kf_lock);
> + list_splice(&pgdat->kshrinkd_folios, &tmp_contended_folios);
> + INIT_LIST_HEAD(&pgdat->kshrinkd_folios);
> + spin_unlock_irq(&pgdat->kf_lock);
> +
> + /* reclaim rmap_walk contended folios */
> + nr_reclaimed = kshrinkd_reclaim_folios(&tmp_contended_folios, pgdat);
> + __count_vm_events(PGSTEAL_KSHRINKD, nr_reclaimed);
> +
> + /* putback the folios which failed to reclaim to lru */
> + while (!list_empty(&tmp_contended_folios)) {
> + struct folio *folio = lru_to_folio(&tmp_contended_folios);
> +
> + nr_putback += folio_nr_pages(folio);
> + list_del(&folio->lru);
> + folio_putback_lru(folio);
> + }
> +
> + __count_vm_events(PGSCAN_KSHRINKD, nr_reclaimed + nr_putback);
> + }
> +
> + current->flags &= ~(PF_MEMALLOC | PF_KSWAPD);
> +
> + return 0;
> +}
> +
> +/*
> + * This kshrinkd start function will be called by init and node-hot-add.
> + */
> +void kshrinkd_run(int nid)
> +{
> + pg_data_t *pgdat = NODE_DATA(nid);
> +
> + if (pgdat->kshrinkd)
> + return;
> +
> + pgdat->kshrinkd = kthread_run(kshrinkd, pgdat, "kshrinkd%d", nid);
> + if (IS_ERR(pgdat->kshrinkd)) {
> + /* failure to start kshrinkd */
> + WARN_ON_ONCE(system_state < SYSTEM_RUNNING);
> + pr_err("Failed to start kshrinkd on node %d\n", nid);
> + pgdat->kshrinkd = NULL;
> + }
> +}
> +
> +/*
> + * Called by memory hotplug when all memory in a node is offlined. Caller must
> + * be holding mem_hotplug_begin/done().
> + */
> +void kshrinkd_stop(int nid)
> +{
> + struct task_struct *kshrinkd = NODE_DATA(nid)->kshrinkd;
> +
> + if (kshrinkd) {
> + kthread_stop(kshrinkd);
> + NODE_DATA(nid)->kshrinkd = NULL;
> + }
> +}
> +
> +static int __init kshrinkd_init(void)
> +{
> + int nid;
> +
> + for_each_node_state(nid, N_MEMORY) {
> + pg_data_t *pgdat = NODE_DATA(nid);
> +
> + spin_lock_init(&pgdat->kf_lock);
> + init_waitqueue_head(&pgdat->kshrinkd_wait);
> + INIT_LIST_HEAD(&pgdat->kshrinkd_folios);
> +
> + kshrinkd_run(nid);
> + }
> +
> + return 0;
> +}
> +
> +module_init(kshrinkd_init)
> +
> #ifdef CONFIG_NUMA
> /*
> * Node reclaim mode
> @@ -7393,6 +7575,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
> .may_swap = 1,
> .reclaim_idx = gfp_zone(gfp_mask),
> .rw_try_lock = 1,
> + .need_kshrinkd = 1,
> };
> unsigned long pflags;
>
> diff --git a/mm/vmstat.c b/mm/vmstat.c
> index db79935..76d8a3b 100644
> --- a/mm/vmstat.c
> +++ b/mm/vmstat.c
> @@ -1279,9 +1279,11 @@ const char * const vmstat_text[] = {
>
> "pgrefill",
> "pgreuse",
> + "pgsteal_kshrinkd",
> "pgsteal_kswapd",
> "pgsteal_direct",
> "pgsteal_khugepaged",
> + "pgscan_kshrinkd",
> "pgscan_kswapd",
> "pgscan_direct",
> "pgscan_khugepaged",
* Re: [PATCH 0/2] Support kshrinkd
2024-02-20 2:04 ` 李培锋
@ 2024-02-20 2:55 ` Matthew Wilcox
2024-02-20 4:14 ` 李培锋
0 siblings, 1 reply; 13+ messages in thread
From: Matthew Wilcox @ 2024-02-20 2:55 UTC (permalink / raw)
To: 李培锋
Cc: akpm, david, osalvador, linux-mm, linux-kernel, v-songbaohua,
gregkh, tkjos
On Tue, Feb 20, 2024 at 10:04:33AM +0800, 李培锋 wrote:
> A 300-hour monkey test on a phone with 16GB of RAM shows that almost
> one-third of the contended folios can be freed successfully on the next
> attempt; putting those folios back at the head of the LRU breaks the
> ordering of the inactive LRU.
You talk about "the rules of inactive LRU" like we care. The LRU is
an approximation at best. What are the *consequences*? Is there a
benchmark that executes more operations per second as a result of
this patch?
* Re: [PATCH 1/2] mm/rmap: support folio_referenced to control if try_lock in rmap_walk
2024-02-20 1:42 ` [PATCH 1/2] mm/rmap: support folio_referenced to control if try_lock in rmap_walk 李培锋
@ 2024-02-20 3:01 ` Barry Song
2024-02-20 4:00 ` 李培锋
0 siblings, 1 reply; 13+ messages in thread
From: Barry Song @ 2024-02-20 3:01 UTC (permalink / raw)
To: 李培锋
Cc: akpm, david, osalvador, willy, linux-mm, linux-kernel, tkjos,
surenb, gregkh, v-songbaohua
Hi Peifeng,
On Tue, Feb 20, 2024 at 2:43 PM 李培锋 <lipeifeng@oppo.com> wrote:
>
> add more experts from Linux and Google.
>
>
> On 2024/2/19 22:17, lipeifeng@oppo.com wrote:
> > From: lipeifeng <lipeifeng@oppo.com>
> >
> > This patch lets the callers of folio_referenced control the behavior
> > of rmap_walk, so that some callers can block on the rmap lock in
> > rmap_walk instead of only try-locking it when using folio_referenced.
Please describe in the commit message what problem the patch is trying
to address and why this modification is needed.
By the way, who sets rw_try_lock to 0, and what is the benefit?
> >
> > Signed-off-by: lipeifeng <lipeifeng@oppo.com>
> > ---
> > include/linux/rmap.h | 5 +++--
> > mm/rmap.c | 5 +++--
> > mm/vmscan.c | 16 ++++++++++++++--
> > 3 files changed, 20 insertions(+), 6 deletions(-)
> >
> > diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> > index b7944a8..846b261 100644
> > --- a/include/linux/rmap.h
> > +++ b/include/linux/rmap.h
> > @@ -623,7 +623,8 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
> > * Called from mm/vmscan.c to handle paging out
> > */
> > int folio_referenced(struct folio *, int is_locked,
> > - struct mem_cgroup *memcg, unsigned long *vm_flags);
> > + struct mem_cgroup *memcg, unsigned long *vm_flags,
> > + unsigned int rw_try_lock);
> >
> > void try_to_migrate(struct folio *folio, enum ttu_flags flags);
> > void try_to_unmap(struct folio *, enum ttu_flags flags);
> > @@ -739,7 +740,7 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
> >
> > static inline int folio_referenced(struct folio *folio, int is_locked,
> > struct mem_cgroup *memcg,
> > - unsigned long *vm_flags)
> > + unsigned long *vm_flags, unsigned int rw_try_lock)
> > {
> > *vm_flags = 0;
> > return 0;
> > diff --git a/mm/rmap.c b/mm/rmap.c
> > index f5d43ed..15d1fba 100644
> > --- a/mm/rmap.c
> > +++ b/mm/rmap.c
> > @@ -952,6 +952,7 @@ static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
> > * @is_locked: Caller holds lock on the folio.
> > * @memcg: target memory cgroup
> > * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
> > + * @rw_try_lock: if try_lock in rmap_walk
> > *
> > * Quick test_and_clear_referenced for all mappings of a folio,
> > *
> > @@ -959,7 +960,7 @@ static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
> > * the function bailed out due to rmap lock contention.
> > */
> > int folio_referenced(struct folio *folio, int is_locked,
> > - struct mem_cgroup *memcg, unsigned long *vm_flags)
> > + struct mem_cgroup *memcg, unsigned long *vm_flags, unsigned int rw_try_lock)
> > {
> > int we_locked = 0;
> > struct folio_referenced_arg pra = {
> > @@ -970,7 +971,7 @@ int folio_referenced(struct folio *folio, int is_locked,
> > .rmap_one = folio_referenced_one,
> > .arg = (void *)&pra,
> > .anon_lock = folio_lock_anon_vma_read,
> > - .try_lock = true,
> > + .try_lock = rw_try_lock ? true : false,
> > .invalid_vma = invalid_folio_referenced_vma,
> > };
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 4f9c854..0296d48 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -136,6 +136,9 @@ struct scan_control {
> > /* Always discard instead of demoting to lower tier memory */
> > unsigned int no_demotion:1;
> >
> > + /* if try_lock in rmap_walk */
> > + unsigned int rw_try_lock:1;
> > +
> > /* Allocation order */
> > s8 order;
> >
> > @@ -827,7 +830,7 @@ static enum folio_references folio_check_references(struct folio *folio,
> > unsigned long vm_flags;
> >
> > referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
> > - &vm_flags);
> > + &vm_flags, sc->rw_try_lock);
> > referenced_folio = folio_test_clear_referenced(folio);
> >
> > /*
> > @@ -1501,6 +1504,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
> > struct scan_control sc = {
> > .gfp_mask = GFP_KERNEL,
> > .may_unmap = 1,
> > + .rw_try_lock = 1,
> > };
> > struct reclaim_stat stat;
> > unsigned int nr_reclaimed;
> > @@ -2038,7 +2042,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
> >
> > /* Referenced or rmap lock contention: rotate */
> > if (folio_referenced(folio, 0, sc->target_mem_cgroup,
> > - &vm_flags) != 0) {
> > + &vm_flags, sc->rw_try_lock) != 0) {
> > /*
> > * Identify referenced, file-backed active folios and
> > * give them one more trip around the active list. So
> > @@ -2096,6 +2100,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
> > .may_unmap = 1,
> > .may_swap = 1,
> > .no_demotion = 1,
> > + .rw_try_lock = 1,
> > };
> >
> > nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
> > @@ -5442,6 +5447,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
> > .may_swap = true,
> > .reclaim_idx = MAX_NR_ZONES - 1,
> > .gfp_mask = GFP_KERNEL,
> > + .rw_try_lock = 1,
> > };
> >
> > buf = kvmalloc(len + 1, GFP_KERNEL);
> > @@ -6414,6 +6420,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> > .may_writepage = !laptop_mode,
> > .may_unmap = 1,
> > .may_swap = 1,
> > + .rw_try_lock = 1,
> > };
> >
> > /*
> > @@ -6459,6 +6466,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
> > .may_unmap = 1,
> > .reclaim_idx = MAX_NR_ZONES - 1,
> > .may_swap = !noswap,
> > + .rw_try_lock = 1,
> > };
> >
> > WARN_ON_ONCE(!current->reclaim_state);
> > @@ -6503,6 +6511,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
> > .may_unmap = 1,
> > .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
> > .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
> > + .rw_try_lock = 1,
> > };
> > /*
> > * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
> > @@ -6764,6 +6773,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
> > .gfp_mask = GFP_KERNEL,
> > .order = order,
> > .may_unmap = 1,
> > + .rw_try_lock = 1,
> > };
> >
> > set_task_reclaim_state(current, &sc.reclaim_state);
> > @@ -7223,6 +7233,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
> > .may_unmap = 1,
> > .may_swap = 1,
> > .hibernation_mode = 1,
> > + .rw_try_lock = 1,
> > };
> > struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
> > unsigned long nr_reclaimed;
> > @@ -7381,6 +7392,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
> > .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
> > .may_swap = 1,
> > .reclaim_idx = gfp_zone(gfp_mask),
> > + .rw_try_lock = 1,
> > };
> > unsigned long pflags;
> >
Thanks
Barry
* Re: [PATCH 2/2] mm: support kshrinkd
2024-02-20 2:11 ` 李培锋
@ 2024-02-20 3:19 ` Barry Song
0 siblings, 0 replies; 13+ messages in thread
From: Barry Song @ 2024-02-20 3:19 UTC (permalink / raw)
To: 李培锋
Cc: akpm, david, osalvador, Matthew Wilcox, linux-mm, linux-kernel,
tkjos, gregkh, v-songbaohua, surenb
Hi Peifeng,
On Tue, Feb 20, 2024 at 3:21 PM 李培锋 <lipeifeng@oppo.com> wrote:
>
> add experts from Linux and Google.
>
>
> On 2024/2/19 22:17, lipeifeng@oppo.com wrote:
> > From: lipeifeng <lipeifeng@oppo.com>
> >
> > Commit 6d4675e60135 ("mm: don't be stuck to rmap lock on reclaim path")
> > keeps the reclaim path from blocking on a contended rmap lock. But it
> > can leave some folios on the LRU out of aging order: a folio whose rmap
> > lock is contended during rmap_walk is put back at the head of the LRU
> > in shrink_folio_list, even if the folio is very cold.
> >
> > A 300-hour monkey test on a phone shows that almost one-third of the
> > contended folios can be freed successfully on the next attempt, so
> > putting those folios back at the head of the LRU breaks the LRU ordering:
The commit message seems hard to read.
How seriously is the LRU aging broken? What percentage of folios are
contended?
What is the negative impact if the contended folios are aged improperly?
> > - pgsteal_kshrinkd 262577
> > - pgscan_kshrinkd 795503
> >
> > For the above reason, this patch adds a new kthread, kshrinkd, to
> > reclaim the folios whose rmap lock was contended in rmap_walk during
> > shrink_folio_list, instead of putting them back, so that the LRU
> > ordering is preserved.
What benefit do real users see from the "fixed" aging, i.e. from your
approach of putting contended folios on a separate list and having a
separate thread reclaim them?
> >
> > Signed-off-by: lipeifeng <lipeifeng@oppo.com>
> > ---
> > include/linux/mmzone.h | 6 ++
> > include/linux/swap.h | 3 +
> > include/linux/vm_event_item.h | 2 +
> > mm/memory_hotplug.c | 2 +
> > mm/vmscan.c | 189 +++++++++++++++++++++++++++++++++++++++++-
> > mm/vmstat.c | 2 +
> > 6 files changed, 201 insertions(+), 3 deletions(-)
> >
> > diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> > index a497f18..83d7202 100644
> > --- a/include/linux/mmzone.h
> > +++ b/include/linux/mmzone.h
> > @@ -1329,6 +1329,12 @@ typedef struct pglist_data {
> >
> > int kswapd_failures; /* Number of 'reclaimed == 0' runs */
> >
> > + struct list_head kshrinkd_folios; /* rmap_walk contended folios list*/
> > + spinlock_t kf_lock; /* Protect kshrinkd_folios list*/
> > +
> > + struct task_struct *kshrinkd; /* reclaim kshrinkd_folios*/
> > + wait_queue_head_t kshrinkd_wait;
> > +
> > #ifdef CONFIG_COMPACTION
> > int kcompactd_max_order;
> > enum zone_type kcompactd_highest_zoneidx;
> > diff --git a/include/linux/swap.h b/include/linux/swap.h
> > index 4db00dd..155fcb6 100644
> > --- a/include/linux/swap.h
> > +++ b/include/linux/swap.h
> > @@ -435,6 +435,9 @@ void check_move_unevictable_folios(struct folio_batch *fbatch);
> > extern void __meminit kswapd_run(int nid);
> > extern void __meminit kswapd_stop(int nid);
> >
> > +extern void kshrinkd_run(int nid);
> > +extern void kshrinkd_stop(int nid);
> > +
> > #ifdef CONFIG_SWAP
> >
> > int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
> > diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
> > index 747943b..ee95ab1 100644
> > --- a/include/linux/vm_event_item.h
> > +++ b/include/linux/vm_event_item.h
> > @@ -38,9 +38,11 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
> > PGLAZYFREED,
> > PGREFILL,
> > PGREUSE,
> > + PGSTEAL_KSHRINKD,
> > PGSTEAL_KSWAPD,
> > PGSTEAL_DIRECT,
> > PGSTEAL_KHUGEPAGED,
> > + PGSCAN_KSHRINKD,
> > PGSCAN_KSWAPD,
> > PGSCAN_DIRECT,
> > PGSCAN_KHUGEPAGED,
> > diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> > index 2189099..1b6c4c6 100644
> > --- a/mm/memory_hotplug.c
> > +++ b/mm/memory_hotplug.c
> > @@ -1209,6 +1209,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
> >
> > kswapd_run(nid);
> > kcompactd_run(nid);
> > + kshrinkd_run(nid);
> >
> > writeback_set_ratelimit();
> >
> > @@ -2092,6 +2093,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
> > }
> >
> > if (arg.status_change_nid >= 0) {
> > + kshrinkd_stop(node);
> > kcompactd_stop(node);
> > kswapd_stop(node);
> > }
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 0296d48..63e4fd4 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -139,6 +139,9 @@ struct scan_control {
> > /* if try_lock in rmap_walk */
> > unsigned int rw_try_lock:1;
> >
> > + /* need kshrinkd to reclaim if rwc trylock contended*/
> > + unsigned int need_kshrinkd:1;
> > +
> > /* Allocation order */
> > s8 order;
> >
> > @@ -190,6 +193,17 @@ struct scan_control {
> > */
> > int vm_swappiness = 60;
> >
> > +/*
> > + * Wakeup kshrinkd those folios which lock-contended in ramp_walk
> > + * during shrink_folio_list, instead of putting back to the head
> > + * of LRU, to avoid to break the rules of LRU.
> > + */
> > +static void wakeup_kshrinkd(struct pglist_data *pgdat)
> > +{
> > + if (likely(pgdat->kshrinkd))
> > + wake_up_interruptible(&pgdat->kshrinkd_wait);
> > +}
> > +
> > #ifdef CONFIG_MEMCG
> >
> > /* Returns true for reclaim through cgroup limits or cgroup interfaces. */
> > @@ -821,6 +835,7 @@ enum folio_references {
> > FOLIOREF_RECLAIM_CLEAN,
> > FOLIOREF_KEEP,
> > FOLIOREF_ACTIVATE,
> > + FOLIOREF_LOCK_CONTENDED,
> > };
> >
> > static enum folio_references folio_check_references(struct folio *folio,
> > @@ -841,8 +856,12 @@ static enum folio_references folio_check_references(struct folio *folio,
> > return FOLIOREF_ACTIVATE;
> >
> > /* rmap lock contention: rotate */
> > - if (referenced_ptes == -1)
> > - return FOLIOREF_KEEP;
> > + if (referenced_ptes == -1) {
> > + if (sc->need_kshrinkd && folio_pgdat(folio)->kshrinkd)
> > + return FOLIOREF_LOCK_CONTENDED;
> > + else
> > + return FOLIOREF_KEEP;
> > + }
> >
> > if (referenced_ptes) {
> > /*
> > @@ -1012,6 +1031,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> > LIST_HEAD(ret_folios);
> > LIST_HEAD(free_folios);
> > LIST_HEAD(demote_folios);
> > + LIST_HEAD(contended_folios);
> > unsigned int nr_reclaimed = 0;
> > unsigned int pgactivate = 0;
> > bool do_demote_pass;
> > @@ -1028,6 +1048,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> > enum folio_references references = FOLIOREF_RECLAIM;
> > bool dirty, writeback;
> > unsigned int nr_pages;
> > + bool lock_contended = false;
> >
> > cond_resched();
> >
> > @@ -1169,6 +1190,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> > case FOLIOREF_KEEP:
> > stat->nr_ref_keep += nr_pages;
> > goto keep_locked;
> > + case FOLIOREF_LOCK_CONTENDED:
> > + lock_contended = true;
> > + goto keep_locked;
> > case FOLIOREF_RECLAIM:
> > case FOLIOREF_RECLAIM_CLEAN:
> > ; /* try to reclaim the folio below */
> > @@ -1449,7 +1473,10 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> > keep_locked:
> > folio_unlock(folio);
> > keep:
> > - list_add(&folio->lru, &ret_folios);
> > + if (unlikely(lock_contended))
> > + list_add(&folio->lru, &contended_folios);
> > + else
> > + list_add(&folio->lru, &ret_folios);
> > VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
> > folio_test_unevictable(folio), folio);
> > }
> > @@ -1491,6 +1518,14 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> > free_unref_page_list(&free_folios);
> >
> > list_splice(&ret_folios, folio_list);
> > +
> > + if (!list_empty(&contended_folios)) {
> > + spin_lock_irq(&pgdat->kf_lock);
> > + list_splice(&contended_folios, &pgdat->kshrinkd_folios);
> > + spin_unlock_irq(&pgdat->kf_lock);
> > + wakeup_kshrinkd(pgdat);
> > + }
> > +
> > count_vm_events(PGACTIVATE, pgactivate);
> >
> > if (plug)
> > @@ -1505,6 +1540,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
> > .gfp_mask = GFP_KERNEL,
> > .may_unmap = 1,
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 0,
> > };
> > struct reclaim_stat stat;
> > unsigned int nr_reclaimed;
> > @@ -2101,6 +2137,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
> > .may_swap = 1,
> > .no_demotion = 1,
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 0,
> > };
> >
> > nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
> > @@ -5448,6 +5485,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
> > .reclaim_idx = MAX_NR_ZONES - 1,
> > .gfp_mask = GFP_KERNEL,
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 0,
> > };
> >
> > buf = kvmalloc(len + 1, GFP_KERNEL);
> > @@ -6421,6 +6459,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> > .may_unmap = 1,
> > .may_swap = 1,
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 1,
> > };
> >
> > /*
> > @@ -6467,6 +6506,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
> > .reclaim_idx = MAX_NR_ZONES - 1,
> > .may_swap = !noswap,
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 0,
> > };
> >
> > WARN_ON_ONCE(!current->reclaim_state);
> > @@ -6512,6 +6552,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
> > .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
> > .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 0,
> > };
> > /*
> > * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
> > @@ -6774,6 +6815,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
> > .order = order,
> > .may_unmap = 1,
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 1,
> > };
> >
> > set_task_reclaim_state(current, &sc.reclaim_state);
> > @@ -7234,6 +7276,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
> > .may_swap = 1,
> > .hibernation_mode = 1,
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 0,
> > };
> > struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
> > unsigned long nr_reclaimed;
> > @@ -7304,6 +7347,145 @@ static int __init kswapd_init(void)
> >
> > module_init(kswapd_init)
> >
> > +static int kshrinkd_should_run(pg_data_t *pgdat)
> > +{
> > + int should_run;
> > +
> > + spin_lock_irq(&pgdat->kf_lock);
> > + should_run = !list_empty(&pgdat->kshrinkd_folios);
> > + spin_unlock_irq(&pgdat->kf_lock);
> > +
> > + return should_run;
> > +}
> > +
> > +static unsigned long kshrinkd_reclaim_folios(struct list_head *folio_list,
> > + struct pglist_data *pgdat)
> > +{
> > + struct reclaim_stat dummy_stat;
> > + unsigned int nr_reclaimed = 0;
> > + struct scan_control sc = {
> > + .gfp_mask = GFP_KERNEL,
> > + .may_writepage = 1,
> > + .may_unmap = 1,
> > + .may_swap = 1,
> > + .no_demotion = 1,
> > + .rw_try_lock = 0,
> > + .need_kshrinkd = 0,
> > + };
> > +
> > + if (list_empty(folio_list))
> > + return nr_reclaimed;
> > +
> > + nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
> > +
> > + return nr_reclaimed;
> > +}
> > +
> > +/*
> > + * The background kshrink daemon, started as a kernel thread
> > + * from the init process.
> > + *
> > + * Kshrinkd reclaims folios whose rmap lock was contended in rmap_walk
> > + * during shrink_folio_list, instead of putting them back at the head
> > + * of the LRU directly, so that LRU ordering is not broken.
> > + */
> > +
> > +static int kshrinkd(void *p)
> > +{
> > + pg_data_t *pgdat;
> > + LIST_HEAD(tmp_contended_folios);
> > +
> > + pgdat = (pg_data_t *)p;
> > +
> > + current->flags |= PF_MEMALLOC | PF_KSWAPD;
> > + set_freezable();
> > +
> > + while (!kthread_should_stop()) {
> > + unsigned long nr_reclaimed = 0;
> > + unsigned long nr_putback = 0;
> > +
> > + wait_event_freezable(pgdat->kshrinkd_wait,
> > + kshrinkd_should_run(pgdat));
> > +
> > + /* splice rmap_walk contended folios to tmp-list */
> > + spin_lock_irq(&pgdat->kf_lock);
> > + list_splice(&pgdat->kshrinkd_folios, &tmp_contended_folios);
> > + INIT_LIST_HEAD(&pgdat->kshrinkd_folios);
> > + spin_unlock_irq(&pgdat->kf_lock);
> > +
> > + /* reclaim rmap_walk contended folios */
> > + nr_reclaimed = kshrinkd_reclaim_folios(&tmp_contended_folios, pgdat);
> > + __count_vm_events(PGSTEAL_KSHRINKD, nr_reclaimed);
> > +
> > + /* putback the folios which failed to reclaim to lru */
> > + while (!list_empty(&tmp_contended_folios)) {
> > + struct folio *folio = lru_to_folio(&tmp_contended_folios);
> > +
> > + nr_putback += folio_nr_pages(folio);
> > + list_del(&folio->lru);
> > + folio_putback_lru(folio);
> > + }
> > +
> > + __count_vm_events(PGSCAN_KSHRINKD, nr_reclaimed + nr_putback);
> > + }
> > +
> > + current->flags &= ~(PF_MEMALLOC | PF_KSWAPD);
> > +
> > + return 0;
> > +}
> > +
> > +/*
> > + * This kshrinkd start function will be called by init and node-hot-add.
> > + */
> > +void kshrinkd_run(int nid)
> > +{
> > + pg_data_t *pgdat = NODE_DATA(nid);
> > +
> > + if (pgdat->kshrinkd)
> > + return;
> > +
> > + pgdat->kshrinkd = kthread_run(kshrinkd, pgdat, "kshrinkd%d", nid);
> > + if (IS_ERR(pgdat->kshrinkd)) {
> > + /* failure to start kshrinkd */
> > + WARN_ON_ONCE(system_state < SYSTEM_RUNNING);
> > + pr_err("Failed to start kshrinkd on node %d\n", nid);
> > + pgdat->kshrinkd = NULL;
> > + }
> > +}
> > +
> > +/*
> > + * Called by memory hotplug when all memory in a node is offlined. Caller must
> > + * be holding mem_hotplug_begin/done().
> > + */
> > +void kshrinkd_stop(int nid)
> > +{
> > + struct task_struct *kshrinkd = NODE_DATA(nid)->kshrinkd;
> > +
> > + if (kshrinkd) {
> > + kthread_stop(kshrinkd);
> > + NODE_DATA(nid)->kshrinkd = NULL;
> > + }
> > +}
> > +
> > +static int __init kshrinkd_init(void)
> > +{
> > + int nid;
> > +
> > + for_each_node_state(nid, N_MEMORY) {
> > + pg_data_t *pgdat = NODE_DATA(nid);
> > +
> > + spin_lock_init(&pgdat->kf_lock);
> > + init_waitqueue_head(&pgdat->kshrinkd_wait);
> > + INIT_LIST_HEAD(&pgdat->kshrinkd_folios);
> > +
> > + kshrinkd_run(nid);
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +module_init(kshrinkd_init)
> > +
> > #ifdef CONFIG_NUMA
> > /*
> > * Node reclaim mode
> > @@ -7393,6 +7575,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
> > .may_swap = 1,
> > .reclaim_idx = gfp_zone(gfp_mask),
> > .rw_try_lock = 1,
> > + .need_kshrinkd = 1,
> > };
> > unsigned long pflags;
> >
> > diff --git a/mm/vmstat.c b/mm/vmstat.c
> > index db79935..76d8a3b 100644
> > --- a/mm/vmstat.c
> > +++ b/mm/vmstat.c
> > @@ -1279,9 +1279,11 @@ const char * const vmstat_text[] = {
> >
> > "pgrefill",
> > "pgreuse",
> > + "pgsteal_kshrinkd",
> > "pgsteal_kswapd",
> > "pgsteal_direct",
> > "pgsteal_khugepaged",
> > + "pgscan_kshrinkd",
> > "pgscan_kswapd",
> > "pgscan_direct",
> > "pgscan_khugepaged",
>
Thanks
Barry
^ permalink raw reply [flat|nested] 13+ messages in thread
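[Note on the mechanism quoted above, not part of the patch: shrink_folio_list() acts as a producer that queues rmap-lock-contended folios on pgdat->kshrinkd_folios under kf_lock and wakes kshrinkd, which splices the list to a private one, retries reclaim without try_lock, and puts the failures back on the LRU. The sketch below is a minimal userspace analogue of that handoff, with pthreads standing in for the kthread, spinlock and waitqueue; struct deferred_item, queue_contended() and worker() are illustrative names only.]

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred_item {
	int id;
	struct deferred_item *next;
};

static struct deferred_item *deferred_head;	/* ~ pgdat->kshrinkd_folios */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ kf_lock */
static pthread_cond_t list_wait = PTHREAD_COND_INITIALIZER;	/* ~ kshrinkd_wait */
static int stopping;

/* Producer side: what the reclaim path does with a contended folio. */
static void queue_contended(struct deferred_item *item)
{
	pthread_mutex_lock(&list_lock);
	item->next = deferred_head;
	deferred_head = item;
	pthread_mutex_unlock(&list_lock);
	pthread_cond_signal(&list_wait);	/* ~ wakeup_kshrinkd() */
}

/* Consumer side: the kshrinkd-style loop. */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		struct deferred_item *batch;

		pthread_mutex_lock(&list_lock);
		while (!deferred_head && !stopping)
			pthread_cond_wait(&list_wait, &list_lock);
		batch = deferred_head;		/* splice to a private list */
		deferred_head = NULL;
		pthread_mutex_unlock(&list_lock);

		if (!batch && stopping)
			return NULL;

		while (batch) {
			struct deferred_item *item = batch;

			batch = item->next;
			/* Retry the deferred work, now willing to block. */
			printf("retried item %d\n", item->id);
			free(item);
		}
	}
}

int main(void)
{
	pthread_t tid;
	int i;

	pthread_create(&tid, NULL, worker, NULL);
	for (i = 0; i < 4; i++) {
		struct deferred_item *item = malloc(sizeof(*item));

		item->id = i;
		queue_contended(item);
	}

	pthread_mutex_lock(&list_lock);
	stopping = 1;
	pthread_mutex_unlock(&list_lock);
	pthread_cond_signal(&list_wait);
	pthread_join(tid, NULL);
	return 0;
}

[The in-kernel version additionally marks the worker PF_MEMALLOC | PF_KSWAPD and keeps one list per node, but the lock/splice/retry pattern is the same.]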
* Re: [PATCH 1/2] mm/rmap: support folio_referenced to control if try_lock in rmap_walk
2024-02-20 3:01 ` Barry Song
@ 2024-02-20 4:00 ` 李培锋
2024-02-20 7:16 ` Barry Song
0 siblings, 1 reply; 13+ messages in thread
From: 李培锋 @ 2024-02-20 4:00 UTC (permalink / raw)
To: Barry Song
Cc: akpm, david, osalvador, willy, linux-mm, linux-kernel, tkjos,
surenb, gregkh, v-songbaohua
On 2024/2/20 11:01, Barry Song wrote:
> Hi peifeng,
>
> On Tue, Feb 20, 2024 at 2:43 PM 李培锋 <lipeifeng@oppo.com> wrote:
>> add more experts from Linux and Google.
>>
>>
>> On 2024/2/19 22:17, lipeifeng@oppo.com wrote:
>>> From: lipeifeng <lipeifeng@oppo.com>
>>>
>>> The patch supports folio_referenced to control the behavior
>>> of rmap_walk, allowing some threads to hold the lock in rmap_walk
>>> instead of using try_lock when calling folio_referenced.
> please describe what problem the patch is trying to address,
> and why this modification is needed in the commit message.
Hi Barry:
1. the patch is one of the kshrinkd series patches.
2. it is to support folio_referenced to control the behavior of rmap_walk;
kshrinkd would call folio_referenced through shrink_folio_list but it
doesn't want to use try_lock in rmap_walk during folio_referenced.
> btw, who sets rw_try_lock to 0, and what is the benefit?
Actually, in the current situation only shrink_folio_list sets
try_lock to 1,
while the other callers set it to 0, so they would wait for the
rwsem if it is contended in rmap_walk.
>
>>> Signed-off-by: lipeifeng <lipeifeng@oppo.com>
>>> ---
>>> include/linux/rmap.h | 5 +++--
>>> mm/rmap.c | 5 +++--
>>> mm/vmscan.c | 16 ++++++++++++++--
>>> 3 files changed, 20 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
>>> index b7944a8..846b261 100644
>>> --- a/include/linux/rmap.h
>>> +++ b/include/linux/rmap.h
>>> @@ -623,7 +623,8 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
>>> * Called from mm/vmscan.c to handle paging out
>>> */
>>> int folio_referenced(struct folio *, int is_locked,
>>> - struct mem_cgroup *memcg, unsigned long *vm_flags);
>>> + struct mem_cgroup *memcg, unsigned long *vm_flags,
>>> + unsigned int rw_try_lock);
>>>
>>> void try_to_migrate(struct folio *folio, enum ttu_flags flags);
>>> void try_to_unmap(struct folio *, enum ttu_flags flags);
>>> @@ -739,7 +740,7 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
>>>
>>> static inline int folio_referenced(struct folio *folio, int is_locked,
>>> struct mem_cgroup *memcg,
>>> - unsigned long *vm_flags)
>>> + unsigned long *vm_flags, unsigned int rw_try_lock)
>>> {
>>> *vm_flags = 0;
>>> return 0;
>>> diff --git a/mm/rmap.c b/mm/rmap.c
>>> index f5d43ed..15d1fba 100644
>>> --- a/mm/rmap.c
>>> +++ b/mm/rmap.c
>>> @@ -952,6 +952,7 @@ static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
>>> * @is_locked: Caller holds lock on the folio.
>>> * @memcg: target memory cgroup
>>> * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
>>> + * @rw_try_lock: whether to use try_lock in rmap_walk
>>> *
>>> * Quick test_and_clear_referenced for all mappings of a folio,
>>> *
>>> @@ -959,7 +960,7 @@ static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
>>> * the function bailed out due to rmap lock contention.
>>> */
>>> int folio_referenced(struct folio *folio, int is_locked,
>>> - struct mem_cgroup *memcg, unsigned long *vm_flags)
>>> + struct mem_cgroup *memcg, unsigned long *vm_flags, unsigned int rw_try_lock)
>>> {
>>> int we_locked = 0;
>>> struct folio_referenced_arg pra = {
>>> @@ -970,7 +971,7 @@ int folio_referenced(struct folio *folio, int is_locked,
>>> .rmap_one = folio_referenced_one,
>>> .arg = (void *)&pra,
>>> .anon_lock = folio_lock_anon_vma_read,
>>> - .try_lock = true,
>>> + .try_lock = rw_try_lock ? true : false,
>>> .invalid_vma = invalid_folio_referenced_vma,
>>> };
>>>
>>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>>> index 4f9c854..0296d48 100644
>>> --- a/mm/vmscan.c
>>> +++ b/mm/vmscan.c
>>> @@ -136,6 +136,9 @@ struct scan_control {
>>> /* Always discard instead of demoting to lower tier memory */
>>> unsigned int no_demotion:1;
>>>
>>> + /* whether to use try_lock in rmap_walk */
>>> + unsigned int rw_try_lock:1;
>>> +
>>> /* Allocation order */
>>> s8 order;
>>>
>>> @@ -827,7 +830,7 @@ static enum folio_references folio_check_references(struct folio *folio,
>>> unsigned long vm_flags;
>>>
>>> referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
>>> - &vm_flags);
>>> + &vm_flags, sc->rw_try_lock);
>>> referenced_folio = folio_test_clear_referenced(folio);
>>>
>>> /*
>>> @@ -1501,6 +1504,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
>>> struct scan_control sc = {
>>> .gfp_mask = GFP_KERNEL,
>>> .may_unmap = 1,
>>> + .rw_try_lock = 1,
>>> };
>>> struct reclaim_stat stat;
>>> unsigned int nr_reclaimed;
>>> @@ -2038,7 +2042,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
>>>
>>> /* Referenced or rmap lock contention: rotate */
>>> if (folio_referenced(folio, 0, sc->target_mem_cgroup,
>>> - &vm_flags) != 0) {
>>> + &vm_flags, sc->rw_try_lock) != 0) {
>>> /*
>>> * Identify referenced, file-backed active folios and
>>> * give them one more trip around the active list. So
>>> @@ -2096,6 +2100,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
>>> .may_unmap = 1,
>>> .may_swap = 1,
>>> .no_demotion = 1,
>>> + .rw_try_lock = 1,
>>> };
>>>
>>> nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
>>> @@ -5442,6 +5447,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
>>> .may_swap = true,
>>> .reclaim_idx = MAX_NR_ZONES - 1,
>>> .gfp_mask = GFP_KERNEL,
>>> + .rw_try_lock = 1,
>>> };
>>>
>>> buf = kvmalloc(len + 1, GFP_KERNEL);
>>> @@ -6414,6 +6420,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
>>> .may_writepage = !laptop_mode,
>>> .may_unmap = 1,
>>> .may_swap = 1,
>>> + .rw_try_lock = 1,
>>> };
>>>
>>> /*
>>> @@ -6459,6 +6466,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
>>> .may_unmap = 1,
>>> .reclaim_idx = MAX_NR_ZONES - 1,
>>> .may_swap = !noswap,
>>> + .rw_try_lock = 1,
>>> };
>>>
>>> WARN_ON_ONCE(!current->reclaim_state);
>>> @@ -6503,6 +6511,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
>>> .may_unmap = 1,
>>> .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
>>> .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
>>> + .rw_try_lock = 1,
>>> };
>>> /*
>>> * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
>>> @@ -6764,6 +6773,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
>>> .gfp_mask = GFP_KERNEL,
>>> .order = order,
>>> .may_unmap = 1,
>>> + .rw_try_lock = 1,
>>> };
>>>
>>> set_task_reclaim_state(current, &sc.reclaim_state);
>>> @@ -7223,6 +7233,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
>>> .may_unmap = 1,
>>> .may_swap = 1,
>>> .hibernation_mode = 1,
>>> + .rw_try_lock = 1,
>>> };
>>> struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
>>> unsigned long nr_reclaimed;
>>> @@ -7381,6 +7392,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
>>> .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
>>> .may_swap = 1,
>>> .reclaim_idx = gfp_zone(gfp_mask),
>>> + .rw_try_lock = 1,
>>> };
>>> unsigned long pflags;
>>>
> Thanks
> Barry
^ permalink raw reply [flat|nested] 13+ messages in thread
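[To make the rw_try_lock discussion above concrete: with rw_try_lock set, the rmap walk only trylocks the rmap lock and folio_referenced can bail out on contention; with it clear, as kshrinkd does, the walk is willing to sleep on the lock. The fragment below is a userspace analogue using a pthread rwlock in place of the kernel rwsem; walk_one_mapping() and its return values are illustrative only, not kernel API.]

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum walk_result { WALK_DONE, WALK_CONTENDED };

/* ~ the per-mapping rwsem taken for reading by the rmap walk */
static pthread_rwlock_t mapping_lock = PTHREAD_RWLOCK_INITIALIZER;

static enum walk_result walk_one_mapping(bool try_lock)
{
	if (try_lock) {
		/* ~ rw_try_lock = 1: bail out so the folio can be deferred */
		if (pthread_rwlock_tryrdlock(&mapping_lock) == EBUSY)
			return WALK_CONTENDED;
	} else {
		/* ~ rw_try_lock = 0 (kshrinkd): block until the lock is free */
		pthread_rwlock_rdlock(&mapping_lock);
	}

	/* ... inspect and clear referenced state here ... */

	pthread_rwlock_unlock(&mapping_lock);
	return WALK_DONE;
}

int main(void)
{
	/* Direct/kswapd reclaim style: never sleep on the lock. */
	printf("try_lock=1 -> %d\n", walk_one_mapping(true));
	/* kshrinkd style: willing to wait for the contended lock. */
	printf("try_lock=0 -> %d\n", walk_one_mapping(false));
	return 0;
}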
* Re: [PATCH 0/2] Support kshrinkd
2024-02-20 2:55 ` Matthew Wilcox
@ 2024-02-20 4:14 ` 李培锋
0 siblings, 0 replies; 13+ messages in thread
From: 李培锋 @ 2024-02-20 4:14 UTC (permalink / raw)
To: Matthew Wilcox
Cc: akpm, david, osalvador, linux-mm, linux-kernel, v-songbaohua,
gregkh, tkjos, surenb
On 2024/2/20 10:55, Matthew Wilcox wrote:
> On Tue, Feb 20, 2024 at 10:04:33AM +0800, 李培锋 wrote:
>> Monkey-test in a phone with 16GB RAM for 300 hours shows that almost
>> one-third of the contended pages can be freed successfully next time;
>> putting those folios back at the LRU's head would break the rules of
>> the inactive LRU.
> You talk about "the rules of inactive LRU" like we care. The LRU is
> an approximation at best. What are the *consequences*?
> Is there a
> benchmark that executes more operations per second as a result of
> this patch?
Hi Sir:
1. For the above data from the 300-hour test on the 16GB-RAM device:
- 795503 folios were skipped during shrink_folio_list because the rmap
lock was contended;
- 262577 of those folios were reclaimed successfully the next time, but
had been put back at the head of the inactive LRU.
2. Converted to per second:
- 0.243 folios per second were mistakenly put back at the head of the
inactive LRU.
3. Issues. There are two issues with the current situation:
1. Some cold pages are not freed in time: in the data we collected on
the 16GB device, almost 1GB of folios were not freed in time during the
test, which makes shrink_folio_list inefficient. Especially for folios
that are very cold and belong to a common virtual memory space, we
found cases where more than 20 folios were contended in rmap_walk and
put back at the head of the inactive LRU during one shrink_folio_list
pass (which isolates 32 folios), and more background user processes
were killed by lmkd. Kshrinkd makes the reclaim path more efficient and
reduces the lmkd kill rate by 2%.
2. Another issue is that keeping more cold folios at the head of the
inactive LRU causes some hot pages to be reclaimed instead, resulting
in more file refaults and anon swap-ins. Data will be updated soon if
needed.
^ permalink raw reply [flat|nested] 13+ messages in thread
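[The per-second figures above come from the two vmstat counters added by this series. On a kernel with the patchset applied they can be sampled with a trivial helper like the one below, which is not part of the series; on an unpatched kernel no line will match.]

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *fp = fopen("/proc/vmstat", "r");

	if (!fp) {
		perror("/proc/vmstat");
		return 1;
	}
	/* Print only the counters introduced by this patchset. */
	while (fgets(line, sizeof(line), fp)) {
		if (!strncmp(line, "pgsteal_kshrinkd", 16) ||
		    !strncmp(line, "pgscan_kshrinkd", 15))
			fputs(line, stdout);
	}
	fclose(fp);
	return 0;
}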
* Re: [PATCH 1/2] mm/rmap: support folio_referenced to control if try_lock in rmap_walk
2024-02-20 4:00 ` 李培锋
@ 2024-02-20 7:16 ` Barry Song
0 siblings, 0 replies; 13+ messages in thread
From: Barry Song @ 2024-02-20 7:16 UTC (permalink / raw)
To: 李培锋
Cc: akpm, david, osalvador, willy, linux-mm, linux-kernel, tkjos,
surenb, gregkh, v-songbaohua
On Tue, Feb 20, 2024 at 5:00 PM 李培锋 <lipeifeng@oppo.com> wrote:
>
>
> On 2024/2/20 11:01, Barry Song wrote:
> > Hi peifeng,
> >
> > On Tue, Feb 20, 2024 at 2:43 PM 李培锋 <lipeifeng@oppo.com> wrote:
> >> add more experts from Linux and Google.
> >>
> >>
> >> On 2024/2/19 22:17, lipeifeng@oppo.com wrote:
> >>> From: lipeifeng <lipeifeng@oppo.com>
> >>>
> >>> The patch supports folio_referenced to control the behavior
> >>> of rmap_walk, allowing some threads to hold the lock in rmap_walk
> >>> instead of using try_lock when calling folio_referenced.
> > please describe what problem the patch is trying to address,
> > and why this modification is needed in the commit message.
>
> Hi Barry:
>
> 1. the patch is one of the kshrinkd series patches.
this seems like a bad name for the patchset as nobody knows
what kshrinkd is. maybe something like "asynchronously
reclaim contended folios rather than aging them"?
>
> 2. it is to support folio_referenced to control the behavior of rmap_walk;
> kshrinkd would call folio_referenced through shrink_folio_list but it
> doesn't want to use try_lock in rmap_walk during folio_referenced.
>
>
> > btw, who sets rw_try_lock to 0, and what is the benefit?
>
> Actually, in the current situation only shrink_folio_list sets
> try_lock to 1,
understood, as you don't want contended folios to be skipped
by the scanner anymore.
>
> while the other callers set it to 0, so they would wait for the
> rwsem if it is contended in rmap_walk.
ok. other reclamation threads will still skip contended folios.
As discussed, the patchset really needs detailed data to back it up.
Thanks
Barry
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2024-02-20 7:16 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-19 14:17 [PATCH 0/2] Support kshrinkd lipeifeng
2024-02-19 14:17 ` [PATCH 2/2] mm: support kshrinkd lipeifeng
2024-02-20 2:11 ` 李培锋
2024-02-20 3:19 ` Barry Song
2024-02-19 16:51 ` [PATCH 0/2] Support kshrinkd Matthew Wilcox
2024-02-20 2:04 ` 李培锋
2024-02-20 2:55 ` Matthew Wilcox
2024-02-20 4:14 ` 李培锋
[not found] ` <20240219141703.3851-2-lipeifeng@oppo.com>
2024-02-20 1:42 ` [PATCH 1/2] mm/rmap: support folio_referenced to control if try_lock in rmap_walk 李培锋
2024-02-20 3:01 ` Barry Song
2024-02-20 4:00 ` 李培锋
2024-02-20 7:16 ` Barry Song
2024-02-20 2:09 ` [PATCH 0/2] Support kshrinkd 李培锋