From: Ning Zhang <ningzhang@linux.alibaba.com>
To: linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@kernel.org>,
Vladimir Davydov <vdavydov.dev@gmail.com>,
Yu Zhao <yuzhao@google.com>
Subject: [RFC 5/6] mm, thp: add some statistics for zero subpages reclaim
Date: Thu, 28 Oct 2021 19:56:54 +0800
Message-ID: <1635422215-99394-6-git-send-email-ningzhang@linux.alibaba.com>
In-Reply-To: <1635422215-99394-1-git-send-email-ningzhang@linux.alibaba.com>
Add a new per-memcg interface, memory.thp_reclaim_stat, which reports
per-node statistics for thp zero subpages reclaim:

queue_length shows the number of huge pages in the reclaim queue.
split_hpage shows the number of huge pages split by thp reclaim.
split_failed shows the number of huge pages that failed to split.
reclaim_subpage shows the number of zero subpages reclaimed by
thp reclaim.
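
As a rough illustration, reading the new file on a two-node machine
could look like the following (the values are made up; each column is
one node, matching the for_each_node() loops in
memcg_thp_reclaim_stat_show()):

  $ cat memory.thp_reclaim_stat
  queue_length    32                      17
  split_hpage     118                     96
  split_failed    2                       0
  reclaim_subpage 3825                    3130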
Signed-off-by: Ning Zhang <ningzhang@linux.alibaba.com>
---
include/linux/huge_mm.h | 3 ++-
include/linux/mmzone.h | 3 +++
mm/huge_memory.c | 8 ++++++--
mm/memcontrol.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
mm/vmscan.c | 2 +-
5 files changed, 59 insertions(+), 4 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f792433..5d4a038 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -189,7 +189,8 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
extern int global_thp_reclaim;
int zsr_get_hpage(struct hpage_reclaim *hr_queue, struct page **reclaim_page,
int threshold);
-unsigned long zsr_reclaim_hpage(struct lruvec *lruvec, struct page *page);
+unsigned long zsr_reclaim_hpage(struct hpage_reclaim *hr_queue,
+ struct lruvec *lruvec, struct page *page);
void zsr_reclaim_memcg(struct mem_cgroup *memcg);
static inline struct list_head *hpage_reclaim_list(struct page *page)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 222cd4f..6ce6890 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -792,6 +792,9 @@ struct hpage_reclaim {
spinlock_t reclaim_queue_lock;
struct list_head reclaim_queue;
unsigned long reclaim_queue_len;
+ atomic_long_t split_hpage;
+ atomic_long_t split_failed;
+ atomic_long_t reclaim_subpage;
};
#endif
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 633fd0f..5e737d0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3506,7 +3506,8 @@ int zsr_get_hpage(struct hpage_reclaim *hr_queue, struct page **reclaim_page,
}
-unsigned long zsr_reclaim_hpage(struct lruvec *lruvec, struct page *page)
+unsigned long zsr_reclaim_hpage(struct hpage_reclaim *hr_queue,
+ struct lruvec *lruvec, struct page *page)
{
struct pglist_data *pgdat = page_pgdat(page);
unsigned long reclaimed;
@@ -3523,12 +3524,15 @@ unsigned long zsr_reclaim_hpage(struct lruvec *lruvec, struct page *page)
putback_lru_page(page);
mod_node_page_state(pgdat, NR_ISOLATED_ANON,
-HPAGE_PMD_NR);
+ atomic_long_inc(&hr_queue->split_failed);
return 0;
}
unlock_page(page);
list_add_tail(&page->lru, &split_list);
reclaimed = reclaim_zero_subpages(&split_list, &keep_list);
+ atomic_long_inc(&hr_queue->split_hpage);
+ atomic_long_add(reclaimed, &hr_queue->reclaim_subpage);
spin_lock_irqsave(&lruvec->lru_lock, flags);
move_pages_to_lru(lruvec, &keep_list);
@@ -3564,7 +3568,7 @@ void zsr_reclaim_memcg(struct mem_cgroup *memcg)
if (!page)
continue;
- zsr_reclaim_hpage(lruvec, page);
+ zsr_reclaim_hpage(hr_queue, lruvec, page);
cond_resched();
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a8e3ca1..f8016ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4580,6 +4580,49 @@ static ssize_t memcg_thp_reclaim_ctrl_write(struct kernfs_open_file *of,
return nbytes;
}
+
+static int memcg_thp_reclaim_stat_show(struct seq_file *m, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup_per_node *mz;
+ int nid;
+ unsigned long len;
+
+ seq_puts(m, "queue_length\t");
+ for_each_node(nid) {
+ mz = memcg->nodeinfo[nid];
+ len = READ_ONCE(mz->hpage_reclaim_queue.reclaim_queue_len);
+ seq_printf(m, "%-24lu", len);
+ }
+
+ seq_puts(m, "\n");
+ seq_puts(m, "split_hpage\t");
+ for_each_node(nid) {
+ mz = memcg->nodeinfo[nid];
+ len = atomic_long_read(&mz->hpage_reclaim_queue.split_hpage);
+ seq_printf(m, "%-24lu", len);
+ }
+
+ seq_puts(m, "\n");
+ seq_puts(m, "split_failed\t");
+ for_each_node(nid) {
+ mz = memcg->nodeinfo[nid];
+ len = atomic_long_read(&mz->hpage_reclaim_queue.split_failed);
+ seq_printf(m, "%-24lu", len);
+ }
+
+ seq_puts(m, "\n");
+ seq_puts(m, "reclaim_subpage\t");
+ for_each_node(nid) {
+ mz = memcg->nodeinfo[nid];
+ len = atomic_long_read(&mz->hpage_reclaim_queue.reclaim_subpage);
+ seq_printf(m, "%-24lu", len);
+ }
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -5155,6 +5198,10 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
.seq_show = memcg_thp_reclaim_ctrl_show,
.write = memcg_thp_reclaim_ctrl_write,
},
+ {
+ .name = "thp_reclaim_stat",
+ .seq_show = memcg_thp_reclaim_stat_show,
+ },
#endif
{ }, /* terminate */
};
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fcc80a6..cb5f53d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2818,7 +2818,7 @@ static unsigned long reclaim_hpage_zero_subpages(struct lruvec *lruvec,
if (!page)
continue;
- nr_reclaimed += zsr_reclaim_hpage(lruvec, page);
+ nr_reclaimed += zsr_reclaim_hpage(hr_queue, lruvec, page);
cond_resched();
--
1.8.3.1