From: Yinan Zhang <zhangyinan2019@email.szu.edu.cn>
To: akpm@linux-foundation.org
Cc: willy@infradead.org, vbabka@suse.cz,
william.kucharski@oracle.com, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, yejiajian2018@email.szu.edu.cn,
hanshenghong2019@email.szu.edu.cn,
caoyixuan2019@email.szu.edu.cn, zhaochongxi2019@email.szu.edu.cn,
yuhongf@szu.edu.cn, Yinan Zhang <zhangyinan2019@email.szu.edu.cn>
Subject: [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner
Date: Tue, 22 Mar 2022 11:22:24 +0800
Message-ID: <20220322032225.1402992-1-zhangyinan2019@email.szu.edu.cn>

When an application's memory consumption is high and keeps increasing,
it is suspected of having a memory leak. There are several commonly
used memory allocators: slab, cma, vmalloc, etc. Memory leak
identification can be sped up if the pages allocated by each
individual allocator are analyzed separately. This patch introduces
the vmalloc allocator for page_owner.
The following adjustments are made:
1) Add a member variable "allocator" to the page_owner struct. The
   possible values of "allocator" are predefined in a newly added
   string array "allocator_name".
2) Add a function __set_page_owner_allocator() to record the allocator
   in the "allocator" field.
3) Add the allocator name to the output of print_page_owner(); an
   illustrative report line and a usage sketch follow this list.
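With this patch applied, the print_page_owner() report gains an
"allocator" field at the end of the summary line, e.g. (illustrative
values only):

	Page allocated via order 0, mask 0xcc2(GFP_KERNEL|__GFP_HIGHMEM), pid 1234, tgid 1234 (insmod), ts 123456789 ns, free_ts 0 ns, allocator vmalloc

A caller is expected to tag its pages through the new helper after
allocating them. The sketch below is illustrative only; the real hook
for vmalloc is added in patch 2/2, and the "area" and "i" variables
are assumed to follow __vmalloc_area_node() in mm/vmalloc.c:

	/* tag every 0-order page backing a vmalloc area */
	for (i = 0; i < area->nr_pages; i++)
		set_page_owner_allocator(area->pages[i], 0,
					 PAGE_OWNER_ALLOCATOR_VMALLOC);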
This work is co-authored by:
Shenghong Han
Yixuan Cao
Chongxi Zhao
Jiajian Ye
Yuhong Feng
Yongqiang Liu
Signed-off-by: Yinan Zhang <zhangyinan2019@email.szu.edu.cn>
---
include/linux/page_owner.h | 18 ++++++++++++++++++
mm/page_owner.c | 29 +++++++++++++++++++++++++++--
2 files changed, 45 insertions(+), 2 deletions(-)
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 119a0c9d2a8b..d559781dde67 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -11,6 +11,8 @@ extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
unsigned short order, gfp_t gfp_mask);
+extern void __set_page_owner_allocator(struct page *page, unsigned short order,
+ unsigned short allocator);
extern void __split_page_owner(struct page *page, unsigned int nr);
extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
@@ -18,6 +20,11 @@ extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
+enum page_owner_allocator {
+ PAGE_OWNER_ALLOCATOR_UNKNOWN = 0,
+ PAGE_OWNER_ALLOCATOR_VMALLOC
+};
+
static inline void reset_page_owner(struct page *page, unsigned short order)
{
if (static_branch_unlikely(&page_owner_inited))
@@ -31,6 +38,13 @@ static inline void set_page_owner(struct page *page,
__set_page_owner(page, order, gfp_mask);
}
+static inline void set_page_owner_allocator(struct page *page, unsigned short order,
+ unsigned short allocator)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner_allocator(page, order, allocator);
+}
+
static inline void split_page_owner(struct page *page, unsigned int nr)
{
if (static_branch_unlikely(&page_owner_inited))
@@ -59,6 +73,10 @@ static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
}
+static inline void set_page_owner_allocator(struct page *page, unsigned short order,
+ unsigned short allocator)
+{
+}
static inline void split_page_owner(struct page *page,
unsigned short order)
{
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 0a9588506571..11bb805c61fd 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -32,6 +32,12 @@ struct page_owner {
char comm[TASK_COMM_LEN];
pid_t pid;
pid_t tgid;
+ unsigned short allocator;
+};
+
+const char * const allocator_name[] = {
+ "unknown",
+ "vmalloc",
};
static bool page_owner_enabled = false;
@@ -148,6 +154,7 @@ void __reset_page_owner(struct page *page, unsigned short order)
page_owner = get_page_owner(page_ext);
page_owner->free_handle = handle;
page_owner->free_ts_nsec = free_ts_nsec;
+ page_owner->allocator = PAGE_OWNER_ALLOCATOR_UNKNOWN;
page_ext = page_ext_next(page_ext);
}
}
@@ -190,6 +197,22 @@ noinline void __set_page_owner(struct page *page, unsigned short order,
__set_page_owner_handle(page_ext, handle, order, gfp_mask);
}
+void __set_page_owner_allocator(struct page *page, unsigned short order, unsigned short allocator)
+{
+ int i;
+ struct page_ext *page_ext;
+ struct page_owner *page_owner;
+
+ page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+ for (i = 0; i < (1 << order); i++) {
+ page_owner = get_page_owner(page_ext);
+ page_owner->allocator = allocator;
+ page_ext = page_ext_next(page_ext);
+ }
+}
+
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
struct page_ext *page_ext = lookup_page_ext(page);
@@ -238,6 +261,7 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
new_page_owner->tgid = old_page_owner->tgid;
new_page_owner->ts_nsec = old_page_owner->ts_nsec;
new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
+ new_page_owner->allocator = old_page_owner->allocator;
strcpy(new_page_owner->comm, old_page_owner->comm);
/*
@@ -386,11 +410,12 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
return -ENOMEM;
ret = scnprintf(kbuf, count,
- "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n",
+ "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns, allocator %s\n",
page_owner->order, page_owner->gfp_mask,
&page_owner->gfp_mask, page_owner->pid,
page_owner->tgid, page_owner->comm,
- page_owner->ts_nsec, page_owner->free_ts_nsec);
+ page_owner->ts_nsec, page_owner->free_ts_nsec,
+ allocator_name[page_owner->allocator]);
/* Print information relevant to grouping pages by mobility */
pageblock_mt = get_pageblock_migratetype(page);
--
2.25.1
Thread overview: 4+ messages
2022-03-22 3:22 Yinan Zhang [this message]
2022-03-22 3:22 ` [PATCH 2/2] mm/vmalloc.c: record the allocator in page_owner when __vmalloc_area_node complete mapping pages to virtual address Yinan Zhang
2022-03-22 3:28 ` [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner Matthew Wilcox
[not found] ` <299aef9a-dd57-2197-f3cf-6b08c441f755@email.szu.edu.cn>
2022-03-22 15:10 ` Vlastimil Babka