* [PATCH v6 2/2] mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status
2015-09-17 9:09 [PATCH v6 0/2] hugetlb: display per-process/per-vma usage Naoya Horiguchi
2015-09-17 9:09 ` [PATCH v6 1/2] mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps Naoya Horiguchi
@ 2015-09-17 9:09 ` Naoya Horiguchi
2015-09-20 10:58 ` Michal Hocko
1 sibling, 1 reply; 5+ messages in thread
From: Naoya Horiguchi @ 2015-09-17 9:09 UTC (permalink / raw)
To: Andrew Morton
Cc: Michal Hocko, Vlastimil Babka, Pádraig Brady,
David Rientjes, Jörn Engel, Mike Kravetz, linux-mm,
linux-kernel, Naoya Horiguchi, Naoya Horiguchi
Currently there's no easy way to get per-process usage of hugetlb pages, which
is inconvenient because userspace applications which use hugetlb typically want
to control their processes on the basis of how much memory (including hugetlb)
they use. So this patch simply provides easy access to the info via
/proc/PID/status.
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Joern Engel <joern@logfs.org>
Acked-by: David Rientjes <rientjes@google.com>
---
v5 -> v6:
- drop showing per-pagesize info
v4 -> v5:
- add (struct hugetlb_usage *) to struct mm_struct
- use %lu instead of %d for seq_printf()
- introduce hugetlb_fork
v3 -> v4:
- rename field (VmHugetlbRSS is not the best name)
- introduce struct hugetlb_usage in struct mm_struct (no invasion to struct
mm_rss_stat)
- introduce hugetlb_report_usage()
- merged documentation update
v2 -> v3:
- use inline functions instead of macros for !CONFIG_HUGETLB_PAGE
---
Documentation/filesystems/proc.txt | 2 ++
fs/proc/task_mmu.c | 1 +
include/linux/hugetlb.h | 19 +++++++++++++++++++
include/linux/mm_types.h | 3 +++
mm/hugetlb.c | 9 +++++++++
mm/rmap.c | 4 +++-
6 files changed, 37 insertions(+), 1 deletion(-)
diff --git v4.3-rc1/Documentation/filesystems/proc.txt v4.3-rc1_patched/Documentation/filesystems/proc.txt
index b9467d91052a..da29faaf685e 100644
--- v4.3-rc1/Documentation/filesystems/proc.txt
+++ v4.3-rc1_patched/Documentation/filesystems/proc.txt
@@ -174,6 +174,7 @@ For example, to get the status information of a process, all you have to do is
VmLib: 1412 kB
VmPTE: 20 kb
VmSwap: 0 kB
+ HugetlbPages: 0 kB
Threads: 1
SigQ: 0/28578
SigPnd: 0000000000000000
@@ -237,6 +238,7 @@ Table 1-2: Contents of the status files (as of 4.1)
VmPTE size of page table entries
VmPMD size of second level page tables
VmSwap size of swap usage (the number of referred swapents)
+ HugetlbPages size of hugetlb memory portions
Threads number of threads
SigQ number of signals queued/max. number for queue
SigPnd bitmap of pending signals for the thread
diff --git v4.3-rc1/fs/proc/task_mmu.c v4.3-rc1_patched/fs/proc/task_mmu.c
index 22c02917f265..bd167675a06f 100644
--- v4.3-rc1/fs/proc/task_mmu.c
+++ v4.3-rc1_patched/fs/proc/task_mmu.c
@@ -70,6 +70,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
ptes >> 10,
pmds >> 10,
swap << (PAGE_SHIFT-10));
+ hugetlb_report_usage(m, mm);
}
unsigned long task_vsize(struct mm_struct *mm)
diff --git v4.3-rc1/include/linux/hugetlb.h v4.3-rc1_patched/include/linux/hugetlb.h
index 5e35379f58a5..685c262e0be8 100644
--- v4.3-rc1/include/linux/hugetlb.h
+++ v4.3-rc1_patched/include/linux/hugetlb.h
@@ -483,6 +483,17 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
+void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+
+static inline void hugetlb_count_add(long l, struct mm_struct *mm)
+{
+ atomic_long_add(l, &mm->hugetlb_usage);
+}
+
+static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
+{
+ atomic_long_sub(l, &mm->hugetlb_usage);
+}
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
@@ -519,6 +530,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
{
return &mm->page_table_lock;
}
+
+static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
+{
+}
+
+static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
diff --git v4.3-rc1/include/linux/mm_types.h v4.3-rc1_patched/include/linux/mm_types.h
index 3d6baa7d4534..0a85da25a822 100644
--- v4.3-rc1/include/linux/mm_types.h
+++ v4.3-rc1_patched/include/linux/mm_types.h
@@ -486,6 +486,9 @@ struct mm_struct {
/* address of the bounds directory */
void __user *bd_addr;
#endif
+#ifdef CONFIG_HUGETLB_PAGE
+ atomic_long_t hugetlb_usage;
+#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git v4.3-rc1/mm/hugetlb.c v4.3-rc1_patched/mm/hugetlb.c
index 999fb0aef8f1..444a55de3c4a 100644
--- v4.3-rc1/mm/hugetlb.c
+++ v4.3-rc1_patched/mm/hugetlb.c
@@ -2790,6 +2790,12 @@ void hugetlb_show_meminfo(void)
1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}
+void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
+{
+ seq_printf(m, "HugetlbPages:\t%8lu kB\n",
+ atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
+}
+
/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
@@ -3025,6 +3031,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
get_page(ptepage);
page_dup_rmap(ptepage);
set_huge_pte_at(dst, addr, dst_pte, entry);
+ hugetlb_count_add(pages_per_huge_page(h), dst);
}
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
@@ -3105,6 +3112,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (huge_pte_dirty(pte))
set_page_dirty(page);
+ hugetlb_count_sub(pages_per_huge_page(h), mm);
page_remove_rmap(page);
force_flush = !__tlb_remove_page(tlb, page);
if (force_flush) {
@@ -3501,6 +3509,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
+ hugetlb_count_add(pages_per_huge_page(h), mm);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
diff --git v4.3-rc1/mm/rmap.c v4.3-rc1_patched/mm/rmap.c
index f5b5c1f3dcd7..d40e7aefb888 100644
--- v4.3-rc1/mm/rmap.c
+++ v4.3-rc1_patched/mm/rmap.c
@@ -1352,7 +1352,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
update_hiwater_rss(mm);
if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
- if (!PageHuge(page)) {
+ if (PageHuge(page)) {
+ hugetlb_count_sub(1 << compound_order(page), mm);
+ } else {
if (PageAnon(page))
dec_mm_counter(mm, MM_ANONPAGES);
else
--
2.4.3
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v6 0/2] hugetlb: display per-process/per-vma usage
@ 2015-09-17 9:09 Naoya Horiguchi
2015-09-17 9:09 ` [PATCH v6 1/2] mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps Naoya Horiguchi
2015-09-17 9:09 ` [PATCH v6 2/2] mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status Naoya Horiguchi
0 siblings, 2 replies; 5+ messages in thread
From: Naoya Horiguchi @ 2015-09-17 9:09 UTC (permalink / raw)
To: Andrew Morton
Cc: Michal Hocko, Vlastimil Babka, Pádraig Brady,
David Rientjes, Jörn Engel, Mike Kravetz, linux-mm,
linux-kernel, Naoya Horiguchi, Naoya Horiguchi
[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #1: Type: text/plain; charset="utf-8", Size: 1008 bytes --]
Hi everyone,
I updated the series against v4.3-rc1.
In patch 1/2, a new field is split into two to identify shared/private
mapping (thanks to Pádraig).
And I dropped showing per-hugepagesize info in /proc/PID/status in patch
2/2 because there were objections on this part. This saves lines of diff.
Thanks,
Naoya Horiguchi
---
Summary:
Naoya Horiguchi (2):
mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps
mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status
Documentation/filesystems/proc.txt | 10 ++++++++++
fs/proc/task_mmu.c | 39 ++++++++++++++++++++++++++++++++++++++
include/linux/hugetlb.h | 19 +++++++++++++++++++
include/linux/mm_types.h | 3 +++
mm/hugetlb.c | 9 +++++++++
mm/rmap.c | 4 +++-
6 files changed, 83 insertions(+), 1 deletion(-)
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v6 1/2] mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps
2015-09-17 9:09 [PATCH v6 0/2] hugetlb: display per-process/per-vma usage Naoya Horiguchi
@ 2015-09-17 9:09 ` Naoya Horiguchi
2015-10-29 19:04 ` [PATCH v7] " Hugh Dickins
2015-09-17 9:09 ` [PATCH v6 2/2] mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status Naoya Horiguchi
1 sibling, 1 reply; 5+ messages in thread
From: Naoya Horiguchi @ 2015-09-17 9:09 UTC (permalink / raw)
To: Andrew Morton
Cc: Michal Hocko, Vlastimil Babka, Pádraig Brady,
David Rientjes, Jörn Engel, Mike Kravetz, linux-mm,
linux-kernel, Naoya Horiguchi, Naoya Horiguchi
Currently /proc/PID/smaps provides no usage info for vma(VM_HUGETLB), which
is inconvenient when we want to know per-task or per-vma base hugetlb usage.
To solve this, this patch adds new fields for hugetlb usage like below:
Size: 20480 kB
Rss: 0 kB
Pss: 0 kB
Shared_Clean: 0 kB
Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 0 kB
Anonymous: 0 kB
AnonHugePages: 0 kB
Shared_Hugetlb: 18432 kB
Private_Hugetlb: 2048 kB
Swap: 0 kB
KernelPageSize: 2048 kB
MMUPageSize: 2048 kB
Locked: 0 kB
VmFlags: rd wr mr mw me de ht
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Joern Engel <joern@logfs.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
---
v5 -> v6:
- add comment more to refer to no inclusion in RSS/PSS field
- separate field into {Shared,Private}_Hugetlb (thanks to Pádraig Brady)
v3 -> v4:
- suspend Acked-by tag because v3->v4 change is not trivial
- I stated in previous discussion that HugetlbPages line can contain page
size info, but that's not necessary because we already have KernelPageSize
info.
- merged documentation update, where the current documentation doesn't mention
AnonHugePages, so it's also added.
---
Documentation/filesystems/proc.txt | 8 ++++++++
fs/proc/task_mmu.c | 38 ++++++++++++++++++++++++++++++++++++++
2 files changed, 46 insertions(+)
diff --git v4.3-rc1/Documentation/filesystems/proc.txt v4.3-rc1_patched/Documentation/filesystems/proc.txt
index d411ca63c8b6..b9467d91052a 100644
--- v4.3-rc1/Documentation/filesystems/proc.txt
+++ v4.3-rc1_patched/Documentation/filesystems/proc.txt
@@ -423,6 +423,9 @@ Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 892 kB
Anonymous: 0 kB
+AnonHugePages: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
KernelPageSize: 4 kB
@@ -451,6 +454,11 @@ and a page is modified, the file page is replaced by a private anonymous copy.
"Swap" shows how much would-be-anonymous memory is also used, but out on
swap.
"SwapPss" shows proportional swap share of this mapping.
+"AnonHugePages" shows the amount of memory backed by transparent hugepage.
+"Shared_Hugetlb" and "Private_Hugetlb" show the amounts of memory backed by
+hugetlbfs page which is *not* counted in "RSS" or "PSS" field for historical
+reasons. And these are not included in {Shared,Private}_{Clean,Dirty} field.
+
"VmFlags" field deserves a separate description. This member represents the kernel
flags associated with the particular virtual memory area in two letter encoded
manner. The codes are the following:
diff --git v4.3-rc1/fs/proc/task_mmu.c v4.3-rc1_patched/fs/proc/task_mmu.c
index e2d46adb54b4..22c02917f265 100644
--- v4.3-rc1/fs/proc/task_mmu.c
+++ v4.3-rc1_patched/fs/proc/task_mmu.c
@@ -446,6 +446,8 @@ struct mem_size_stats {
unsigned long anonymous;
unsigned long anonymous_thp;
unsigned long swap;
+ unsigned long shared_hugetlb;
+ unsigned long private_hugetlb;
u64 pss;
u64 swap_pss;
};
@@ -625,12 +627,44 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
seq_putc(m, '\n');
}
+#ifdef CONFIG_HUGETLB_PAGE
+static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct mem_size_stats *mss = walk->private;
+ struct vm_area_struct *vma = walk->vma;
+ struct page *page = NULL;
+
+ if (pte_present(*pte)) {
+ page = vm_normal_page(vma, addr, *pte);
+ } else if (is_swap_pte(*pte)) {
+ swp_entry_t swpent = pte_to_swp_entry(*pte);
+
+ if (is_migration_entry(swpent))
+ page = migration_entry_to_page(swpent);
+ }
+ if (page) {
+ int mapcount = page_mapcount(page);
+
+ if (mapcount >= 2)
+ mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
+ else
+ mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+ }
+ return 0;
+}
+#endif /* HUGETLB_PAGE */
+
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
+#ifdef CONFIG_HUGETLB_PAGE
+ .hugetlb_entry = smaps_hugetlb_range,
+#endif
.mm = vma->vm_mm,
.private = &mss,
};
@@ -652,6 +686,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
"AnonHugePages: %8lu kB\n"
+ "Shared_Hugetlb: %8lu kB\n"
+ "Private_Hugetlb: %8lu kB\n"
"Swap: %8lu kB\n"
"SwapPss: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
@@ -667,6 +703,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
mss.referenced >> 10,
mss.anonymous >> 10,
mss.anonymous_thp >> 10,
+ mss.shared_hugetlb >> 10,
+ mss.private_hugetlb >> 10,
mss.swap >> 10,
(unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
vma_kernel_pagesize(vma) >> 10,
--
2.4.3
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH v6 2/2] mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status
2015-09-17 9:09 ` [PATCH v6 2/2] mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status Naoya Horiguchi
@ 2015-09-20 10:58 ` Michal Hocko
0 siblings, 0 replies; 5+ messages in thread
From: Michal Hocko @ 2015-09-20 10:58 UTC (permalink / raw)
To: Naoya Horiguchi
Cc: Andrew Morton, Vlastimil Babka, Pádraig Brady,
David Rientjes, Jörn Engel, Mike Kravetz, linux-mm,
linux-kernel, Naoya Horiguchi
On Thu 17-09-15 09:09:31, Naoya Horiguchi wrote:
> Currently there's no easy way to get per-process usage of hugetlb pages, which
> is inconvenient because userspace applications which use hugetlb typically want
> to control their processes on the basis of how much memory (including hugetlb)
> they use. So this patch simply provides easy access to the info via
> /proc/PID/status.
Thank you for making this much more lightweight. If we ever have a
request for a per-size breakdown we can add HugetlbPages-$size: value kB
> Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
> Acked-by: Joern Engel <joern@logfs.org>
> Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Just a small nit-pick, feel free to ignore if this was really intended:
[...]
> +static inline void hugetlb_count_add(long l, struct mm_struct *mm)
> +{
> + atomic_long_add(l, &mm->hugetlb_usage);
> +}
> +
> +static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
> +{
> + atomic_long_sub(l, &mm->hugetlb_usage);
> +}
I can see why you didn't use dec_mm_counter but the ordering could be
same. Other functions which handle counters follow the same template
(target, counter/count).
--
Michal Hocko
SUSE Labs
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v7] mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps
2015-09-17 9:09 ` [PATCH v6 1/2] mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps Naoya Horiguchi
@ 2015-10-29 19:04 ` Hugh Dickins
0 siblings, 0 replies; 5+ messages in thread
From: Hugh Dickins @ 2015-10-29 19:04 UTC (permalink / raw)
To: Andrew Morton
Cc: Naoya Horiguchi, Michal Hocko, Vlastimil Babka,
Pádraig Brady, David Rientjes, Joern Engel, Mike Kravetz,
linux-mm, linux-kernel, Naoya Horiguchi
From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Currently /proc/PID/smaps provides no usage info for vma(VM_HUGETLB), which
is inconvenient when we want to know per-task or per-vma base hugetlb usage.
To solve this, this patch adds new fields for hugetlb usage like below:
Size: 20480 kB
Rss: 0 kB
Pss: 0 kB
Shared_Clean: 0 kB
Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 0 kB
Anonymous: 0 kB
AnonHugePages: 0 kB
Shared_Hugetlb: 18432 kB
Private_Hugetlb: 2048 kB
Swap: 0 kB
KernelPageSize: 2048 kB
MMUPageSize: 2048 kB
Locked: 0 kB
VmFlags: rd wr mr mw me de ht
[ hughd: fixed Private_Hugetlb alignment ]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Joern Engel <joern@logfs.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
---
Andrew, please add back your Signed-off-by, and replace
mm-hugetlb-proc-add-hugetlb-related-fields-to-proc-pid-smaps.patch
by this version: I couldn't send a "fix" patch because an important
line was (commendably upfront) in the patch description itself.
The patch is just grabbed out of mmotm and fixed up inplace.
Seems I'm the only one to care, but I've been distressed by the
misalignment of the Private_Hugetlb field: most of us will never
see anything but "0 kB" there, so please don't uglify it for us;
it's not as if %7lu would truncate a larger number.
Documentation/filesystems/proc.txt | 8 +++++
fs/proc/task_mmu.c | 38 +++++++++++++++++++++++++++
2 files changed, 46 insertions(+)
diff -puN Documentation/filesystems/proc.txt~mm-hugetlb-proc-add-hugetlb-related-fields-to-proc-pid-smaps Documentation/filesystems/proc.txt
--- a/Documentation/filesystems/proc.txt~mm-hugetlb-proc-add-hugetlb-related-fields-to-proc-pid-smaps
+++ a/Documentation/filesystems/proc.txt
@@ -423,6 +423,9 @@ Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 892 kB
Anonymous: 0 kB
+AnonHugePages: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
KernelPageSize: 4 kB
@@ -451,6 +454,11 @@ and a page is modified, the file page is
"Swap" shows how much would-be-anonymous memory is also used, but out on
swap.
"SwapPss" shows proportional swap share of this mapping.
+"AnonHugePages" shows the amount of memory backed by transparent hugepage.
+"Shared_Hugetlb" and "Private_Hugetlb" show the amounts of memory backed by
+hugetlbfs page which is *not* counted in "RSS" or "PSS" field for historical
+reasons. And these are not included in {Shared,Private}_{Clean,Dirty} field.
+
"VmFlags" field deserves a separate description. This member represents the kernel
flags associated with the particular virtual memory area in two letter encoded
manner. The codes are the following:
diff -puN fs/proc/task_mmu.c~mm-hugetlb-proc-add-hugetlb-related-fields-to-proc-pid-smaps fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c~mm-hugetlb-proc-add-hugetlb-related-fields-to-proc-pid-smaps
+++ a/fs/proc/task_mmu.c
@@ -446,6 +446,8 @@ struct mem_size_stats {
unsigned long anonymous;
unsigned long anonymous_thp;
unsigned long swap;
+ unsigned long shared_hugetlb;
+ unsigned long private_hugetlb;
u64 pss;
u64 swap_pss;
};
@@ -625,12 +627,44 @@ static void show_smap_vma_flags(struct s
seq_putc(m, '\n');
}
+#ifdef CONFIG_HUGETLB_PAGE
+static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct mem_size_stats *mss = walk->private;
+ struct vm_area_struct *vma = walk->vma;
+ struct page *page = NULL;
+
+ if (pte_present(*pte)) {
+ page = vm_normal_page(vma, addr, *pte);
+ } else if (is_swap_pte(*pte)) {
+ swp_entry_t swpent = pte_to_swp_entry(*pte);
+
+ if (is_migration_entry(swpent))
+ page = migration_entry_to_page(swpent);
+ }
+ if (page) {
+ int mapcount = page_mapcount(page);
+
+ if (mapcount >= 2)
+ mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
+ else
+ mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+ }
+ return 0;
+}
+#endif /* HUGETLB_PAGE */
+
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
+#ifdef CONFIG_HUGETLB_PAGE
+ .hugetlb_entry = smaps_hugetlb_range,
+#endif
.mm = vma->vm_mm,
.private = &mss,
};
@@ -652,6 +686,8 @@ static int show_smap(struct seq_file *m,
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
"AnonHugePages: %8lu kB\n"
+ "Shared_Hugetlb: %8lu kB\n"
+ "Private_Hugetlb: %7lu kB\n"
"Swap: %8lu kB\n"
"SwapPss: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
@@ -667,6 +703,8 @@ static int show_smap(struct seq_file *m,
mss.referenced >> 10,
mss.anonymous >> 10,
mss.anonymous_thp >> 10,
+ mss.shared_hugetlb >> 10,
+ mss.private_hugetlb >> 10,
mss.swap >> 10,
(unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
vma_kernel_pagesize(vma) >> 10,
_
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2015-10-29 19:04 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-09-17 9:09 [PATCH v6 0/2] hugetlb: display per-process/per-vma usage Naoya Horiguchi
2015-09-17 9:09 ` [PATCH v6 1/2] mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps Naoya Horiguchi
2015-10-29 19:04 ` [PATCH v7] " Hugh Dickins
2015-09-17 9:09 ` [PATCH v6 2/2] mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status Naoya Horiguchi
2015-09-20 10:58 ` Michal Hocko
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox