* [PATCH] mm: Account pud page tables
@ 2017-09-22 8:41 Kirill A. Shutemov
2017-09-24 21:12 ` kbuild test robot
2017-09-24 21:13 ` kbuild test robot
0 siblings, 2 replies; 3+ messages in thread
From: Kirill A. Shutemov @ 2017-09-22 8:41 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-mm, linux-kernel, Kirill A. Shutemov, Michal Hocko,
Vlastimil Babka
On a machine with 5-level paging support, a process can allocate a
significant amount of memory and stay unnoticed by the oom-killer and
memory cgroup. The trick is to allocate a lot of PUD page tables.
We don't account PUD page tables, only PMD and PTE.
We already addressed the same issue for PMD page tables, see
dc6c9a35b66b ("mm: account pmd page tables to the process").
The introduction of 5-level paging brings the same issue for PUD page tables.
This patch expands the accounting to the PUD level.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
---
Documentation/sysctl/vm.txt | 8 ++++----
arch/powerpc/mm/hugetlbpage.c | 1 +
arch/sparc/mm/hugetlbpage.c | 1 +
fs/proc/task_mmu.c | 5 ++++-
include/linux/mm.h | 32 +++++++++++++++++++++++++++++++-
include/linux/mm_types.h | 3 +++
kernel/fork.c | 4 ++++
mm/debug.c | 6 ++++--
mm/memory.c | 15 +++++++++------
mm/oom_kill.c | 8 +++++---
10 files changed, 66 insertions(+), 17 deletions(-)
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 9baf66a9ef4e..2717b6f2d706 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -622,10 +622,10 @@ oom_dump_tasks
Enables a system-wide task dump (excluding kernel threads) to be produced
when the kernel performs an OOM-killing and includes such information as
-pid, uid, tgid, vm size, rss, nr_ptes, nr_pmds, swapents, oom_score_adj
-score, and name. This is helpful to determine why the OOM killer was
-invoked, to identify the rogue task that caused it, and to determine why
-the OOM killer chose the task it did to kill.
+pid, uid, tgid, vm size, rss, nr_ptes, nr_pmds, nr_puds, swapents,
+oom_score_adj score, and name. This is helpful to determine why the OOM
+killer was invoked, to identify the rogue task that caused it, and to
+determine why the OOM killer chose the task it did to kill.
If this is set to zero, this information is suppressed. On very
large systems with thousands of tasks it may not be feasible to dump
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1571a498a33f..a9b9083c5e49 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -433,6 +433,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
+ mm_dec_nr_puds(tlb->mm);
}
/*
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index bcd8cdbc377f..fd0d85808828 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -471,6 +471,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
+ mm_dec_nr_puds(tlb->mm);
}
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5589b4bd4b85..0bf9e423aa99 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -25,7 +25,7 @@
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
- unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
+ unsigned long text, lib, swap, ptes, pmds, puds, anon, file, shmem;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
anon = get_mm_counter(mm, MM_ANONPAGES);
@@ -51,6 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
swap = get_mm_counter(mm, MM_SWAPENTS);
ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
+ puds = PTRS_PER_PUD * sizeof(pmd_t) * mm_nr_puds(mm);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
@@ -67,6 +68,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
"VmPMD:\t%8lu kB\n"
+ "VmPUD:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
total_vm << (PAGE_SHIFT-10),
@@ -81,6 +83,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
ptes >> 10,
pmds >> 10,
+ puds >> 10,
swap << (PAGE_SHIFT-10));
hugetlb_report_usage(m, mm);
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f8c10d336e42..70ca95d2deee 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1604,8 +1604,38 @@ static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
{
return 0;
}
+
+static inline unsigned long mm_nr_puds(struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void mm_nr_puds_init(struct mm_struct *mm) {}
+static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
+
#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
+
+static inline void mm_nr_puds_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->nr_puds, 0);
+}
+
+static inline unsigned long mm_nr_puds(const struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->nr_puds);
+}
+
+static inline void mm_inc_nr_puds(struct mm_struct *mm)
+{
+ atomic_long_inc(&mm->nr_puds);
+}
+
+static inline void mm_dec_nr_puds(struct mm_struct *mm)
+{
+ atomic_long_dec(&mm->nr_puds);
+}
#endif
#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
@@ -1633,7 +1663,7 @@ static inline void mm_nr_pmds_init(struct mm_struct *mm)
atomic_long_set(&mm->nr_pmds, 0);
}
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+static inline unsigned long mm_nr_pmds(const struct mm_struct *mm)
{
return atomic_long_read(&mm->nr_pmds);
}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 46f4ecf5479a..6c8c2bb9e5a1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -401,6 +401,9 @@ struct mm_struct {
atomic_long_t nr_ptes; /* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
atomic_long_t nr_pmds; /* PMD page table pages */
+#endif
+#if CONFIG_PGTABLE_LEVELS > 3
+ atomic_long_t nr_puds; /* PUD page table pages */
#endif
int map_count; /* number of VMAs */
diff --git a/kernel/fork.c b/kernel/fork.c
index 10646182440f..5624918154db 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -815,6 +815,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->core_state = NULL;
atomic_long_set(&mm->nr_ptes, 0);
mm_nr_pmds_init(mm);
+ mm_nr_puds_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
mm->pinned_vm = 0;
@@ -874,6 +875,9 @@ static void check_mm(struct mm_struct *mm)
if (mm_nr_pmds(mm))
pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
mm_nr_pmds(mm));
+ if (mm_nr_puds(mm))
+ pr_alert("BUG: non-zero nr_puds on freeing mm: %ld\n",
+ mm_nr_puds(mm));
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
diff --git a/mm/debug.c b/mm/debug.c
index 5715448ab0b5..afccb2565269 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -104,7 +104,8 @@ void dump_mm(const struct mm_struct *mm)
"get_unmapped_area %p\n"
#endif
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
- "pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
+ "pgd %p mm_users %d mm_count %d\n"
+ "nr_ptes %lu nr_pmds %lu nr_puds %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
@@ -135,7 +136,8 @@ void dump_mm(const struct mm_struct *mm)
mm->pgd, atomic_read(&mm->mm_users),
atomic_read(&mm->mm_count),
atomic_long_read((atomic_long_t *)&mm->nr_ptes),
- mm_nr_pmds((struct mm_struct *)mm),
+ mm_nr_pmds(mm),
+ mm_nr_puds(mm),
mm->map_count,
mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
diff --git a/mm/memory.c b/mm/memory.c
index ec4e15494901..8f49fdafac56 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -506,6 +506,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
pud = pud_offset(p4d, start);
p4d_clear(p4d);
pud_free_tlb(tlb, pud, start);
+ mm_dec_nr_puds(tlb->mm);
}
static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -4124,15 +4125,17 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_5LEVEL_HACK
- if (p4d_present(*p4d)) /* Another has populated it */
- pud_free(mm, new);
- else
+ if (!p4d_present(*p4d)) {
+ mm_inc_nr_puds(mm);
p4d_populate(mm, p4d, new);
-#else
- if (pgd_present(*p4d)) /* Another has populated it */
+ } else /* Another has populated it */
pud_free(mm, new);
- else
+#else
+ if (!pgd_present(*pud)) {
+ mm_inc_nr_puds(mm);
pgd_populate(mm, p4d, new);
+ } else /* Another has populated it */
+ pud_free(mm, new);
#endif /* __ARCH_HAS_5LEVEL_HACK */
spin_unlock(&mm->page_table_lock);
return 0;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 99736e026712..4bee6968885d 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -200,7 +200,8 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* task's rss, pagetable and swap space use.
*/
points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
- atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
+ atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm) +
+ mm_nr_puds(p->mm);
task_unlock(p);
/*
@@ -376,7 +377,7 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
struct task_struct *p;
struct task_struct *task;
- pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n");
+ pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds nr_puds swapents oom_score_adj name\n");
rcu_read_lock();
for_each_process(p) {
if (oom_unkillable_task(p, memcg, nodemask))
@@ -392,11 +393,12 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
continue;
}
- pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
+ pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %7ld %8lu %5hd %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
atomic_long_read(&task->mm->nr_ptes),
mm_nr_pmds(task->mm),
+ mm_nr_puds(task->mm),
get_mm_counter(task->mm, MM_SWAPENTS),
task->signal->oom_score_adj, task->comm);
task_unlock(task);
--
2.14.1
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] mm: Account pud page tables
2017-09-22 8:41 [PATCH] mm: Account pud page tables Kirill A. Shutemov
@ 2017-09-24 21:12 ` kbuild test robot
2017-09-24 21:13 ` kbuild test robot
1 sibling, 0 replies; 3+ messages in thread
From: kbuild test robot @ 2017-09-24 21:12 UTC (permalink / raw)
To: Kirill A. Shutemov
Cc: kbuild-all, Andrew Morton, linux-mm, linux-kernel, Michal Hocko,
Vlastimil Babka
[-- Attachment #1: Type: text/plain, Size: 7498 bytes --]
Hi Kirill,
[auto build test WARNING on linus/master]
[also build test WARNING on v4.14-rc1]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Kirill-A-Shutemov/mm-Account-pud-page-tables/20170925-035907
config: i386-randconfig-x077-201739 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
# save the attached .config to linux build tree
make ARCH=i386
All warnings (new ones prefixed by >>):
In file included from include/linux/kernel.h:13:0,
from mm/debug.c:8:
mm/debug.c: In function 'dump_mm':
>> mm/debug.c:139:14: warning: passing argument 1 of 'mm_nr_pmds' discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers]
mm_nr_pmds(mm),
^
include/linux/printk.h:295:35: note: in definition of macro 'pr_emerg'
printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
^~~~~~~~~~~
In file included from mm/debug.c:9:0:
include/linux/mm.h:1650:29: note: expected 'struct mm_struct *' but argument is of type 'const struct mm_struct *'
static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
^~~~~~~~~~
In file included from include/linux/kernel.h:13:0,
from mm/debug.c:8:
mm/debug.c:140:14: warning: passing argument 1 of 'mm_nr_puds' discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers]
mm_nr_puds(mm),
^
include/linux/printk.h:295:35: note: in definition of macro 'pr_emerg'
printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
^~~~~~~~~~~
In file included from mm/debug.c:9:0:
include/linux/mm.h:1608:29: note: expected 'struct mm_struct *' but argument is of type 'const struct mm_struct *'
static inline unsigned long mm_nr_puds(struct mm_struct *mm)
^~~~~~~~~~
vim +139 mm/debug.c
> 8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/trace_events.h>
11 #include <linux/memcontrol.h>
12 #include <trace/events/mmflags.h>
13 #include <linux/migrate.h>
14 #include <linux/page_owner.h>
15
16 #include "internal.h"
17
18 char *migrate_reason_names[MR_TYPES] = {
19 "compaction",
20 "memory_failure",
21 "memory_hotplug",
22 "syscall_or_cpuset",
23 "mempolicy_mbind",
24 "numa_misplaced",
25 "cma",
26 };
27
28 const struct trace_print_flags pageflag_names[] = {
29 __def_pageflag_names,
30 {0, NULL}
31 };
32
33 const struct trace_print_flags gfpflag_names[] = {
34 __def_gfpflag_names,
35 {0, NULL}
36 };
37
38 const struct trace_print_flags vmaflag_names[] = {
39 __def_vmaflag_names,
40 {0, NULL}
41 };
42
43 void __dump_page(struct page *page, const char *reason)
44 {
45 /*
46 * Avoid VM_BUG_ON() in page_mapcount().
47 * page->_mapcount space in struct page is used by sl[aou]b pages to
48 * encode own info.
49 */
50 int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
51
52 pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
53 page, page_ref_count(page), mapcount,
54 page->mapping, page_to_pgoff(page));
55 if (PageCompound(page))
56 pr_cont(" compound_mapcount: %d", compound_mapcount(page));
57 pr_cont("\n");
58 BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
59
60 pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
61
62 print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
63 sizeof(unsigned long), page,
64 sizeof(struct page), false);
65
66 if (reason)
67 pr_alert("page dumped because: %s\n", reason);
68
69 #ifdef CONFIG_MEMCG
70 if (page->mem_cgroup)
71 pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
72 #endif
73 }
74
75 void dump_page(struct page *page, const char *reason)
76 {
77 __dump_page(page, reason);
78 dump_page_owner(page);
79 }
80 EXPORT_SYMBOL(dump_page);
81
82 #ifdef CONFIG_DEBUG_VM
83
84 void dump_vma(const struct vm_area_struct *vma)
85 {
86 pr_emerg("vma %p start %p end %p\n"
87 "next %p prev %p mm %p\n"
88 "prot %lx anon_vma %p vm_ops %p\n"
89 "pgoff %lx file %p private_data %p\n"
90 "flags: %#lx(%pGv)\n",
91 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
92 vma->vm_prev, vma->vm_mm,
93 (unsigned long)pgprot_val(vma->vm_page_prot),
94 vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
95 vma->vm_file, vma->vm_private_data,
96 vma->vm_flags, &vma->vm_flags);
97 }
98 EXPORT_SYMBOL(dump_vma);
99
100 void dump_mm(const struct mm_struct *mm)
101 {
102 pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
103 #ifdef CONFIG_MMU
104 "get_unmapped_area %p\n"
105 #endif
106 "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
107 "pgd %p mm_users %d mm_count %d\n"
108 "nr_ptes %lu nr_pmds %lu nr_puds %lu map_count %d\n"
109 "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
110 "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
111 "start_code %lx end_code %lx start_data %lx end_data %lx\n"
112 "start_brk %lx brk %lx start_stack %lx\n"
113 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
114 "binfmt %p flags %lx core_state %p\n"
115 #ifdef CONFIG_AIO
116 "ioctx_table %p\n"
117 #endif
118 #ifdef CONFIG_MEMCG
119 "owner %p "
120 #endif
121 "exe_file %p\n"
122 #ifdef CONFIG_MMU_NOTIFIER
123 "mmu_notifier_mm %p\n"
124 #endif
125 #ifdef CONFIG_NUMA_BALANCING
126 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
127 #endif
128 "tlb_flush_pending %d\n"
129 "def_flags: %#lx(%pGv)\n",
130
131 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
132 #ifdef CONFIG_MMU
133 mm->get_unmapped_area,
134 #endif
135 mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
136 mm->pgd, atomic_read(&mm->mm_users),
137 atomic_read(&mm->mm_count),
138 atomic_long_read((atomic_long_t *)&mm->nr_ptes),
> 139 mm_nr_pmds(mm),
140 mm_nr_puds(mm),
141 mm->map_count,
142 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
143 mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
144 mm->start_code, mm->end_code, mm->start_data, mm->end_data,
145 mm->start_brk, mm->brk, mm->start_stack,
146 mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
147 mm->binfmt, mm->flags, mm->core_state,
148 #ifdef CONFIG_AIO
149 mm->ioctx_table,
150 #endif
151 #ifdef CONFIG_MEMCG
152 mm->owner,
153 #endif
154 mm->exe_file,
155 #ifdef CONFIG_MMU_NOTIFIER
156 mm->mmu_notifier_mm,
157 #endif
158 #ifdef CONFIG_NUMA_BALANCING
159 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
160 #endif
161 atomic_read(&mm->tlb_flush_pending),
162 mm->def_flags, &mm->def_flags
163 );
164 }
165
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 26644 bytes --]
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] mm: Account pud page tables
2017-09-22 8:41 [PATCH] mm: Account pud page tables Kirill A. Shutemov
2017-09-24 21:12 ` kbuild test robot
@ 2017-09-24 21:13 ` kbuild test robot
1 sibling, 0 replies; 3+ messages in thread
From: kbuild test robot @ 2017-09-24 21:13 UTC (permalink / raw)
To: Kirill A. Shutemov
Cc: kbuild-all, Andrew Morton, linux-mm, linux-kernel, Michal Hocko,
Vlastimil Babka
[-- Attachment #1: Type: text/plain, Size: 6793 bytes --]
Hi Kirill,
[auto build test WARNING on linus/master]
[also build test WARNING on v4.14-rc1 next-20170922]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Kirill-A-Shutemov/mm-Account-pud-page-tables/20170925-035907
config: i386-randconfig-x070-201739 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
# save the attached .config to linux build tree
make ARCH=i386
All warnings (new ones prefixed by >>):
In file included from include/linux/kernel.h:13:0,
from mm/debug.c:8:
mm/debug.c: In function 'dump_mm':
>> mm/debug.c:140:14: warning: passing argument 1 of 'mm_nr_puds' discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers]
mm_nr_puds(mm),
^
include/linux/printk.h:295:35: note: in definition of macro 'pr_emerg'
printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
^~~~~~~~~~~
In file included from mm/debug.c:9:0:
include/linux/mm.h:1608:29: note: expected 'struct mm_struct *' but argument is of type 'const struct mm_struct *'
static inline unsigned long mm_nr_puds(struct mm_struct *mm)
^~~~~~~~~~
vim +140 mm/debug.c
> 8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/trace_events.h>
11 #include <linux/memcontrol.h>
12 #include <trace/events/mmflags.h>
13 #include <linux/migrate.h>
14 #include <linux/page_owner.h>
15
16 #include "internal.h"
17
18 char *migrate_reason_names[MR_TYPES] = {
19 "compaction",
20 "memory_failure",
21 "memory_hotplug",
22 "syscall_or_cpuset",
23 "mempolicy_mbind",
24 "numa_misplaced",
25 "cma",
26 };
27
28 const struct trace_print_flags pageflag_names[] = {
29 __def_pageflag_names,
30 {0, NULL}
31 };
32
33 const struct trace_print_flags gfpflag_names[] = {
34 __def_gfpflag_names,
35 {0, NULL}
36 };
37
38 const struct trace_print_flags vmaflag_names[] = {
39 __def_vmaflag_names,
40 {0, NULL}
41 };
42
43 void __dump_page(struct page *page, const char *reason)
44 {
45 /*
46 * Avoid VM_BUG_ON() in page_mapcount().
47 * page->_mapcount space in struct page is used by sl[aou]b pages to
48 * encode own info.
49 */
50 int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
51
52 pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
53 page, page_ref_count(page), mapcount,
54 page->mapping, page_to_pgoff(page));
55 if (PageCompound(page))
56 pr_cont(" compound_mapcount: %d", compound_mapcount(page));
57 pr_cont("\n");
58 BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
59
60 pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
61
62 print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
63 sizeof(unsigned long), page,
64 sizeof(struct page), false);
65
66 if (reason)
67 pr_alert("page dumped because: %s\n", reason);
68
69 #ifdef CONFIG_MEMCG
70 if (page->mem_cgroup)
71 pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
72 #endif
73 }
74
75 void dump_page(struct page *page, const char *reason)
76 {
77 __dump_page(page, reason);
78 dump_page_owner(page);
79 }
80 EXPORT_SYMBOL(dump_page);
81
82 #ifdef CONFIG_DEBUG_VM
83
84 void dump_vma(const struct vm_area_struct *vma)
85 {
86 pr_emerg("vma %p start %p end %p\n"
87 "next %p prev %p mm %p\n"
88 "prot %lx anon_vma %p vm_ops %p\n"
89 "pgoff %lx file %p private_data %p\n"
90 "flags: %#lx(%pGv)\n",
91 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
92 vma->vm_prev, vma->vm_mm,
93 (unsigned long)pgprot_val(vma->vm_page_prot),
94 vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
95 vma->vm_file, vma->vm_private_data,
96 vma->vm_flags, &vma->vm_flags);
97 }
98 EXPORT_SYMBOL(dump_vma);
99
100 void dump_mm(const struct mm_struct *mm)
101 {
102 pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
103 #ifdef CONFIG_MMU
104 "get_unmapped_area %p\n"
105 #endif
106 "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
107 "pgd %p mm_users %d mm_count %d\n"
108 "nr_ptes %lu nr_pmds %lu nr_puds %lu map_count %d\n"
109 "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
110 "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
111 "start_code %lx end_code %lx start_data %lx end_data %lx\n"
112 "start_brk %lx brk %lx start_stack %lx\n"
113 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
114 "binfmt %p flags %lx core_state %p\n"
115 #ifdef CONFIG_AIO
116 "ioctx_table %p\n"
117 #endif
118 #ifdef CONFIG_MEMCG
119 "owner %p "
120 #endif
121 "exe_file %p\n"
122 #ifdef CONFIG_MMU_NOTIFIER
123 "mmu_notifier_mm %p\n"
124 #endif
125 #ifdef CONFIG_NUMA_BALANCING
126 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
127 #endif
128 "tlb_flush_pending %d\n"
129 "def_flags: %#lx(%pGv)\n",
130
131 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
132 #ifdef CONFIG_MMU
133 mm->get_unmapped_area,
134 #endif
135 mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
136 mm->pgd, atomic_read(&mm->mm_users),
137 atomic_read(&mm->mm_count),
138 atomic_long_read((atomic_long_t *)&mm->nr_ptes),
139 mm_nr_pmds(mm),
> 140 mm_nr_puds(mm),
141 mm->map_count,
142 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
143 mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
144 mm->start_code, mm->end_code, mm->start_data, mm->end_data,
145 mm->start_brk, mm->brk, mm->start_stack,
146 mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
147 mm->binfmt, mm->flags, mm->core_state,
148 #ifdef CONFIG_AIO
149 mm->ioctx_table,
150 #endif
151 #ifdef CONFIG_MEMCG
152 mm->owner,
153 #endif
154 mm->exe_file,
155 #ifdef CONFIG_MMU_NOTIFIER
156 mm->mmu_notifier_mm,
157 #endif
158 #ifdef CONFIG_NUMA_BALANCING
159 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
160 #endif
161 atomic_read(&mm->tlb_flush_pending),
162 mm->def_flags, &mm->def_flags
163 );
164 }
165
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 22801 bytes --]
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2017-09-24 21:13 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-22 8:41 [PATCH] mm: Account pud page tables Kirill A. Shutemov
2017-09-24 21:12 ` kbuild test robot
2017-09-24 21:13 ` kbuild test robot
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox