From: haoxin <xhao@linux.alibaba.com>
To: Baolin Wang <baolin.wang@linux.alibaba.com>, willy@infradead.org
Cc: akpm@linux-foundation.org, adobriyan@gmail.com,
keescook@chromium.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org
Subject: Re: [RFC V3 PATCH] mm: add last level page table numa info to /proc/pid/numa_pgtable
Date: Mon, 1 Aug 2022 18:53:16 +0800 [thread overview]
Message-ID: <53793484-07a4-edab-652d-06e5e28521d8@linux.alibaba.com> (raw)
In-Reply-To: <f4f9c5be-a5ce-9e82-56ff-89d1aeee4d98@linux.alibaba.com>
On 2022/8/1 2:06 PM, Baolin Wang wrote:
> Hi Xin,
>
> On 8/1/2022 11:27 AM, Xin Hao wrote:
>> Most data center servers use a Non-Uniform Memory Access (NUMA)
>> shared memory architecture, where accessing data on a remote NUMA
>> node often brings high latency. What is easy to overlook is that
>> remote NUMA accesses to the page tables themselves can also lead
>> to performance degradation.
>>
>> So add a new interface in /proc. This will help developers get more
>> information about performance issues when they are caused by
>> cross-NUMA page table placement.
>>
>> V2 -> V3
>> 1. Fix a compile warning.
>>
>> V1 -> V2
>> 1. Simplify the code, as Matthew Wilcox advised.
>> 2. Fix some code formatting issues.
>
> Please move the change history below your 'Signed-off-by', after the '---' marker.
>
>>
>> V2:
>> https://lore.kernel.org/linux-mm/20220731155223.60238-1-xhao@linux.alibaba.com/
>> V1:
>> https://lore.kernel.org/linux-mm/YuVqdcY8Ibib2LJa@casper.infradead.org/T/
>>
>> Reported-by: kernel test robot <lkp@intel.com>
>> Signed-off-by: Xin Hao <xhao@linux.alibaba.com>
>> ---
>> fs/proc/base.c | 2 ++
>> fs/proc/internal.h | 1 +
>> fs/proc/task_mmu.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++
>> 3 files changed, 90 insertions(+)
>>
>> diff --git a/fs/proc/base.c b/fs/proc/base.c
>> index 8dfa36a99c74..487e82dd3275 100644
>> --- a/fs/proc/base.c
>> +++ b/fs/proc/base.c
>> @@ -3224,6 +3224,7 @@ static const struct pid_entry tgid_base_stuff[] = {
>> REG("maps", S_IRUGO, proc_pid_maps_operations),
>> #ifdef CONFIG_NUMA
>> REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations),
>> + REG("numa_pgtable", S_IRUGO, proc_pid_numa_pgtable_operations),
>> #endif
>> REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations),
>> LNK("cwd", proc_cwd_link),
>> @@ -3571,6 +3572,7 @@ static const struct pid_entry tid_base_stuff[] = {
>> #endif
>> #ifdef CONFIG_NUMA
>> REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations),
>> + REG("numa_pgtable", S_IRUGO, proc_pid_numa_pgtable_operations),
>> #endif
>> REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations),
>> LNK("cwd", proc_cwd_link),
>> diff --git a/fs/proc/internal.h b/fs/proc/internal.h
>> index 06a80f78433d..e7ed9ef097b6 100644
>> --- a/fs/proc/internal.h
>> +++ b/fs/proc/internal.h
>> @@ -296,6 +296,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);
>>
>> extern const struct file_operations proc_pid_maps_operations;
>> extern const struct file_operations proc_pid_numa_maps_operations;
>> +extern const struct file_operations proc_pid_numa_pgtable_operations;
>> extern const struct file_operations proc_pid_smaps_operations;
>> extern const struct file_operations proc_pid_smaps_rollup_operations;
>> extern const struct file_operations proc_clear_refs_operations;
>> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
>> index 2d04e3470d4c..77b7a49757f5 100644
>> --- a/fs/proc/task_mmu.c
>> +++ b/fs/proc/task_mmu.c
>> @@ -1999,4 +1999,91 @@ const struct file_operations proc_pid_numa_maps_operations = {
>> .release = proc_map_release,
>> };
>>
>> +struct pgtable_numa_private {
>> + struct proc_maps_private proc_maps;
>> + unsigned long node[MAX_NUMNODES];
>> +};
>> +
>> +static int gather_pgtable_numa_stats(pmd_t *pmd, unsigned long addr,
>> + unsigned long end, struct mm_walk *walk)
>> +{
>> + struct pgtable_numa_private *priv = walk->private;
>> + struct page *page;
>> + int nid;
>> +
>> + if (pmd_huge(*pmd)) {
>> + page = virt_to_page(pmd);
>> + } else {
>> + page = pmd_page(*pmd);
>
> You should validate that the pmd is valid and present before getting
> the page table page.
>
> if (pmd_none(*pmd) || !pmd_present(*pmd))
>
> Another issue: I think you should hold the pmd lock when calling
> pmd_page(), since after the pmd_huge() check the pmd entry can be
> modified by other threads if you do not hold the pmd lock.
>
Thanks, Baolin. I will fix it in the next version.
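
Just to make sure I understand the suggestion, the next version's walk
callback would look roughly like the untested sketch below; the use of
pmd_lock()/spin_unlock() here is my assumption about how to take the
pmd lock:

static int gather_pgtable_numa_stats(pmd_t *pmd, unsigned long addr,
				     unsigned long end, struct mm_walk *walk)
{
	struct pgtable_numa_private *priv = walk->private;
	struct page *page;
	spinlock_t *ptl;
	int nid;

	/* Hold the pmd lock so the entry cannot change under us. */
	ptl = pmd_lock(walk->mm, pmd);
	if (pmd_none(*pmd) || !pmd_present(*pmd)) {
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_huge(*pmd))
		/* Huge mapping: the PMD table itself is the last level. */
		page = virt_to_page(pmd);
	else
		/* Normal mapping: count the PTE page this entry points to. */
		page = pmd_page(*pmd);
	spin_unlock(ptl);

	nid = page_to_nid(page);
	priv->node[nid]++;

	return 0;
}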
>> + }
>> +
>> + nid = page_to_nid(page);
>> + priv->node[nid]++;
>> +
>> + return 0;
>> +}
>> +
>> +static const struct mm_walk_ops show_numa_pgtable_ops = {
>> + .pmd_entry = gather_pgtable_numa_stats,
>> +};
>> +
>> +/*
>> + * Display the page tables allocated per node via /proc.
>> + */
>> +static int show_numa_pgtable(struct seq_file *m, void *v)
>> +{
>> + struct pgtable_numa_private *numa_priv = m->private;
>> + struct vm_area_struct *vma = v;
>> + struct mm_struct *mm = vma->vm_mm;
>> + struct file *file = vma->vm_file;
>> + int nid;
>> +
>> + if (!mm)
>> + return 0;
>> +
>> + memset(numa_priv->node, 0, sizeof(numa_priv->node));
>> +
>> + seq_printf(m, "%08lx ", vma->vm_start);
>> +
>> + if (file) {
>> + seq_puts(m, " file=");
>> + seq_file_path(m, file, "\n\t= ");
>> + } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
>> + seq_puts(m, " heap");
>> + } else if (is_stack(vma)) {
>> + seq_puts(m, " stack");
>> + }
>> +
>> + /* mmap_lock is held by m_start */
>> + walk_page_vma(vma, &show_numa_pgtable_ops, numa_priv);
>> +
>> + for_each_node_state(nid, N_MEMORY) {
>> + if (numa_priv->node[nid])
>> + seq_printf(m, " N%d=%lu", nid, numa_priv->node[nid]);
>> + }
>> + seq_putc(m, '\n');
>> +
>> + return 0;
>> +}
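
For reference, with the seq_printf() format strings above, the output of
/proc/<pid>/numa_pgtable should look roughly like the lines below (the
addresses, file path and per-node counts are only illustrative):

00400000  file=/usr/bin/my_app N0=1
55e3b2c00000  heap N0=2 N1=1
7ffd1a000000  stack N0=1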
>> +
>> +static const struct seq_operations proc_pid_numa_pgtable_op = {
>> + .start = m_start,
>> + .next = m_next,
>> + .stop = m_stop,
>> + .show = show_numa_pgtable,
>> +};
>> +
>> +static int pid_numa_pgtable_open(struct inode *inode, struct file *file)
>> +{
>> + return proc_maps_open(inode, file, &proc_pid_numa_pgtable_op,
>> + sizeof(struct pgtable_numa_private));
>> +}
>> +
>> +const struct file_operations proc_pid_numa_pgtable_operations = {
>> + .open = pid_numa_pgtable_open,
>> + .read = seq_read,
>> + .llseek = seq_lseek,
>> + .release = proc_map_release,
>> +};
>> +
>> #endif /* CONFIG_NUMA */
>> --
>> 2.31.0