From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
To: linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
Matt Mackall <mpm@selenic.com>, Cliff Wickman <cpw@sgi.com>,
KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
Johannes Weiner <hannes@cmpxchg.org>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Michal Hocko <mhocko@suse.cz>,
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
Pavel Emelyanov <xemul@parallels.com>,
Rik van Riel <riel@redhat.com>,
kirill.shutemov@linux.intel.com, linux-kernel@vger.kernel.org
Subject: [PATCH 05/11] pagemap: redefine callback functions for page table walker
Date: Wed, 30 Oct 2013 17:44:53 -0400
Message-ID: <1383169499-25144-6-git-send-email-n-horiguchi@ah.jp.nec.com>
In-Reply-To: <1383169499-25144-1-git-send-email-n-horiguchi@ah.jp.nec.com>
pagemap_pte_range(), hooked to pmd_entry(), contains both the pmd loop and
the pte loop. This patch moves the pte part into pagemap_pte(), hooked to
pte_entry(). We also remove the VM_SOFTDIRTY check in pagemap_pte_range():
in the new page table walker we call __walk_page_range() for each vma
separately, so a single pgd/pud/pmd/pte loop never spans multiple vmas.
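For illustration, a minimal user of the reworked walker could be wired up as
below. This is only a sketch: walk->vma and walk->skip are the fields
introduced by this series, but the callback names and bodies (my_pmd,
my_pte) are hypothetical, and locking is elided for brevity.

	static int my_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
	{
		if (pmd_trans_huge(*pmd)) {
			/* handle the whole huge pmd here (locking elided) ... */
			walk->skip = 1;	/* don't call pte_entry for this pmd */
		}
		return 0;
	}

	static int my_pte(pte_t *pte, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
	{
		/* the walker already resolved the vma; no find_vma() needed */
		struct vm_area_struct *vma = walk->vma;

		/* handle a single pte within vma here ... */
		return 0;
	}

	struct mm_walk walk = {
		.pmd_entry	= my_pmd,
		.pte_entry	= my_pte,
		.mm		= mm,
	};

Because each callback now runs within a single vma, per-vma state such as
VM_SOFTDIRTY can be checked once up front instead of re-finding the vma on
every loop iteration.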
ChangeLog v2:
- remove cond_resched() (moved it to walk_hugetlb_range())
- rebase onto mmots
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
---
fs/proc/task_mmu.c | 76 ++++++++++++++++++++----------------------------------
1 file changed, 28 insertions(+), 48 deletions(-)
diff --git v3.12-rc7-mmots-2013-10-29-16-24.orig/fs/proc/task_mmu.c v3.12-rc7-mmots-2013-10-29-16-24/fs/proc/task_mmu.c
index 3e1b739..fde594c 100644
--- v3.12-rc7-mmots-2013-10-29-16-24.orig/fs/proc/task_mmu.c
+++ v3.12-rc7-mmots-2013-10-29-16-24/fs/proc/task_mmu.c
@@ -978,19 +978,33 @@ static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemap
}
#endif
-static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+static int pagemap_pte(pte_t *pte, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = walk->vma;
struct pagemapread *pm = walk->private;
- spinlock_t *ptl;
- pte_t *pte;
+ pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+
+ if (vma && vma->vm_start <= addr && end <= vma->vm_end) {
+ pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+ /* unmap before userspace copy */
+ pte_unmap(pte);
+ }
+ return add_to_pagemap(addr, &pme, pm);
+}
+
+static int pagemap_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
int err = 0;
+ struct vm_area_struct *vma = walk->vma;
+ struct pagemapread *pm = walk->private;
pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+ spinlock_t *ptl;
- /* find the first VMA at or above 'addr' */
- vma = find_vma(walk->mm, addr);
- if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+ if (!vma)
+ return err;
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
int pmd_flags2;
if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1009,41 +1023,9 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
break;
}
spin_unlock(ptl);
- return err;
- }
-
- if (pmd_trans_unstable(pmd))
- return 0;
- for (; addr != end; addr += PAGE_SIZE) {
- int flags2;
-
- /* check to see if we've left 'vma' behind
- * and need a new, higher one */
- if (vma && (addr >= vma->vm_end)) {
- vma = find_vma(walk->mm, addr);
- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
- flags2 = __PM_SOFT_DIRTY;
- else
- flags2 = 0;
- pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
- }
-
- /* check that 'vma' actually covers this address,
- * and that it isn't a huge page vma */
- if (vma && (vma->vm_start <= addr) &&
- !is_vm_hugetlb_page(vma)) {
- pte = pte_offset_map(pmd, addr);
- pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
- /* unmap before userspace copy */
- pte_unmap(pte);
- }
- err = add_to_pagemap(addr, &pme, pm);
- if (err)
- return err;
+ /* don't call pagemap_pte() */
+ walk->skip = 1;
}
-
- cond_resched();
-
return err;
}
@@ -1066,12 +1048,11 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = walk->vma;
int err = 0;
int flags2;
pagemap_entry_t pme;
- vma = find_vma(walk->mm, addr);
WARN_ON_ONCE(!vma);
if (vma && (vma->vm_flags & VM_SOFTDIRTY))
@@ -1079,6 +1060,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
else
flags2 = 0;
+ hmask = huge_page_mask(hstate_vma(vma));
for (; addr != end; addr += PAGE_SIZE) {
int offset = (addr & ~hmask) >> PAGE_SHIFT;
huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
@@ -1086,9 +1068,6 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
if (err)
return err;
}
-
- cond_resched();
-
return err;
}
#endif /* HUGETLB_PAGE */
@@ -1155,10 +1134,11 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!mm || IS_ERR(mm))
goto out_free;
- pagemap_walk.pmd_entry = pagemap_pte_range;
+ pagemap_walk.pte_entry = pagemap_pte;
+ pagemap_walk.pmd_entry = pagemap_pmd;
pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
- pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
+ pagemap_walk.hugetlb_entry = pagemap_hugetlb;
#endif
pagemap_walk.mm = mm;
pagemap_walk.private = &pm;
--
1.8.3.1