* [RFC v2][PATCH 1/2] pass mm into pagewalkers
@ 2008-06-06 18:55 Dave Hansen
2008-06-06 18:55 ` [RFC v2][PATCH 2/2] fix large pages in pagemap Dave Hansen
From: Dave Hansen @ 2008-06-06 18:55 UTC
To: Hans Rosenfeld; +Cc: Matt Mackall, linux-mm, Dave Hansen
We need the mm in the pagewalk callbacks, at least for huge page
detection for now.  It might also come in handy for some of the
other pagewalk users.
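To illustrate the new callback interface, a hypothetical user might look
roughly like this (the example_* names are made up for illustration; the
real conversions are in the task_mmu.c hunks below):

/*
 * Hypothetical pagewalk user, for illustration only (example_* names
 * are made up).  The caller is assumed to hold mmap_sem, as the
 * converted users below do.
 */
struct example_stats {
	unsigned long pmds_seen;
};

static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	struct example_stats *stats = walk->private;
	/* the walker's mm is now reachable from the callback */
	struct vm_area_struct *vma = find_vma(walk->mm, addr);

	if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma))
		return 0;	/* skip huge page areas */
	stats->pmds_seen++;
	return 0;
}

static int example_walk(struct mm_struct *mm, unsigned long start,
			unsigned long end, struct example_stats *stats)
{
	struct mm_walk walk = {
		.pmd_entry	= example_pmd_entry,
		.mm		= mm,
		.private	= stats,
	};

	return walk_page_range(start, end, &walk);
}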
---
linux-2.6.git-dave/fs/proc/task_mmu.c | 44 +++++++++++++++++++---------------
linux-2.6.git-dave/include/linux/mm.h | 17 ++++++-------
linux-2.6.git-dave/mm/pagewalk.c | 42 +++++++++++++++++---------------
3 files changed, 56 insertions(+), 47 deletions(-)
diff -puN mm/pagewalk.c~pass-mm-into-pagewalkers mm/pagewalk.c
--- linux-2.6.git/mm/pagewalk.c~pass-mm-into-pagewalkers 2008-06-06 09:32:06.000000000 -0700
+++ linux-2.6.git-dave/mm/pagewalk.c 2008-06-06 11:46:16.000000000 -0700
@@ -3,14 +3,14 @@
#include <linux/sched.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- const struct mm_walk *walk, void *private)
+ struct mm_walk *walk)
{
pte_t *pte;
int err = 0;
pte = pte_offset_map(pmd, addr);
for (;;) {
- err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private);
+ err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
if (err)
break;
addr += PAGE_SIZE;
@@ -24,7 +24,7 @@ static int walk_pte_range(pmd_t *pmd, un
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
- const struct mm_walk *walk, void *private)
+ struct mm_walk *walk)
{
pmd_t *pmd;
unsigned long next;
@@ -35,15 +35,15 @@ static int walk_pmd_range(pud_t *pud, un
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd)) {
if (walk->pte_hole)
- err = walk->pte_hole(addr, next, private);
+ err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
if (walk->pmd_entry)
- err = walk->pmd_entry(pmd, addr, next, private);
+ err = walk->pmd_entry(pmd, addr, next, walk);
if (!err && walk->pte_entry)
- err = walk_pte_range(pmd, addr, next, walk, private);
+ err = walk_pte_range(pmd, addr, next, walk);
if (err)
break;
} while (pmd++, addr = next, addr != end);
@@ -52,7 +52,7 @@ static int walk_pmd_range(pud_t *pud, un
}
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
- const struct mm_walk *walk, void *private)
+ struct mm_walk *walk)
{
pud_t *pud;
unsigned long next;
@@ -63,15 +63,15 @@ static int walk_pud_range(pgd_t *pgd, un
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud)) {
if (walk->pte_hole)
- err = walk->pte_hole(addr, next, private);
+ err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
if (walk->pud_entry)
- err = walk->pud_entry(pud, addr, next, private);
+ err = walk->pud_entry(pud, addr, next, walk);
if (!err && (walk->pmd_entry || walk->pte_entry))
- err = walk_pmd_range(pud, addr, next, walk, private);
+ err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
} while (pud++, addr = next, addr != end);
@@ -85,15 +85,15 @@ static int walk_pud_range(pgd_t *pgd, un
* @addr: starting address
* @end: ending address
* @walk: set of callbacks to invoke for each level of the tree
- * @private: private data passed to the callback function
*
* Recursively walk the page table for the memory area in a VMA,
* calling supplied callbacks. Callbacks are called in-order (first
* PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
* etc.). If lower-level callbacks are omitted, walking depth is reduced.
*
- * Each callback receives an entry pointer, the start and end of the
- * associated range, and a caller-supplied private data pointer.
+ * Each callback receives an entry pointer and the start and end of the
+ * associated range, and a copy of the original mm_walk for access to
+ * the ->private or ->mm fields.
*
* No locks are taken, but the bottom level iterator will map PTE
* directories from highmem if necessary.
@@ -101,9 +101,8 @@ static int walk_pud_range(pgd_t *pgd, un
* If any callback returns a non-zero value, the walk is aborted and
* the return value is propagated back to the caller. Otherwise 0 is returned.
*/
-int walk_page_range(const struct mm_struct *mm,
- unsigned long addr, unsigned long end,
- const struct mm_walk *walk, void *private)
+int walk_page_range(unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
{
pgd_t *pgd;
unsigned long next;
@@ -112,21 +111,24 @@ int walk_page_range(const struct mm_stru
if (addr >= end)
return err;
- pgd = pgd_offset(mm, addr);
+ if (!walk->mm)
+ return -EINVAL;
+
+ pgd = pgd_offset(walk->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd)) {
if (walk->pte_hole)
- err = walk->pte_hole(addr, next, private);
+ err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
if (walk->pgd_entry)
- err = walk->pgd_entry(pgd, addr, next, private);
+ err = walk->pgd_entry(pgd, addr, next, walk);
if (!err &&
(walk->pud_entry || walk->pmd_entry || walk->pte_entry))
- err = walk_pud_range(pgd, addr, next, walk, private);
+ err = walk_pud_range(pgd, addr, next, walk);
if (err)
break;
} while (pgd++, addr = next, addr != end);
diff -puN include/linux/mm.h~pass-mm-into-pagewalkers include/linux/mm.h
--- linux-2.6.git/include/linux/mm.h~pass-mm-into-pagewalkers 2008-06-06 09:32:06.000000000 -0700
+++ linux-2.6.git-dave/include/linux/mm.h 2008-06-06 09:32:06.000000000 -0700
@@ -760,16 +760,17 @@ unsigned long unmap_vmas(struct mmu_gath
* (see walk_page_range for more details)
*/
struct mm_walk {
- int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, void *);
- int (*pud_entry)(pud_t *, unsigned long, unsigned long, void *);
- int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, void *);
- int (*pte_entry)(pte_t *, unsigned long, unsigned long, void *);
- int (*pte_hole)(unsigned long, unsigned long, void *);
+ int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ struct mm_struct *mm;
+ void *private;
};
-int walk_page_range(const struct mm_struct *, unsigned long addr,
- unsigned long end, const struct mm_walk *walk,
- void *private);
+int walk_page_range(unsigned long addr, unsigned long end,
+ struct mm_walk *walk);
void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
diff -puN fs/proc/task_mmu.c~pass-mm-into-pagewalkers fs/proc/task_mmu.c
--- linux-2.6.git/fs/proc/task_mmu.c~pass-mm-into-pagewalkers 2008-06-06 09:32:06.000000000 -0700
+++ linux-2.6.git-dave/fs/proc/task_mmu.c 2008-06-06 11:45:49.000000000 -0700
@@ -315,9 +315,9 @@ struct mem_size_stats {
};
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- void *private)
+ struct mm_walk *walk)
{
- struct mem_size_stats *mss = private;
+ struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
@@ -365,19 +365,21 @@ static int smaps_pte_range(pmd_t *pmd, u
return 0;
}
-static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
-
static int show_smap(struct seq_file *m, void *v)
{
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
int ret;
+ struct mm_walk smaps_walk = {
+ .pmd_entry = smaps_pte_range,
+ .mm = vma->vm_mm,
+ .private = &mss,
+ };
memset(&mss, 0, sizeof mss);
mss.vma = vma;
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
- walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
- &smaps_walk, &mss);
+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
ret = show_map(m, v);
if (ret)
@@ -426,9 +428,9 @@ const struct file_operations proc_smaps_
};
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, void *private)
+ unsigned long end, struct mm_walk *walk)
{
- struct vm_area_struct *vma = private;
+ struct vm_area_struct *vma = walk->private;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
@@ -452,8 +454,6 @@ static int clear_refs_pte_range(pmd_t *p
return 0;
}
-static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
-
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -476,11 +476,17 @@ static ssize_t clear_refs_write(struct f
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
+ static struct mm_walk clear_refs_walk;
+ memset(&clear_refs_walk, 0, sizeof(clear_refs_walk));
+ clear_refs_walk.pmd_entry = clear_refs_pte_range;
+ clear_refs_walk.mm = mm;
down_read(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next)
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ clear_refs_walk.private = vma;
if (!is_vm_hugetlb_page(vma))
- walk_page_range(mm, vma->vm_start, vma->vm_end,
- &clear_refs_walk, vma);
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &clear_refs_walk);
+ }
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
mmput(mm);
@@ -538,9 +544,9 @@ static int add_to_pagemap(unsigned long
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
- void *private)
+ struct mm_walk *walk)
{
- struct pagemapread *pm = private;
+ struct pagemapread *pm = walk->private;
unsigned long addr;
int err = 0;
for (addr = start; addr < end; addr += PAGE_SIZE) {
@@ -558,9 +564,9 @@ static u64 swap_pte_to_pagemap_entry(pte
}
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- void *private)
+ struct mm_walk *walk)
{
- struct pagemapread *pm = private;
+ struct pagemapread *pm = walk->private;
pte_t *pte;
int err = 0;
@@ -685,8 +691,8 @@ static ssize_t pagemap_read(struct file
* user buffer is tracked in "pm", and the walk
* will stop when we hit the end of the buffer.
*/
- ret = walk_page_range(mm, start_vaddr, end_vaddr,
- &pagemap_walk, &pm);
+ ret = walk_page_range(start_vaddr, end_vaddr,
+ &pagemap_walk);
if (ret == PM_END_OF_BUFFER)
ret = 0;
/* don't need mmap_sem for these, but this looks cleaner */
_
* [RFC v2][PATCH 2/2] fix large pages in pagemap
2008-06-06 18:55 [RFC v2][PATCH 1/2] pass mm into pagewalkers Dave Hansen
@ 2008-06-06 18:55 ` Dave Hansen
2008-06-10 23:26 ` Matt Mackall
From: Dave Hansen @ 2008-06-06 18:55 UTC
To: Hans Rosenfeld; +Cc: Matt Mackall, linux-mm, Dave Hansen
We were walking right into huge page areas in the pagemap
walker, and calling the pmds pmd_bad() and clearing them.
That leaked huge pages. Bad.
This patch at least works around that for now. It ignores
huge pages in the pagemap walker for the time being, and
won't leak those pages.
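For context, the clearing happens in the generic pmd_none_or_clear_bad()
helper that the page walker calls from walk_pmd_range().  Paraphrased (a
sketch, not part of this patch, and details vary by architecture), it
behaves roughly like this:

/*
 * Paraphrased sketch of the generic helper: a huge pmd can look "bad"
 * to pmd_bad(), and then gets cleared, leaking the huge page.
 */
static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);	/* prints a warning and clears the pmd */
		return 1;
	}
	return 0;
}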
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
---
linux-2.6.git-dave/fs/proc/task_mmu.c | 43 ++++++++++++++++++++++++++--------
1 file changed, 34 insertions(+), 9 deletions(-)
diff -puN fs/proc/task_mmu.c~fix-large-pages-in-pagemap fs/proc/task_mmu.c
--- linux-2.6.git/fs/proc/task_mmu.c~fix-large-pages-in-pagemap 2008-06-06 11:31:48.000000000 -0700
+++ linux-2.6.git-dave/fs/proc/task_mmu.c 2008-06-06 11:41:22.000000000 -0700
@@ -563,24 +563,49 @@ static u64 swap_pte_to_pagemap_entry(pte
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
+static unsigned long pte_to_pagemap_entry(pte_t pte)
+{
+ unsigned long pme = 0;
+ if (is_swap_pte(pte))
+ pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+ | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+ else if (pte_present(pte))
+ pme = PM_PFRAME(pte_pfn(pte))
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+ return pme;
+}
+
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
+ struct vm_area_struct *vma = find_vma(walk->mm, addr);
struct pagemapread *pm = walk->private;
pte_t *pte;
int err = 0;
for (; addr != end; addr += PAGE_SIZE) {
u64 pfn = PM_NOT_PRESENT;
- pte = pte_offset_map(pmd, addr);
- if (is_swap_pte(*pte))
- pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
- | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
- else if (pte_present(*pte))
- pfn = PM_PFRAME(pte_pfn(*pte))
- | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
- /* unmap so we're not in atomic when we copy to userspace */
- pte_unmap(pte);
+
+ /*
+ * Remember that find_vma() returns the
+ * first vma with a vm_end > addr, but
+ * has no guarantee about addr and
+ * vm_start. That means we'll always
+ * find a vma here, unless we're at
+ * an addr higher than the highest vma.
+ */
+ if (vma && (addr >= vma->vm_end))
+ vma = find_vma(walk->mm, addr);
+ if (vma && (vma->vm_start <= addr) &&
+ !is_vm_hugetlb_page(vma)) {
+ pte = pte_offset_map(pmd, addr);
+ pfn = pte_to_pagemap_entry(*pte);
+ /*
+ * unmap so we're not in atomic
+ * when we copy to userspace
+ */
+ pte_unmap(pte);
+ }
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
diff -puN mm/pagewalk.c~fix-large-pages-in-pagemap mm/pagewalk.c
_
* Re: [RFC v2][PATCH 2/2] fix large pages in pagemap
2008-06-06 18:55 ` [RFC v2][PATCH 2/2] fix large pages in pagemap Dave Hansen
@ 2008-06-10 23:26 ` Matt Mackall
2008-06-11 0:06 ` [RFC v3][PATCH " Dave Hansen
From: Matt Mackall @ 2008-06-10 23:26 UTC
To: Dave Hansen; +Cc: Hans Rosenfeld, linux-mm
On Fri, 2008-06-06 at 11:55 -0700, Dave Hansen wrote:
> We were walking right into huge page areas in the pagemap
> walker, and calling the pmds pmd_bad() and clearing them.
>
> That leaked huge pages. Bad.
>
> This patch at least works around that for now. It ignores
> huge pages in the pagemap walker for the time being, and
> won't leak those pages.
>
> Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
> ---
>
> linux-2.6.git-dave/fs/proc/task_mmu.c | 43 ++++++++++++++++++++++++++--------
> 1 file changed, 34 insertions(+), 9 deletions(-)
>
> diff -puN fs/proc/task_mmu.c~fix-large-pages-in-pagemap fs/proc/task_mmu.c
> --- linux-2.6.git/fs/proc/task_mmu.c~fix-large-pages-in-pagemap 2008-06-06 11:31:48.000000000 -0700
> +++ linux-2.6.git-dave/fs/proc/task_mmu.c 2008-06-06 11:41:22.000000000 -0700
> @@ -563,24 +563,49 @@ static u64 swap_pte_to_pagemap_entry(pte
> return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
> }
>
> +static unsigned long pte_to_pagemap_entry(pte_t pte)
> +{
> + unsigned long pme = 0;
> + if (is_swap_pte(pte))
> + pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
> + | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
> + else if (pte_present(pte))
> + pme = PM_PFRAME(pte_pfn(pte))
> + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
> + return pme;
> +}
> +
> static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> struct mm_walk *walk)
> {
> + struct vm_area_struct *vma = find_vma(walk->mm, addr);
> struct pagemapread *pm = walk->private;
> pte_t *pte;
> int err = 0;
>
> for (; addr != end; addr += PAGE_SIZE) {
> u64 pfn = PM_NOT_PRESENT;
> - pte = pte_offset_map(pmd, addr);
> - if (is_swap_pte(*pte))
> - pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
> - | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
> - else if (pte_present(*pte))
> - pfn = PM_PFRAME(pte_pfn(*pte))
> - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
> - /* unmap so we're not in atomic when we copy to userspace */
> - pte_unmap(pte);
> +
> + /*
> + * Remember that find_vma() returns the
> + * first vma with a vm_end > addr, but
> + * has no guarantee about addr and
> + * vm_start. That means we'll always
> + * find a vma here, unless we're at
> + * an addr higher than the highest vma.
> + */
I don't like this comment much - I had to read it several times to
convince myself the code was correct. I think it should instead be three
pieces and perhaps a new variable name, like this:
/* find the first VMA at or after our current address */
> + struct vm_area_struct *targetvma = find_vma(walk->mm, addr);
/* find next target VMA if we leave current one */
> + if (targetvma && (addr >= targetvma->vm_end))
> + targetvma = find_vma(walk->mm, addr);
/* if inside non-huge target VMA, map it */
> + if (targetvma && (targetvma->vm_start <= addr) &&
> + !is_vm_hugetlb_page(targetvma)) {
> + pte = pte_offset_map(pmd, addr);
> + pfn = pte_to_pagemap_entry(*pte);
> + /*
> + * unmap so we're not in atomic
> + * when we copy to userspace
> + */
> + pte_unmap(pte);
Also, might as well move the map/unmap inside the utility function if
we're going to have one, no?
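Something roughly like this, maybe (untested sketch, hypothetical name):

/* Untested sketch: fold the map/unmap into the helper by passing
 * pmd and addr instead of a pte value (hypothetical name). */
static u64 pmd_addr_to_pagemap_entry(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = pte_offset_map(pmd, addr);
	u64 pme = PM_NOT_PRESENT;

	if (is_swap_pte(*pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(*pte))
		pme = PM_PFRAME(pte_pfn(*pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;

	/* unmap before returning so the caller isn't atomic when it
	 * copies to userspace */
	pte_unmap(pte);
	return pme;
}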
Otherwise, I'm liking this.
--
Mathematics is the supreme nostalgia of our time.
* [RFC v3][PATCH 2/2] fix large pages in pagemap
2008-06-10 23:26 ` Matt Mackall
@ 2008-06-11 0:06 ` Dave Hansen
2008-06-11 15:27 ` Matt Mackall
From: Dave Hansen @ 2008-06-11 0:06 UTC
To: Matt Mackall; +Cc: Hans Rosenfeld, linux-mm
On Tue, 2008-06-10 at 18:26 -0500, Matt Mackall wrote:
> > + /*
> > + * Remember that find_vma() returns the
> > + * first vma with a vm_end > addr, but
> > + * has no guarantee about addr and
> > + * vm_start. That means we'll always
> > + * find a vma here, unless we're at
> > + * an addr higher than the highest vma.
> > + */
>
> I don't like this comment much - I had to read it several times to
> convince myself the code was correct. I think it should instead be three
> pieces and perhaps a new variable name, like this:
I'll agree with you on that. :)
I think I'll keep the variable name the same, though. Adding 'target'
doesn't really tell us anything. I can see if we had a couple of
different vmas in there, but since there's only one, I think I'll just
keep a simple name.
> /* find the first VMA at or after our current address */
> > + struct vm_area_struct *targetvma = find_vma(walk->mm, addr);
>
> /* find next target VMA if we leave current one */
> > + if (targetvma && (addr >= targetvma->vm_end))
> > + targetvma = find_vma(walk->mm, addr);
>
> /* if inside non-huge target VMA, map it */
> > + if (targetvma && (targetvma->vm_start <= addr) &&
> > + !is_vm_hugetlb_page(targetvma)) {
I've tried to incorporate the spirit of these comments. Let me know
what you think.
> Also, might as well move the map/unmap inside the utility function if
> we're going to have one, no?
Yeah, but then we have to pass 'pmd' and 'addr' in there, too. I guess
I could do that, but this looked OK as is.
How does this look?
--
We were walking right into huge page areas in the pagemap
walker, and calling the pmds pmd_bad() and clearing them.
That leaked huge pages. Bad.
This patch at least works around that for now. It ignores
huge pages in the pagemap walker for the time being, and
won't leak those pages.
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
---
linux-2.6.git-dave/fs/proc/task_mmu.c | 42 ++++++++++++++++++++++++++--------
1 file changed, 33 insertions(+), 9 deletions(-)
diff -puN fs/proc/task_mmu.c~fix-large-pages-in-pagemap fs/proc/task_mmu.c
--- linux-2.6.git/fs/proc/task_mmu.c~fix-large-pages-in-pagemap 2008-06-10 16:54:48.000000000 -0700
+++ linux-2.6.git-dave/fs/proc/task_mmu.c 2008-06-10 17:02:17.000000000 -0700
@@ -563,24 +563,48 @@ static u64 swap_pte_to_pagemap_entry(pte
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
+static unsigned long pte_to_pagemap_entry(pte_t pte)
+{
+ unsigned long pme = 0;
+ if (is_swap_pte(pte))
+ pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+ | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+ else if (pte_present(pte))
+ pme = PM_PFRAME(pte_pfn(pte))
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+ return pme;
+}
+
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
+ struct vm_area_struct *vma;
struct pagemapread *pm = walk->private;
pte_t *pte;
int err = 0;
+ /* find the first VMA at or above 'addr' */
+ vma = find_vma(walk->mm, addr);
for (; addr != end; addr += PAGE_SIZE) {
u64 pfn = PM_NOT_PRESENT;
- pte = pte_offset_map(pmd, addr);
- if (is_swap_pte(*pte))
- pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
- | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
- else if (pte_present(*pte))
- pfn = PM_PFRAME(pte_pfn(*pte))
- | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
- /* unmap so we're not in atomic when we copy to userspace */
- pte_unmap(pte);
+
+ /* check to see if we've left 'vma' behind
+ * and need a new, higher one */
+ if (vma && (addr >= vma->vm_end))
+ vma = find_vma(walk->mm, addr);
+
+ /* check that 'vma' actually covers this address,
+ * and that it isn't a huge page vma */
+ if (vma && (vma->vm_start <= addr) &&
+ !is_vm_hugetlb_page(vma)) {
+ pte = pte_offset_map(pmd, addr);
+ pfn = pte_to_pagemap_entry(*pte);
+ /*
+ * unmap so we're not in atomic
+ * when we copy to userspace
+ */
+ pte_unmap(pte);
+ }
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
_
-- Dave
* Re: [RFC v3][PATCH 2/2] fix large pages in pagemap
2008-06-11 0:06 ` [RFC v3][PATCH " Dave Hansen
@ 2008-06-11 15:27 ` Matt Mackall
From: Matt Mackall @ 2008-06-11 15:27 UTC
To: Dave Hansen; +Cc: Hans Rosenfeld, linux-mm
On Tue, 2008-06-10 at 17:06 -0700, Dave Hansen wrote:
>
> +static unsigned long pte_to_pagemap_entry(pte_t pte)
> +{
> + unsigned long pme = 0;
> + if (is_swap_pte(pte))
> + pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
> + | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
> + else if (pte_present(pte))
> + pme = PM_PFRAME(pte_pfn(pte))
> + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
> + return pme;
> +}
> +
> static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> struct mm_walk *walk)
> {
> + struct vm_area_struct *vma;
> struct pagemapread *pm = walk->private;
> pte_t *pte;
> int err = 0;
>
> + /* find the first VMA at or above 'addr' */
> + vma = find_vma(walk->mm, addr);
Tab weirdness.
> for (; addr != end; addr += PAGE_SIZE) {
> u64 pfn = PM_NOT_PRESENT;
> - pte = pte_offset_map(pmd, addr);
> - if (is_swap_pte(*pte))
> - pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
> - | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
> - else if (pte_present(*pte))
> - pfn = PM_PFRAME(pte_pfn(*pte))
> - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
> - /* unmap so we're not in atomic when we copy to userspace */
> - pte_unmap(pte);
> +
> + /* check to see if we've left 'vma' behind
> + * and need a new, higher one */
> + if (vma && (addr >= vma->vm_end))
> + vma = find_vma(walk->mm, addr);
> +
> + /* check that 'vma' actually covers this address,
> + * and that it isn't a huge page vma */
> + if (vma && (vma->vm_start <= addr) &&
> + !is_vm_hugetlb_page(vma)) {
> + pte = pte_offset_map(pmd, addr);
> + pfn = pte_to_pagemap_entry(*pte);
> + /*
> + * unmap so we're not in atomic
> + * when we copy to userspace
> + */
> + pte_unmap(pte);
This barely warranted a one-line comment but now it's four. But anyway,
this and 1/2:
Acked-by: Matt Mackall <mpm@selenic.com>
Thanks for getting back to this.
--
Mathematics is the supreme nostalgia of our time.