* Re: [-mm][splitlru][PATCH 1/3] introduce __get_user_pages()
From: KOSAKI Motohiro @ 2008-07-19 7:47 UTC (permalink / raw)
To: linux-mm, akpm
Cc: kosaki.motohiro, Li Zefan, Hugh Dickins, Lee Schermerhorn, Rik van Riel
> The new munlock processing needs GUP_FLAGS_IGNORE_VMA_PERMISSIONS,
> because the current get_user_pages() can't grab PROT_NONE pages and
> therefore PROT_NONE pages can't be munlocked.
>
>
>
> Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
> CC: Li Zefan <lizf@cn.fujitsu.com>
> CC: Hugh Dickins <hugh@veritas.com>
> CC: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
> CC: Rik van Riel <riel@redhat.com>
Sorry, I forgot to change nommu.c; the patch below is the new version.
Unfortunately, I don't have a nommu machine, so I'd appreciate it if a
nommu tester could give this a spin...
---
mm/internal.h | 8 ++++++++
mm/memory.c | 37 +++++++++++++++++++++++++++++++------
mm/nommu.c | 42 +++++++++++++++++++++++++++++++-----------
3 files changed, 70 insertions(+), 17 deletions(-)
Index: b/mm/memory.c
===================================================================
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1108,12 +1108,17 @@ static inline int use_zero_page(struct v
return !vma->vm_ops || !vma->vm_ops->fault;
}
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int flags,
struct page **pages, struct vm_area_struct **vmas)
{
int i;
- unsigned int vm_flags;
+ unsigned int vm_flags = 0;
+ int write = !!(flags & GUP_FLAGS_WRITE);
+ int force = !!(flags & GUP_FLAGS_FORCE);
+ int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
if (len <= 0)
return 0;
@@ -1137,7 +1142,9 @@ int get_user_pages(struct task_struct *t
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- if (write) /* user gate pages are read-only */
+
+ /* user gate pages are read-only */
+ if (!ignore && write)
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
@@ -1169,8 +1176,9 @@ int get_user_pages(struct task_struct *t
continue;
}
- if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
- || !(vm_flags & vma->vm_flags))
+ if (!vma ||
+ (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ (!ignore && !(vm_flags & vma->vm_flags)))
return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
@@ -1245,6 +1253,23 @@ int get_user_pages(struct task_struct *t
} while (len);
return i;
}
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int flags = 0;
+
+ if (write)
+ flags |= GUP_FLAGS_WRITE;
+ if (force)
+ flags |= GUP_FLAGS_FORCE;
+
+ return __get_user_pages(tsk, mm,
+ start, len, flags,
+ pages, vmas);
+}
+
EXPORT_SYMBOL(get_user_pages);
pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
Index: b/mm/internal.h
===================================================================
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -243,4 +243,12 @@ static inline void mminit_validate_memmo
}
#endif /* CONFIG_SPARSEMEM */
+#define GUP_FLAGS_WRITE 0x1
+#define GUP_FLAGS_FORCE 0x2
+#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int flags,
+ struct page **pages, struct vm_area_struct **vmas);
+
#endif
Index: b/mm/nommu.c
===================================================================
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -128,20 +128,16 @@ unsigned int kobjsize(const void *objp)
return PAGE_SIZE << compound_order(page);
}
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- * slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
- struct page **pages, struct vm_area_struct **vmas)
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int flags,
+ struct page **pages, struct vm_area_struct **vmas)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
int i;
+ int write = !!(flags & GUP_FLAGS_WRITE);
+ int force = !!(flags & GUP_FLAGS_FORCE);
+ int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
/* calculate required read or write permissions.
* - if 'force' is set, we only require the "MAY" flags.
@@ -156,7 +152,7 @@ int get_user_pages(struct task_struct *t
/* protect what we can, including chardevs */
if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
- !(vm_flags & vma->vm_flags))
+ (!ignore && !(vm_flags & vma->vm_flags)))
goto finish_or_fault;
if (pages) {
@@ -174,6 +170,30 @@ int get_user_pages(struct task_struct *t
finish_or_fault:
return i ? : -EFAULT;
}
+
+
+/*
+ * get a list of pages in an address range belonging to the specified process
+ * and indicate the VMA that covers each page
+ * - this is potentially dodgy as we may end incrementing the page count of a
+ * slab page or a secondary page from a compound page
+ * - don't permit access to VMAs that don't support it, such as I/O mappings
+ */
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int flags = 0;
+
+ if (write)
+ flags |= GUP_FLAGS_WRITE;
+ if (force)
+ flags |= GUP_FLAGS_FORCE;
+
+ return __get_user_pages(tsk, mm,
+ start, len, flags,
+ pages, vmas);
+}
EXPORT_SYMBOL(get_user_pages);
DEFINE_RWLOCK(vmlist_lock);
* [-mm][splitlru][PATCH 0/3] munlock rework
From: kosaki.motohiro @ 2008-07-19 8:42 UTC (permalink / raw)
To: linux-mm, akpm
--
The old munlock processing of the unevictable-lru series uses a page table
walk, because get_user_pages() can't grab PROT_NONE pages.

As a result, the current -mm tree has two problems:
- a build error on nommu machines
- a runtime error on HIGHPTE machines

So I would like to rework it along these lines:

Old implementation
- use pagewalk

New implementation
- use __get_user_pages()

I tested this patchset on:
IA64: >24h stress workload
x86_64: ditto
x86_32 with HIGHPTE: only half an hour

Li-san, could you please try this patchset on your 32-bit machine?
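
For illustration only, here is a minimal userspace sketch (not part of the
patchset) of the case the rework has to handle: pages are mlocked while the
mapping is accessible, the VMA is then switched to PROT_NONE, and the
kernel-side munlock path must still be able to find and clear those pages
even though get_user_pages() would normally refuse to touch the VMA.

/*
 * Hypothetical reproducer sketch, assuming nothing beyond the standard
 * mmap/mlock/mprotect/munlock syscalls.
 */
#include <sys/mman.h>
#include <string.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	memset(p, 0, len);		/* fault the pages in */
	mlock(p, len);			/* pages become mlocked/unevictable */
	mprotect(p, len, PROT_NONE);	/* VMA now grants no access at all */
	munlock(p, len);		/* kernel must still un-mlock the pages */

	munmap(p, len);
	return 0;
}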
* [-mm][splitlru][PATCH 1/3] introduce __get_user_pages()
From: kosaki.motohiro @ 2008-07-19 8:42 UTC (permalink / raw)
To: linux-mm, akpm
Cc: KOSAKI Motohiro, Li Zefan, Hugh Dickins, Lee Schermerhorn, Rik van Riel
[-- Attachment #1: __get_user_pages.patch --]
[-- Type: text/plain, Size: 3336 bytes --]
The new munlock processing needs GUP_FLAGS_IGNORE_VMA_PERMISSIONS,
because the current get_user_pages() can't grab PROT_NONE pages and
therefore PROT_NONE pages can't be munlocked.
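
For illustration only, a sketch of how a munlock-side caller could use the
new interface; the helper name here is hypothetical (the real caller is
added in patch 2/3), and it assumes the GUP_FLAGS_* definitions and
__get_user_pages() prototype from mm/internal.h below.

/*
 * Illustrative only: grab the pages of a range for munlock, even when the
 * VMA currently grants no access (e.g. PROT_NONE after mprotect).
 */
static int munlock_grab_pages(struct vm_area_struct *vma,
			      unsigned long start, int nr_pages,
			      struct page **pages)
{
	int gup_flags = GUP_FLAGS_IGNORE_VMA_PERMISSIONS;

	if (vma->vm_flags & VM_WRITE)
		gup_flags |= GUP_FLAGS_WRITE;

	/* no GUP_FLAGS_FORCE: we only want pages that are already mapped */
	return __get_user_pages(current, vma->vm_mm, start, nr_pages,
				gup_flags, pages, NULL);
}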
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
CC: Li Zefan <lizf@cn.fujitsu.com>
CC: Hugh Dickins <hugh@veritas.com>
CC: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
CC: Rik van Riel <riel@redhat.com>
---
mm/internal.h | 8 ++++++++
mm/memory.c | 37 +++++++++++++++++++++++++++++++------
2 files changed, 39 insertions(+), 6 deletions(-)
Index: b/mm/memory.c
===================================================================
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1108,12 +1108,17 @@ static inline int use_zero_page(struct v
return !vma->vm_ops || !vma->vm_ops->fault;
}
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int flags,
struct page **pages, struct vm_area_struct **vmas)
{
int i;
- unsigned int vm_flags;
+ unsigned int vm_flags = 0;
+ int write = !!(flags & GUP_FLAGS_WRITE);
+ int force = !!(flags & GUP_FLAGS_FORCE);
+ int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
if (len <= 0)
return 0;
@@ -1137,7 +1142,9 @@ int get_user_pages(struct task_struct *t
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- if (write) /* user gate pages are read-only */
+
+ /* user gate pages are read-only */
+ if (!ignore && write)
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
@@ -1169,8 +1176,9 @@ int get_user_pages(struct task_struct *t
continue;
}
- if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
- || !(vm_flags & vma->vm_flags))
+ if (!vma ||
+ (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ (!ignore && !(vm_flags & vma->vm_flags)))
return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
@@ -1245,6 +1253,23 @@ int get_user_pages(struct task_struct *t
} while (len);
return i;
}
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int flags = 0;
+
+ if (write)
+ flags |= GUP_FLAGS_WRITE;
+ if (force)
+ flags |= GUP_FLAGS_FORCE;
+
+ return __get_user_pages(tsk, mm,
+ start, len, flags,
+ pages, vmas);
+}
+
EXPORT_SYMBOL(get_user_pages);
pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
Index: b/mm/internal.h
===================================================================
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -243,4 +243,12 @@ static inline void mminit_validate_memmo
}
#endif /* CONFIG_SPARSEMEM */
+#define GUP_FLAGS_WRITE 0x1
+#define GUP_FLAGS_FORCE 0x2
+#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int flags,
+ struct page **pages, struct vm_area_struct **vmas);
+
#endif
* [-mm][splitlru][PATCH 2/3] split LRU: munlock rework
From: kosaki.motohiro @ 2008-07-19 8:42 UTC (permalink / raw)
To: linux-mm, akpm
Cc: KOSAKI Motohiro, Li Zefan, Hugh Dickins, Lee Schermerhorn, Rik van Riel
[-- Attachment #1: munlock-rework.patch --]
[-- Type: text/plain, Size: 7004 bytes --]
The current munlock processing uses a page table walk.
This causes two problems:
- a build error on nommu machines
- a runtime error on HIGHPTE machines

This patch fixes both by reworking munlock on top of __get_user_pages().
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
CC: Li Zefan <lizf@cn.fujitsu.com>
CC: Hugh Dickins <hugh@veritas.com>
CC: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
CC: Rik van Riel <riel@redhat.com>
---
mm/mlock.c | 152 ++++++++++++++-----------------------------------------------
1 file changed, 35 insertions(+), 117 deletions(-)
Index: b/mm/mlock.c
===================================================================
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -147,18 +147,33 @@ static void munlock_vma_page(struct page
* vma->vm_mm->mmap_sem must be held for write.
*/
static int __mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ int mlock)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start;
struct page *pages[16]; /* 16 gives a reasonable batch */
- int write = !!(vma->vm_flags & VM_WRITE);
int nr_pages = (end - start) / PAGE_SIZE;
int ret;
+ int gup_flags = 0;
+
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(end & ~PAGE_MASK);
+ VM_BUG_ON(start < vma->vm_start);
+ VM_BUG_ON(end > vma->vm_end);
+ VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
+ (atomic_read(&mm->mm_users) != 0));
+
+ /*
+ * mlock: don't populate pages when the VMA grants no access
+ * permission (e.g. PROT_NONE).
+ * munlock: always munlock the pages, even when the VMA is PROT_NONE.
+ */
+ if (!mlock)
+ gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS;
- VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
- VM_BUG_ON(start < vma->vm_start || end > vma->vm_end);
- VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+ if (vma->vm_flags & VM_WRITE)
+ gup_flags |= GUP_FLAGS_WRITE;
lru_add_drain_all(); /* push cached pages to LRU */
@@ -171,9 +186,9 @@ static int __mlock_vma_pages_range(struc
* get_user_pages makes pages present if we are
* setting mlock.
*/
- ret = get_user_pages(current, mm, addr,
+ ret = __get_user_pages(current, mm, addr,
min_t(int, nr_pages, ARRAY_SIZE(pages)),
- write, 0, pages, NULL);
+ gup_flags, pages, NULL);
/*
* This can happen for, e.g., VM_NONLINEAR regions before
* a page has been allocated and mapped at a given offset,
@@ -202,8 +217,12 @@ static int __mlock_vma_pages_range(struc
* us. Check after acquiring page lock.
*/
lock_page(page);
- if (page->mapping)
- mlock_vma_page(page);
+ if (page->mapping) {
+ if (mlock)
+ mlock_vma_page(page);
+ else
+ munlock_vma_page(page);
+ }
unlock_page(page);
put_page(page); /* ref from get_user_pages() */
@@ -221,120 +240,19 @@ static int __mlock_vma_pages_range(struc
return 0; /* count entire vma as locked_vm */
}
-/*
- * private structure for munlock page table walk
- */
-struct munlock_page_walk {
- struct vm_area_struct *vma;
- pmd_t *pmd; /* for migration_entry_wait() */
-};
-
-/*
- * munlock normal pages for present ptes
- */
-static int __munlock_pte_handler(pte_t *ptep, unsigned long addr,
- unsigned long end, struct mm_walk *walk)
-{
- struct munlock_page_walk *mpw = walk->private;
- swp_entry_t entry;
- struct page *page;
- pte_t pte;
-
-retry:
- pte = *ptep;
- /*
- * If it's a swap pte, we might be racing with page migration.
- */
- if (unlikely(!pte_present(pte))) {
- if (!is_swap_pte(pte))
- goto out;
- entry = pte_to_swp_entry(pte);
- if (is_migration_entry(entry)) {
- migration_entry_wait(mpw->vma->vm_mm, mpw->pmd, addr);
- goto retry;
- }
- goto out;
- }
-
- page = vm_normal_page(mpw->vma, addr, pte);
- if (!page)
- goto out;
-
- lock_page(page);
- if (!page->mapping) {
- unlock_page(page);
- goto retry;
- }
- munlock_vma_page(page);
- unlock_page(page);
-
-out:
- return 0;
-}
-
-/*
- * Save pmd for pte handler for waiting on migration entries
- */
-static int __munlock_pmd_handler(pmd_t *pmd, unsigned long addr,
- unsigned long end, struct mm_walk *walk)
-{
- struct munlock_page_walk *mpw = walk->private;
-
- mpw->pmd = pmd;
- return 0;
-}
-
-
-/*
- * munlock a range of pages in the vma using standard page table walk.
- *
- * vma->vm_mm->mmap_sem must be held for write.
- */
-static void __munlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- struct munlock_page_walk mpw = {
- .vma = vma,
- };
- struct mm_walk munlock_page_walk = {
- .pmd_entry = __munlock_pmd_handler,
- .pte_entry = __munlock_pte_handler,
- .private = &mpw,
- .mm = mm,
- };
-
- VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
- VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
- (atomic_read(&mm->mm_users) != 0));
- VM_BUG_ON(start < vma->vm_start);
- VM_BUG_ON(end > vma->vm_end);
-
- lru_add_drain_all(); /* push cached pages to LRU */
- walk_page_range(start, end, &munlock_page_walk);
- lru_add_drain_all(); /* to update stats */
-}
-
#else /* CONFIG_UNEVICTABLE_LRU */
/*
* Just make pages present if VM_LOCKED. No-op if unlocking.
*/
static int __mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ int mlock)
{
- if (vma->vm_flags & VM_LOCKED)
+ if (mlock && (vma->vm_flags & VM_LOCKED))
make_pages_present(start, end);
return 0;
}
-
-/*
- * munlock a range of pages in the vma -- no-op.
- */
-static void __munlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
-}
#endif /* CONFIG_UNEVICTABLE_LRU */
/*
@@ -357,7 +275,7 @@ int mlock_vma_pages_range(struct vm_area
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current))) {
downgrade_write(&mm->mmap_sem);
- nr_pages = __mlock_vma_pages_range(vma, start, end);
+ nr_pages = __mlock_vma_pages_range(vma, start, end, 1);
up_read(&mm->mmap_sem);
/* vma can change or disappear */
@@ -392,7 +310,7 @@ void munlock_vma_pages_range(struct vm_a
unsigned long start, unsigned long end)
{
vma->vm_flags &= ~VM_LOCKED;
- __munlock_vma_pages_range(vma, start, end);
+ __mlock_vma_pages_range(vma, start, end, 0);
}
/*
@@ -469,7 +387,7 @@ success:
*/
downgrade_write(&mm->mmap_sem);
- ret = __mlock_vma_pages_range(vma, start, end);
+ ret = __mlock_vma_pages_range(vma, start, end, 1);
if (ret > 0) {
mm->locked_vm -= ret;
ret = 0;
@@ -495,7 +413,7 @@ success:
* while. Should we downgrade the semaphore for both lock
* AND unlock ?
*/
- __munlock_vma_pages_range(vma, start, end);
+ __mlock_vma_pages_range(vma, start, end, 0);
}
out:
* [-mm][splitlru][PATCH 3/3] revert to unevictable-lru-infrastructure-kconfig-fix.patch
From: kosaki.motohiro @ 2008-07-19 8:42 UTC (permalink / raw)
To: linux-mm, akpm
Cc: KOSAKI Motohiro, Li Zefan, Hugh Dickins, Lee Schermerhorn, Rik van Riel
[-- Attachment #1: revert-kconfig.patch --]
[-- Type: text/plain, Size: 2187 bytes --]
CONFIG_UNEVICTABLE_LRU automatically turns on CONFIG_PAGE_WALKER,
which causes a build error on nommu machines.
This is no longer necessary now that munlock has been rewritten.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
CC: Li Zefan <lizf@cn.fujitsu.com>
CC: Hugh Dickins <hugh@veritas.com>
CC: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
CC: Rik van Riel <riel@redhat.com>
---
init/Kconfig | 1 -
mm/Kconfig | 5 -----
mm/Makefile | 2 +-
3 files changed, 1 insertion(+), 7 deletions(-)
Index: b/init/Kconfig
===================================================================
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -803,7 +803,6 @@ source "arch/Kconfig"
config PROC_PAGE_MONITOR
default y
depends on PROC_FS && MMU
- select PAGE_WALKER
bool "Enable /proc page monitoring" if EMBEDDED
help
Various /proc files exist to monitor process memory utilization:
Index: b/mm/Kconfig
===================================================================
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -209,14 +209,9 @@ config VIRT_TO_BUS
def_bool y
depends on !ARCH_NO_VIRT_TO_BUS
-# automatically selected by UNEVICTABLE_LRU or PROC_PAGE_MONITOR
-config PAGE_WALKER
- def_bool n
-
config UNEVICTABLE_LRU
bool "Add LRU list to track non-evictable pages"
default y
- select PAGE_WALKER
help
Keeps unevictable pages off of the active and inactive pageout
lists, so kswapd will not waste CPU time or have its balancing
Index: b/mm/Makefile
===================================================================
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -13,7 +13,7 @@ obj-y := bootmem.o filemap.o mempool.o
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
page_isolation.o $(mmu-y)
-obj-$(CONFIG_PAGE_WALKER) += pagewalk.o
+obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
* Re: [-mm][splitlru][PATCH 0/3] munlock rework
From: Li Zefan @ 2008-07-19 9:25 UTC (permalink / raw)
To: kosaki.motohiro; +Cc: linux-mm, akpm
kosaki.motohiro@jp.fujitsu.com wrote:
> The old munlock processing of the unevictable-lru series uses a page table
> walk, because get_user_pages() can't grab PROT_NONE pages.
>
> As a result, the current -mm tree has two problems:
> - a build error on nommu machines
> - a runtime error on HIGHPTE machines
>
> So I would like to rework it along these lines:
>
> Old implementation
> - use pagewalk
>
> New implementation
> - use __get_user_pages()
>
> I tested this patchset on:
> IA64: >24h stress workload
> x86_64: ditto
> x86_32 with HIGHPTE: only half an hour
>
> Li-san, could you please try this patchset on your 32-bit machine?
I've tested this patchset; the bug disappeared and it survived the
LTP tests :).
* Re: [-mm][splitlru][PATCH 2/3] split LRU: munlock rework
From: Rik van Riel @ 2008-07-19 14:41 UTC (permalink / raw)
To: kosaki.motohiro; +Cc: linux-mm, akpm, Li Zefan, Hugh Dickins, Lee Schermerhorn
On Sat, 19 Jul 2008 17:42:15 +0900
kosaki.motohiro@jp.fujitsu.com wrote:
> The current munlock processing uses a page table walk.
> This causes two problems:
> - a build error on nommu machines
> - a runtime error on HIGHPTE machines
>
> This patch fixes both.
>
>
>
> Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
--
All rights reversed.
* Re: [-mm][splitlru][PATCH 3/3] revert to unevictable-lru-infrastructure-kconfig-fix.patch
From: Rik van Riel @ 2008-07-19 14:41 UTC (permalink / raw)
To: kosaki.motohiro; +Cc: linux-mm, akpm, Li Zefan, Hugh Dickins, Lee Schermerhorn
On Sat, 19 Jul 2008 17:42:16 +0900
kosaki.motohiro@jp.fujitsu.com wrote:
> CONFIG_UNEVICTABLE_LRU automatically turns on CONFIG_PAGE_WALKER,
> which causes a build error on nommu machines.
>
> This is no longer necessary now that munlock has been rewritten.
>
>
> Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
--
All rights reversed.