* [PATCH 1/2] private pointer in check_range and MPOL_MF_INVERT
@ 2005-11-08 23:24 Christoph Lameter
2005-11-08 23:25 ` [PATCH 2/2] Fold numa_maps into mempolicy.c Christoph Lameter
0 siblings, 1 reply; 7+ messages in thread
From: Christoph Lameter @ 2005-11-08 23:24 UTC (permalink / raw)
To: ak; +Cc: linux-mm, pj
A part of this functionality is also contained in the direct migration
patchset. The functionality here is more generic and independent of that
patchset. If this patch gets accepted then the policy layer updates
of the next direct migration patchset may be simplified.
- Add internal flag MPOL_MF_INVERT to control check_range() behavior.
- Replace the pagelist passed through check_range() by a general
private pointer that may be used for other purposes.
(The following patch will use that to merge numa_maps into
mempolicy.c)
- Improve some comments.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Index: linux-2.6.14-mm1/mm/mempolicy.c
===================================================================
--- linux-2.6.14-mm1.orig/mm/mempolicy.c 2005-11-07 11:48:26.000000000 -0800
+++ linux-2.6.14-mm1/mm/mempolicy.c 2005-11-08 14:59:31.000000000 -0800
@@ -87,8 +87,9 @@
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
-/* Internal MPOL_MF_xxx flags */
+/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (1<<20) /* Skip checks for continuous vmas */
+#define MPOL_MF_INVERT (1<<21) /* Invert check for nodemask */
static kmem_cache_t *policy_cache;
static kmem_cache_t *sn_cache;
@@ -234,11 +235,11 @@ static void migrate_page_add(struct vm_a
}
}
-/* Ensure all existing pages follow the policy. */
+/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
- struct list_head *pagelist)
+ void *private)
{
pte_t *orig_pte;
pte_t *pte;
@@ -248,6 +249,7 @@ static int check_pte_range(struct vm_are
do {
unsigned long pfn;
unsigned int nid;
+ struct page *page;
if (!pte_present(*pte))
continue;
@@ -256,15 +258,16 @@ static int check_pte_range(struct vm_are
print_bad_pte(vma, *pte, addr);
continue;
}
- nid = pfn_to_nid(pfn);
- if (!node_isset(nid, *nodes)) {
- if (pagelist) {
- struct page *page = pfn_to_page(pfn);
+ page = pfn_to_page(pfn);
+ nid = page_to_nid(page);
+ if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+ continue;
+
+ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+ migrate_page_add(vma, page, private, flags);
+ else
+ break;
- migrate_page_add(vma, page, pagelist, flags);
- } else
- break;
- }
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(orig_pte, ptl);
return addr != end;
@@ -273,7 +276,7 @@ static int check_pte_range(struct vm_are
static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
- struct list_head *pagelist)
+ void *private)
{
pmd_t *pmd;
unsigned long next;
@@ -284,7 +287,7 @@ static inline int check_pmd_range(struct
if (pmd_none_or_clear_bad(pmd))
continue;
if (check_pte_range(vma, pmd, addr, next, nodes,
- flags, pagelist))
+ flags, private))
return -EIO;
} while (pmd++, addr = next, addr != end);
return 0;
@@ -293,7 +296,7 @@ static inline int check_pmd_range(struct
static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
- struct list_head *pagelist)
+ void *private)
{
pud_t *pud;
unsigned long next;
@@ -304,7 +307,7 @@ static inline int check_pud_range(struct
if (pud_none_or_clear_bad(pud))
continue;
if (check_pmd_range(vma, pud, addr, next, nodes,
- flags, pagelist))
+ flags, private))
return -EIO;
} while (pud++, addr = next, addr != end);
return 0;
@@ -313,7 +316,7 @@ static inline int check_pud_range(struct
static inline int check_pgd_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
- struct list_head *pagelist)
+ void *private)
{
pgd_t *pgd;
unsigned long next;
@@ -324,7 +327,7 @@ static inline int check_pgd_range(struct
if (pgd_none_or_clear_bad(pgd))
continue;
if (check_pud_range(vma, pgd, addr, next, nodes,
- flags, pagelist))
+ flags, private))
return -EIO;
} while (pgd++, addr = next, addr != end);
return 0;
@@ -351,7 +354,7 @@ static inline int vma_migratable(struct
*/
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
- const nodemask_t *nodes, unsigned long flags, struct list_head *pagelist)
+ const nodemask_t *nodes, unsigned long flags, void *private)
{
int err;
struct vm_area_struct *first, *vma, *prev;
@@ -380,7 +383,7 @@ check_range(struct mm_struct *mm, unsign
if (vma->vm_start > start)
start = vma->vm_start;
err = check_pgd_range(vma, start, endvma, nodes,
- flags, pagelist);
+ flags, private);
if (err) {
first = ERR_PTR(err);
break;
@@ -455,9 +458,11 @@ long do_mbind(unsigned long start, unsig
int err;
LIST_HEAD(pagelist);
- if ((flags & ~(unsigned long)(MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+ if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
+ MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
|| mode > MPOL_MAX)
return -EINVAL;
+
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE))
return -EPERM;
@@ -493,8 +498,9 @@ long do_mbind(unsigned long start, unsig
mode,nodes_addr(nodes)[0]);
down_write(&mm->mmap_sem);
- vma = check_range(mm, start, end, nmask, flags,
- (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ? &pagelist : NULL);
+ vma = check_range(mm, start, end, nmask,
+ flags | MPOL_MF_INVERT, &pagelist);
+
err = PTR_ERR(vma);
if (!IS_ERR(vma)) {
err = mbind_range(vma, start, end, new);
@@ -646,7 +652,6 @@ int do_migrate_pages(struct mm_struct *m
nodemask_t nodes;
nodes_andnot(nodes, *from_nodes, *to_nodes);
- nodes_complement(nodes, nodes);
down_read(&mm->mmap_sem);
check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes,
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH 2/2] Fold numa_maps into mempolicy.c
2005-11-08 23:24 [PATCH 1/2] private pointer in check_range and MPOL_MF_INVERT Christoph Lameter
@ 2005-11-08 23:25 ` Christoph Lameter
2005-11-16 7:10 ` Paul Jackson
0 siblings, 1 reply; 7+ messages in thread
From: Christoph Lameter @ 2005-11-08 23:25 UTC (permalink / raw)
To: ak; +Cc: linux-mm, pj
- Use the page table iterator in mempolicy.c to gather the statistics.
- Improve the code and fix some comments.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Index: linux-2.6.14-mm1/mm/mempolicy.c
===================================================================
--- linux-2.6.14-mm1.orig/mm/mempolicy.c 2005-11-08 14:59:31.000000000 -0800
+++ linux-2.6.14-mm1/mm/mempolicy.c 2005-11-08 15:16:33.000000000 -0800
@@ -84,12 +84,15 @@
#include <linux/compat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (1<<20) /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (1<<21) /* Invert check for nodemask */
+#define MPOL_MF_STATS (1<<22) /* Gather statistics */
static kmem_cache_t *policy_cache;
static kmem_cache_t *sn_cache;
@@ -235,6 +238,8 @@ static void migrate_page_add(struct vm_a
}
}
+static void gather_stats(struct page *, void *);
+
/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
@@ -263,7 +268,9 @@ static int check_pte_range(struct vm_are
if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
continue;
- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+ if (flags & MPOL_MF_STATS)
+ gather_stats(page, private);
+ else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
migrate_page_add(vma, page, private, flags);
else
break;
@@ -932,9 +939,15 @@ asmlinkage long compat_sys_mbind(compat_
#endif
-/* Return effective policy for a VMA */
-struct mempolicy *
-get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr)
+/*
+ * Return effective policy for a VMA
+ *
+ * Must hold mmap_sem until memory pointer is no longer in use
+ * or be called from the current task.
+ */
+struct mempolicy *get_vma_policy(struct task_struct *task,
+ struct vm_area_struct *vma,
+ unsigned long addr)
{
struct mempolicy *pol = task->mempolicy;
@@ -1504,3 +1517,132 @@ void numa_policy_rebind(const nodemask_t
}
rebind_policy(current->mempolicy, old, new);
}
+
+/*
+ * Display pages allocated per node and memory policy via /proc.
+ */
+
+static const char *policy_types[] = { "default", "prefer", "bind",
+ "interleave" };
+
+/*
+ * Convert a mempolicy into a string.
+ * Returns the number of characters in buffer (if positive)
+ * or an error (negative)
+ */
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+{
+ char *p = buffer;
+ int l;
+ nodemask_t nodes;
+ int mode = pol ? pol->policy : MPOL_DEFAULT;
+
+ switch (mode) {
+ case MPOL_DEFAULT:
+ nodes_clear(nodes);
+ break;
+
+ case MPOL_PREFERRED:
+ nodes_clear(nodes);
+ node_set(pol->v.preferred_node, nodes);
+ break;
+
+ case MPOL_BIND:
+ get_zonemask(pol, &nodes);
+ break;
+
+ case MPOL_INTERLEAVE:
+ nodes = pol->v.nodes;
+ break;
+
+ default:
+ BUG();
+ return -EFAULT;
+ }
+
+ l = strlen(policy_types[mode]);
+ if (buffer + maxlen < p + l + 1)
+ return -ENOSPC;
+
+ strcpy(p, policy_types[mode]);
+ p += l;
+
+ if (!nodes_empty(nodes)) {
+ if (buffer + maxlen < p + 2)
+ return -ENOSPC;
+ *p++ = '=';
+ p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
+ }
+ return p - buffer;
+}
+
+struct numa_maps {
+ unsigned long pages;
+ unsigned long anon;
+ unsigned long mapped;
+ unsigned long mapcount_max;
+ unsigned long node[MAX_NUMNODES];
+};
+
+static void gather_stats(struct page *page, void *private)
+{
+ struct numa_maps *md = private;
+ int count = page_mapcount(page);
+
+ if (count)
+ md->mapped++;
+
+ if (count > md->mapcount_max)
+ md->mapcount_max = count;
+
+ md->pages++;
+
+ if (PageAnon(page))
+ md->anon++;
+
+ md->node[page_to_nid(page)]++;
+ cond_resched();
+}
+
+int show_numa_map(struct seq_file *m, void *v)
+{
+ struct task_struct *task = m->private;
+ struct vm_area_struct *vma = v;
+ struct numa_maps *md;
+ int n;
+ char buffer[50];
+
+ if (!vma->vm_mm)
+ return 0;
+
+ md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
+ if (!md)
+ return 0;
+
+ check_pgd_range(vma, vma->vm_start, vma->vm_end,
+ &node_online_map, MPOL_MF_STATS, md);
+
+ if (md->pages) {
+ mpol_to_str(buffer, sizeof(buffer),
+ get_vma_policy(task, vma, vma->vm_start));
+
+ seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu",
+ vma->vm_start, buffer, md->pages,
+ md->mapped, md->mapcount_max);
+
+ if (md->anon)
+ seq_printf(m," anon=%lu",md->anon);
+
+ for_each_online_node(n)
+ if (md->node[n])
+ seq_printf(m, " N%d=%lu", n, md->node[n]);
+
+ seq_putc(m, '\n');
+ }
+ kfree(md);
+
+ if (m->count < m->size)
+ m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+ return 0;
+}
+
Index: linux-2.6.14-mm1/fs/proc/task_mmu.c
===================================================================
--- linux-2.6.14-mm1.orig/fs/proc/task_mmu.c 2005-11-07 11:48:07.000000000 -0800
+++ linux-2.6.14-mm1/fs/proc/task_mmu.c 2005-11-08 15:15:47.000000000 -0800
@@ -390,130 +390,12 @@ struct seq_operations proc_pid_smaps_op
};
#ifdef CONFIG_NUMA
-
-struct numa_maps {
- unsigned long pages;
- unsigned long anon;
- unsigned long mapped;
- unsigned long mapcount_max;
- unsigned long node[MAX_NUMNODES];
-};
-
-/*
- * Calculate numa node maps for a vma
- */
-static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
-{
- struct page *page;
- unsigned long vaddr;
- struct mm_struct *mm = vma->vm_mm;
- int i;
- struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
-
- if (!md)
- return NULL;
- md->pages = 0;
- md->anon = 0;
- md->mapped = 0;
- md->mapcount_max = 0;
- for_each_node(i)
- md->node[i] =0;
-
- for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
- page = follow_page(mm, vaddr, 0);
- if (page) {
- int count = page_mapcount(page);
-
- if (count)
- md->mapped++;
- if (count > md->mapcount_max)
- md->mapcount_max = count;
- md->pages++;
- if (PageAnon(page))
- md->anon++;
- md->node[page_to_nid(page)]++;
- }
- cond_resched();
- }
- return md;
-}
-
-static int show_numa_map(struct seq_file *m, void *v)
-{
- struct task_struct *task = m->private;
- struct vm_area_struct *vma = v;
- struct mempolicy *pol;
- struct numa_maps *md;
- struct zone **z;
- int n;
- int first;
-
- if (!vma->vm_mm)
- return 0;
-
- md = get_numa_maps(vma);
- if (!md)
- return 0;
-
- seq_printf(m, "%08lx", vma->vm_start);
- pol = get_vma_policy(task, vma, vma->vm_start);
- /* Print policy */
- switch (pol->policy) {
- case MPOL_PREFERRED:
- seq_printf(m, " prefer=%d", pol->v.preferred_node);
- break;
- case MPOL_BIND:
- seq_printf(m, " bind={");
- first = 1;
- for (z = pol->v.zonelist->zones; *z; z++) {
-
- if (!first)
- seq_putc(m, ',');
- else
- first = 0;
- seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
- (*z)->name);
- }
- seq_putc(m, '}');
- break;
- case MPOL_INTERLEAVE:
- seq_printf(m, " interleave={");
- first = 1;
- for_each_node(n) {
- if (node_isset(n, pol->v.nodes)) {
- if (!first)
- seq_putc(m,',');
- else
- first = 0;
- seq_printf(m, "%d",n);
- }
- }
- seq_putc(m, '}');
- break;
- default:
- seq_printf(m," default");
- break;
- }
- seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
- md->mapcount_max, md->pages, md->mapped);
- if (md->anon)
- seq_printf(m," Anon=%lu",md->anon);
-
- for_each_online_node(n) {
- if (md->node[n])
- seq_printf(m, " N%d=%lu", n, md->node[n]);
- }
- seq_putc(m, '\n');
- kfree(md);
- if (m->count < m->size) /* vma is copied successfully */
- m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
- return 0;
-}
+extern int show_numa_map(struct seq_file *m, void *v);
struct seq_operations proc_pid_numa_maps_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_numa_map
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = show_numa_map
};
#endif
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/2] Fold numa_maps into mempolicy.c
2005-11-08 23:25 ` [PATCH 2/2] Fold numa_maps into mempolicy.c Christoph Lameter
@ 2005-11-16 7:10 ` Paul Jackson
2005-11-16 8:36 ` Andi Kleen
2005-11-16 18:21 ` Christoph Lameter
0 siblings, 2 replies; 7+ messages in thread
From: Paul Jackson @ 2005-11-16 7:10 UTC (permalink / raw)
To: Christoph Lameter; +Cc: ak, linux-mm
Christoph wrote:
> + * Must hold mmap_sem until memory pointer is no longer in use
> + * or be called from the current task.
> + */
> +struct mempolicy *get_vma_policy(struct task_struct *task,
Twenty (well, four) questions time.
Hmmm ... is that true - that get_vma_policy() can be called for the
current task w/o holding mmap_sem?
Is there any call to get_vma_policy() made that isn't holding mmap_sem?
Except for /proc output, is there any call to get_vma_policy made on any
task other than current?
What does "until memory pointer is no longer in use" mean?
--
I won't rest till it's the best ...
Programmer, Linux Scalability
Paul Jackson <pj@sgi.com> 1.925.600.0401
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/2] Fold numa_maps into mempolicy.c
2005-11-16 7:10 ` Paul Jackson
@ 2005-11-16 8:36 ` Andi Kleen
2005-11-16 18:54 ` Christoph Lameter
2005-11-16 18:21 ` Christoph Lameter
1 sibling, 1 reply; 7+ messages in thread
From: Andi Kleen @ 2005-11-16 8:36 UTC (permalink / raw)
To: Paul Jackson; +Cc: Christoph Lameter, linux-mm
On Wednesday 16 November 2005 08:10, Paul Jackson wrote:
> Christoph wrote:
> > + * Must hold mmap_sem until memory pointer is no longer in use
> > + * or be called from the current task.
> > + */
> > +struct mempolicy *get_vma_policy(struct task_struct *task,
>
> Twenty (well, four) questions time.
>
> Hmmm ... is that true - that get_vma_policy() can be called for the
> current task w/o holding mmap_sem?
Yes, e.g. when vma is NULL.
> Is there any call to get_vma_policy() made that isn't holding mmap_sem?
There are some callers of alloc_page_vma with NULL vma yes
> Except for /proc output, is there any call to get_vma_policy made on any
> task other than current?
In the original version there wasn't any. I still think it's a mistake
to allow it for /proc, unfortunately the patch went in.
> What does "until memory pointer is no longer in use" mean?
mempolicy is no longer in use or you took a reference.
-Andi
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/2] Fold numa_maps into mempolicy.c
2005-11-16 7:10 ` Paul Jackson
2005-11-16 8:36 ` Andi Kleen
@ 2005-11-16 18:21 ` Christoph Lameter
1 sibling, 0 replies; 7+ messages in thread
From: Christoph Lameter @ 2005-11-16 18:21 UTC (permalink / raw)
To: Paul Jackson; +Cc: ak, linux-mm
On Tue, 15 Nov 2005, Paul Jackson wrote:
> Christoph wrote:
> > + * Must hold mmap_sem until memory pointer is no longer in use
> > + * or be called from the current task.
> > + */
> > +struct mempolicy *get_vma_policy(struct task_struct *task,
>
> Twenty (well, four) questions time.
>
> Hmmm ... is that true - that get_vma_policy() can be called for the
> current task w/o holding mmap_sem?
Hmm. You are right. The current task must be holding mmap_sem in order to
have the vma not vanish under it. So mmap_sem must be held
unconditionally to use this function.
> Except for /proc output, is there any call to get_vma_policy made on any
> task other than current?
There is currently no use except by /proc/<pid>/numa_stats.
> What does "until memory pointer is no longer in use" mean?
There will be no references to struct mempolicy * after unlock
mmap_sem.
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/2] Fold numa_maps into mempolicy.c
2005-11-16 8:36 ` Andi Kleen
@ 2005-11-16 18:54 ` Christoph Lameter
2005-11-16 19:43 ` Paul Jackson
0 siblings, 1 reply; 7+ messages in thread
From: Christoph Lameter @ 2005-11-16 18:54 UTC (permalink / raw)
To: Andi Kleen; +Cc: Paul Jackson, linux-mm
On Wed, 16 Nov 2005, Andi Kleen wrote:
> > Except for /proc output, is there any call to get_vma_policy made on any
> > task other than current?
>
> In the original version there wasn't any. I still think it's a mistake
> to allow it for /proc, unfortunately the patch went in.
We could make the function local to mempolicy.c if we fold the numa_maps
interface into mempolicy.c. That would prevent outside uses of this and so
prevent additional outside uses.
But then Paul was looking for such a use?
f.e.
Index: linux-2.6.14-mm2/mm/mempolicy.c
===================================================================
--- linux-2.6.14-mm2.orig/mm/mempolicy.c 2005-11-15 14:28:32.000000000 -0800
+++ linux-2.6.14-mm2/mm/mempolicy.c 2005-11-16 10:53:01.000000000 -0800
@@ -928,7 +928,7 @@ asmlinkage long compat_sys_mbind(compat_
#endif
/* Return effective policy for a VMA */
-struct mempolicy *
+static struct mempolicy *
get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol = task->mempolicy;
Index: linux-2.6.14-mm2/include/linux/mempolicy.h
===================================================================
--- linux-2.6.14-mm2.orig/include/linux/mempolicy.h 2005-11-16 10:43:41.000000000 -0800
+++ linux-2.6.14-mm2/include/linux/mempolicy.h 2005-11-16 10:52:40.000000000 -0800
@@ -142,9 +142,6 @@ void mpol_free_shared_policy(struct shar
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
unsigned long idx);
-struct mempolicy *get_vma_policy(struct task_struct *task,
- struct vm_area_struct *vma, unsigned long addr);
-
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/2] Fold numa_maps into mempolicy.c
2005-11-16 18:54 ` Christoph Lameter
@ 2005-11-16 19:43 ` Paul Jackson
0 siblings, 0 replies; 7+ messages in thread
From: Paul Jackson @ 2005-11-16 19:43 UTC (permalink / raw)
To: Christoph Lameter; +Cc: ak, linux-mm
> We could make the function local to mempolicy.c if we fold the numa_maps
> interface into mempolicy.c. That would prevent outside uses of this and so
> prevent additional outside uses.
Whether or not get_vma_policy is called with a task != current is not
the same question as whether or not some call is made to get_vma_policy
from code not in mm/mempolicy.c
> But then Paul was looking for such a use?
I was just trying to understand the scope of mmap_sem locking in that
code, for some work I am doing in numa_policy_rebind() to rebind vma
mempolicies safely in the current context. I didn't have a bias as to
what answers I got to my 20^W4 questions, other than that they made
sense to me.
--
I won't rest till it's the best ...
Programmer, Linux Scalability
Paul Jackson <pj@sgi.com> 1.925.600.0401
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2005-11-16 19:43 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2005-11-08 23:24 [PATCH 1/2] private pointer in check_range and MPOL_MF_INVERT Christoph Lameter
2005-11-08 23:25 ` [PATCH 2/2] Fold numa_maps into mempolicy.c Christoph Lameter
2005-11-16 7:10 ` Paul Jackson
2005-11-16 8:36 ` Andi Kleen
2005-11-16 18:54 ` Christoph Lameter
2005-11-16 19:43 ` Paul Jackson
2005-11-16 18:21 ` Christoph Lameter
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox