From: Peter Zijlstra <peterz@infradead.org>
To: Mel Gorman <mgorman@suse.de>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
Ingo Molnar <mingo@kernel.org>,
Andrea Arcangeli <aarcange@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Linux-MM <linux-mm@kvack.org>,
LKML <linux-kernel@vger.kernel.org>
Subject: [PATCH] mm, sched, numa: Create a per-task MPOL_INTERLEAVE policy
Date: Thu, 25 Jul 2013 12:46:33 +0200
Message-ID: <20130725104633.GQ27075@twins.programming.kicks-ass.net>
In-Reply-To: <1373901620-2021-1-git-send-email-mgorman@suse.de>
Subject: mm, sched, numa: Create a per-task MPOL_INTERLEAVE policy
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon Jul 22 10:42:38 CEST 2013
Just an idea.. the rest of the code doesn't work well enough for this to
matter yet, and there's also something sick in it since it makes my box
explode. But I wanted to put the idea out there anyway.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
---
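Note (not part of the patch): the gist is that task_numa_placement(), behind
the new NUMA_INTERLEAVE sched feature, builds a nodemask of every node holding
more than half of max_faults and rebinds a kernel-owned per-task
MPOL_INTERLEAVE policy (MPOL_F_MOF|MPOL_F_MORON) to it, while mpol_misplaced()
accounts interleave faults against the node the task is running on so
->numa_faults[] can still shrink the set. The end result is roughly what a
task would get by interleaving itself over its hot nodes with set_mempolicy(2)
by hand, except the kernel maintains the node set and never touches a policy
the task installed itself. A minimal userspace sketch of the equivalent manual
setup, with a made-up node set (0 and 2) and built against libnuma (-lnuma):

/* Sketch only -- nodes 0 and 2 stand in for wherever the faults land. */
#include <numaif.h>	/* set_mempolicy(), MPOL_INTERLEAVE */
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 2);

	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
		perror("set_mempolicy");

	/* Anonymous memory allocated from here on interleaves over nodes 0,2. */
	return 0;
}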
include/linux/mempolicy.h | 5 +-
kernel/sched/fair.c | 44 +++++++++++++++++++++
kernel/sched/features.h | 1 +
mm/huge_memory.c | 28 +++++++------
mm/memory.c | 33 ++++++++++------
mm/mempolicy.c | 94 +++++++++++++++++++++++++++++-----------------
6 files changed, 145 insertions(+), 60 deletions(-)
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -60,6 +60,7 @@ struct mempolicy {
* The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
*/
+extern struct mempolicy *__mpol_new(unsigned short, unsigned short);
extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
@@ -187,7 +188,7 @@ static inline int vma_migratable(struct
return 1;
}
-extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long, int *);
#else
@@ -307,7 +308,7 @@ static inline int mpol_to_str(char *buff
}
static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
- unsigned long address)
+ unsigned long address, int *account_node)
{
return -1; /* no node preference */
}
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -893,6 +893,47 @@ static inline unsigned long task_faults(
return p->numa_faults[2*nid] + p->numa_faults[2*nid+1];
}
+/*
+ * Create/Update p->mempolicy MPOL_INTERLEAVE to match p->numa_faults[].
+ */
+static void task_numa_mempol(struct task_struct *p, long max_faults)
+{
+ struct mempolicy *pol = p->mempolicy, *new = NULL;
+ nodemask_t nodes = NODE_MASK_NONE;
+ int node;
+
+ if (!pol) {
+ new = __mpol_new(MPOL_INTERLEAVE, MPOL_F_MOF | MPOL_F_MORON);
+ if (IS_ERR(new))
+ return;
+ }
+
+ task_lock(p);
+
+ pol = p->mempolicy; /* lock forces a re-read */
+ if (!pol) {
+ pol = p->mempolicy = new;
+ new = NULL;
+ }
+
+ if (!(pol->flags & MPOL_F_MORON))
+ goto unlock;
+
+ for_each_node(node) {
+ if (task_faults(p, node) > max_faults/2)
+ node_set(node, nodes);
+ }
+
+ mpol_rebind_task(p, &nodes, MPOL_REBIND_STEP1);
+ mpol_rebind_task(p, &nodes, MPOL_REBIND_STEP2);
+
+unlock:
+ task_unlock(p);
+
+ if (new)
+ __mpol_put(new);
+}
+
static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
@@ -1106,6 +1147,9 @@ static void task_numa_placement(struct t
}
}
+ if (sched_feat(NUMA_INTERLEAVE))
+ task_numa_mempol(p, max_faults);
+
/* Preferred node as the node with the most faults */
if (max_faults && max_nid != p->numa_preferred_nid) {
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -72,4 +72,5 @@ SCHED_FEAT(NUMA_FORCE, false)
SCHED_FEAT(NUMA_BALANCE, true)
SCHED_FEAT(NUMA_FAULTS_UP, true)
SCHED_FEAT(NUMA_FAULTS_DOWN, true)
+SCHED_FEAT(NUMA_INTERLEAVE, false)
#endif
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1292,7 +1292,7 @@ int do_huge_pmd_numa_page(struct mm_stru
{
struct page *page;
unsigned long haddr = addr & HPAGE_PMD_MASK;
- int page_nid = -1, this_nid = numa_node_id();
+ int page_nid = -1, account_nid = -1, this_nid = numa_node_id();
int target_nid, last_nidpid;
bool migrated = false;
@@ -1301,7 +1301,6 @@ int do_huge_pmd_numa_page(struct mm_stru
goto out_unlock;
page = pmd_page(pmd);
- get_page(page);
/*
* Do not account for faults against the huge zero page. The read-only
@@ -1317,13 +1316,12 @@ int do_huge_pmd_numa_page(struct mm_stru
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
last_nidpid = page_nidpid_last(page);
- target_nid = mpol_misplaced(page, vma, haddr);
- if (target_nid == -1) {
- put_page(page);
+ target_nid = mpol_misplaced(page, vma, haddr, &account_nid);
+ if (target_nid == -1)
goto clear_pmdnuma;
- }
/* Acquire the page lock to serialise THP migrations */
+ get_page(page);
spin_unlock(&mm->page_table_lock);
lock_page(page);
@@ -1332,6 +1330,7 @@ int do_huge_pmd_numa_page(struct mm_stru
if (unlikely(!pmd_same(pmd, *pmdp))) {
unlock_page(page);
put_page(page);
+ account_nid = page_nid = -1; /* someone else took our fault */
goto out_unlock;
}
spin_unlock(&mm->page_table_lock);
@@ -1339,17 +1338,20 @@ int do_huge_pmd_numa_page(struct mm_stru
/* Migrate the THP to the requested node */
migrated = migrate_misplaced_transhuge_page(mm, vma,
pmdp, pmd, addr, page, target_nid);
- if (migrated)
- page_nid = target_nid;
- else
+ if (!migrated) {
+ account_nid = -1; /* account against the old page */
goto check_same;
+ }
+ page_nid = target_nid;
goto out;
check_same:
spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(pmd, *pmdp)))
+ if (unlikely(!pmd_same(pmd, *pmdp))) {
+ page_nid = -1; /* someone else took our fault */
goto out_unlock;
+ }
clear_pmdnuma:
pmd = pmd_mknonnuma(pmd);
set_pmd_at(mm, haddr, pmdp, pmd);
@@ -1359,8 +1361,10 @@ int do_huge_pmd_numa_page(struct mm_stru
spin_unlock(&mm->page_table_lock);
out:
- if (page_nid != -1)
- task_numa_fault(last_nidpid, page_nid, HPAGE_PMD_NR, migrated);
+ if (account_nid == -1)
+ account_nid = page_nid;
+ if (account_nid != -1)
+ task_numa_fault(last_nidpid, account_nid, HPAGE_PMD_NR, migrated);
return 0;
}
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3516,16 +3516,17 @@ static int do_nonlinear_fault(struct mm_
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, int current_nid)
+static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, int page_nid,
+ int *account_nid)
{
get_page(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
- if (current_nid == numa_node_id())
+ if (page_nid == numa_node_id())
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
- return mpol_misplaced(page, vma, addr);
+ return mpol_misplaced(page, vma, addr, account_nid);
}
int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -3533,7 +3534,7 @@ int do_numa_page(struct mm_struct *mm, s
{
struct page *page = NULL;
spinlock_t *ptl;
- int page_nid = -1;
+ int page_nid = -1, account_nid = -1;
int target_nid, last_nidpid;
bool migrated = false;
@@ -3570,7 +3571,7 @@ int do_numa_page(struct mm_struct *mm, s
last_nidpid = page_nidpid_last(page);
page_nid = page_to_nid(page);
- target_nid = numa_migrate_prep(page, vma, addr, page_nid);
+ target_nid = numa_migrate_prep(page, vma, addr, page_nid, &account_nid);
pte_unmap_unlock(ptep, ptl);
if (target_nid == -1) {
put_page(page);
@@ -3583,8 +3584,10 @@ int do_numa_page(struct mm_struct *mm, s
page_nid = target_nid;
out:
- if (page_nid != -1)
- task_numa_fault(last_nidpid, page_nid, 1, migrated);
+ if (account_nid == -1)
+ account_nid = page_nid;
+ if (account_nid != -1)
+ task_numa_fault(last_nidpid, account_nid, 1, migrated);
return 0;
}
@@ -3623,7 +3626,7 @@ static int do_pmd_numa_page(struct mm_st
for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
pte_t pteval = *pte;
struct page *page;
- int page_nid = -1;
+ int page_nid = -1, account_nid = -1;
int target_nid;
bool migrated = false;
@@ -3648,19 +3651,25 @@ static int do_pmd_numa_page(struct mm_st
last_nidpid = page_nidpid_last(page);
page_nid = page_to_nid(page);
target_nid = numa_migrate_prep(page, vma, addr,
- page_nid);
+ page_nid, &account_nid);
pte_unmap_unlock(pte, ptl);
if (target_nid != -1) {
migrated = migrate_misplaced_page(page, vma, target_nid);
if (migrated)
page_nid = target_nid;
+ else
+ account_nid = -1;
} else {
put_page(page);
}
- if (page_nid != -1)
- task_numa_fault(last_nidpid, page_nid, 1, migrated);
+ if (account_nid == -1)
+ account_nid = page_nid;
+ if (account_nid != -1)
+ task_numa_fault(last_nidpid, account_nid, 1, migrated);
+
+ cond_resched();
pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
}
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -118,22 +118,18 @@ static struct mempolicy default_policy =
.flags = MPOL_F_LOCAL,
};
-static struct mempolicy preferred_node_policy[MAX_NUMNODES];
+static struct mempolicy numa_policy = {
+ .refcnt = ATOMIC_INIT(1), /* never free it */
+ .mode = MPOL_PREFERRED,
+ .flags = MPOL_F_LOCAL | MPOL_F_MOF | MPOL_F_MORON,
+};
static struct mempolicy *get_task_policy(struct task_struct *p)
{
struct mempolicy *pol = p->mempolicy;
- int node;
- if (!pol) {
- node = numa_node_id();
- if (node != NUMA_NO_NODE)
- pol = &preferred_node_policy[node];
-
- /* preferred_node_policy is not initialised early in boot */
- if (!pol->mode)
- pol = NULL;
- }
+ if (!pol)
+ pol = &numa_policy;
return pol;
}
@@ -248,6 +244,20 @@ static int mpol_set_nodemask(struct memp
return ret;
}
+struct mempolicy *__mpol_new(unsigned short mode, unsigned short flags)
+{
+ struct mempolicy *policy;
+
+ policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
+ if (!policy)
+ return ERR_PTR(-ENOMEM);
+ atomic_set(&policy->refcnt, 1);
+ policy->mode = mode;
+ policy->flags = flags;
+
+ return policy;
+}
+
/*
* This function just creates a new policy, does some check and simple
* initialization. You must invoke mpol_set_nodemask() to set nodes.
@@ -255,8 +265,6 @@ static int mpol_set_nodemask(struct memp
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
nodemask_t *nodes)
{
- struct mempolicy *policy;
-
pr_debug("setting mode %d flags %d nodes[0] %lx\n",
mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
@@ -284,14 +292,8 @@ static struct mempolicy *mpol_new(unsign
mode = MPOL_PREFERRED;
} else if (nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
- policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
- if (!policy)
- return ERR_PTR(-ENOMEM);
- atomic_set(&policy->refcnt, 1);
- policy->mode = mode;
- policy->flags = flags;
- return policy;
+ return __mpol_new(mode, flags);
}
/* Slow path of a mpol destructor. */
@@ -2234,12 +2236,13 @@ static void sp_free(struct sp_node *n)
* Policy determination "mimics" alloc_page_vma().
* Called from fault path where we know the vma and faulting address.
*/
-int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
+int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr, int *account_node)
{
struct mempolicy *pol;
struct zone *zone;
int curnid = page_to_nid(page);
unsigned long pgoff;
+ int thisnid = numa_node_id();
int polnid = -1;
int ret = -1;
@@ -2261,7 +2264,7 @@ int mpol_misplaced(struct page *page, st
case MPOL_PREFERRED:
if (pol->flags & MPOL_F_LOCAL)
- polnid = numa_node_id();
+ polnid = thisnid;
else
polnid = pol->v.preferred_node;
break;
@@ -2276,7 +2279,7 @@ int mpol_misplaced(struct page *page, st
if (node_isset(curnid, pol->v.nodes))
goto out;
(void)first_zones_zonelist(
- node_zonelist(numa_node_id(), GFP_HIGHUSER),
+ node_zonelist(thisnid, GFP_HIGHUSER),
gfp_zone(GFP_HIGHUSER),
&pol->v.nodes, &zone);
polnid = zone->node;
@@ -2291,8 +2294,7 @@ int mpol_misplaced(struct page *page, st
int last_nidpid;
int this_nidpid;
- polnid = numa_node_id();
- this_nidpid = nid_pid_to_nidpid(polnid, current->pid);;
+ this_nidpid = nid_pid_to_nidpid(thisnid, current->pid);
/*
* Multi-stage node selection is used in conjunction
@@ -2318,6 +2320,39 @@ int mpol_misplaced(struct page *page, st
last_nidpid = page_nidpid_xchg_last(page, this_nidpid);
if (!nidpid_pid_unset(last_nidpid) && nidpid_to_nid(last_nidpid) != polnid)
goto out;
+
+ /*
+ * Preserve interleave pages while allowing useful
+ * ->numa_faults[] statistics.
+ *
+ * When migrating into an interleave set, migrate to
+ * the correct interleaved node but account against the
+ * current node (where the task is running).
+ *
+ * Not doing this would result in ->numa_faults[] being
+ * flat across the interleaved nodes, making it
+ * impossible to shrink the node list even when all
+ * tasks are running on a single node.
+ *
+ * src dst migrate account
+ * 0 0 -- this_node $page_node
+ * 0 1 -- policy_node this_node
+ * 1 0 -- this_node $page_node
+ * 1 1 -- policy_node this_node
+ *
+ */
+ switch (pol->mode) {
+ case MPOL_INTERLEAVE:
+ if (node_isset(thisnid, pol->v.nodes)) {
+ if (account_node)
+ *account_node = thisnid;
+ }
+ break;
+
+ default:
+ polnid = thisnid;
+ break;
+ }
}
if (curnid != polnid)
@@ -2580,15 +2615,6 @@ void __init numa_policy_init(void)
sizeof(struct sp_node),
0, SLAB_PANIC, NULL);
- for_each_node(nid) {
- preferred_node_policy[nid] = (struct mempolicy) {
- .refcnt = ATOMIC_INIT(1),
- .mode = MPOL_PREFERRED,
- .flags = MPOL_F_MOF | MPOL_F_MORON,
- .v = { .preferred_node = nid, },
- };
- }
-
/*
* Set interleaving policy for system init. Interleaving is only
* enabled across suitably sized nodes (default is >= 16MB), or
--