From: Peter Zijlstra <peterz@infradead.org>
To: Don Morris <don.morris@hp.com>
Cc: Mel Gorman <mgorman@suse.de>,
	Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
	Ingo Molnar <mingo@kernel.org>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Linux-MM <linux-mm@kvack.org>,
	LKML <linux-kernel@vger.kernel.org>,
	riel@redhat.com
Subject: [PATCH -v3] sched, numa: Use {cpu, pid} to create task groups for shared faults
Date: Fri, 2 Aug 2013 18:47:15 +0200
Message-ID: <20130802164715.GP27162@twins.programming.kicks-ass.net>
In-Reply-To: <51F93105.8020503@hp.com>


Here's the latest; it seems to not crash and appears to actually do as
advertised.

---
Subject: sched, numa: Use {cpu, pid} to create task groups for shared faults
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue Jul 30 10:40:20 CEST 2013

A very simple/straightforward shared fault task grouping
implementation.

Each NUMA hinting fault is passed the {cpu, pid} of the task that last
touched the page. If that pid differs from the faulting task's, we look
at the task currently running on that cpu and, provided its pid still
matches, place both tasks in a shared numa_group. The group aggregates
the per-node fault statistics of its members; when the two tasks
already belong to different groups, the task in the smaller group moves
over to the larger one, with ties broken on the group address so that
only one side gives way.
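
For illustration, a minimal userspace sketch of that join decision (the
struct, the should_join() helper and the task counts are made up for
the example; the real logic, with RCU, locking and refcounting, is in
task_numa_group() below):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for struct numa_group; only the member count matters here */
struct group {
	int nr_tasks;
};

/* should the faulting task leave @my_grp and join @grp? */
static bool should_join(struct group *my_grp, struct group *grp)
{
	if (grp == my_grp)
		return false;		/* already share a group */
	if (my_grp->nr_tasks > grp->nr_tasks)
		return false;		/* we're bigger; the other task joins us */
	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
		return false;		/* equal size: tie-break on group address */
	return true;
}

int main(void)
{
	struct group small = { .nr_tasks = 1 };
	struct group big   = { .nr_tasks = 3 };

	printf("small joins big: %d\n", should_join(&small, &big));	/* 1 */
	printf("big joins small: %d\n", should_join(&big, &small));	/* 0 */
	return 0;
}

The tie-break makes the decision asymmetric: of any two distinct
groups, at most one side is willing to dissolve into the other.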

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
---
 include/linux/sched.h |    3 
 kernel/sched/core.c   |    3 
 kernel/sched/fair.c   |  174 +++++++++++++++++++++++++++++++++++++++++++++++---
 kernel/sched/sched.h  |    5 -
 4 files changed, 171 insertions(+), 14 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1341,6 +1341,9 @@ struct task_struct {
 	u64 node_stamp;			/* migration stamp  */
 	struct callback_head numa_work;
 
+	struct list_head numa_entry;
+	struct numa_group *numa_group;
+
 	/*
 	 * Exponential decaying average of faults on a per-node basis.
 	 * Scheduling placement decisions are made based on these counts.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1733,6 +1733,9 @@ static void __sched_fork(struct task_str
 	p->numa_work.next = &p->numa_work;
 	p->numa_faults = NULL;
 	p->numa_faults_buffer = NULL;
+
+	INIT_LIST_HEAD(&p->numa_entry);
+	p->numa_group = NULL;
 #endif /* CONFIG_NUMA_BALANCING */
 }
 
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1149,6 +1149,17 @@ static void numa_migrate_preferred(struc
 	p->numa_migrate_retry = jiffies + HZ/10;
 }
 
+struct numa_group {
+	atomic_t refcount;
+
+	spinlock_t lock; /* protects nr_tasks, task_list */
+	int nr_tasks;
+	struct list_head task_list;
+
+	struct rcu_head rcu;
+	atomic_long_t faults[0];
+};
+
 static void task_numa_placement(struct task_struct *p)
 {
 	int seq, nid, max_nid = -1;
@@ -1157,6 +1168,7 @@ static void task_numa_placement(struct t
 	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
 	if (p->numa_scan_seq == seq)
 		return;
+
 	p->numa_scan_seq = seq;
 	p->numa_migrate_seq++;
 	p->numa_scan_period_max = task_scan_max(p);
@@ -1167,14 +1179,24 @@ static void task_numa_placement(struct t
 		int priv, i;
 
 		for (priv = 0; priv < 2; priv++) {
+			long diff;
+
 			i = task_faults_idx(nid, priv);
 
+			diff = -p->numa_faults[i];
+
 			/* Decay existing window, copy faults since last scan */
 			p->numa_faults[i] >>= 1;
 			p->numa_faults[i] += p->numa_faults_buffer[i];
 			p->numa_faults_buffer[i] = 0;
 
+			diff += p->numa_faults[i];
 			faults += p->numa_faults[i];
+
+			if (p->numa_group) {
+				/* safe because we can only change our own group */
+				atomic_long_add(diff, &p->numa_group->faults[i]);
+			}
 		}
 
 		if (faults > max_faults) {
@@ -1211,6 +1233,130 @@ static void task_numa_placement(struct t
 	}
 }
 
+static inline int get_numa_group(struct numa_group *grp)
+{
+	return atomic_inc_not_zero(&grp->refcount);
+}
+
+static inline void put_numa_group(struct numa_group *grp)
+{
+	if (atomic_dec_and_test(&grp->refcount))
+		kfree_rcu(grp, rcu);
+}
+
+static void double_lock(spinlock_t *l1, spinlock_t *l2)
+{
+	if (l1 > l2)
+		swap(l1, l2);
+
+	spin_lock(l1);
+	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
+static void task_numa_group(struct task_struct *p, int cpu, int pid)
+{
+	struct numa_group *grp, *my_grp;
+	struct task_struct *tsk;
+	bool join = false;
+	int i;
+
+	if (unlikely(!p->numa_group)) {
+		unsigned int size = sizeof(struct numa_group) +
+			            2*nr_node_ids*sizeof(atomic_long_t);
+
+		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+		if (!grp)
+			return;
+
+		atomic_set(&grp->refcount, 1);
+		spin_lock_init(&grp->lock);
+		INIT_LIST_HEAD(&grp->task_list);
+
+		for (i = 0; i < 2*nr_node_ids; i++)
+			atomic_long_set(&grp->faults[i], p->numa_faults[i]);
+
+		list_add(&p->numa_entry, &grp->task_list);
+		grp->nr_tasks++;
+		rcu_assign_pointer(p->numa_group, grp);
+	}
+
+	rcu_read_lock();
+	tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
+
+	if ((tsk->pid & LAST__PID_MASK) != pid)
+		goto unlock;
+
+	grp = rcu_dereference(tsk->numa_group);
+	if (!grp)
+		goto unlock;
+
+	my_grp = p->numa_group;
+	if (grp == my_grp)
+		goto unlock;
+
+	/*
+	 * Only join the other group if its bigger; if we're the bigger group,
+	 * the other task will join us.
+	 */
+	if (my_grp->nr_tasks > grp->nr_tasks)
+		goto unlock;
+
+	/*
+	 * Tie-break on the grp address.
+	 */
+	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
+		goto unlock;
+
+	if (!get_numa_group(grp))
+		goto unlock;
+
+	join = true;
+
+unlock:
+	rcu_read_unlock();
+
+	if (!join)
+		return;
+
+	for (i = 0; i < 2*nr_node_ids; i++) {
+		atomic_long_sub(p->numa_faults[i], &my_grp->faults[i]);
+		atomic_long_add(p->numa_faults[i], &grp->faults[i]);
+	}
+
+	double_lock(&my_grp->lock, &grp->lock);
+
+	list_move(&p->numa_entry, &grp->task_list);
+	my_grp->nr_tasks--;
+	grp->nr_tasks++;
+
+	spin_unlock(&my_grp->lock);
+	spin_unlock(&grp->lock);
+
+	rcu_assign_pointer(p->numa_group, grp);
+
+	put_numa_group(my_grp);
+}
+
+void task_numa_free(struct task_struct *p)
+{
+	struct numa_group *grp = p->numa_group;
+	int i;
+
+	if (grp) {
+		for (i = 0; i < 2*nr_node_ids; i++)
+			atomic_long_sub(p->numa_faults[i], &grp->faults[i]);
+
+		spin_lock(&grp->lock);
+		list_del(&p->numa_entry);
+		grp->nr_tasks--;
+		spin_unlock(&grp->lock);
+		rcu_assign_pointer(p->numa_group, NULL);
+		put_numa_group(grp);
+	}
+
+	kfree(p->numa_faults);
+}
+
 /*
  * Got a PROT_NONE fault for a page on @node.
  */
@@ -1226,21 +1372,12 @@ void task_numa_fault(int last_cpupid, in
 	if (!p->mm)
 		return;
 
-	/*
-	 * First accesses are treated as private, otherwise consider accesses
-	 * to be private if the accessing pid has not changed
-	 */
-	if (!cpupid_pid_unset(last_cpupid))
-		priv = ((p->pid & LAST__PID_MASK) == cpupid_to_pid(last_cpupid));
-	else
-		priv = 1;
-
 	/* Allocate buffer to track faults on a per-node basis */
 	if (unlikely(!p->numa_faults)) {
 		int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
 
 		/* numa_faults and numa_faults_buffer share the allocation */
-		p->numa_faults = kzalloc(size * 2, GFP_KERNEL);
+		p->numa_faults = kzalloc(size * 2, GFP_KERNEL | __GFP_NOWARN);
 		if (!p->numa_faults)
 			return;
 
@@ -1249,6 +1386,23 @@ void task_numa_fault(int last_cpupid, in
 	}
 
 	/*
+	 * First accesses are treated as private, otherwise consider accesses
+	 * to be private if the accessing pid has not changed
+	 */
+	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
+		priv = 1;
+	} else {
+		int cpu, pid;
+
+		cpu = cpupid_to_cpu(last_cpupid);
+		pid = cpupid_to_pid(last_cpupid);
+
+		priv = (pid == (p->pid & LAST__PID_MASK));
+		if (!priv)
+			task_numa_group(p, cpu, pid);
+	}
+
+	/*
 	 * If pages are properly placed (did not migrate) then scan slower.
 	 * This is reset periodically in case of phase changes
 	 *
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -556,10 +556,7 @@ static inline u64 rq_clock_task(struct r
 #ifdef CONFIG_NUMA_BALANCING
 extern int migrate_task_to(struct task_struct *p, int cpu);
 extern int migrate_swap(struct task_struct *, struct task_struct *);
-static inline void task_numa_free(struct task_struct *p)
-{
-	kfree(p->numa_faults);
-}
+extern void task_numa_free(struct task_struct *p);
 #else /* CONFIG_NUMA_BALANCING */
 static inline void task_numa_free(struct task_struct *p)
 {
