* [PATCH] hotplug cpu: move tasks in empty cpusets to parent
From: Cliff Wickman @ 2007-09-21 16:42 UTC
To: akpm; +Cc: linux-mm
This patch corrects a situation that occurs when one disables all the cpus
in a cpuset.
Currently, the disabled (cpu-less) cpuset inherits the cpus of its parent,
which may overlap its exclusive sibling.
(You will get non-removable cpusets -- "Invalid argument")
Tasks of an empty cpuset should be moved to the parent of their current
cpuset, or, if that parent also has no cpus, to its parent, and so on.
And the empty cpuset should be removed (if it is flagged notify_on_release).
This patch uses a workqueue thread to call the function that deletes the
cpuset. That way we avoid the complexity of the cpuset locks.
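In minimal form, the deferral pattern looks like this (a sketch only; the
work item and handler names match the patch below, while hotplug_handler()
stands in for the real caller, common_cpu_mem_hotplug_unplug()):

#include <linux/workqueue.h>

/* runs later, in keventd's process context */
static void remove_empty_cpusets(struct work_struct *unused)
{
	/* free to take manage_mutex etc. here, unconstrained by
	 * whatever locks the hotplug path held */
}

static DECLARE_WORK(remove_empties_block, remove_empty_cpusets);

static void hotplug_handler(void)
{
	/* cheap, and safe even while holding callback_mutex */
	schedule_work(&remove_empties_block);
}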
Diffed against 2.6.23-rc7
Signed-off-by: Cliff Wickman <cpw@sgi.com>
---
This is about version 4 of this patch. It avoids the recursive method that
was first used, and incorporates fixes for locking and for the
notify_on_release conceptual issues raised by Paul Jackson.
kernel/cpuset.c | 206 ++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 178 insertions(+), 28 deletions(-)
Index: linus.070921/kernel/cpuset.c
===================================================================
--- linus.070921.orig/kernel/cpuset.c
+++ linus.070921/kernel/cpuset.c
@@ -52,6 +52,8 @@
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
#define CPUSET_SUPER_MAGIC 0x27e0eb
@@ -109,6 +111,7 @@ typedef enum {
CS_NOTIFY_ON_RELEASE,
CS_SPREAD_PAGE,
CS_SPREAD_SLAB,
+ CS_RELEASED_RESOURCE,
} cpuset_flagbits_t;
/* convenient tests for these bits */
@@ -147,6 +150,11 @@ static inline int is_spread_slab(const s
return test_bit(CS_SPREAD_SLAB, &cs->flags);
}
+static inline int has_released_a_resource(const struct cpuset *cs)
+{
+ return test_bit(CS_RELEASED_RESOURCE, &cs->flags);
+}
+
/*
* Increment this integer everytime any cpuset changes its
* mems_allowed value. Users of cpusets can track this generation
@@ -541,7 +549,7 @@ static void cpuset_release_agent(const c
static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
- list_empty(&cs->children)) {
+ list_empty(&cs->children)) {
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -1265,6 +1273,7 @@ static int attach_task(struct cpuset *cs
from = oldcs->mems_allowed;
to = cs->mems_allowed;
+ set_bit(CS_RELEASED_RESOURCE, &oldcs->flags);
mutex_unlock(&callback_mutex);
@@ -1995,6 +2004,7 @@ static int cpuset_rmdir(struct inode *un
cpuset_d_remove_dir(d);
dput(d);
number_of_cpusets--;
+ set_bit(CS_RELEASED_RESOURCE, &parent->flags);
mutex_unlock(&callback_mutex);
if (list_empty(&parent->children))
check_for_release(parent, &pathbuf);
@@ -2062,50 +2072,173 @@ out:
}
/*
+ * Move every task that is a member of cpuset "from" to cpuset "to".
+ *
+ * Called with both manage_mutex and callback_mutex held
+ */
+static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
+{
+ int moved=0;
+ struct task_struct *g, *tsk;
+
+ read_lock(&tasklist_lock);
+ do_each_thread(g, tsk) {
+ if (tsk->cpuset == from) {
+ moved++;
+ task_lock(tsk);
+ tsk->cpuset = to;
+ task_unlock(tsk);
+ }
+ } while_each_thread(g, tsk);
+ read_unlock(&tasklist_lock);
+ atomic_add(moved, &to->count);
+ atomic_set(&from->count, 0);
+}
+
+/*
* If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
* or memory nodes, we need to walk over the cpuset hierarchy,
* removing that CPU or node from all cpusets. If this removes the
- * last CPU or node from a cpuset, then the guarantee_online_cpus()
- * or guarantee_online_mems() code will use that emptied cpusets
- * parent online CPUs or nodes. Cpusets that were already empty of
- * CPUs or nodes are left empty.
+ * last CPU or node from a cpuset, then move the tasks in the empty
+ * cpuset to its next-highest non-empty parent.
*
- * This routine is intentionally inefficient in a couple of regards.
- * It will check all cpusets in a subtree even if the top cpuset of
- * the subtree has no offline CPUs or nodes. It checks both CPUs and
- * nodes, even though the caller could have been coded to know that
- * only one of CPUs or nodes needed to be checked on a given call.
- * This was done to minimize text size rather than cpu cycles.
+ * Called with both manage_mutex and callback_mutex held
+ */
+static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
+{
+ struct cpuset *parent;
+
+ /* cs->count is the number of tasks using the cpuset */
+ if (atomic_read(&cs->count) == 0)
+ return;
+
+ /* this cpuset has had member tasks */
+ set_bit(CS_RELEASED_RESOURCE, &cs->flags);
+
+ /*
+ * Find its next-highest non-empty parent (the top cpuset
+ * has online cpus, so it can't be empty).
+ */
+ parent = cs->parent;
+ while (cpus_empty(parent->cpus_allowed)) {
+ /*
+ * this empty cpuset should now be considered to
+ * have been used, and therefore eligible for
+ * release when empty (if it is notify_on_release)
+ */
+ set_bit(CS_RELEASED_RESOURCE, &parent->flags);
+ parent = parent->parent;
+ }
+
+ move_member_tasks_to_cpuset(cs, parent);
+}
+
+/*
+ * Walk the specified cpuset subtree and count the number of empty
+ * notify_on_release cpusets.
+ *
+ * Note that such a notify_on_release cpuset must have had, at some time,
+ * member tasks or cpuset descendants and cpus and memory, before it can
+ * be a candidate for release.
*
- * Call with both manage_mutex and callback_mutex held.
+ * Call with both manage_mutex and callback_mutex held so
+ * that this function can modify cpus_allowed and mems_allowed.
*
- * Recursive, on depth of cpuset subtree.
+ * This walk processes the tree from top to bottom, completing one layer
+ * before dropping down to the next. It always processes a node before
+ * any of its children.
+ *
+ * Argument "queue" is the fifo queue of cpusets to be walked.
*/
+static int count_releasable_cpusets(const struct cpuset *root,
+ struct kfifo *queue)
+{
+ int count = 0;
+ struct cpuset *cp; /* scans cpusets being updated */
+ struct cpuset *child; /* scans child cpusets of cp */
-static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
+ __kfifo_put(queue, (unsigned char *)&root, sizeof(root));
+
+ while (__kfifo_get(queue, (unsigned char *)&cp, sizeof(cp))) {
+ list_for_each_entry(child, &cp->children, sibling)
+ __kfifo_put(queue, (unsigned char *)&child,
+ sizeof(child));
+ /* Remove offline cpus and mems from this cpuset. */
+ cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
+ nodes_and(cp->mems_allowed, cp->mems_allowed, node_online_map);
+ if ((cpus_empty(cp->cpus_allowed) ||
+ nodes_empty(cp->mems_allowed))) {
+ /* Move tasks from the empty cpuset to a parent */
+ remove_tasks_in_empty_cpuset(cp);
+ if (notify_on_release(cp) &&
+ has_released_a_resource(cp))
+ /* count the cpuset to be released */
+ count++;
+ }
+ }
+
+ kfifo_free(queue);
+ return count;
+}
+
+/*
+ * Walk the specified cpuset subtree and release the empty
+ * notify_on_release cpusets.
+ *
+ * This walk processes the tree from top to bottom, completing one layer
+ * before dropping down to the next. It always processes a node before
+ * any of its children.
+ */
+static void release_empty_cpusets(const struct cpuset *root)
{
- struct cpuset *c;
+ struct cpuset *cp; /* scans cpusets being updated */
+ struct cpuset *child; /* scans child cpusets of cp */
+ struct kfifo *queue; /* fifo queue of cpusets to be updated */
+ char *pathbuf = NULL;
- /* Each of our child cpusets mems must be online */
- list_for_each_entry(c, &cur->children, sibling) {
- guarantee_online_cpus_mems_in_subtree(c);
- if (!cpus_empty(c->cpus_allowed))
- guarantee_online_cpus(c, &c->cpus_allowed);
- if (!nodes_empty(c->mems_allowed))
- guarantee_online_mems(c, &c->mems_allowed);
+ queue = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
+ if (queue == ERR_PTR(-ENOMEM))
+ return;
+
+ __kfifo_put(queue, (unsigned char *)&root, sizeof(root));
+
+ while (__kfifo_get(queue, (unsigned char *)&cp, sizeof(cp))) {
+ list_for_each_entry(child, &cp->children, sibling)
+ __kfifo_put(queue, (unsigned char *)&child,
+ sizeof(child));
+ if ((notify_on_release(cp)) && has_released_a_resource(cp) &&
+ (cpus_empty(cp->cpus_allowed) ||
+ nodes_empty(cp->mems_allowed))) {
+ check_for_release(cp, &pathbuf);
+ cpuset_release_agent(pathbuf);
+ }
}
+
+ kfifo_free(queue);
+ return;
}
/*
+ * This runs from a workqueue.
+ *
+ * Its job is to remove any notify_on_release cpusets that have no
+ * online cpus.
+ *
+ * The argument is not used.
+ */
+static void remove_empty_cpusets(struct work_struct *p)
+{
+ release_empty_cpusets(&top_cpuset);
+ return;
+}
+
+static DECLARE_WORK(remove_empties_block, remove_empty_cpusets);
+
+/*
* The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
* cpu_online_map and node_online_map. Force the top cpuset to track
* whats online after any CPU or memory node hotplug or unplug event.
*
- * To ensure that we don't remove a CPU or node from the top cpuset
- * that is currently in use by a child cpuset (which would violate
- * the rule that cpusets must be subsets of their parent), we first
- * call the recursive routine guarantee_online_cpus_mems_in_subtree().
- *
* Since there are two callers of this routine, one for CPU hotplug
* events and one for memory node hotplug events, we could have coded
* two separate routines here. We code it as a single common routine
@@ -2114,12 +2247,26 @@ static void guarantee_online_cpus_mems_i
static void common_cpu_mem_hotplug_unplug(void)
{
+ int cnt=0;
+ struct kfifo *queue;
+
+ /*
+ * Pre-allocate the fifo queue of cpusets to be walked. You can't
+ * call memory allocation functions while holding callback_mutex.
+ */
+ queue = kfifo_alloc(number_of_cpusets * sizeof(struct cpuset *),
+ GFP_KERNEL, NULL);
+ if (queue == ERR_PTR(-ENOMEM))
+ return;
+
mutex_lock(&manage_mutex);
mutex_lock(&callback_mutex);
- guarantee_online_cpus_mems_in_subtree(&top_cpuset);
top_cpuset.cpus_allowed = cpu_online_map;
top_cpuset.mems_allowed = node_online_map;
+ cnt = count_releasable_cpusets(&top_cpuset, queue);
+ if (cnt)
+ schedule_work(&remove_empties_block);
mutex_unlock(&callback_mutex);
mutex_unlock(&manage_mutex);
@@ -2268,6 +2415,9 @@ void cpuset_exit(struct task_struct *tsk
mutex_lock(&manage_mutex);
if (atomic_dec_and_test(&cs->count))
check_for_release(cs, &pathbuf);
+ mutex_lock(&callback_mutex);
+ set_bit(CS_RELEASED_RESOURCE, &cs->flags);
+ mutex_unlock(&callback_mutex);
mutex_unlock(&manage_mutex);
cpuset_release_agent(pathbuf);
} else {
* Re: [PATCH] hotplug cpu: move tasks in empty cpusets to parent
From: Paul Jackson @ 2007-09-21 18:39 UTC
To: Cliff Wickman; +Cc: akpm, linux-mm
Cliff wrote:
> This patch corrects a situation that occurs when one disables all the cpus
> in a cpuset.
Acked-by: Paul Jackson <pj@sgi.com>
--
I won't rest till it's the best ...
Programmer, Linux Scalability
Paul Jackson <pj@sgi.com> 1.925.600.0401
* Re: [PATCH] hotplug cpu: move tasks in empty cpusets to parent
From: David Rientjes @ 2007-09-21 18:56 UTC
To: Cliff Wickman; +Cc: akpm, linux-mm
On Fri, 21 Sep 2007, Cliff Wickman wrote:
> This patch corrects a situation that occurs when one disables all the cpus
> in a cpuset.
>
> Currently, the disabled (cpu-less) cpuset inherits the cpus of its parent,
> which may overlap its exclusive sibling.
> (You will get non-removable cpusets -- "Invalid argument")
>
> Tasks of an empty cpuset should be moved to the parent of their current
> cpuset, or, if that parent also has no cpus, to its parent, and so on.
>
It looks like your patch is doing this for tasks that lose all of their
mems too, but it seems like the better alternative is to prevent the user
from doing echo -n > /dev/cpuset/my_cpuset/mems by returning -EINVAL in
update_nodemask(). Are you trying to enable some functionality for node
hot-unplug here? If so, that needs documentation in the description.
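Something like the following (a sketch; the body of update_nodemask() is
elided and the exact placement of the check is illustrative):

static int update_nodemask(struct cpuset *cs, char *buf)
{
	nodemask_t newmems;
	int retval;

	if (!*buf)
		return -EINVAL;		/* reject "echo -n > mems" */
	retval = nodelist_parse(buf, newmems);
	if (retval < 0)
		return retval;
	if (nodes_empty(newmems))
		return -EINVAL;		/* reject an empty nodelist */
	/* ... the rest of the existing update logic ... */
	return 0;
}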
> And the empty cpuset should be removed (if it is flagged notify_on_release).
>
notify_on_release simply calls a userspace agent when the last task is
removed; it doesn't necessarily mean that the cpuset is automatically
removed. That's up to the userspace agent.
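The kernel only execs /sbin/cpuset_release_agent, passing the released
cpuset's path (relative to the cpuset mount) as argv[1]. A minimal such
agent, as a sketch (the /dev/cpuset mount point is an assumption of this
example):

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[4096];

	if (argc < 2)
		return 1;
	/* removing the directory is this program's decision, not
	 * something notify_on_release itself performs */
	snprintf(path, sizeof(path), "/dev/cpuset%s", argv[1]);
	return rmdir(path) ? 1 : 0;
}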
There doesn't appear to be any support for memory_migrate cpusets in your
patch either, so memory allocated on a cpuset's mems is not automatically
migrated to the new cpuset's mems when the cpuset loses all of its cpus.
David
* Re: [PATCH] hotplug cpu: move tasks in empty cpusets to parent
From: David Rientjes @ 2007-09-21 23:15 UTC
To: Cliff Wickman; +Cc: akpm, pj, linux-mm
On Fri, 21 Sep 2007, Cliff Wickman wrote:
> This patch corrects a situation that occurs when one disables all the cpus
> in a cpuset.
>
> Currently, the disabled (cpu-less) cpuset inherits the cpus of its parent,
> which may overlap its exclusive sibling.
> (You will get non-removable cpusets -- "Invalid argument")
>
> Tasks of an empty cpuset should be moved to the parent of their current
> cpuset, or, if that parent also has no cpus, to its parent, and so on.
>
> And the empty cpuset should be removed (if it is flagged notify_on_release).
>
Again, being flagged notify_on_release does not remove the empty cpuset;
it simply causes a userspace agent to be called to do cleanup, if such a
userspace agent exists.
Inline comments below.
> Index: linus.070921/kernel/cpuset.c
> ===================================================================
> --- linus.070921.orig/kernel/cpuset.c
> +++ linus.070921/kernel/cpuset.c
> @@ -52,6 +52,8 @@
> #include <asm/uaccess.h>
> #include <asm/atomic.h>
> #include <linux/mutex.h>
> +#include <linux/kfifo.h>
> +#include <linux/workqueue.h>
>
> #define CPUSET_SUPER_MAGIC 0x27e0eb
>
> @@ -109,6 +111,7 @@ typedef enum {
> CS_NOTIFY_ON_RELEASE,
> CS_SPREAD_PAGE,
> CS_SPREAD_SLAB,
> + CS_RELEASED_RESOURCE,
> } cpuset_flagbits_t;
>
> /* convenient tests for these bits */
> @@ -147,6 +150,11 @@ static inline int is_spread_slab(const s
> return test_bit(CS_SPREAD_SLAB, &cs->flags);
> }
>
> +static inline int has_released_a_resource(const struct cpuset *cs)
> +{
> + return test_bit(CS_RELEASED_RESOURCE, &cs->flags);
> +}
> +
> /*
> * Increment this integer everytime any cpuset changes its
> * mems_allowed value. Users of cpusets can track this generation
> @@ -541,7 +549,7 @@ static void cpuset_release_agent(const c
> static void check_for_release(struct cpuset *cs, char **ppathbuf)
> {
> if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
> - list_empty(&cs->children)) {
> + list_empty(&cs->children)) {
> char *buf;
>
> buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
Unnecessary change.
> @@ -1265,6 +1273,7 @@ static int attach_task(struct cpuset *cs
>
> from = oldcs->mems_allowed;
> to = cs->mems_allowed;
> + set_bit(CS_RELEASED_RESOURCE, &oldcs->flags);
>
> mutex_unlock(&callback_mutex);
>
> @@ -1995,6 +2004,7 @@ static int cpuset_rmdir(struct inode *un
> cpuset_d_remove_dir(d);
> dput(d);
> number_of_cpusets--;
> + set_bit(CS_RELEASED_RESOURCE, &parent->flags);
> mutex_unlock(&callback_mutex);
> if (list_empty(&parent->children))
> check_for_release(parent, &pathbuf);
> @@ -2062,50 +2072,180 @@ out:
> }
>
> /*
> + * Move every task that is a member of cpuset "from" to cpuset "to".
> + *
> + * Called with both manage_mutex and callback_mutex held
> + */
> +static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
> +{
> + int moved=0;
> + struct task_struct *g, *tsk;
> +
> + read_lock(&tasklist_lock);
> + do_each_thread(g, tsk) {
> + if (tsk->cpuset == from) {
> + moved++;
> + task_lock(tsk);
> + tsk->cpuset = to;
> + task_unlock(tsk);
> + }
> + } while_each_thread(g, tsk);
> + read_unlock(&tasklist_lock);
> + atomic_add(moved, &to->count);
> + atomic_set(&from->count, 0);
> +}
> +
This isn't that simple. You're missing the mpol_rebind_mm() calls, the
updating of tsk->mems_allowed, etc. It's much easier to make
remove_tasks_in_empty_cpuset() a client of attach_task() by supplying
pids; that would keep attach_task() the only function where cpuset
assignment changes.
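Roughly like this (a sketch, not tested: it assumes the caller holds
manage_mutex but not callback_mutex or tasklist_lock, since attach_task()
takes those itself, and the caller would still have to hand pathbuf to
cpuset_release_agent() after dropping manage_mutex):

static void move_member_tasks_to_cpuset(struct cpuset *from,
					struct cpuset *to)
{
	struct task_struct *g, *tsk;
	char pidbuf[16];
	char *pathbuf = NULL;
	pid_t pid;

	for (;;) {
		/* find any one task still in the empty cpuset */
		pid = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, tsk) {
			if (tsk->cpuset == from) {
				pid = tsk->pid;
				goto found;
			}
		} while_each_thread(g, tsk);
found:
		read_unlock(&tasklist_lock);
		if (!pid)
			break;
		/* attach_task() takes the pid in text form and does the
		 * mpol_rebind_mm() and mems_allowed updates itself */
		snprintf(pidbuf, sizeof(pidbuf), "%d", pid);
		if (attach_task(to, pidbuf, &pathbuf) < 0)
			break;	/* e.g. the task exited meanwhile */
	}
}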
* [PATCH] hotplug cpu: move tasks in empty cpusets to parent
From: Cliff Wickman @ 2007-09-21 22:53 UTC
To: akpm; +Cc: rientjes, pj, linux-mm
This patch corrects a situation that occurs when one disables all the cpus
in a cpuset.
Currently, the disabled (cpu-less) cpuset inherits the cpus of its parent,
which may overlap its exclusive sibling.
(You will get non-removable cpusets -- "Invalid argument")
Tasks of an empty cpuset should be moved to the parent of their current
cpuset, or, if that parent also has no cpus, to its parent, and so on.
And the empty cpuset should be removed (if it is flagged notify_on_release).
This patch uses a workqueue thread to call the function that deletes the
cpuset. That way we avoid the complexity of the cpuset locks.
Diffed against 2.6.23-rc7
Signed-off-by: Cliff Wickman <cpw@sgi.com>
---
Incorporated some explanatory comments resulting from discussion between
David Rientjes and Paul Jackson.
(on remove_tasks_in_empty_cpuset() and count_releasable_cpusets())
kernel/cpuset.c | 213 ++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 185 insertions(+), 28 deletions(-)
Index: linus.070921/kernel/cpuset.c
===================================================================
--- linus.070921.orig/kernel/cpuset.c
+++ linus.070921/kernel/cpuset.c
@@ -52,6 +52,8 @@
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
#define CPUSET_SUPER_MAGIC 0x27e0eb
@@ -109,6 +111,7 @@ typedef enum {
CS_NOTIFY_ON_RELEASE,
CS_SPREAD_PAGE,
CS_SPREAD_SLAB,
+ CS_RELEASED_RESOURCE,
} cpuset_flagbits_t;
/* convenient tests for these bits */
@@ -147,6 +150,11 @@ static inline int is_spread_slab(const s
return test_bit(CS_SPREAD_SLAB, &cs->flags);
}
+static inline int has_released_a_resource(const struct cpuset *cs)
+{
+ return test_bit(CS_RELEASED_RESOURCE, &cs->flags);
+}
+
/*
* Increment this integer everytime any cpuset changes its
* mems_allowed value. Users of cpusets can track this generation
@@ -541,7 +549,7 @@ static void cpuset_release_agent(const c
static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
- list_empty(&cs->children)) {
+ list_empty(&cs->children)) {
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -1265,6 +1273,7 @@ static int attach_task(struct cpuset *cs
from = oldcs->mems_allowed;
to = cs->mems_allowed;
+ set_bit(CS_RELEASED_RESOURCE, &oldcs->flags);
mutex_unlock(&callback_mutex);
@@ -1995,6 +2004,7 @@ static int cpuset_rmdir(struct inode *un
cpuset_d_remove_dir(d);
dput(d);
number_of_cpusets--;
+ set_bit(CS_RELEASED_RESOURCE, &parent->flags);
mutex_unlock(&callback_mutex);
if (list_empty(&parent->children))
check_for_release(parent, &pathbuf);
@@ -2062,50 +2072,180 @@ out:
}
/*
+ * Move every task that is a member of cpuset "from" to cpuset "to".
+ *
+ * Called with both manage_mutex and callback_mutex held
+ */
+static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
+{
+ int moved=0;
+ struct task_struct *g, *tsk;
+
+ read_lock(&tasklist_lock);
+ do_each_thread(g, tsk) {
+ if (tsk->cpuset == from) {
+ moved++;
+ task_lock(tsk);
+ tsk->cpuset = to;
+ task_unlock(tsk);
+ }
+ } while_each_thread(g, tsk);
+ read_unlock(&tasklist_lock);
+ atomic_add(moved, &to->count);
+ atomic_set(&from->count, 0);
+}
+
+/*
* If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
* or memory nodes, we need to walk over the cpuset hierarchy,
* removing that CPU or node from all cpusets. If this removes the
- * last CPU or node from a cpuset, then the guarantee_online_cpus()
- * or guarantee_online_mems() code will use that emptied cpusets
- * parent online CPUs or nodes. Cpusets that were already empty of
- * CPUs or nodes are left empty.
+ * last CPU or node from a cpuset, then move the tasks in the empty
+ * cpuset to its next-highest non-empty parent.
+ *
+ * The parent cpuset has some superset of the 'mems' nodes that the
+ * newly empty cpuset held, so no migration of memory is necessary.
+ *
+ * Called with both manage_mutex and callback_mutex held
+ */
+static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
+{
+ struct cpuset *parent;
+
+ /* cs->count is the number of tasks using the cpuset */
+ if (atomic_read(&cs->count) == 0)
+ return;
+
+ /* this cpuset has had member tasks */
+ set_bit(CS_RELEASED_RESOURCE, &cs->flags);
+
+ /*
+ * Find its next-highest non-empty parent (the top cpuset
+ * has online cpus, so it can't be empty).
+ */
+ parent = cs->parent;
+ while (cpus_empty(parent->cpus_allowed)) {
+ /*
+ * this empty cpuset should now be considered to
+ * have been used, and therefore eligible for
+ * release when empty (if it is notify_on_release)
+ */
+ set_bit(CS_RELEASED_RESOURCE, &parent->flags);
+ parent = parent->parent;
+ }
+
+ move_member_tasks_to_cpuset(cs, parent);
+}
+
+/*
+ * Walk the specified cpuset subtree and count the number of empty
+ * notify_on_release cpusets.
+ *
+ * Note that such a notify_on_release cpuset must have had, at some time,
+ * member tasks or cpuset descendants and cpus and memory, before it can
+ * be a candidate for release.
+ *
+ * Call with both manage_mutex and callback_mutex held so
+ * that this function can modify cpus_allowed and mems_allowed.
*
- * This routine is intentionally inefficient in a couple of regards.
- * It will check all cpusets in a subtree even if the top cpuset of
- * the subtree has no offline CPUs or nodes. It checks both CPUs and
- * nodes, even though the caller could have been coded to know that
- * only one of CPUs or nodes needed to be checked on a given call.
- * This was done to minimize text size rather than cpu cycles.
+ * This walk processes the tree from top to bottom, completing one layer
+ * before dropping down to the next. It always processes a node before
+ * any of its children.
*
- * Call with both manage_mutex and callback_mutex held.
+ * Argument "queue" is the fifo queue of cpusets to be walked.
*
- * Recursive, on depth of cpuset subtree.
+ * For now, since we lack memory hot unplug, we'll never see a cpuset
+ * that has tasks along with an empty 'mems'. But if we did see such
+ * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
*/
+static int count_releasable_cpusets(const struct cpuset *root,
+ struct kfifo *queue)
+{
+ int count = 0;
+ struct cpuset *cp; /* scans cpusets being updated */
+ struct cpuset *child; /* scans child cpusets of cp */
+
+ __kfifo_put(queue, (unsigned char *)&root, sizeof(root));
+
+ while (__kfifo_get(queue, (unsigned char *)&cp, sizeof(cp))) {
+ list_for_each_entry(child, &cp->children, sibling)
+ __kfifo_put(queue, (unsigned char *)&child,
+ sizeof(child));
+ /* Remove offline cpus and mems from this cpuset. */
+ cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
+ nodes_and(cp->mems_allowed, cp->mems_allowed, node_online_map);
+ if ((cpus_empty(cp->cpus_allowed) ||
+ nodes_empty(cp->mems_allowed))) {
+ /* Move tasks from the empty cpuset to a parent */
+ remove_tasks_in_empty_cpuset(cp);
+ if (notify_on_release(cp) &&
+ has_released_a_resource(cp))
+ /* count the cpuset to be released */
+ count++;
+ }
+ }
-static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
+ kfifo_free(queue);
+ return count;
+}
+
+/*
+ * Walk the specified cpuset subtree and release the empty
+ * notify_on_release cpusets.
+ *
+ * This walk processes the tree from top to bottom, completing one layer
+ * before dropping down to the next. It always processes a node before
+ * any of its children.
+ */
+static void release_empty_cpusets(const struct cpuset *root)
{
- struct cpuset *c;
+ struct cpuset *cp; /* scans cpusets being updated */
+ struct cpuset *child; /* scans child cpusets of cp */
+ struct kfifo *queue; /* fifo queue of cpusets to be updated */
+ char *pathbuf = NULL;
- /* Each of our child cpusets mems must be online */
- list_for_each_entry(c, &cur->children, sibling) {
- guarantee_online_cpus_mems_in_subtree(c);
- if (!cpus_empty(c->cpus_allowed))
- guarantee_online_cpus(c, &c->cpus_allowed);
- if (!nodes_empty(c->mems_allowed))
- guarantee_online_mems(c, &c->mems_allowed);
+ queue = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
+ if (queue == ERR_PTR(-ENOMEM))
+ return;
+
+ __kfifo_put(queue, (unsigned char *)&root, sizeof(root));
+
+ while (__kfifo_get(queue, (unsigned char *)&cp, sizeof(cp))) {
+ list_for_each_entry(child, &cp->children, sibling)
+ __kfifo_put(queue, (unsigned char *)&child,
+ sizeof(child));
+ if ((notify_on_release(cp)) && has_released_a_resource(cp) &&
+ (cpus_empty(cp->cpus_allowed) ||
+ nodes_empty(cp->mems_allowed))) {
+ check_for_release(cp, &pathbuf);
+ cpuset_release_agent(pathbuf);
+ }
}
+
+ kfifo_free(queue);
+ return;
}
/*
+ * This runs from a workqueue.
+ *
+ * Its job is to remove any notify_on_release cpusets that have no
+ * online cpus.
+ *
+ * The argument is not used.
+ */
+static void remove_empty_cpusets(struct work_struct *p)
+{
+ release_empty_cpusets(&top_cpuset);
+ return;
+}
+
+static DECLARE_WORK(remove_empties_block, remove_empty_cpusets);
+
+/*
* The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
* cpu_online_map and node_online_map. Force the top cpuset to track
* whats online after any CPU or memory node hotplug or unplug event.
*
- * To ensure that we don't remove a CPU or node from the top cpuset
- * that is currently in use by a child cpuset (which would violate
- * the rule that cpusets must be subsets of their parent), we first
- * call the recursive routine guarantee_online_cpus_mems_in_subtree().
- *
* Since there are two callers of this routine, one for CPU hotplug
* events and one for memory node hotplug events, we could have coded
* two separate routines here. We code it as a single common routine
@@ -2114,12 +2254,26 @@ static void guarantee_online_cpus_mems_i
static void common_cpu_mem_hotplug_unplug(void)
{
+ int cnt=0;
+ struct kfifo *queue;
+
+ /*
+ * Pre-allocate the fifo queue of cpusets to be walked. You can't
+ * call memory allocation functions while holding callback_mutex.
+ */
+ queue = kfifo_alloc(number_of_cpusets * sizeof(struct cpuset *),
+ GFP_KERNEL, NULL);
+ if (queue == ERR_PTR(-ENOMEM))
+ return;
+
mutex_lock(&manage_mutex);
mutex_lock(&callback_mutex);
- guarantee_online_cpus_mems_in_subtree(&top_cpuset);
top_cpuset.cpus_allowed = cpu_online_map;
top_cpuset.mems_allowed = node_online_map;
+ cnt = count_releasable_cpusets(&top_cpuset, queue);
+ if (cnt)
+ schedule_work(&remove_empties_block);
mutex_unlock(&callback_mutex);
mutex_unlock(&manage_mutex);
@@ -2268,6 +2422,9 @@ void cpuset_exit(struct task_struct *tsk
mutex_lock(&manage_mutex);
if (atomic_dec_and_test(&cs->count))
check_for_release(cs, &pathbuf);
+ mutex_lock(&callback_mutex);
+ set_bit(CS_RELEASED_RESOURCE, &cs->flags);
+ mutex_unlock(&callback_mutex);
mutex_unlock(&manage_mutex);
cpuset_release_agent(pathbuf);
} else {
* Re: [PATCH] hotplug cpu: move tasks in empty cpusets to parent
From: Andrew Morton @ 2007-09-20 23:08 UTC
To: Cliff Wickman; +Cc: linux-mm
On Tue, 18 Sep 2007 15:40:23 -0500
cpw@sgi.com (Cliff Wickman) wrote:
> This patch corrects a situation that occurs when one disables all the cpus
> in a cpuset.
patching file kernel/cpuset.c
Hunk #1 FAILED at 53.
Hunk #2 succeeded at 116 with fuzz 1 (offset 5 lines).
Hunk #3 succeeded at 145 (offset -5 lines).
Hunk #4 FAILED at 544.
Hunk #5 FAILED at 836.
Hunk #6 FAILED at 1125.
Hunk #7 FAILED at 1303.
Hunk #8 FAILED at 2034.
Hunk #9 FAILED at 2102.
Hunk #10 FAILED at 2277.
Hunk #11 FAILED at 2445.
9 out of 11 hunks FAILED -- saving rejects to file kernel/cpuset.c.rej
Failed to apply hotplug-cpu-move-tasks-in-empty-cpusets-to-parent
life sucks a bit at present.
* [PATCH] hotplug cpu: move tasks in empty cpusets to parent
From: Cliff Wickman @ 2007-09-18 20:40 UTC
To: akpm; +Cc: linux-mm
This patch corrects a situation that occurs when one disables all the cpus
in a cpuset.
Currently, the disabled (cpu-less) cpuset inherits the cpus of its parent,
which may overlap its exclusive sibling.
(You will get non-removable cpusets -- "Invalid argument")
Tasks of an empty cpuset should be moved to the parent of their current
cpuset, or, if that parent also has no cpus, to its parent, and so on.
And the empty cpuset should be removed (if it is flagged notify_on_release).
This patch uses a workqueue thread to call the function that deletes the
cpuset. That way we avoid the complexity of the cpuset locks.
This is about version 4 of this patch. It avoids the recursive method that
was first used, and incorporates fixes for locking and for the
notify_on_release conceptual issues raised by Paul Jackson.
Diffed against 2.6.23-rc3
Signed-off-by: Cliff Wickman <cpw@sgi.com>
---
kernel/cpuset.c | 210 ++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 180 insertions(+), 30 deletions(-)
Index: linus.070821/kernel/cpuset.c
===================================================================
--- linus.070821.orig/kernel/cpuset.c
+++ linus.070821/kernel/cpuset.c
@@ -53,6 +53,7 @@
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>
+#include <linux/workqueue.h>
#define CPUSET_SUPER_MAGIC 0x27e0eb
@@ -110,6 +111,7 @@ typedef enum {
CS_NOTIFY_ON_RELEASE,
CS_SPREAD_PAGE,
CS_SPREAD_SLAB,
+ CS_RELEASED_RESOURCE,
} cpuset_flagbits_t;
/* convenient tests for these bits */
@@ -148,6 +150,11 @@ static inline int is_spread_slab(const s
return test_bit(CS_SPREAD_SLAB, &cs->flags);
}
+static inline int has_released_a_resource(const struct cpuset *cs)
+{
+ return test_bit(CS_RELEASED_RESOURCE, &cs->flags);
+}
+
/*
* Increment this integer everytime any cpuset changes its
* mems_allowed value. Users of cpusets can track this generation
@@ -542,7 +549,7 @@ static void cpuset_release_agent(const c
static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
- list_empty(&cs->children)) {
+ list_empty(&cs->children)) {
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -834,7 +841,8 @@ update_cpu_domains_tree(struct cpuset *r
while (__kfifo_get(queue, (unsigned char *)&cp, sizeof(cp))) {
list_for_each_entry(child, &cp->children, sibling)
- __kfifo_put(queue,(unsigned char *)&child,sizeof(child));
+ __kfifo_put(queue, (unsigned char *)&child,
+ sizeof(child));
update_cpu_domains(cp);
}
@@ -1122,7 +1130,7 @@ static int update_flag(cpuset_flagbits_t
mutex_unlock(&callback_mutex);
if (cpu_exclusive_changed)
- update_cpu_domains_tree(cs);
+ update_cpu_domains_tree(cs);
return 0;
}
@@ -1300,6 +1308,7 @@ static int attach_task(struct cpuset *cs
from = oldcs->mems_allowed;
to = cs->mems_allowed;
+ set_bit(CS_RELEASED_RESOURCE, &oldcs->flags);
mutex_unlock(&callback_mutex);
@@ -2030,6 +2039,7 @@ static int cpuset_rmdir(struct inode *un
cpuset_d_remove_dir(d);
dput(d);
number_of_cpusets--;
+ set_bit(CS_RELEASED_RESOURCE, &parent->flags);
mutex_unlock(&callback_mutex);
if (list_empty(&parent->children))
check_for_release(parent, &pathbuf);
@@ -2097,50 +2107,173 @@ out:
}
/*
+ * Move every task that is a member of cpuset "from" to cpuset "to".
+ *
+ * Called with both manage_mutex and callback_mutex held
+ */
+static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
+{
+ int moved=0;
+ struct task_struct *g, *tsk;
+
+ read_lock(&tasklist_lock);
+ do_each_thread(g, tsk) {
+ if (tsk->cpuset == from) {
+ moved++;
+ task_lock(tsk);
+ tsk->cpuset = to;
+ task_unlock(tsk);
+ }
+ } while_each_thread(g, tsk);
+ read_unlock(&tasklist_lock);
+ atomic_add(moved, &to->count);
+ atomic_set(&from->count, 0);
+}
+
+/*
* If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
* or memory nodes, we need to walk over the cpuset hierarchy,
* removing that CPU or node from all cpusets. If this removes the
- * last CPU or node from a cpuset, then the guarantee_online_cpus()
- * or guarantee_online_mems() code will use that emptied cpusets
- * parent online CPUs or nodes. Cpusets that were already empty of
- * CPUs or nodes are left empty.
+ * last CPU or node from a cpuset, then move the tasks in the empty
+ * cpuset to its next-highest non-empty parent.
*
- * This routine is intentionally inefficient in a couple of regards.
- * It will check all cpusets in a subtree even if the top cpuset of
- * the subtree has no offline CPUs or nodes. It checks both CPUs and
- * nodes, even though the caller could have been coded to know that
- * only one of CPUs or nodes needed to be checked on a given call.
- * This was done to minimize text size rather than cpu cycles.
+ * Called with both manage_mutex and callback_mutex held
+ */
+static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
+{
+ struct cpuset *parent;
+
+ /* cs->count is the number of tasks using the cpuset */
+ if (atomic_read(&cs->count) == 0)
+ return;
+
+ /* this cpuset has had member tasks */
+ set_bit(CS_RELEASED_RESOURCE, &cs->flags);
+
+ /*
+ * Find its next-highest non-empty parent (the top cpuset
+ * has online cpus, so it can't be empty).
+ */
+ parent = cs->parent;
+ while (cpus_empty(parent->cpus_allowed)) {
+ /*
+ * this empty cpuset should now be considered to
+ * have been used, and therefore eligible for
+ * release when empty (if it is notify_on_release)
+ */
+ set_bit(CS_RELEASED_RESOURCE, &parent->flags);
+ parent = parent->parent;
+ }
+
+ move_member_tasks_to_cpuset(cs, parent);
+}
+
+/*
+ * Walk the specified cpuset subtree and count the number of empty
+ * notify_on_release cpusets.
+ *
+ * Note that such a notify_on_release cpuset must have had, at some time,
+ * member tasks or cpuset descendants and cpus and memory, before it can
+ * be a candidate for release.
*
- * Call with both manage_mutex and callback_mutex held.
+ * Call with both manage_mutex and callback_mutex held so
+ * that this function can modify cpus_allowed and mems_allowed.
+ *
+ * This walk processes the tree from top to bottom, completing one layer
+ * before dropping down to the next. It always processes a node before
+ * any of its children.
*
- * Recursive, on depth of cpuset subtree.
+ * Argument "queue" is the fifo queue of cpusets to be walked.
*/
+static int count_releasable_cpusets(const struct cpuset *root,
+ struct kfifo *queue)
+{
+ int count = 0;
+ struct cpuset *cp; /* scans cpusets being updated */
+ struct cpuset *child; /* scans child cpusets of cp */
+
+ __kfifo_put(queue, (unsigned char *)&root, sizeof(root));
+
+ while (__kfifo_get(queue, (unsigned char *)&cp, sizeof(cp))) {
+ list_for_each_entry(child, &cp->children, sibling)
+ __kfifo_put(queue, (unsigned char *)&child,
+ sizeof(child));
+ /* Remove offline cpus and mems from this cpuset. */
+ cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
+ nodes_and(cp->mems_allowed, cp->mems_allowed, node_online_map);
+ if ((cpus_empty(cp->cpus_allowed) ||
+ nodes_empty(cp->mems_allowed))) {
+ /* Move tasks from the empty cpuset to a parent */
+ remove_tasks_in_empty_cpuset(cp);
+ if (notify_on_release(cp) &&
+ has_released_a_resource(cp))
+ /* count the cpuset to be released */
+ count++;
+ }
+ }
+
+ kfifo_free(queue);
+ return count;
+}
-static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
+/*
+ * Walk the specified cpuset subtree and release the empty
+ * notify_on_release cpusets.
+ *
+ * This walk processes the tree from top to bottom, completing one layer
+ * before dropping down to the next. It always processes a node before
+ * any of its children.
+ */
+static void release_empty_cpusets(const struct cpuset *root)
{
- struct cpuset *c;
+ struct cpuset *cp; /* scans cpusets being updated */
+ struct cpuset *child; /* scans child cpusets of cp */
+ struct kfifo *queue; /* fifo queue of cpusets to be updated */
+ char *pathbuf = NULL;
- /* Each of our child cpusets mems must be online */
- list_for_each_entry(c, &cur->children, sibling) {
- guarantee_online_cpus_mems_in_subtree(c);
- if (!cpus_empty(c->cpus_allowed))
- guarantee_online_cpus(c, &c->cpus_allowed);
- if (!nodes_empty(c->mems_allowed))
- guarantee_online_mems(c, &c->mems_allowed);
+ queue = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
+ if (queue == ERR_PTR(-ENOMEM))
+ return;
+
+ __kfifo_put(queue, (unsigned char *)&root, sizeof(root));
+
+ while (__kfifo_get(queue, (unsigned char *)&cp, sizeof(cp))) {
+ list_for_each_entry(child, &cp->children, sibling)
+ __kfifo_put(queue, (unsigned char *)&child,
+ sizeof(child));
+ if ((notify_on_release(cp)) && has_released_a_resource(cp) &&
+ (cpus_empty(cp->cpus_allowed) ||
+ nodes_empty(cp->mems_allowed))) {
+ check_for_release(cp, &pathbuf);
+ cpuset_release_agent(pathbuf);
+ }
}
+
+ kfifo_free(queue);
+ return;
+}
+
+/*
+ * This runs from a workqueue.
+ *
+ * Its job is to remove any notify_on_release cpusets that have no
+ * online cpus.
+ *
+ * The argument is not used.
+ */
+static void remove_empty_cpusets(struct work_struct *p)
+{
+ release_empty_cpusets(&top_cpuset);
+ return;
}
+static DECLARE_WORK(remove_empties_block, remove_empty_cpusets);
+
/*
* The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
* cpu_online_map and node_online_map. Force the top cpuset to track
* whats online after any CPU or memory node hotplug or unplug event.
*
- * To ensure that we don't remove a CPU or node from the top cpuset
- * that is currently in use by a child cpuset (which would violate
- * the rule that cpusets must be subsets of their parent), we first
- * call the recursive routine guarantee_online_cpus_mems_in_subtree().
- *
* Since there are two callers of this routine, one for CPU hotplug
* events and one for memory node hotplug events, we could have coded
* two separate routines here. We code it as a single common routine
@@ -2149,12 +2282,26 @@ static void guarantee_online_cpus_mems_i
static void common_cpu_mem_hotplug_unplug(void)
{
+ int cnt=0;
+ struct kfifo *queue;
+
+ /*
+ * Pre-allocate the fifo queue of cpusets to be walked. You can't
+ * call memory allocation functions while holding callback_mutex.
+ */
+ queue = kfifo_alloc(number_of_cpusets * sizeof(struct cpuset *),
+ GFP_KERNEL, NULL);
+ if (queue == ERR_PTR(-ENOMEM))
+ return;
+
mutex_lock(&manage_mutex);
mutex_lock(&callback_mutex);
- guarantee_online_cpus_mems_in_subtree(&top_cpuset);
top_cpuset.cpus_allowed = cpu_online_map;
top_cpuset.mems_allowed = node_online_map;
+ cnt = count_releasable_cpusets(&top_cpuset, queue);
+ if (cnt)
+ schedule_work(&remove_empties_block);
mutex_unlock(&callback_mutex);
mutex_unlock(&manage_mutex);
@@ -2303,6 +2450,9 @@ void cpuset_exit(struct task_struct *tsk
mutex_lock(&manage_mutex);
if (atomic_dec_and_test(&cs->count))
check_for_release(cs, &pathbuf);
+ mutex_lock(&callback_mutex);
+ set_bit(CS_RELEASED_RESOURCE, &cs->flags);
+ mutex_unlock(&callback_mutex);
mutex_unlock(&manage_mutex);
cpuset_release_agent(pathbuf);
} else {