From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Linus Torvalds <torvalds@linux-foundation.org>,
Andrew Morton <akpm@linux-foundation.org>,
Thomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@elte.hu>,
Paul Turner <pjt@google.com>,
Suresh Siddha <suresh.b.siddha@intel.com>,
Mike Galbraith <efault@gmx.de>,
"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>,
Lai Jiangshan <laijs@cn.fujitsu.com>,
Dan Smith <danms@us.ibm.com>,
Bharata B Rao <bharata.rao@gmail.com>,
Lee Schermerhorn <Lee.Schermerhorn@hp.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Rik van Riel <riel@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [RFC][PATCH 19/26] srcu: Implement call_srcu()
Date: Fri, 16 Mar 2012 15:40:47 +0100
Message-ID: <20120316144241.351384914@chello.nl>
In-Reply-To: <20120316144028.036474157@chello.nl>
[-- Attachment #1: call_srcu.patch --]
[-- Type: text/plain, Size: 14186 bytes --]
Implement call_srcu() by using a state machine driven by
call_rcu_sched() and timer callbacks.
The state machine is a direct derivation of the existing
synchronize_srcu() code: it replaces the synchronize_sched() calls with
call_rcu_sched() callbacks and the schedule_timeout() calls with simple
timer callbacks.
It then re-implements synchronize_srcu() on top of call_srcu(), using a
completion: the queued callback signals the completion and the caller
blocks until it fires.
It completely wrecks synchronize_srcu_expedited(), which is only used
by KVM.
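For illustration, usage mirrors call_rcu(); a minimal sketch of an
updater freeing an SRCU-protected object (struct foo, free_foo and
my_srcu are made-up names, not part of this patch):

	struct foo {
		/* ... SRCU-protected payload ... */
		struct rcu_head rcu;
	};

	static void free_foo(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	/*
	 * Retire @f: free_foo() runs once all SRCU read-side critical
	 * sections on my_srcu that might still see @f have completed.
	 */
	static void retire_foo(struct foo *f)
	{
		call_srcu(&my_srcu, &f->rcu, free_foo);
	}

(This assumes a "static struct srcu_struct my_srcu" initialized with
init_srcu_struct().)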
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
include/linux/srcu.h | 23 +++
kernel/srcu.c | 304 +++++++++++++++++++++++++++++----------------------
2 files changed, 196 insertions(+), 131 deletions(-)
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -27,17 +27,35 @@
#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/timer.h>
struct srcu_struct_array {
int c[2];
};
+enum srcu_state {
+ srcu_idle,
+ srcu_sync_1,
+ srcu_sync_2,
+ srcu_sync_2b,
+ srcu_wait,
+ srcu_wait_b,
+ srcu_sync_3,
+ srcu_sync_3b,
+};
+
struct srcu_struct {
int completed;
struct srcu_struct_array __percpu *per_cpu_ref;
- struct mutex mutex;
+ raw_spinlock_t lock;
+ enum srcu_state state;
+ union {
+ struct rcu_head head;
+ struct timer_list timer;
+ };
+ struct rcu_head *pending[2];
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -73,6 +91,7 @@ void __srcu_read_unlock(struct srcu_stru
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
long srcu_batches_completed(struct srcu_struct *sp);
+void call_srcu(struct srcu_struct *sp, struct rcu_head *head, void (*func)(struct rcu_head *));
#ifdef CONFIG_DEBUG_LOCK_ALLOC
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -16,6 +16,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2006
+ * Copyright (C) 2012 Red Hat, Inc., Peter Zijlstra <pzijlstra@redhat.com>
*
* Author: Paul McKenney <paulmck@us.ibm.com>
*
@@ -33,11 +34,14 @@
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>
+#include <linux/completion.h>
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
sp->completed = 0;
- mutex_init(&sp->mutex);
+ raw_spin_lock_init(&sp->lock);
+ sp->state = srcu_idle;
+ sp->pending[0] = sp->pending[1] = NULL;
sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
return sp->per_cpu_ref ? 0 : -ENOMEM;
}
@@ -155,119 +159,190 @@ void __srcu_read_unlock(struct srcu_stru
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
-/*
- * We use an adaptive strategy for synchronize_srcu() and especially for
- * synchronize_srcu_expedited(). We spin for a fixed time period
- * (defined below) to allow SRCU readers to exit their read-side critical
- * sections. If there are still some readers after 10 microseconds,
- * we repeatedly block for 1-millisecond time periods. This approach
- * has done well in testing, so there is no need for a config parameter.
+
+/**
+ * synchronize_srcu_expedited - like synchronize_srcu, but less patient
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Note that it is illegal to call synchronize_srcu_expedited()
+ * from the corresponding SRCU read-side critical section; doing so
+ * will result in deadlock. However, it is perfectly legal to call
+ * synchronize_srcu_expedited() on one srcu_struct from some other
+ * srcu_struct's read-side critical section.
*/
-#define SYNCHRONIZE_SRCU_READER_DELAY 10
+void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+ /* XXX kill me */
+ synchronize_srcu(sp);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
-/*
- * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
+/**
+ * srcu_batches_completed - return batches completed.
+ * @sp: srcu_struct on which to report batch completion.
+ *
+ * Report the number of batches, correlated with, but not necessarily
+ * precisely the same as, the number of grace periods that have elapsed.
*/
-static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
+long srcu_batches_completed(struct srcu_struct *sp)
{
- int idx;
+ return sp->completed;
+}
+EXPORT_SYMBOL_GPL(srcu_batches_completed);
+
+static void do_srcu_state(struct srcu_struct *sp);
- idx = sp->completed;
- mutex_lock(&sp->mutex);
+static void do_srcu_state_timer(unsigned long __data)
+{
+ struct srcu_struct *sp = (void *)__data;
+ do_srcu_state(sp);
+}
- /*
- * Check to see if someone else did the work for us while we were
- * waiting to acquire the lock. We need -two- advances of
- * the counter, not just one. If there was but one, we might have
- * shown up -after- our helper's first synchronize_sched(), thus
- * having failed to prevent CPU-reordering races with concurrent
- * srcu_read_unlock()s on other CPUs (see comment below). So we
- * either (1) wait for two or (2) supply the second ourselves.
- */
+static void do_srcu_state_rcu(struct rcu_head *head)
+{
+ struct srcu_struct *sp = container_of(head, struct srcu_struct, head);
+ do_srcu_state(sp);
+}
- if ((sp->completed - idx) >= 2) {
- mutex_unlock(&sp->mutex);
- return;
+static void do_srcu_state(struct srcu_struct *sp)
+{
+ struct rcu_head *head, *next;
+ unsigned long flags;
+ int idx;
+
+ raw_spin_lock_irqsave(&sp->lock, flags);
+ switch (sp->state) {
+ case srcu_idle:
+ BUG();
+
+ case srcu_sync_1:
+ /*
+ * The preceding synchronize_sched() ensures that any CPU that
+ * sees the new value of sp->completed will also see any
+ * preceding changes to data structures made by this CPU. This
+ * prevents some other CPU from reordering the accesses in its
+ * SRCU read-side critical section to precede the corresponding
+ * srcu_read_lock() -- ensuring that such references will in
+ * fact be protected.
+ *
+ * So it is now safe to do the flip.
+ */
+ idx = sp->completed & 0x1;
+ sp->completed++;
+
+ sp->state = srcu_sync_2 + idx;
+ call_rcu_sched(&sp->head, do_srcu_state_rcu);
+ break;
+
+ case srcu_sync_2:
+ case srcu_sync_2b:
+ idx = sp->state - srcu_sync_2;
+
+ init_timer(&sp->timer);
+ sp->timer.data = (unsigned long)sp;
+ sp->timer.function = do_srcu_state_timer;
+ sp->state = srcu_wait + idx;
+
+ /*
+ * At this point, because of the preceding synchronize_sched(),
+ * all srcu_read_lock() calls using the old counters have
+ * completed. Their corresponding critical sections might well
+ * be still executing, but the srcu_read_lock() primitives
+ * themselves will have finished executing.
+ */
+test_pending:
+ if (!srcu_readers_active_idx(sp, idx)) {
+ sp->state = srcu_sync_3 + idx;
+ call_rcu_sched(&sp->head, do_srcu_state_rcu);
+ break;
+ }
+
+ mod_timer(&sp->timer, jiffies + 1);
+ break;
+
+ case srcu_wait:
+ case srcu_wait_b:
+ idx = sp->state - srcu_wait;
+ goto test_pending;
+
+ case srcu_sync_3:
+ case srcu_sync_3b:
+ idx = sp->state - srcu_sync_3;
+ /*
+ * The preceding synchronize_sched() forces all
+ * srcu_read_unlock() primitives that were executing
+ * concurrently with the preceding for_each_possible_cpu() loop
+ * to have completed by this point. More importantly, it also
+ * forces the corresponding SRCU read-side critical sections to
+ * have also completed, and the corresponding references to
+ * SRCU-protected data items to be dropped.
+ */
+ head = sp->pending[idx];
+ sp->pending[idx] = NULL;
+ raw_spin_unlock(&sp->lock);
+ while (head) {
+ next = head->next;
+ head->func(head);
+ head = next;
+ }
+ raw_spin_lock(&sp->lock);
+
+ /*
+ * If there's a new batch waiting...
+ */
+ if (sp->pending[idx ^ 1]) {
+ sp->state = srcu_sync_1;
+ call_rcu_sched(&sp->head, do_srcu_state_rcu);
+ break;
+ }
+
+ /*
+ * We're done!
+ */
+ sp->state = srcu_idle;
+ break;
}
+ raw_spin_unlock_irqrestore(&sp->lock, flags);
+}
- sync_func(); /* Force memory barrier on all CPUs. */
+void call_srcu(struct srcu_struct *sp,
+ struct rcu_head *head, void (*func)(struct rcu_head *))
+{
+ unsigned long flags;
+ int idx;
- /*
- * The preceding synchronize_sched() ensures that any CPU that
- * sees the new value of sp->completed will also see any preceding
- * changes to data structures made by this CPU. This prevents
- * some other CPU from reordering the accesses in its SRCU
- * read-side critical section to precede the corresponding
- * srcu_read_lock() -- ensuring that such references will in
- * fact be protected.
- *
- * So it is now safe to do the flip.
- */
+ head->func = func;
- idx = sp->completed & 0x1;
- sp->completed++;
+ raw_spin_lock_irqsave(&sp->lock, flags);
+ idx = sp->completed & 1;
+ barrier(); /* look at sp->completed once */
+ head->next = sp->pending[idx];
+ sp->pending[idx] = head;
+
+ if (sp->state == srcu_idle) {
+ sp->state = srcu_sync_1;
+ call_rcu_sched(&sp->head, do_srcu_state_rcu);
+ }
+ raw_spin_unlock_irqrestore(&sp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(call_srcu);
- sync_func(); /* Force memory barrier on all CPUs. */
+struct srcu_waiter {
+ struct completion wait;
+ struct rcu_head head;
+};
- /*
- * At this point, because of the preceding synchronize_sched(),
- * all srcu_read_lock() calls using the old counters have completed.
- * Their corresponding critical sections might well be still
- * executing, but the srcu_read_lock() primitives themselves
- * will have finished executing. We initially give readers
- * an arbitrarily chosen 10 microseconds to get out of their
- * SRCU read-side critical sections, then loop waiting 1/HZ
- * seconds per iteration. The 10-microsecond value has done
- * very well in testing.
- */
-
- if (srcu_readers_active_idx(sp, idx))
- udelay(SYNCHRONIZE_SRCU_READER_DELAY);
- while (srcu_readers_active_idx(sp, idx))
- schedule_timeout_interruptible(1);
-
- sync_func(); /* Force memory barrier on all CPUs. */
-
- /*
- * The preceding synchronize_sched() forces all srcu_read_unlock()
- * primitives that were executing concurrently with the preceding
- * for_each_possible_cpu() loop to have completed by this point.
- * More importantly, it also forces the corresponding SRCU read-side
- * critical sections to have also completed, and the corresponding
- * references to SRCU-protected data items to be dropped.
- *
- * Note:
- *
- * Despite what you might think at first glance, the
- * preceding synchronize_sched() -must- be within the
- * critical section ended by the following mutex_unlock().
- * Otherwise, a task taking the early exit can race
- * with a srcu_read_unlock(), which might have executed
- * just before the preceding srcu_readers_active() check,
- * and whose CPU might have reordered the srcu_read_unlock()
- * with the preceding critical section. In this case, there
- * is nothing preventing the synchronize_sched() task that is
- * taking the early exit from freeing a data structure that
- * is still being referenced (out of order) by the task
- * doing the srcu_read_unlock().
- *
- * Alternatively, the comparison with "2" on the early exit
- * could be changed to "3", but this increases synchronize_srcu()
- * latency for bulk loads. So the current code is preferred.
- */
+static void synchronize_srcu_complete(struct rcu_head *head)
+{
+ struct srcu_waiter *waiter = container_of(head, struct srcu_waiter, head);
- mutex_unlock(&sp->mutex);
+ complete(&waiter->wait);
}
/**
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
* @sp: srcu_struct with which to synchronize.
*
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates. Can block; must be called from
- * process context.
- *
* Note that it is illegal to call synchronize_srcu() from the corresponding
* SRCU read-side critical section; doing so will result in deadlock.
* However, it is perfectly legal to call synchronize_srcu() on one
@@ -275,41 +350,12 @@ static void __synchronize_srcu(struct sr
*/
void synchronize_srcu(struct srcu_struct *sp)
{
- __synchronize_srcu(sp, synchronize_sched);
-}
-EXPORT_SYMBOL_GPL(synchronize_srcu);
+ struct srcu_waiter waiter = {
+ .wait = COMPLETION_INITIALIZER_ONSTACK(waiter.wait),
+ };
-/**
- * synchronize_srcu_expedited - like synchronize_srcu, but less patient
- * @sp: srcu_struct with which to synchronize.
- *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates. Can block; must be called from
- * process context.
- *
- * Note that it is illegal to call synchronize_srcu_expedited()
- * from the corresponding SRCU read-side critical section; doing so
- * will result in deadlock. However, it is perfectly legal to call
- * synchronize_srcu_expedited() on one srcu_struct from some other
- * srcu_struct's read-side critical section.
- */
-void synchronize_srcu_expedited(struct srcu_struct *sp)
-{
- __synchronize_srcu(sp, synchronize_sched_expedited);
-}
-EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
-
-/**
- * srcu_batches_completed - return batches completed.
- * @sp: srcu_struct on which to report batch completion.
- *
- * Report the number of batches, correlated with, but not necessarily
- * precisely the same as, the number of grace periods that have elapsed.
- */
+ call_srcu(sp, &waiter.head, synchronize_srcu_complete);
-long srcu_batches_completed(struct srcu_struct *sp)
-{
- return sp->completed;
+ wait_for_completion(&waiter.wait);
}
-EXPORT_SYMBOL_GPL(srcu_batches_completed);
+EXPORT_SYMBOL_GPL(synchronize_srcu);
--