From: Tim Chen <tim.c.chen@linux.intel.com>
To: Ingo Molnar <mingo@elte.hu>,
Andrew Morton <akpm@linux-foundation.org>,
Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org, linux-mm <linux-mm@kvack.org>,
linux-arch@vger.kernel.org,
Linus Torvalds <torvalds@linux-foundation.org>,
Waiman Long <waiman.long@hp.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Alex Shi <alex.shi@linaro.org>, Andi Kleen <andi@firstfloor.org>,
Michel Lespinasse <walken@google.com>,
Davidlohr Bueso <davidlohr.bueso@hp.com>,
Matthew R Wilcox <matthew.r.wilcox@intel.com>,
Dave Hansen <dave.hansen@intel.com>,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
Rik van Riel <riel@redhat.com>,
Peter Hurley <peter@hurleysoftware.com>,
"Paul E.McKenney" <paulmck@linux.vnet.ibm.com>,
Tim Chen <tim.c.chen@linux.intel.com>,
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>,
George Spelvin <linux@horizon.com>,
"H. Peter Anvin" <hpa@zytor.com>, Arnd Bergmann <arnd@arndb.de>,
Aswin Chandramouleeswaran <aswin@hp.com>,
Scott J Norton <scott.norton@hp.com>,
Will Deacon <will.deacon@arm.com>,
"Figo.zhang" <figo1802@gmail.com>
Subject: [PATCH v4 5/5] MCS Lock: Allow architecture specific memory barrier in lock/unlock
Date: Wed, 06 Nov 2013 17:27:00 -0800
Message-ID: <1383787620.11046.368.camel@schen9-DESK>
In-Reply-To: <cover.1383783691.git.tim.c.chen@linux.intel.com>

This patch moves the decision of what kind of memory barrier to use in
the MCS lock and unlock functions down to the architecture specific
layer. It also moves the actual lock/unlock code into the
mcs_spinlock.c file.
A full memory barrier will be used if the following macros are not
defined:
1) smp_mb__before_critical_section()
2) smp_mb__after_critical_section()
For the x86 architecture, only a compiler barrier is needed.
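
As an illustration only (not part of this patch), an architecture whose
ordering needs are weaker than a full smp_mb() could override the
fallbacks in its asm/barrier.h, for example with the smp_rmb()/smp_wmb()
pairing that the generic MCS code used before this change:

        /*
         * Hypothetical arch override, shown as a sketch: read ordering
         * after the lock is taken, write ordering before it is handed
         * off to the next waiter.
         */
        #define smp_mb__before_critical_section()      smp_rmb()
        #define smp_mb__after_critical_section()       smp_wmb()
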
Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
---
arch/x86/include/asm/barrier.h | 6 +++
include/linux/mcs_spinlock.h | 78 +-------------------------------------
kernel/locking/mcs_spinlock.c | 81 ++++++++++++++++++++++++++++++++++++++-
3 files changed, 86 insertions(+), 79 deletions(-)
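
For reference, a minimal usage sketch (illustrative only; the names
below are made up): each contender declares its own queue node,
typically on the stack, and passes the same node to both the lock and
the unlock call:

        static struct mcs_spinlock *my_lock;    /* NULL when unlocked */

        static void my_critical_section(void)
        {
                struct mcs_spinlock node;       /* per-acquisition queue node */

                mcs_spin_lock(&my_lock, &node);
                /* ... critical section, ordered by the barrier hooks ... */
                mcs_spin_unlock(&my_lock, &node);
        }
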
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index c6cd358..6d0172c 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -92,6 +92,12 @@
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#if !defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE)
+# define smp_mb__before_critical_section() barrier()
+# define smp_mb__after_critical_section() barrier()
+#endif
+
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
index f2c71e8..d54bb23 100644
--- a/include/linux/mcs_spinlock.h
+++ b/include/linux/mcs_spinlock.h
@@ -12,19 +12,6 @@
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H
-/*
- * asm/processor.h may define arch_mutex_cpu_relax().
- * If it is not defined, cpu_relax() will be used.
- */
-#include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-#include <asm/processor.h>
-#include <linux/compiler.h>
-
-#ifndef arch_mutex_cpu_relax
-# define arch_mutex_cpu_relax() cpu_relax()
-#endif
-
struct mcs_spinlock {
struct mcs_spinlock *next;
int locked; /* 1 if lock acquired */
@@ -32,68 +19,7 @@ struct mcs_spinlock {
extern
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node);
-
-/*
- * In order to acquire the lock, the caller should declare a local node and
- * pass a reference of the node to this function in addition to the lock.
- * If the lock has already been acquired, then this will proceed to spin
- * on this node->locked until the previous lock holder sets the node->locked
- * in mcs_spin_unlock().
- *
- * The _raw_mcs_spin_lock() function should not be called directly. Instead,
- * users should call mcs_spin_lock().
- */
-static inline
-void _raw_mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
-{
- struct mcs_spinlock *prev;
-
- /* Init node */
- node->locked = 0;
- node->next = NULL;
-
- /* xchg() provides a memory barrier */
- prev = xchg(lock, node);
- if (likely(prev == NULL)) {
- /* Lock acquired */
- return;
- }
- ACCESS_ONCE(prev->next) = node;
- /* Wait until the lock holder passes the lock down */
- while (!ACCESS_ONCE(node->locked))
- arch_mutex_cpu_relax();
-
- /* Make sure subsequent operations happen after the lock is acquired */
- smp_rmb();
-}
-
-/*
- * Releases the lock. The caller should pass in the corresponding node that
- * was used to acquire the lock.
- */
-static inline
-void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
-{
- struct mcs_spinlock *next = ACCESS_ONCE(node->next);
-
- if (likely(!next)) {
- /*
- * cmpxchg() provides a memory barrier.
- * Release the lock by setting it to NULL
- */
- if (likely(cmpxchg(lock, node, NULL) == node))
- return;
- /* Wait until the next pointer is set */
- while (!(next = ACCESS_ONCE(node->next)))
- arch_mutex_cpu_relax();
- } else {
- /*
- * Make sure all operations within the critical section
- * happen before the lock is released.
- */
- smp_wmb();
- }
- ACCESS_ONCE(next->locked) = 1;
-}
+extern
+void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node);
#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 3c55626..2dfd207 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -7,15 +7,90 @@
* It avoids expensive cache bouncings that common test-and-set spin-lock
* implementations incur.
*/
+/*
+ * asm/processor.h may define arch_mutex_cpu_relax().
+ * If it is not defined, cpu_relax() will be used.
+ */
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/processor.h>
+#include <linux/compiler.h>
#include <linux/mcs_spinlock.h>
#include <linux/export.h>
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
/*
- * We don't inline mcs_spin_lock() so that perf can correctly account for the
- * time spent in this lock function.
+ * Fall back to using a full memory barrier if these macros are not
+ * defined in an architecture specific header file.
+ */
+#ifndef smp_mb__before_critical_section
+#define smp_mb__before_critical_section() smp_mb()
+#endif
+
+#ifndef smp_mb__after_critical_section
+#define smp_mb__after_critical_section() smp_mb()
+#endif
+
+
+/*
+ * In order to acquire the lock, the caller should declare a local node and
+ * pass a reference of the node to this function in addition to the lock.
+ * If the lock has already been acquired, then this will proceed to spin
+ * on this node->locked until the previous lock holder sets the node->locked
+ * in mcs_spin_unlock().
*/
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
- _raw_mcs_spin_lock(lock, node);
+ struct mcs_spinlock *prev;
+
+ /* Init node */
+ node->locked = 0;
+ node->next = NULL;
+
+ /* xchg() provides a memory barrier */
+ prev = xchg(lock, node);
+ if (likely(prev == NULL)) {
+ /* Lock acquired */
+ return;
+ }
+ ACCESS_ONCE(prev->next) = node;
+ /* Wait until the lock holder passes the lock down */
+ while (!ACCESS_ONCE(node->locked))
+ arch_mutex_cpu_relax();
+
+ /* Make sure subsequent operations happen after the lock is acquired */
+ smp_mb__before_critical_section();
}
EXPORT_SYMBOL_GPL(mcs_spin_lock);
+
+/*
+ * Releases the lock. The caller should pass in the corresponding node that
+ * was used to acquire the lock.
+ */
+void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+ struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+
+ if (likely(!next)) {
+ /*
+ * cmpxchg() provides a memory barrier.
+ * Release the lock by setting it to NULL
+ */
+ if (likely(cmpxchg(lock, node, NULL) == node))
+ return;
+ /* Wait until the next pointer is set */
+ while (!(next = ACCESS_ONCE(node->next)))
+ arch_mutex_cpu_relax();
+ } else {
+ /*
+ * Make sure all operations within the critical section
+ * happen before the lock is released.
+ */
+ smp_mb__after_critical_section();
+ }
+ ACCESS_ONCE(next->locked) = 1;
+}
+EXPORT_SYMBOL_GPL(mcs_spin_unlock);
--
1.7.4.4
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2013-11-07 1:27 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <cover.1383783691.git.tim.c.chen@linux.intel.com>
2013-11-07 1:26 ` [PATCH v4 0/4] MCS Lock: MCS lock code cleanup and optimizations Tim Chen
2013-11-07 1:26 ` [PATCH v4 1/5] MCS Lock: Restructure the MCS lock defines and locking code into its own file Tim Chen
2013-11-07 1:26 ` [PATCH v4 2/5] MCS Lock: optimizations and extra comments Tim Chen
2013-11-07 1:26 ` [PATCH v4 3/5] MCS Lock: Barrier corrections Tim Chen
2013-11-07 1:26 ` [PATCH v4 4/5] MCS Lock: Make mcs_spinlock.h includable in other files Tim Chen
2013-11-07 1:27 ` Tim Chen [this message]
2013-11-07 7:40 ` [PATCH v4 5/5] MCS Lock: Allow architecture specific memory barrier in lock/unlock Ingo Molnar
2013-11-07 16:55 ` Tim Chen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1383787620.11046.368.camel@schen9-DESK \
--to=tim.c.chen@linux.intel.com \
--cc=a.p.zijlstra@chello.nl \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=alex.shi@linaro.org \
--cc=andi@firstfloor.org \
--cc=arnd@arndb.de \
--cc=aswin@hp.com \
--cc=dave.hansen@intel.com \
--cc=davidlohr.bueso@hp.com \
--cc=figo1802@gmail.com \
--cc=hpa@zytor.com \
--cc=linux-arch@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux@horizon.com \
--cc=matthew.r.wilcox@intel.com \
--cc=mingo@elte.hu \
--cc=paulmck@linux.vnet.ibm.com \
--cc=peter@hurleysoftware.com \
--cc=raghavendra.kt@linux.vnet.ibm.com \
--cc=riel@redhat.com \
--cc=scott.norton@hp.com \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
--cc=waiman.long@hp.com \
--cc=walken@google.com \
--cc=will.deacon@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox