From: Marco Elver <elver@google.com>
To: elver@google.com, Peter Zijlstra <peterz@infradead.org>,
	 Boqun Feng <boqun.feng@gmail.com>,
	Ingo Molnar <mingo@kernel.org>, Will Deacon <will@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>,
	Luc Van Oostenryck <luc.vanoostenryck@gmail.com>,
	 "Paul E. McKenney" <paulmck@kernel.org>,
	Alexander Potapenko <glider@google.com>,
	Arnd Bergmann <arnd@arndb.de>,
	 Bart Van Assche <bvanassche@acm.org>,
	Bill Wendling <morbo@google.com>, Christoph Hellwig <hch@lst.de>,
	 Dmitry Vyukov <dvyukov@google.com>,
	Eric Dumazet <edumazet@google.com>,
	 Frederic Weisbecker <frederic@kernel.org>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	 Herbert Xu <herbert@gondor.apana.org.au>,
	Ian Rogers <irogers@google.com>,  Jann Horn <jannh@google.com>,
	Joel Fernandes <joelagnelf@nvidia.com>,
	 Jonathan Corbet <corbet@lwn.net>,
	Josh Triplett <josh@joshtriplett.org>,
	 Justin Stitt <justinstitt@google.com>,
	Kees Cook <kees@kernel.org>,
	 Kentaro Takeda <takedakn@nttdata.co.jp>,
	Lukas Bulwahn <lukas.bulwahn@gmail.com>,
	 Mark Rutland <mark.rutland@arm.com>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	 Miguel Ojeda <ojeda@kernel.org>,
	Nathan Chancellor <nathan@kernel.org>,
	 Neeraj Upadhyay <neeraj.upadhyay@kernel.org>,
	 Nick Desaulniers <nick.desaulniers+lkml@gmail.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	 Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>,
	Thomas Gleixner <tglx@linutronix.de>,
	 Thomas Graf <tgraf@suug.ch>, Uladzislau Rezki <urezki@gmail.com>,
	Waiman Long <longman@redhat.com>,
	 kasan-dev@googlegroups.com, linux-crypto@vger.kernel.org,
	 linux-doc@vger.kernel.org, linux-kbuild@vger.kernel.org,
	 linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	 linux-security-module@vger.kernel.org,
	linux-sparse@vger.kernel.org,  llvm@lists.linux.dev,
	rcu@vger.kernel.org
Subject: [PATCH v3 11/35] locking/seqlock: Support Clang's capability analysis
Date: Thu, 18 Sep 2025 15:59:22 +0200
Message-ID: <20250918140451.1289454-12-elver@google.com>
In-Reply-To: <20250918140451.1289454-1-elver@google.com>

Add support for Clang's capability analysis for seqlock_t.
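
With this in place, data guarded by a seqlock_t can be annotated with
__guarded_by(), and Clang's capability analysis flags accesses outside a
read-side or write-side critical section. A minimal sketch of the intended
usage (struct and function names below are illustrative only; the real
coverage is added to lib/test_capability-analysis.c):

  #include <linux/seqlock.h>

  struct stats {
  	seqlock_t lock;
  	int value __guarded_by(&lock);
  };

  static void stats_init(struct stats *s)
  {
  	/* seqlock_init() also marks the capability as assumed (__assume_cap()). */
  	seqlock_init(&s->lock);
  	s->value = 0;
  }

  static int stats_read(struct stats *s)
  {
  	unsigned int seq;
  	int v;

  	do {
  		seq = read_seqbegin(&s->lock);	/* acquires 'lock' (shared) */
  		v = s->value;			/* ok: inside read section */
  	} while (read_seqretry(&s->lock, seq)); /* releases 'lock' (shared) */

  	return v;
  }

  static void stats_inc(struct stats *s)
  {
  	write_seqlock(&s->lock);	/* acquires 'lock' (exclusive) */
  	s->value++;			/* ok: write side holds the lock */
  	write_sequnlock(&s->lock);
  	/* Incrementing s->value here would now be flagged by the analysis. */
  }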

Signed-off-by: Marco Elver <elver@google.com>
---
v3:
* __assert -> __assume rename
---
 .../dev-tools/capability-analysis.rst         |  2 +-
 include/linux/seqlock.h                       | 24 +++++++++++
 include/linux/seqlock_types.h                 |  5 ++-
 lib/test_capability-analysis.c                | 43 +++++++++++++++++++
 4 files changed, 71 insertions(+), 3 deletions(-)

diff --git a/Documentation/dev-tools/capability-analysis.rst b/Documentation/dev-tools/capability-analysis.rst
index 89f9c991f7cf..4789de7b019a 100644
--- a/Documentation/dev-tools/capability-analysis.rst
+++ b/Documentation/dev-tools/capability-analysis.rst
@@ -81,7 +81,7 @@ Supported Kernel Primitives
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Currently the following synchronization primitives are supported:
-`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`.
+`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`.
 
 For capabilities with an initialization function (e.g., `spin_lock_init()`),
 calling this function on the capability instance before initializing any
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5ce48eab7a2a..2c7a02b727de 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -816,6 +816,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
 	do {								\
 		spin_lock_init(&(sl)->lock);				\
 		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
+		__assume_cap(sl);					\
 	} while (0)
 
 /**
@@ -832,6 +833,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
  * Return: count, to be passed to read_seqretry()
  */
 static inline unsigned read_seqbegin(const seqlock_t *sl)
+	__acquires_shared(sl) __no_capability_analysis
 {
 	return read_seqcount_begin(&sl->seqcount);
 }
@@ -848,6 +850,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
  * Return: true if a read section retry is required, else false
  */
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+	__releases_shared(sl) __no_capability_analysis
 {
 	return read_seqcount_retry(&sl->seqcount, start);
 }
@@ -872,6 +875,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
  * _irqsave or _bh variants of this function instead.
  */
 static inline void write_seqlock(seqlock_t *sl)
+	__acquires(sl) __no_capability_analysis
 {
 	spin_lock(&sl->lock);
 	do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -885,6 +889,7 @@ static inline void write_seqlock(seqlock_t *sl)
  * critical section of given seqlock_t.
  */
 static inline void write_sequnlock(seqlock_t *sl)
+	__releases(sl) __no_capability_analysis
 {
 	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock(&sl->lock);
@@ -898,6 +903,7 @@ static inline void write_sequnlock(seqlock_t *sl)
  * other write side sections, can be invoked from softirq contexts.
  */
 static inline void write_seqlock_bh(seqlock_t *sl)
+	__acquires(sl) __no_capability_analysis
 {
 	spin_lock_bh(&sl->lock);
 	do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -912,6 +918,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
  * write_seqlock_bh().
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
+	__releases(sl) __no_capability_analysis
 {
 	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_bh(&sl->lock);
@@ -925,6 +932,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
  * other write sections, can be invoked from hardirq contexts.
  */
 static inline void write_seqlock_irq(seqlock_t *sl)
+	__acquires(sl) __no_capability_analysis
 {
 	spin_lock_irq(&sl->lock);
 	do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -938,12 +946,14 @@ static inline void write_seqlock_irq(seqlock_t *sl)
  * seqlock_t write side section opened with write_seqlock_irq().
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
+	__releases(sl) __no_capability_analysis
 {
 	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
 static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+	__acquires(sl) __no_capability_analysis
 {
 	unsigned long flags;
 
@@ -976,6 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
  */
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+	__releases(sl) __no_capability_analysis
 {
 	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
@@ -998,6 +1009,7 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
  * The opened read section must be closed with read_sequnlock_excl().
  */
 static inline void read_seqlock_excl(seqlock_t *sl)
+	__acquires_shared(sl) __no_capability_analysis
 {
 	spin_lock(&sl->lock);
 }
@@ -1007,6 +1019,7 @@ static inline void read_seqlock_excl(seqlock_t *sl)
  * @sl: Pointer to seqlock_t
  */
 static inline void read_sequnlock_excl(seqlock_t *sl)
+	__releases_shared(sl) __no_capability_analysis
 {
 	spin_unlock(&sl->lock);
 }
@@ -1021,6 +1034,7 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
  * from softirq contexts.
  */
 static inline void read_seqlock_excl_bh(seqlock_t *sl)
+	__acquires_shared(sl) __no_capability_analysis
 {
 	spin_lock_bh(&sl->lock);
 }
@@ -1031,6 +1045,7 @@ static inline void read_seqlock_excl_bh(seqlock_t *sl)
  * @sl: Pointer to seqlock_t
  */
 static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+	__releases_shared(sl) __no_capability_analysis
 {
 	spin_unlock_bh(&sl->lock);
 }
@@ -1045,6 +1060,7 @@ static inline void read_sequnlock_excl_bh(seqlock_t *sl)
  * hardirq context.
  */
 static inline void read_seqlock_excl_irq(seqlock_t *sl)
+	__acquires_shared(sl) __no_capability_analysis
 {
 	spin_lock_irq(&sl->lock);
 }
@@ -1055,11 +1071,13 @@ static inline void read_seqlock_excl_irq(seqlock_t *sl)
  * @sl: Pointer to seqlock_t
  */
 static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+	__releases_shared(sl) __no_capability_analysis
 {
 	spin_unlock_irq(&sl->lock);
 }
 
 static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+	__acquires_shared(sl) __no_capability_analysis
 {
 	unsigned long flags;
 
@@ -1089,6 +1107,7 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
  */
 static inline void
 read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+	__releases_shared(sl) __no_capability_analysis
 {
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
@@ -1125,6 +1144,7 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
  * parameter of the next read_seqbegin_or_lock() iteration.
  */
 static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+	__acquires_shared(lock) __no_capability_analysis
 {
 	if (!(*seq & 1))	/* Even */
 		*seq = read_seqbegin(lock);
@@ -1140,6 +1160,7 @@ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
  * Return: true if a read section retry is required, false otherwise
  */
 static inline int need_seqretry(seqlock_t *lock, int seq)
+	__releases_shared(lock) __no_capability_analysis
 {
 	return !(seq & 1) && read_seqretry(lock, seq);
 }
@@ -1153,6 +1174,7 @@ static inline int need_seqretry(seqlock_t *lock, int seq)
  * with read_seqbegin_or_lock() and validated by need_seqretry().
  */
 static inline void done_seqretry(seqlock_t *lock, int seq)
+	__no_capability_analysis
 {
 	if (seq & 1)
 		read_sequnlock_excl(lock);
@@ -1180,6 +1202,7 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
  */
 static inline unsigned long
 read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+	__acquires_shared(lock) __no_capability_analysis
 {
 	unsigned long flags = 0;
 
@@ -1205,6 +1228,7 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
  */
 static inline void
 done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+	__no_capability_analysis
 {
 	if (seq & 1)
 		read_sequnlock_excl_irqrestore(lock, flags);
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
index dfdf43e3fa3d..9775d6f1a234 100644
--- a/include/linux/seqlock_types.h
+++ b/include/linux/seqlock_types.h
@@ -81,13 +81,14 @@ SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
  *    - Comments on top of seqcount_t
  *    - Documentation/locking/seqlock.rst
  */
-typedef struct {
+struct_with_capability(seqlock) {
 	/*
 	 * Make sure that readers don't starve writers on PREEMPT_RT: use
 	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
 	 */
 	seqcount_spinlock_t seqcount;
 	spinlock_t lock;
-} seqlock_t;
+};
+typedef struct seqlock seqlock_t;
 
 #endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/lib/test_capability-analysis.c b/lib/test_capability-analysis.c
index 286723b47328..74d287740bb8 100644
--- a/lib/test_capability-analysis.c
+++ b/lib/test_capability-analysis.c
@@ -6,6 +6,7 @@
 
 #include <linux/build_bug.h>
 #include <linux/mutex.h>
+#include <linux/seqlock.h>
 #include <linux/spinlock.h>
 
 /*
@@ -208,3 +209,45 @@ static void __used test_mutex_cond_guard(struct test_mutex_data *d)
 		d->counter++;
 	}
 }
+
+struct test_seqlock_data {
+	seqlock_t sl;
+	int counter __guarded_by(&sl);
+};
+
+static void __used test_seqlock_init(struct test_seqlock_data *d)
+{
+	seqlock_init(&d->sl);
+	d->counter = 0;
+}
+
+static void __used test_seqlock_reader(struct test_seqlock_data *d)
+{
+	unsigned int seq;
+
+	do {
+		seq = read_seqbegin(&d->sl);
+		(void)d->counter;
+	} while (read_seqretry(&d->sl, seq));
+}
+
+static void __used test_seqlock_writer(struct test_seqlock_data *d)
+{
+	unsigned long flags;
+
+	write_seqlock(&d->sl);
+	d->counter++;
+	write_sequnlock(&d->sl);
+
+	write_seqlock_irq(&d->sl);
+	d->counter++;
+	write_sequnlock_irq(&d->sl);
+
+	write_seqlock_bh(&d->sl);
+	d->counter++;
+	write_sequnlock_bh(&d->sl);
+
+	write_seqlock_irqsave(&d->sl, flags);
+	d->counter++;
+	write_sequnlock_irqrestore(&d->sl, flags);
+}
-- 
2.51.0.384.g4c02a37b29-goog


