From: Byungchul Park <byungchul@sk.com>
To: linux-kernel@vger.kernel.org
Cc: kernel_team@skhynix.com, torvalds@linux-foundation.org,
damien.lemoal@opensource.wdc.com, linux-ide@vger.kernel.org,
adilger.kernel@dilger.ca, linux-ext4@vger.kernel.org,
mingo@redhat.com, peterz@infradead.org, will@kernel.org,
tglx@linutronix.de, rostedt@goodmis.org, joel@joelfernandes.org,
sashal@kernel.org, daniel.vetter@ffwll.ch, duyuyang@gmail.com,
johannes.berg@intel.com, tj@kernel.org, tytso@mit.edu,
willy@infradead.org, david@fromorbit.com, amir73il@gmail.com,
gregkh@linuxfoundation.org, kernel-team@lge.com,
linux-mm@kvack.org, akpm@linux-foundation.org, mhocko@kernel.org,
minchan@kernel.org, hannes@cmpxchg.org, vdavydov.dev@gmail.com,
sj@kernel.org, jglisse@redhat.com, dennis@kernel.org,
cl@linux.com, penberg@kernel.org, rientjes@google.com,
vbabka@suse.cz, ngupta@vflare.org, linux-block@vger.kernel.org,
josef@toxicpanda.com, linux-fsdevel@vger.kernel.org,
jack@suse.cz, jlayton@kernel.org, dan.j.williams@intel.com,
hch@infradead.org, djwong@kernel.org,
dri-devel@lists.freedesktop.org, rodrigosiqueiramelo@gmail.com,
melissa.srw@gmail.com, hamohammed.sa@gmail.com,
harry.yoo@oracle.com, chris.p.wilson@intel.com,
gwan-gyeong.mun@intel.com, max.byungchul.park@gmail.com,
boqun.feng@gmail.com, longman@redhat.com,
yunseong.kim@ericsson.com, ysk@kzalloc.com, yeoreum.yun@arm.com,
netdev@vger.kernel.org, matthew.brost@intel.com,
her0gyugyu@gmail.com, corbet@lwn.net, catalin.marinas@arm.com,
bp@alien8.de, dave.hansen@linux.intel.com, x86@kernel.org,
hpa@zytor.com, luto@kernel.org, sumit.semwal@linaro.org,
gustavo@padovan.org, christian.koenig@amd.com,
andi.shyti@kernel.org, arnd@arndb.de, lorenzo.stoakes@oracle.com,
Liam.Howlett@oracle.com, rppt@kernel.org, surenb@google.com,
mcgrof@kernel.org, petr.pavlu@suse.com, da.gomez@kernel.org,
samitolvanen@google.com, paulmck@kernel.org, frederic@kernel.org,
neeraj.upadhyay@kernel.org, joelagnelf@nvidia.com,
josh@joshtriplett.org, urezki@gmail.com,
mathieu.desnoyers@efficios.com, jiangshanlai@gmail.com,
qiang.zhang@linux.dev, juri.lelli@redhat.com,
vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
bsegall@google.com, mgorman@suse.de, vschneid@redhat.com,
chuck.lever@oracle.com, neil@brown.name, okorniev@redhat.com,
Dai.Ngo@oracle.com, tom@talpey.com, trondmy@kernel.org,
anna@kernel.org, kees@kernel.org, bigeasy@linutronix.de,
clrkwllms@kernel.org, mark.rutland@arm.com,
ada.coupriediaz@arm.com, kristina.martsenko@arm.com,
wangkefeng.wang@huawei.com, broonie@kernel.org,
kevin.brodsky@arm.com, dwmw@amazon.co.uk, shakeel.butt@linux.dev,
ast@kernel.org, ziy@nvidia.com, yuzhao@google.com,
baolin.wang@linux.alibaba.com, usamaarif642@gmail.com,
joel.granados@kernel.org, richard.weiyang@gmail.com,
geert+renesas@glider.be, tim.c.chen@linux.intel.com,
linux@treblig.org, alexander.shishkin@linux.intel.com,
lillian@star-ark.net, chenhuacai@kernel.org, francesco@valla.it,
guoweikang.kernel@gmail.com, link@vivo.com, jpoimboe@kernel.org,
masahiroy@kernel.org, brauner@kernel.org,
thomas.weissschuh@linutronix.de, oleg@redhat.com,
mjguzik@gmail.com, andrii@kernel.org, wangfushuai@baidu.com,
linux-doc@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
linux-media@vger.kernel.org, linaro-mm-sig@lists.linaro.org,
linux-i2c@vger.kernel.org, linux-arch@vger.kernel.org,
linux-modules@vger.kernel.org, rcu@vger.kernel.org,
linux-nfs@vger.kernel.org, linux-rt-devel@lists.linux.dev
Subject: [PATCH v17 05/47] dept: tie to lockdep and IRQ tracing
Date: Thu, 2 Oct 2025 17:12:05 +0900
Message-ID: <20251002081247.51255-6-byungchul@sk.com>
In-Reply-To: <20251002081247.51255-1-byungchul@sk.com>
Tie DEPT into lockdep's lock annotation macros and into the
hardirq/softirq tracing hooks, so that every lockdep-annotated lock
operation and every IRQ context transition is also reported to DEPT.

Placing dept this way admittedly looks ugly, but it is unavoidable for
now. The integration should be improved gradually.
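As an illustrative sketch (not part of this patch): existing lock users
need no changes for DEPT to see their waits and events.  With lockdep
(and thus DEPT) enabled, a plain spin_lock()/spin_unlock() pair is now
reported to both checkers, because spin_acquire() expands to ldt_lock()
followed by lock_acquire_exclusive(), and spin_release() to ldt_unlock()
followed by lock_release().  The lock below is hypothetical, used only
for the example:

  #include <linux/spinlock.h>

  /* hypothetical lock, for illustration only */
  static DEFINE_SPINLOCK(example_lock);

  static void example_critical_section(void)
  {
          /* spin_acquire(): ldt_lock() + lock_acquire_exclusive() */
          spin_lock(&example_lock);

          /* ... critical section ... */

          /* spin_release(): ldt_unlock() + lock_release() */
          spin_unlock(&example_lock);
  }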
Signed-off-by: Byungchul Park <byungchul@sk.com>
---
include/linux/irqflags.h | 7 +-
include/linux/local_lock_internal.h | 1 +
include/linux/lockdep.h | 102 ++++++++++++++++++++++------
include/linux/lockdep_types.h | 3 +
include/linux/mutex.h | 1 +
include/linux/percpu-rwsem.h | 2 +-
include/linux/rtmutex.h | 1 +
include/linux/rwlock_types.h | 1 +
include/linux/rwsem.h | 1 +
include/linux/seqlock.h | 2 +-
include/linux/spinlock_types_raw.h | 3 +
include/linux/srcu.h | 2 +-
kernel/dependency/dept.c | 8 +--
kernel/locking/lockdep.c | 22 ++++++
14 files changed, 127 insertions(+), 29 deletions(-)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 57b074e0cfbb..d8b9cf093f83 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -15,6 +15,7 @@
#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
#include <linux/cleanup.h>
+#include <linux/dept.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>
@@ -55,8 +56,10 @@ extern void trace_hardirqs_off(void);
# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
# define lockdep_hardirq_enter() \
do { \
- if (__this_cpu_inc_return(hardirq_context) == 1)\
+ if (__this_cpu_inc_return(hardirq_context) == 1) { \
current->hardirq_threaded = 0; \
+ dept_hardirq_enter(); \
+ } \
} while (0)
# define lockdep_hardirq_threaded() \
do { \
@@ -131,6 +134,8 @@ do { \
# define lockdep_softirq_enter() \
do { \
current->softirq_context++; \
+ if (current->softirq_context == 1) \
+ dept_softirq_enter(); \
} while (0)
# define lockdep_softirq_exit() \
do { \
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index d80b5306a2c0..3b74da2fec50 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -27,6 +27,7 @@ typedef struct {
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
.lock_type = LD_LOCK_PERCPU, \
+ .dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}, \
.owner = NULL,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 67964dc4db95..ef03d8808c10 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -12,6 +12,7 @@
#include <linux/lockdep_types.h>
#include <linux/smp.h>
+#include <linux/dept_ldt.h>
#include <asm/percpu.h>
struct task_struct;
@@ -39,6 +40,8 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
*/
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
to->class_cache[i] = NULL;
+
+ dept_map_copy(&to->dmap, &from->dmap);
}
/*
@@ -428,7 +431,8 @@ enum xhlock_context_t {
* Note that _name must not be NULL.
*/
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), }
+ { .name = (_name), .key = (void *)(_key), \
+ .dmap = DEPT_MAP_INITIALIZER(_name, _key) }
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}
@@ -510,33 +514,89 @@ extern bool read_lock_is_recursive(void);
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
-#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
-#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define spin_release(l, i) lock_release(l, i)
-
-#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define spin_acquire(l, s, t, i) \
+do { \
+ ldt_lock(&(l)->dmap, s, t, NULL, i); \
+ lock_acquire_exclusive(l, s, t, NULL, i); \
+} while (0)
+#define spin_acquire_nest(l, s, t, n, i) \
+do { \
+ ldt_lock(&(l)->dmap, s, t, n, i); \
+ lock_acquire_exclusive(l, s, t, n, i); \
+} while (0)
+#define spin_release(l, i) \
+do { \
+ ldt_unlock(&(l)->dmap, i); \
+ lock_release(l, i); \
+} while (0)
+#define rwlock_acquire(l, s, t, i) \
+do { \
+ ldt_wlock(&(l)->dmap, s, t, NULL, i); \
+ lock_acquire_exclusive(l, s, t, NULL, i); \
+} while (0)
#define rwlock_acquire_read(l, s, t, i) \
do { \
+ ldt_rlock(&(l)->dmap, s, t, NULL, i, !read_lock_is_recursive());\
if (read_lock_is_recursive()) \
lock_acquire_shared_recursive(l, s, t, NULL, i); \
else \
lock_acquire_shared(l, s, t, NULL, i); \
} while (0)
-
-#define rwlock_release(l, i) lock_release(l, i)
-
-#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
-#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define seqcount_release(l, i) lock_release(l, i)
-
-#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
-#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define mutex_release(l, i) lock_release(l, i)
-
-#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
-#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
-#define rwsem_release(l, i) lock_release(l, i)
+#define rwlock_release(l, i) \
+do { \
+ ldt_unlock(&(l)->dmap, i); \
+ lock_release(l, i); \
+} while (0)
+#define seqcount_acquire(l, s, t, i) \
+do { \
+ ldt_wlock(&(l)->dmap, s, t, NULL, i); \
+ lock_acquire_exclusive(l, s, t, NULL, i); \
+} while (0)
+#define seqcount_acquire_read(l, s, t, i) \
+do { \
+ ldt_rlock(&(l)->dmap, s, t, NULL, i, false); \
+ lock_acquire_shared_recursive(l, s, t, NULL, i); \
+} while (0)
+#define seqcount_release(l, i) \
+do { \
+ ldt_unlock(&(l)->dmap, i); \
+ lock_release(l, i); \
+} while (0)
+#define mutex_acquire(l, s, t, i) \
+do { \
+ ldt_lock(&(l)->dmap, s, t, NULL, i); \
+ lock_acquire_exclusive(l, s, t, NULL, i); \
+} while (0)
+#define mutex_acquire_nest(l, s, t, n, i) \
+do { \
+ ldt_lock(&(l)->dmap, s, t, n, i); \
+ lock_acquire_exclusive(l, s, t, n, i); \
+} while (0)
+#define mutex_release(l, i) \
+do { \
+ ldt_unlock(&(l)->dmap, i); \
+ lock_release(l, i); \
+} while (0)
+#define rwsem_acquire(l, s, t, i) \
+do { \
+ ldt_lock(&(l)->dmap, s, t, NULL, i); \
+ lock_acquire_exclusive(l, s, t, NULL, i); \
+} while (0)
+#define rwsem_acquire_nest(l, s, t, n, i) \
+do { \
+ ldt_lock(&(l)->dmap, s, t, n, i); \
+ lock_acquire_exclusive(l, s, t, n, i); \
+} while (0)
+#define rwsem_acquire_read(l, s, t, i) \
+do { \
+ ldt_lock(&(l)->dmap, s, t, NULL, i); \
+ lock_acquire_shared(l, s, t, NULL, i); \
+} while (0)
+#define rwsem_release(l, i) \
+do { \
+ ldt_unlock(&(l)->dmap, i); \
+ lock_release(l, i); \
+} while (0)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index eae115a26488..0c3389ed26b6 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -11,6 +11,7 @@
#define __LINUX_LOCKDEP_TYPES_H
#include <linux/types.h>
+#include <linux/dept.h>
#define MAX_LOCKDEP_SUBCLASSES 8UL
@@ -77,6 +78,7 @@ struct lock_class_key {
struct hlist_node hash_entry;
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
+ struct dept_key dkey;
};
extern struct lock_class_key __lockdep_no_validate__;
@@ -195,6 +197,7 @@ struct lockdep_map {
int cpu;
unsigned long ip;
#endif
+ struct dept_map dmap;
};
struct pin_cookie { unsigned int val; };
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 847b81ca6436..f8d7f02be04d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -29,6 +29,7 @@ struct device;
, .dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
+ .dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 288f5235649a..11eece738f1f 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -22,7 +22,7 @@ struct percpu_rw_semaphore {
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname },
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname, .dmap = DEPT_MAP_INITIALIZER(lockname, NULL) },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa9f1021541e..4dc7f046b0a6 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -81,6 +81,7 @@ do { \
.dep_map = { \
.name = #mutexname, \
.wait_type_inner = LD_WAIT_SLEEP, \
+ .dmap = DEPT_MAP_INITIALIZER(mutexname, NULL),\
}
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 1948442e7750..6e58dfc84997 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -10,6 +10,7 @@
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
+ .dmap = DEPT_MAP_INITIALIZER(lockname, NULL), \
}
#else
# define RW_DEP_MAP_INIT(lockname)
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f1aaf676a874..0f349c83a7dc 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -22,6 +22,7 @@
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
+ .dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5ce48eab7a2a..5f3447449fe0 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -51,7 +51,7 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
- .dep_map = { .name = #lockname }
+ .dep_map = { .name = #lockname, .dmap = DEPT_MAP_INITIALIZER(lockname, NULL) }
/**
* seqcount_init() - runtime initializer for seqcount_t
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
index 91cb36b65a17..3dcc551ded25 100644
--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -31,11 +31,13 @@ typedef struct raw_spinlock {
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SPIN, \
+ .dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}
# define SPIN_DEP_MAP_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
+ .dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}
# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \
@@ -43,6 +45,7 @@ typedef struct raw_spinlock {
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
.lock_type = LD_LOCK_PERCPU, \
+ .dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}
#else
# define RAW_SPIN_DEP_MAP_INIT(lockname)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index f179700fecaf..f1961554ed1a 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -35,7 +35,7 @@ int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})
-#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
+#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name, .dmap = DEPT_MAP_INITIALIZER(srcu_name, NULL) },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
int init_srcu_struct(struct srcu_struct *ssp);
diff --git a/kernel/dependency/dept.c b/kernel/dependency/dept.c
index 712b7f79a095..cbb036e8cc1d 100644
--- a/kernel/dependency/dept.c
+++ b/kernel/dependency/dept.c
@@ -249,10 +249,10 @@ static bool dept_working(void)
* Even k == NULL is considered as a valid key because it would use
* &->map_key as the key in that case.
*/
-struct dept_key __dept_no_validate__;
+extern struct lock_class_key __lockdep_no_validate__;
static bool valid_key(struct dept_key *k)
{
- return &__dept_no_validate__ != k;
+ return &__lockdep_no_validate__.dkey != k;
}
/*
@@ -1946,7 +1946,7 @@ void dept_softirqs_off(void)
dept_task()->softirqs_enabled = false;
}
-void dept_hardirqs_off(void)
+void noinstr dept_hardirqs_off(void)
{
/*
* Assumes that it's called with IRQ disabled so that accessing
@@ -1968,7 +1968,7 @@ void dept_softirq_enter(void)
/*
* Ensure it's the outmost hardirq context.
*/
-void dept_hardirq_enter(void)
+void noinstr dept_hardirq_enter(void)
{
struct dept_task *dt = dept_task();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2d4c5bab5af8..dc97f2753ef8 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1224,6 +1224,8 @@ void lockdep_register_key(struct lock_class_key *key)
struct lock_class_key *k;
unsigned long flags;
+ dept_key_init(&key->dkey);
+
if (WARN_ON_ONCE(static_obj(key)))
return;
hash_head = keyhashentry(key);
@@ -4361,6 +4363,8 @@ static void __trace_hardirqs_on_caller(void)
*/
void lockdep_hardirqs_on_prepare(void)
{
+ dept_hardirqs_on();
+
if (unlikely(!debug_locks))
return;
@@ -4481,6 +4485,8 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
*/
void noinstr lockdep_hardirqs_off(unsigned long ip)
{
+ dept_hardirqs_off();
+
if (unlikely(!debug_locks))
return;
@@ -4525,6 +4531,8 @@ void lockdep_softirqs_on(unsigned long ip)
{
struct irqtrace_events *trace = &current->irqtrace;
+ dept_softirqs_on_ip(ip);
+
if (unlikely(!lockdep_enabled()))
return;
@@ -4563,6 +4571,8 @@ void lockdep_softirqs_on(unsigned long ip)
*/
void lockdep_softirqs_off(unsigned long ip)
{
+ dept_softirqs_off();
+
if (unlikely(!lockdep_enabled()))
return;
@@ -4940,6 +4950,8 @@ void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
{
int i;
+ ldt_init(&lock->dmap, &key->dkey, subclass, name);
+
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
lock->class_cache[i] = NULL;
@@ -5736,6 +5748,12 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
{
unsigned long flags;
+ /*
+ * dept_map_(re)init() might be called twice redundantly. But
+ * there's no choice as long as Dept relies on Lockdep.
+ */
+ ldt_set_class(&lock->dmap, name, &key->dkey, subclass, ip);
+
if (unlikely(!lockdep_enabled()))
return;
@@ -5753,6 +5771,8 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
+ ldt_downgrade(&lock->dmap, ip);
+
if (unlikely(!lockdep_enabled()))
return;
@@ -6588,6 +6608,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
bool found = false;
bool need_callback = false;
+ dept_key_destroy(&key->dkey);
+
might_sleep();
if (WARN_ON_ONCE(static_obj(key)))
--
2.17.1