linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: "Aiqun Yu (Maria)" <quic_aiquny@quicinc.com>
To: Matthew Wilcox <willy@infradead.org>,
	"Eric W. Biederman" <ebiederm@xmission.com>,
	Hillf Danton <hdanton@sina.com>
Cc: <kernel@quicinc.com>, <quic_pkondeti@quicinc.com>,
	<keescook@chromium.org>, <viro@zeniv.linux.org.uk>,
	<brauner@kernel.org>, <oleg@redhat.com>, <dhowells@redhat.com>,
	<jarkko@kernel.org>, <paul@paul-moore.com>, <jmorris@namei.org>,
	<serge@hallyn.com>, <linux-mm@kvack.org>,
	<linux-fsdevel@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<keyrings@vger.kernel.org>,
	<linux-security-module@vger.kernel.org>,
	<linux-arm-msm@vger.kernel.org>
Subject: Re: [PATCH] kernel: Introduce a write lock/unlock wrapper for tasklist_lock
Date: Tue, 2 Jan 2024 10:19:47 +0800	[thread overview]
Message-ID: <cd0f6613-9aa9-4698-bebe-0f61286d7552@quicinc.com> (raw)
In-Reply-To: <ZY30k7OCtxrdR9oP@casper.infradead.org>



On 12/29/2023 6:20 AM, Matthew Wilcox wrote:
> On Wed, Dec 13, 2023 at 12:27:05PM -0600, Eric W. Biederman wrote:
>> Matthew Wilcox <willy@infradead.org> writes:
>>> I think the right way to fix this is to pass a boolean flag to
>>> queued_write_lock_slowpath() to let it know whether it can re-enable
>>> interrupts while checking whether _QW_WAITING is set.
>>
>> Yes.  It seems to make sense to distinguish between write_lock_irq and
>> write_lock_irqsave and fix this for all of write_lock_irq.
> 
> I wasn't planning on doing anything here, but Hillf kind of pushed me into
> it.  I think it needs to be something like this.  Compile tested only.
> If it ends up getting used,
Happy new year!
Thx Matthew for chiming in on this. I think more discussion will lead to 
a better design.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> 
> diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
> index 75b8f4601b28..1152e080c719 100644
> --- a/include/asm-generic/qrwlock.h
> +++ b/include/asm-generic/qrwlock.h
> @@ -33,8 +33,8 @@
>   /*
>    * External function declarations
>    */
> -extern void queued_read_lock_slowpath(struct qrwlock *lock);
> -extern void queued_write_lock_slowpath(struct qrwlock *lock);
> +void queued_read_lock_slowpath(struct qrwlock *lock);
> +void queued_write_lock_slowpath(struct qrwlock *lock, bool irq);
>   
>   /**
>    * queued_read_trylock - try to acquire read lock of a queued rwlock
> @@ -98,7 +98,21 @@ static inline void queued_write_lock(struct qrwlock *lock)
>   	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
>   		return;
>   
> -	queued_write_lock_slowpath(lock);
> +	queued_write_lock_slowpath(lock, false);
> +}
> +
> +/**
> + * queued_write_lock_irq - acquire write lock of a queued rwlock
> + * @lock : Pointer to queued rwlock structure
> + */
> +static inline void queued_write_lock_irq(struct qrwlock *lock)
> +{
> +	int cnts = 0;
> +	/* Optimize for the unfair lock case where the fair flag is 0. */
> +	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
> +		return;
> +
> +	queued_write_lock_slowpath(lock, true);
>   }
>   
>   /**
> @@ -138,6 +152,7 @@ static inline int queued_rwlock_is_contended(struct qrwlock *lock)
>    */
>   #define arch_read_lock(l)		queued_read_lock(l)
>   #define arch_write_lock(l)		queued_write_lock(l)
> +#define arch_write_lock_irq(l)		queued_write_lock_irq(l)
>   #define arch_read_trylock(l)		queued_read_trylock(l)
>   #define arch_write_trylock(l)		queued_write_trylock(l)
>   #define arch_read_unlock(l)		queued_read_unlock(l)
> diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
> index c0ef596f340b..897010b6ba0a 100644
> --- a/include/linux/rwlock.h
> +++ b/include/linux/rwlock.h
> @@ -33,6 +33,7 @@ do {								\
>    extern int do_raw_read_trylock(rwlock_t *lock);
>    extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
>    extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
> + extern void do_raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
>    extern int do_raw_write_trylock(rwlock_t *lock);
>    extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
>   #else
> @@ -40,6 +41,7 @@ do {								\
>   # define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
>   # define do_raw_read_unlock(rwlock)	do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
>   # define do_raw_write_lock(rwlock)	do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
> +# define do_raw_write_lock_irq(rwlock)	do {__acquire(lock); arch_write_lock_irq(&(rwlock)->raw_lock); } while (0)
>   # define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
>   # define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
>   #endif
> diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
> index dceb0a59b692..6257976dfb72 100644
> --- a/include/linux/rwlock_api_smp.h
> +++ b/include/linux/rwlock_api_smp.h
> @@ -193,7 +193,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
>   	local_irq_disable();
>   	preempt_disable();
>   	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
> -	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
> +	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock_irq);
>   }
>   
>   static inline void __raw_write_lock_bh(rwlock_t *lock)
> diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
> index d2ef312a8611..6c644a71b01d 100644
> --- a/kernel/locking/qrwlock.c
> +++ b/kernel/locking/qrwlock.c
> @@ -61,9 +61,10 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
>   
>   /**
>    * queued_write_lock_slowpath - acquire write lock of a queued rwlock
> - * @lock : Pointer to queued rwlock structure
> + * @lock: Pointer to queued rwlock structure
> + * @irq: True if we can enable interrupts while spinning
>    */
> -void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
> +void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock, bool irq)
>   {
>   	int cnts;
>   
> @@ -82,7 +83,11 @@ void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
>   
Also a new state showed up after the current design:
1. locked flag with _QW_WAITING, while irq enabled.
2. And this state will be only in interrupt context.
3. lock->wait_lock is hold by the write waiter.
So per my understanding, different behavior is also needed in 
queued_write_lock_slowpath:
   when (unlikely(in_interrupt())), take the lock directly.
A corresponding change is then needed in the release path. This is to 
address Hillf's concern about the possibility of deadlock.

Adding Hillf here to merge the threads. I am going to prepare a tested 
patch v2 accordingly.
Feel free to share your thoughts on that beforehand.
>   	/* When no more readers or writers, set the locked flag */
>   	do {
> +		if (irq)
> +			local_irq_enable();
I think write_lock_irqsave also needs to be taken into account. So 
local_irq_save(flags) should be used here instead.
>   		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
> +		if (irq)
> +			local_irq_disable();
ditto.
>   	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
>   unlock:
>   	arch_spin_unlock(&lock->wait_lock);
> diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
> index 87b03d2e41db..bf94551d7435 100644
> --- a/kernel/locking/spinlock_debug.c
> +++ b/kernel/locking/spinlock_debug.c
> @@ -212,6 +212,13 @@ void do_raw_write_lock(rwlock_t *lock)
>   	debug_write_lock_after(lock);
>   }
>   
> +void do_raw_write_lock_irq(rwlock_t *lock)
> +{
> +	debug_write_lock_before(lock);
> +	arch_write_lock_irq(&lock->raw_lock);
> +	debug_write_lock_after(lock);
> +}
> +
>   int do_raw_write_trylock(rwlock_t *lock)
>   {
>   	int ret = arch_write_trylock(&lock->raw_lock);

-- 
Thx and BRs,
Aiqun(Maria) Yu


  parent reply	other threads:[~2024-01-02  2:20 UTC|newest]

Thread overview: 15+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-12-13 10:17 Maria Yu
2023-12-13 16:22 ` Matthew Wilcox
2023-12-13 18:27   ` Eric W. Biederman
2023-12-15  5:52     ` Aiqun Yu (Maria)
2023-12-28 22:20     ` Matthew Wilcox
2023-12-29 11:35       ` kernel test robot
2024-01-02  2:19       ` Aiqun Yu (Maria) [this message]
2024-01-02  9:14         ` Matthew Wilcox
2024-01-03  2:58           ` Aiqun Yu (Maria)
2024-01-03 18:18             ` Matthew Wilcox
2024-01-04  0:46               ` Aiqun Yu (Maria)
2024-01-03  6:03       ` kernel test robot
2023-12-25  8:19 Maria Yu
2023-12-25  8:26 ` Aiqun Yu (Maria)
2024-01-03 14:04 ` Jarkko Sakkinen

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=cd0f6613-9aa9-4698-bebe-0f61286d7552@quicinc.com \
    --to=quic_aiquny@quicinc.com \
    --cc=brauner@kernel.org \
    --cc=dhowells@redhat.com \
    --cc=ebiederm@xmission.com \
    --cc=hdanton@sina.com \
    --cc=jarkko@kernel.org \
    --cc=jmorris@namei.org \
    --cc=keescook@chromium.org \
    --cc=kernel@quicinc.com \
    --cc=keyrings@vger.kernel.org \
    --cc=linux-arm-msm@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-security-module@vger.kernel.org \
    --cc=oleg@redhat.com \
    --cc=paul@paul-moore.com \
    --cc=quic_pkondeti@quicinc.com \
    --cc=serge@hallyn.com \
    --cc=viro@zeniv.linux.org.uk \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox