From: Uladzislau Rezki <urezki@gmail.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: "Paul E. McKenney" <paulmck@kernel.org>,
Joel Fernandes <joel@joelfernandes.org>,
Josh Triplett <josh@joshtriplett.org>,
Boqun Feng <boqun.feng@gmail.com>,
Christoph Lameter <cl@linux.com>,
David Rientjes <rientjes@google.com>,
Steven Rostedt <rostedt@goodmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
Lai Jiangshan <jiangshanlai@gmail.com>,
Zqiang <qiang.zhang1211@gmail.com>,
Julia Lawall <Julia.Lawall@inria.fr>,
Jakub Kicinski <kuba@kernel.org>,
"Jason A. Donenfeld" <Jason@zx2c4.com>,
"Uladzislau Rezki (Sony)" <urezki@gmail.com>,
Andrew Morton <akpm@linux-foundation.org>,
Roman Gushchin <roman.gushchin@linux.dev>,
Hyeonggon Yoo <42.hyeyoo@gmail.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
rcu@vger.kernel.org, Alexander Potapenko <glider@google.com>,
Marco Elver <elver@google.com>,
Dmitry Vyukov <dvyukov@google.com>,
kasan-dev@googlegroups.com, Jann Horn <jannh@google.com>,
Mateusz Guzik <mjguzik@gmail.com>
Subject: Re: [PATCH v2 5/7] rcu/kvfree: Add kvfree_rcu_barrier() API
Date: Fri, 9 Aug 2024 18:26:36 +0200
Message-ID: <ZrZDPLN9CRvRrbMy@pc636>
In-Reply-To: <20240807-b4-slab-kfree_rcu-destroy-v2-5-ea79102f428c@suse.cz>
Hello, Vlastimil!
> From: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
>
> Add a kvfree_rcu_barrier() function. It waits until all
> in-flight pointers are freed over the RCU machinery. It does
> not wait for any GP completion and is within its rights to
> return immediately if there are no outstanding pointers.
>
> This function is useful when there is a need to guarantee
> that memory is fully freed before destroying memory caches,
> for example when unloading a kernel module.
>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
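
A small illustration for anyone reading along: below is a minimal sketch of
the intended usage. The cache, structure and function names are made up for
this example only and are not part of the patch; the point is simply that
kvfree_rcu_barrier() is called before kmem_cache_destroy() on module unload,
after objects were freed with the double-argument kvfree_rcu():

static struct kmem_cache *my_cache;     /* hypothetical cache */

struct my_obj {
        struct rcu_head rcu;
        /* payload ... */
};

static void my_obj_free(struct my_obj *obj)
{
        /* Queue the object; it is freed after a grace period. */
        kvfree_rcu(obj, rcu);
}

static void __exit my_module_exit(void)
{
        /*
         * Wait until every pointer queued via kvfree_rcu() has been
         * handed back to the allocator; returns right away if nothing
         * is outstanding.
         */
        kvfree_rcu_barrier();

        /* Only now is it safe to destroy the backing cache. */
        kmem_cache_destroy(my_cache);
}
module_exit(my_module_exit);
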
> ---
> include/linux/rcutiny.h | 5 +++
> include/linux/rcutree.h | 1 +
> kernel/rcu/tree.c | 103 ++++++++++++++++++++++++++++++++++++++++++++----
> 3 files changed, 101 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
> index d9ac7b136aea..522123050ff8 100644
> --- a/include/linux/rcutiny.h
> +++ b/include/linux/rcutiny.h
> @@ -111,6 +111,11 @@ static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
> kvfree(ptr);
> }
>
> +static inline void kvfree_rcu_barrier(void)
> +{
> + rcu_barrier();
> +}
> +
> #ifdef CONFIG_KASAN_GENERIC
> void kvfree_call_rcu(struct rcu_head *head, void *ptr);
> #else
> diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
> index 254244202ea9..58e7db80f3a8 100644
> --- a/include/linux/rcutree.h
> +++ b/include/linux/rcutree.h
> @@ -35,6 +35,7 @@ static inline void rcu_virt_note_context_switch(void)
>
> void synchronize_rcu_expedited(void);
> void kvfree_call_rcu(struct rcu_head *head, void *ptr);
> +void kvfree_rcu_barrier(void);
>
> void rcu_barrier(void);
> void rcu_momentary_dyntick_idle(void);
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index e641cc681901..ebcfed9b570e 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3584,18 +3584,15 @@ kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
> }
>
> /*
> - * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
> + * Return: %true if a work is queued, %false otherwise.
> */
> -static void kfree_rcu_monitor(struct work_struct *work)
> +static bool
> +kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
> {
> - struct kfree_rcu_cpu *krcp = container_of(work,
> - struct kfree_rcu_cpu, monitor_work.work);
> unsigned long flags;
> + bool queued = false;
> int i, j;
>
> - // Drain ready for reclaim.
> - kvfree_rcu_drain_ready(krcp);
> -
> raw_spin_lock_irqsave(&krcp->lock, flags);
>
> // Attempt to start a new batch.
> @@ -3634,11 +3631,27 @@ static void kfree_rcu_monitor(struct work_struct *work)
> // be that the work is in the pending state when
> // channels have been detached following by each
> // other.
> - queue_rcu_work(system_wq, &krwp->rcu_work);
> + queued = queue_rcu_work(system_wq, &krwp->rcu_work);
> }
> }
>
> raw_spin_unlock_irqrestore(&krcp->lock, flags);
> + return queued;
> +}
> +
> +/*
> + * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
> + */
> +static void kfree_rcu_monitor(struct work_struct *work)
> +{
> + struct kfree_rcu_cpu *krcp = container_of(work,
> + struct kfree_rcu_cpu, monitor_work.work);
> +
> + // Drain ready for reclaim.
> + kvfree_rcu_drain_ready(krcp);
> +
> + // Queue a batch for the rest.
> + kvfree_rcu_queue_batch(krcp);
>
> // If there is nothing to detach, it means that our job is
> // successfully done here. In case of having at least one
> @@ -3859,6 +3872,80 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
> }
> EXPORT_SYMBOL_GPL(kvfree_call_rcu);
>
> +/**
> + * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
> + *
> + * Note that a single-argument kvfree_rcu() call has a slow path that
> + * triggers synchronize_rcu() followed by freeing the pointer, and this
> + * is done before the function returns. Therefore, for any single-argument
> + * call that will result in a kfree() to a cache that is to be destroyed
> + * during module exit, it is the developer's responsibility to ensure that
> + * all such calls have returned before the call to kmem_cache_destroy().
> + */
> +void kvfree_rcu_barrier(void)
> +{
> + struct kfree_rcu_cpu_work *krwp;
> + struct kfree_rcu_cpu *krcp;
> + bool queued;
> + int i, cpu;
> +
> + /*
> + * First we detach objects and queue them over an RCU batch
> + * for all CPUs. Finally, the queued work is flushed for each CPU.
> + *
> + * Please note: if there are outstanding batches for a particular
> + * CPU, those have to be finished first, followed by queuing a new one.
> + */
> + for_each_possible_cpu(cpu) {
> + krcp = per_cpu_ptr(&krc, cpu);
> +
> + /*
> + * Check if this CPU has any objects which have been queued for a
> + * new GP completion. If not (meaning there is nothing to detach),
> + * we are done with it. If any batch is pending/running for this
> + * "krcp", the per-CPU flush_rcu_work() below waits for its
> + * completion (see the last step).
> + */
> + if (!need_offload_krc(krcp))
> + continue;
> +
> + while (1) {
> + /*
> + * If we are not able to queue new RCU work, it means that either:
> + * - batches for this CPU are still in flight, which should
> + * be flushed first, after which we repeat; or
> + * - there are no objects to detach, because of concurrency.
> + */
> + queued = kvfree_rcu_queue_batch(krcp);
> +
> + /*
> + * Bail out if there is no need to offload this "krcp"
> + * anymore. As noted earlier, it can run concurrently.
> + */
> + if (queued || !need_offload_krc(krcp))
> + break;
> +
> + /* There are ongoing batches. */
> + for (i = 0; i < KFREE_N_BATCHES; i++) {
> + krwp = &(krcp->krw_arr[i]);
> + flush_rcu_work(&krwp->rcu_work);
> + }
> + }
> + }
> +
> + /*
> + * Now we guarantee that all objects are flushed.
> + */
> + for_each_possible_cpu(cpu) {
> + krcp = per_cpu_ptr(&krc, cpu);
> +
> + for (i = 0; i < KFREE_N_BATCHES; i++) {
> + krwp = &(krcp->krw_arr[i]);
> + flush_rcu_work(&krwp->rcu_work);
> + }
> + }
> +}
> +EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
> +
> static unsigned long
> kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
> {
>
> --
> 2.46.0
>
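One extra note on the single-argument caveat in the kernel-doc above, with
another purely illustrative sketch (the work item and all names are
hypothetical, not from the patch). Since the single-argument slow path frees
the pointer before kvfree_rcu() itself returns, what a module has to
guarantee is simply that all such calls have returned before
kmem_cache_destroy(), for example by flushing whatever context issues them,
in addition to the barrier:

static struct kmem_cache *my_cache;             /* hypothetical */
static struct work_struct my_free_work;         /* issues single-argument kvfree_rcu() */

static void my_teardown(void)
{
        /*
         * Ensure every single-argument kvfree_rcu() call has returned.
         * Its slow path may sleep in synchronize_rcu() and free the
         * pointer itself before returning, which is why the caller,
         * not the barrier, has to wait for it.
         */
        flush_work(&my_free_work);

        /* Covers all double-argument (rcu_head + pointer) calls. */
        kvfree_rcu_barrier();

        kmem_cache_destroy(my_cache);
}
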
I need to send out a v2. What is the best way? Please let me know. I have
not checked where this series has already landed.
Thank you!
--
Uladzislau Rezki