From: Ankur Arora <ankur.a.arora@oracle.com>
To: Steven Rostedt <rostedt@goodmis.org>
Cc: Ankur Arora <ankur.a.arora@oracle.com>,
linux-kernel@vger.kernel.org, tglx@linutronix.de,
peterz@infradead.org, torvalds@linux-foundation.org,
paulmck@kernel.org, linux-mm@kvack.org, x86@kernel.org,
akpm@linux-foundation.org, luto@kernel.org, bp@alien8.de,
dave.hansen@linux.intel.com, hpa@zytor.com, mingo@redhat.com,
juri.lelli@redhat.com, vincent.guittot@linaro.org,
willy@infradead.org, mgorman@suse.de, jon.grimm@amd.com,
bharata@amd.com, raghavendra.kt@amd.com,
boris.ostrovsky@oracle.com, konrad.wilk@oracle.com,
jgross@suse.com, andrew.cooper3@citrix.com, mingo@kernel.org,
bristot@kernel.org, mathieu.desnoyers@efficios.com,
geert@linux-m68k.org, glaubitz@physik.fu-berlin.de,
anton.ivanov@cambridgegreys.com, mattst88@gmail.com,
krypton@ulrich-teichert.org, David.Laight@ACULAB.COM,
richard@nod.at, mjguzik@gmail.com,
Josh Poimboeuf <jpoimboe@kernel.org>,
Jiri Kosina <jikos@kernel.org>, Miroslav Benes <mbenes@suse.cz>,
"Petr Mladek" <pmladek@suse.com>,
Joe Lawrence <joe.lawrence@redhat.com>,
live-patching@vger.kernel.org
Subject: Re: [RFC PATCH 07/86] Revert "livepatch,sched: Add livepatch task switching to cond_resched()"
Date: Tue, 07 Nov 2023 20:55:14 -0800
Message-ID: <874jhwvo7h.fsf@oracle.com>
In-Reply-To: <20231107181609.7e9e9dcc@gandalf.local.home>

Steven Rostedt <rostedt@goodmis.org> writes:
> On Tue, 7 Nov 2023 13:56:53 -0800
> Ankur Arora <ankur.a.arora@oracle.com> wrote:
>
>> This reverts commit e3ff7c609f39671d1aaff4fb4a8594e14f3e03f8.
>>
>> Note that removing this commit reintroduces "live patches failing to
>> complete within a reasonable amount of time due to CPU-bound kthreads."
>>
>> Unfortunately, this fix depends quite critically on PREEMPT_DYNAMIC and
>> the existence of cond_resched(), so this will need an alternate fix.
>>
>
> Then it would probably be a good idea to Cc the live patching maintainers!
Indeed. Could have sworn that I had. But clearly not.
Apologies and thanks for adding them.
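
For anyone joining via the added Cc: the mechanism this patch reverts is
the cond_resched() hook that lets CPU-bound kthreads help complete a
livepatch transition. Pieced together from the hunks below (a rough
sketch of the static-key variant; the PREEMPT_DYNAMIC case instead
patches the cond_resched static call):

	/* kernel/livepatch/transition.c */
	DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);

	#define klp_cond_resched_enable() \
		static_branch_enable(&klp_sched_try_switch_key)
	#define klp_cond_resched_disable() \
		static_branch_disable(&klp_sched_try_switch_key)

	/* include/linux/livepatch_sched.h */
	static __always_inline void klp_sched_try_switch(void)
	{
		/* No-op unless a KLP transition is in progress. */
		if (static_branch_unlikely(&klp_sched_try_switch_key))
			__klp_sched_try_switch();
	}

	/* include/linux/sched.h, !PREEMPT_DYNAMIC */
	static inline int _cond_resched(void)
	{
		klp_sched_try_switch();
		return __cond_resched();
	}

With cond_resched() going away in this series, that hook point
disappears too, hence the need for an alternate fix.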
>> Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
>> ---
>> include/linux/livepatch.h | 1 -
>> include/linux/livepatch_sched.h | 29 ---------
>> include/linux/sched.h | 20 ++----
>> kernel/livepatch/core.c | 1 -
>> kernel/livepatch/transition.c | 107 +++++---------------------------
>> kernel/sched/core.c | 64 +++----------------
>> 6 files changed, 28 insertions(+), 194 deletions(-)
>> delete mode 100644 include/linux/livepatch_sched.h
>>
>> diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
>> index 9b9b38e89563..293e29960c6e 100644
>> --- a/include/linux/livepatch.h
>> +++ b/include/linux/livepatch.h
>> @@ -13,7 +13,6 @@
>> #include <linux/ftrace.h>
>> #include <linux/completion.h>
>> #include <linux/list.h>
>> -#include <linux/livepatch_sched.h>
>>
>> #if IS_ENABLED(CONFIG_LIVEPATCH)
>>
>> diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
>> deleted file mode 100644
>> index 013794fb5da0..000000000000
>> --- a/include/linux/livepatch_sched.h
>> +++ /dev/null
>> @@ -1,29 +0,0 @@
>> -/* SPDX-License-Identifier: GPL-2.0-or-later */
>> -#ifndef _LINUX_LIVEPATCH_SCHED_H_
>> -#define _LINUX_LIVEPATCH_SCHED_H_
>> -
>> -#include <linux/jump_label.h>
>> -#include <linux/static_call_types.h>
>> -
>> -#ifdef CONFIG_LIVEPATCH
>> -
>> -void __klp_sched_try_switch(void);
>> -
>> -#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
>> -
>> -DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
>> -
>> -static __always_inline void klp_sched_try_switch(void)
>> -{
>> - if (static_branch_unlikely(&klp_sched_try_switch_key))
>> - __klp_sched_try_switch();
>> -}
>> -
>> -#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
>> -
>> -#else /* !CONFIG_LIVEPATCH */
>> -static inline void klp_sched_try_switch(void) {}
>> -static inline void __klp_sched_try_switch(void) {}
>> -#endif /* CONFIG_LIVEPATCH */
>> -
>> -#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
>> diff --git a/include/linux/sched.h b/include/linux/sched.h
>> index 5bdf80136e42..c5b0ef1ecfe4 100644
>> --- a/include/linux/sched.h
>> +++ b/include/linux/sched.h
>> @@ -36,7 +36,6 @@
>> #include <linux/seqlock.h>
>> #include <linux/kcsan.h>
>> #include <linux/rv.h>
>> -#include <linux/livepatch_sched.h>
>> #include <asm/kmap_size.h>
>>
>> /* task_struct member predeclarations (sorted alphabetically): */
>> @@ -2087,9 +2086,6 @@ extern int __cond_resched(void);
>>
>> #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
>>
>> -void sched_dynamic_klp_enable(void);
>> -void sched_dynamic_klp_disable(void);
>> -
>> DECLARE_STATIC_CALL(cond_resched, __cond_resched);
>>
>> static __always_inline int _cond_resched(void)
>> @@ -2098,7 +2094,6 @@ static __always_inline int _cond_resched(void)
>> }
>>
>> #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>> -
>> extern int dynamic_cond_resched(void);
>>
>> static __always_inline int _cond_resched(void)
>> @@ -2106,25 +2101,20 @@ static __always_inline int _cond_resched(void)
>> return dynamic_cond_resched();
>> }
>>
>> -#else /* !CONFIG_PREEMPTION */
>> +#else
>>
>> static inline int _cond_resched(void)
>> {
>> - klp_sched_try_switch();
>> return __cond_resched();
>> }
>>
>> -#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
>> +#endif /* CONFIG_PREEMPT_DYNAMIC */
>>
>> -#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
>> +#else
>>
>> -static inline int _cond_resched(void)
>> -{
>> - klp_sched_try_switch();
>> - return 0;
>> -}
>> +static inline int _cond_resched(void) { return 0; }
>>
>> -#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
>> +#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
>>
>> #define cond_resched() ({ \
>> __might_resched(__FILE__, __LINE__, 0); \
>> diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
>> index 61328328c474..fc851455740c 100644
>> --- a/kernel/livepatch/core.c
>> +++ b/kernel/livepatch/core.c
>> @@ -33,7 +33,6 @@
>> *
>> * - klp_ftrace_handler()
>> * - klp_update_patch_state()
>> - * - __klp_sched_try_switch()
>> */
>> DEFINE_MUTEX(klp_mutex);
>>
>> diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
>> index e54c3d60a904..70bc38f27af7 100644
>> --- a/kernel/livepatch/transition.c
>> +++ b/kernel/livepatch/transition.c
>> @@ -9,7 +9,6 @@
>>
>> #include <linux/cpu.h>
>> #include <linux/stacktrace.h>
>> -#include <linux/static_call.h>
>> #include "core.h"
>> #include "patch.h"
>> #include "transition.h"
>> @@ -27,25 +26,6 @@ static int klp_target_state = KLP_UNDEFINED;
>>
>> static unsigned int klp_signals_cnt;
>>
>> -/*
>> - * When a livepatch is in progress, enable klp stack checking in
>> - * cond_resched(). This helps CPU-bound kthreads get patched.
>> - */
>> -#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
>> -
>> -#define klp_cond_resched_enable() sched_dynamic_klp_enable()
>> -#define klp_cond_resched_disable() sched_dynamic_klp_disable()
>> -
>> -#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
>> -
>> -DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
>> -EXPORT_SYMBOL(klp_sched_try_switch_key);
>> -
>> -#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
>> -#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key)
>> -
>> -#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
>> -
>> /*
>> * This work can be performed periodically to finish patching or unpatching any
>> * "straggler" tasks which failed to transition in the first attempt.
>> @@ -194,8 +174,8 @@ void klp_update_patch_state(struct task_struct *task)
>> * barrier (smp_rmb) for two cases:
>> *
>> * 1) Enforce the order of the TIF_PATCH_PENDING read and the
>> - * klp_target_state read. The corresponding write barriers are in
>> - * klp_init_transition() and klp_reverse_transition().
>> + * klp_target_state read. The corresponding write barrier is in
>> + * klp_init_transition().
>> *
>> * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
>> * of func->transition, if klp_ftrace_handler() is called later on
>> @@ -363,44 +343,6 @@ static bool klp_try_switch_task(struct task_struct *task)
>> return !ret;
>> }
>>
>> -void __klp_sched_try_switch(void)
>> -{
>> - if (likely(!klp_patch_pending(current)))
>> - return;
>> -
>> - /*
>> - * This function is called from cond_resched() which is called in many
>> - * places throughout the kernel. Using the klp_mutex here might
>> - * deadlock.
>> - *
>> - * Instead, disable preemption to prevent racing with other callers of
>> - * klp_try_switch_task(). Thanks to task_call_func() they won't be
>> - * able to switch this task while it's running.
>> - */
>> - preempt_disable();
>> -
>> - /*
>> - * Make sure current didn't get patched between the above check and
>> - * preempt_disable().
>> - */
>> - if (unlikely(!klp_patch_pending(current)))
>> - goto out;
>> -
>> - /*
>> - * Enforce the order of the TIF_PATCH_PENDING read above and the
>> - * klp_target_state read in klp_try_switch_task(). The corresponding
>> - * write barriers are in klp_init_transition() and
>> - * klp_reverse_transition().
>> - */
>> - smp_rmb();
>> -
>> - klp_try_switch_task(current);
>> -
>> -out:
>> - preempt_enable();
>> -}
>> -EXPORT_SYMBOL(__klp_sched_try_switch);
>> -
>> /*
>> * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
>> * Kthreads with TIF_PATCH_PENDING set are woken up.
>> @@ -507,8 +449,7 @@ void klp_try_complete_transition(void)
>> return;
>> }
>>
>> - /* Done! Now cleanup the data structures. */
>> - klp_cond_resched_disable();
>> + /* we're done, now cleanup the data structures */
>> patch = klp_transition_patch;
>> klp_complete_transition();
>>
>> @@ -560,8 +501,6 @@ void klp_start_transition(void)
>> set_tsk_thread_flag(task, TIF_PATCH_PENDING);
>> }
>>
>> - klp_cond_resched_enable();
>> -
>> klp_signals_cnt = 0;
>> }
>>
>> @@ -617,9 +556,8 @@ void klp_init_transition(struct klp_patch *patch, int state)
>> * see a func in transition with a task->patch_state of KLP_UNDEFINED.
>> *
>> * Also enforce the order of the klp_target_state write and future
>> - * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
>> - * __klp_sched_try_switch() don't set a task->patch_state to
>> - * KLP_UNDEFINED.
>> + * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
>> + * set a task->patch_state to KLP_UNDEFINED.
>> */
>> smp_wmb();
>>
>> @@ -655,10 +593,14 @@ void klp_reverse_transition(void)
>> klp_target_state == KLP_PATCHED ? "patching to unpatching" :
>> "unpatching to patching");
>>
>> + klp_transition_patch->enabled = !klp_transition_patch->enabled;
>> +
>> + klp_target_state = !klp_target_state;
>> +
>> /*
>> * Clear all TIF_PATCH_PENDING flags to prevent races caused by
>> - * klp_update_patch_state() or __klp_sched_try_switch() running in
>> - * parallel with the reverse transition.
>> + * klp_update_patch_state() running in parallel with
>> + * klp_start_transition().
>> */
>> read_lock(&tasklist_lock);
>> for_each_process_thread(g, task)
>> @@ -668,28 +610,9 @@ void klp_reverse_transition(void)
>> for_each_possible_cpu(cpu)
>> clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
>>
>> - /*
>> - * Make sure all existing invocations of klp_update_patch_state() and
>> - * __klp_sched_try_switch() see the cleared TIF_PATCH_PENDING before
>> - * starting the reverse transition.
>> - */
>> + /* Let any remaining calls to klp_update_patch_state() complete */
>> klp_synchronize_transition();
>>
>> - /*
>> - * All patching has stopped, now re-initialize the global variables to
>> - * prepare for the reverse transition.
>> - */
>> - klp_transition_patch->enabled = !klp_transition_patch->enabled;
>> - klp_target_state = !klp_target_state;
>> -
>> - /*
>> - * Enforce the order of the klp_target_state write and the
>> - * TIF_PATCH_PENDING writes in klp_start_transition() to ensure
>> - * klp_update_patch_state() and __klp_sched_try_switch() don't set
>> - * task->patch_state to the wrong value.
>> - */
>> - smp_wmb();
>> -
>> klp_start_transition();
>> }
>>
>> @@ -703,9 +626,9 @@ void klp_copy_process(struct task_struct *child)
>> * the task flag up to date with the parent here.
>> *
>> * The operation is serialized against all klp_*_transition()
>> - * operations by the tasklist_lock. The only exceptions are
>> - * klp_update_patch_state(current) and __klp_sched_try_switch(), but we
>> - * cannot race with them because we are current.
>> + * operations by the tasklist_lock. The only exception is
>> + * klp_update_patch_state(current), but we cannot race with
>> + * that because we are current.
>> */
>> if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
>> set_tsk_thread_flag(child, TIF_PATCH_PENDING);
>> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
>> index 0e8764d63041..b43fda3c5733 100644
>> --- a/kernel/sched/core.c
>> +++ b/kernel/sched/core.c
>> @@ -8597,7 +8597,6 @@ EXPORT_STATIC_CALL_TRAMP(might_resched);
>> static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
>> int __sched dynamic_cond_resched(void)
>> {
>> - klp_sched_try_switch();
>> if (!static_branch_unlikely(&sk_dynamic_cond_resched))
>> return 0;
>> return __cond_resched();
>> @@ -8746,17 +8745,13 @@ int sched_dynamic_mode(const char *str)
>> #error "Unsupported PREEMPT_DYNAMIC mechanism"
>> #endif
>>
>> -DEFINE_MUTEX(sched_dynamic_mutex);
>> -static bool klp_override;
>> -
>> -static void __sched_dynamic_update(int mode)
>> +void sched_dynamic_update(int mode)
>> {
>> /*
>> * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
>> * the ZERO state, which is invalid.
>> */
>> - if (!klp_override)
>> - preempt_dynamic_enable(cond_resched);
>> + preempt_dynamic_enable(cond_resched);
>> preempt_dynamic_enable(might_resched);
>> preempt_dynamic_enable(preempt_schedule);
>> preempt_dynamic_enable(preempt_schedule_notrace);
>> @@ -8764,79 +8759,36 @@ static void __sched_dynamic_update(int mode)
>>
>> switch (mode) {
>> case preempt_dynamic_none:
>> - if (!klp_override)
>> - preempt_dynamic_enable(cond_resched);
>> + preempt_dynamic_enable(cond_resched);
>> preempt_dynamic_disable(might_resched);
>> preempt_dynamic_disable(preempt_schedule);
>> preempt_dynamic_disable(preempt_schedule_notrace);
>> preempt_dynamic_disable(irqentry_exit_cond_resched);
>> - if (mode != preempt_dynamic_mode)
>> - pr_info("Dynamic Preempt: none\n");
>> + pr_info("Dynamic Preempt: none\n");
>> break;
>>
>> case preempt_dynamic_voluntary:
>> - if (!klp_override)
>> - preempt_dynamic_enable(cond_resched);
>> + preempt_dynamic_enable(cond_resched);
>> preempt_dynamic_enable(might_resched);
>> preempt_dynamic_disable(preempt_schedule);
>> preempt_dynamic_disable(preempt_schedule_notrace);
>> preempt_dynamic_disable(irqentry_exit_cond_resched);
>> - if (mode != preempt_dynamic_mode)
>> - pr_info("Dynamic Preempt: voluntary\n");
>> + pr_info("Dynamic Preempt: voluntary\n");
>> break;
>>
>> case preempt_dynamic_full:
>> - if (!klp_override)
>> - preempt_dynamic_disable(cond_resched);
>> + preempt_dynamic_disable(cond_resched);
>> preempt_dynamic_disable(might_resched);
>> preempt_dynamic_enable(preempt_schedule);
>> preempt_dynamic_enable(preempt_schedule_notrace);
>> preempt_dynamic_enable(irqentry_exit_cond_resched);
>> - if (mode != preempt_dynamic_mode)
>> - pr_info("Dynamic Preempt: full\n");
>> + pr_info("Dynamic Preempt: full\n");
>> break;
>> }
>>
>> preempt_dynamic_mode = mode;
>> }
>>
>> -void sched_dynamic_update(int mode)
>> -{
>> - mutex_lock(&sched_dynamic_mutex);
>> - __sched_dynamic_update(mode);
>> - mutex_unlock(&sched_dynamic_mutex);
>> -}
>> -
>> -#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
>> -
>> -static int klp_cond_resched(void)
>> -{
>> - __klp_sched_try_switch();
>> - return __cond_resched();
>> -}
>> -
>> -void sched_dynamic_klp_enable(void)
>> -{
>> - mutex_lock(&sched_dynamic_mutex);
>> -
>> - klp_override = true;
>> - static_call_update(cond_resched, klp_cond_resched);
>> -
>> - mutex_unlock(&sched_dynamic_mutex);
>> -}
>> -
>> -void sched_dynamic_klp_disable(void)
>> -{
>> - mutex_lock(&sched_dynamic_mutex);
>> -
>> - klp_override = false;
>> - __sched_dynamic_update(preempt_dynamic_mode);
>> -
>> - mutex_unlock(&sched_dynamic_mutex);
>> -}
>> -
>> -#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
>> -
>> static int __init setup_preempt_mode(char *str)
>> {
>> int mode = sched_dynamic_mode(str);
--
ankur