From: Bo Li <libo.gcs85@bytedance.com>
To: tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
dave.hansen@linux.intel.com, x86@kernel.org, luto@kernel.org,
kees@kernel.org, akpm@linux-foundation.org, david@redhat.com,
juri.lelli@redhat.com, vincent.guittot@linaro.org,
peterz@infradead.org
Cc: dietmar.eggemann@arm.com, hpa@zytor.com, acme@kernel.org,
namhyung@kernel.org, mark.rutland@arm.com,
alexander.shishkin@linux.intel.com, jolsa@kernel.org,
irogers@google.com, adrian.hunter@intel.com,
kan.liang@linux.intel.com, viro@zeniv.linux.org.uk,
brauner@kernel.org, jack@suse.cz, lorenzo.stoakes@oracle.com,
Liam.Howlett@oracle.com, vbabka@suse.cz, rppt@kernel.org,
surenb@google.com, mhocko@suse.com, rostedt@goodmis.org,
bsegall@google.com, mgorman@suse.de, vschneid@redhat.com,
jannh@google.com, pfalcato@suse.de, riel@surriel.com,
harry.yoo@oracle.com, linux-kernel@vger.kernel.org,
linux-perf-users@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-mm@kvack.org, duanxiongchun@bytedance.com,
yinhongbo@bytedance.com, dengliang.1214@bytedance.com,
xieyongji@bytedance.com, chaiwen.cc@bytedance.com,
songmuchun@bytedance.com, yuanzhu@bytedance.com,
chengguozhu@bytedance.com, sunjiadong.lff@bytedance.com,
Bo Li <libo.gcs85@bytedance.com>
Subject: [RFC v2 19/35] RPAL: add lazy switch main logic
Date: Fri, 30 May 2025 17:27:47 +0800
Message-ID: <91e9db5ad4a3e1e58a666bd496e55d8f8db2c63c.1748594841.git.libo.gcs85@bytedance.com>
In-Reply-To: <cover.1748594840.git.libo.gcs85@bytedance.com>

The implementation of a lazy switch differs from a regular schedule() in
three key aspects:

1. It occurs at kernel entry with IRQs disabled.
2. The next task is explicitly pre-determined rather than selected by the
   scheduler.
3. The user-space context (excluding general-purpose registers) remains
   unchanged across the switch.

This patch introduces the rpal_schedule() interface to address these
requirements. First, rpal_schedule() skips IRQ enabling in
finish_lock_switch(), preserving the IRQ-disabled state required at
kernel entry. Second, the rpal_pick_next_task() interface is used to
explicitly specify the target task, bypassing the scheduler's normal
decision-making process. Third, non-general-purpose registers (e.g., FPU,
vector units) are not restored during the switch, so the user-space
context remains intact. General-purpose registers are handled by RPAL
before it invokes rpal_schedule(); that handling is added in a
subsequent patch.
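
For illustration, a minimal sketch of the intended lazy-switch call site
follows. It is not part of this patch: rpal_save_gprs() and
rpal_find_receiver() are hypothetical placeholders for the logic
introduced in later patches of this series.

	/* At kernel entry, IRQs are already disabled. */
	lockdep_assert_irqs_disabled();

	/* GPRs are handled by RPAL before the switch (later patch). */
	rpal_save_gprs(current);		/* hypothetical helper */

	/* The target task is pre-determined, not picked by the scheduler. */
	next = rpal_find_receiver(current);	/* hypothetical helper */

	/*
	 * Switch directly to the target task; FPU/vector state is left
	 * untouched and IRQs remain disabled on return.
	 */
	rpal_schedule(next);
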
Signed-off-by: Bo Li <libo.gcs85@bytedance.com>
---
arch/x86/kernel/process_64.c | 75 +++++++++++++++++++++
include/linux/rpal.h | 3 +
kernel/sched/core.c | 126 +++++++++++++++++++++++++++++++++++
3 files changed, 204 insertions(+)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4830e9215de7..efc3f238c486 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -753,6 +753,81 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
return prev_p;
}
+#ifdef CONFIG_RPAL
+__no_kmsan_checks
+__visible __notrace_funcgraph struct task_struct *
+__rpal_switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ struct thread_struct *prev = &prev_p->thread;
+ struct thread_struct *next = &next_p->thread;
+ int cpu = smp_processor_id();
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
+ this_cpu_read(hardirq_stack_inuse));
+
+ /* no need to switch fpu */
+ /* __fpu_invalidate_fpregs_state() */
+ x86_task_fpu(prev_p)->last_cpu = -1;
+ /* fpregs_activate() */
+ __this_cpu_write(fpu_fpregs_owner_ctx, x86_task_fpu(next_p));
+ trace_x86_fpu_regs_activated(x86_task_fpu(next_p));
+ x86_task_fpu(next_p)->last_cpu = cpu;
+ set_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD);
+ clear_tsk_thread_flag(next_p, TIF_NEED_FPU_LOAD);
+
+ /* no need to save fs */
+ savesegment(gs, prev_p->thread.gsindex);
+ if (static_cpu_has(X86_FEATURE_FSGSBASE))
+ prev_p->thread.gsbase = __rdgsbase_inactive();
+ else
+ save_base_legacy(prev_p, prev_p->thread.gsindex, GS);
+
+ load_TLS(next, cpu);
+
+ arch_end_context_switch(next_p);
+
+ savesegment(es, prev->es);
+ if (unlikely(next->es | prev->es))
+ loadsegment(es, next->es);
+
+ savesegment(ds, prev->ds);
+ if (unlikely(next->ds | prev->ds))
+ loadsegment(ds, next->ds);
+
+ /* no need to load fs */
+ if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ if (unlikely(prev->gsindex || next->gsindex))
+ loadseg(GS, next->gsindex);
+
+ __wrgsbase_inactive(next->gsbase);
+ } else {
+ load_seg_legacy(prev->gsindex, prev->gsbase, next->gsindex,
+ next->gsbase, GS);
+ }
+
+ /* skip pkru load as we will use pkru in RPAL */
+
+ this_cpu_write(current_task, next_p);
+ this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
+
+ /* no need to load fpu */
+
+ update_task_stack(next_p);
+ switch_to_extra(prev_p, next_p);
+
+ if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
+ unsigned short ss_sel;
+
+ savesegment(ss, ss_sel);
+ if (ss_sel != __KERNEL_DS)
+ loadsegment(ss, __KERNEL_DS);
+ }
+ resctrl_sched_in(next_p);
+
+ return prev_p;
+}
+#endif
+
void set_personality_64bit(void)
{
/* inherit personality from parent */
diff --git a/include/linux/rpal.h b/include/linux/rpal.h
index 45137770fac6..0813db4552c0 100644
--- a/include/linux/rpal.h
+++ b/include/linux/rpal.h
@@ -487,4 +487,7 @@ int rpal_try_to_wake_up(struct task_struct *p);
int rpal_init_thread_pending(struct rpal_common_data *rcd);
void rpal_free_thread_pending(struct rpal_common_data *rcd);
int rpal_set_cpus_allowed_ptr(struct task_struct *p, bool is_lock);
+void rpal_schedule(struct task_struct *next);
+asmlinkage struct task_struct *
+__rpal_switch_to(struct task_struct *prev_p, struct task_struct *next_p);
#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2e76376c5172..760d88458b39 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6827,6 +6827,12 @@ static bool try_to_block_task(struct rq *rq, struct task_struct *p,
if (unlikely(is_special_task_state(task_state)))
flags |= DEQUEUE_SPECIAL;
+#ifdef CONFIG_RPAL
+ /* DELAY_DEQUEUE will cause CPU stalls after lazy switch, skip it */
+ if (rpal_test_current_thread_flag(RPAL_RECEIVER_BIT))
+ flags |= DEQUEUE_SPECIAL;
+#endif
+
/*
* __schedule() ttwu()
* prev_state = prev->state; if (p->on_rq && ...)
@@ -11005,6 +11011,62 @@ void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
#endif /* CONFIG_SCHED_CLASS_EXT */
#ifdef CONFIG_RPAL
+static struct rq *rpal_finish_task_switch(struct task_struct *prev)
+ __releases(rq->lock)
+{
+ struct rq *rq = this_rq();
+ struct mm_struct *mm = rq->prev_mm;
+
+ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
+ "corrupted preempt_count: %s/%d/0x%x\n",
+ current->comm, current->pid, preempt_count()))
+ preempt_count_set(FORK_PREEMPT_COUNT);
+
+ rq->prev_mm = NULL;
+ vtime_task_switch(prev);
+ perf_event_task_sched_in(prev, current);
+ finish_task(prev);
+ tick_nohz_task_switch();
+
+ /* finish_lock_switch(), but without enabling irqs */
+ spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
+ __balance_callbacks(rq);
+ raw_spin_rq_unlock(rq);
+
+ finish_arch_post_lock_switch();
+ kcov_finish_switch(current);
+ kmap_local_sched_in();
+
+ fire_sched_in_preempt_notifiers(current);
+ if (mm) {
+ membarrier_mm_sync_core_before_usermode(mm);
+ mmdrop(mm);
+ }
+
+ return rq;
+}
+
+static __always_inline struct rq *rpal_context_switch(struct rq *rq,
+ struct task_struct *prev,
+ struct task_struct *next,
+ struct rq_flags *rf)
+{
+ /* irq is off */
+ prepare_task_switch(rq, prev, next);
+ arch_start_context_switch(prev);
+
+ membarrier_switch_mm(rq, prev->active_mm, next->mm);
+ switch_mm_irqs_off(prev->active_mm, next->mm, next);
+ lru_gen_use_mm(next->mm);
+
+ switch_mm_cid(rq, prev, next);
+
+ prepare_lock_switch(rq, next, rf);
+ __rpal_switch_to(prev, next);
+ barrier();
+ return rpal_finish_task_switch(prev);
+}
+
#ifdef CONFIG_SCHED_CORE
static inline struct task_struct *
__rpal_pick_next_task(struct rq *rq, struct task_struct *prev,
@@ -11214,4 +11276,68 @@ rpal_pick_next_task(struct rq *rq, struct task_struct *prev,
BUG();
}
#endif
+
+/* enter and exit with irqs disabled */
+void __sched notrace rpal_schedule(struct task_struct *next)
+{
+ struct task_struct *prev, *picked;
+ bool preempt = false;
+ unsigned long *switch_count;
+ unsigned long prev_state;
+ struct rq_flags rf;
+ struct rq *rq;
+ int cpu;
+
+ /* sched_mode = SM_NONE */
+
+ preempt_disable();
+
+ trace_sched_entry_tp(preempt, CALLER_ADDR0);
+
+ cpu = smp_processor_id();
+ rq = cpu_rq(cpu);
+ prev = rq->curr;
+
+ schedule_debug(prev, preempt);
+
+ if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
+ hrtick_clear(rq);
+
+ rcu_note_context_switch(preempt);
+ rq_lock(rq, &rf);
+ smp_mb__after_spinlock();
+
+ rq->clock_update_flags <<= 1;
+ update_rq_clock(rq);
+ rq->clock_update_flags = RQCF_UPDATED;
+
+ switch_count = &prev->nivcsw;
+
+ prev_state = READ_ONCE(prev->__state);
+ if (prev_state) {
+ try_to_block_task(rq, prev, &prev_state);
+ switch_count = &prev->nvcsw;
+ }
+
+ picked = rpal_pick_next_task(rq, prev, next, &rf);
+ rq_set_donor(rq, next);
+ if (unlikely(next != picked))
+ panic("rpal error: next != picked\n");
+
+ clear_tsk_need_resched(prev);
+ clear_preempt_need_resched();
+ rq->last_seen_need_resched_ns = 0;
+
+ rq->nr_switches++;
+ RCU_INIT_POINTER(rq->curr, next);
+ ++*switch_count;
+ migrate_disable_switch(rq, prev);
+ psi_account_irqtime(rq, prev, next);
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
+ prev->se.sched_delayed);
+ trace_sched_switch(preempt, prev, next, prev_state);
+ rq = rpal_context_switch(rq, prev, next, &rf);
+ trace_sched_exit_tp(true, CALLER_ADDR0);
+ preempt_enable_no_resched();
+}
#endif
--
2.20.1