path: root/kernel/sched
author	Tejun Heo <tj@kernel.org>	2026-03-17 13:21:52 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2026-03-25 11:10:33 +0100
commit	522acaae34aa7e05859260056b39c7c030592a0c (patch)
tree	87af92f1d553999ce3d830aa6b388c6a8abc43d8 /kernel/sched
parent	c1b8245c0a38787533dd7da0c4e0e68f89a623c0 (diff)
download	linux-522acaae34aa7e05859260056b39c7c030592a0c.tar.gz
	linux-522acaae34aa7e05859260056b39c7c030592a0c.tar.bz2
	linux-522acaae34aa7e05859260056b39c7c030592a0c.zip
sched_ext: Disable preemption between scx_claim_exit() and kicking helper work
[ Upstream commit 83236b2e43dba00bee5b82eb5758816b1a674f6a ]

scx_claim_exit() atomically sets exit_kind, which prevents scx_error() from
triggering further error handling. After claiming exit, the caller must kick
the helper kthread work which initiates bypass mode and teardown. If the
calling task gets preempted between claiming exit and kicking the helper work,
and the BPF scheduler fails to schedule it back (since error handling is now
disabled), the helper work is never queued, bypass mode never activates, tasks
stop being dispatched, and the system wedges.

Disable preemption across scx_claim_exit() and the subsequent work kicking in
all callers - scx_disable() and scx_vexit(). Add
lockdep_assert_preemption_disabled() to scx_claim_exit() to enforce the
requirement.

Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
Cc: stable@vger.kernel.org # v6.12+
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
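The window being closed is easiest to see as a simplified view of the fixed
scx_disable() path; this mirrors the hunks below, with the surrounding
function context elided:

	rcu_read_lock();
	sch = rcu_dereference(scx_root);
	if (sch) {
		/*
		 * Preemption must stay off from the claim until the helper
		 * work is queued: once exit_kind is claimed, scx_error() is
		 * inert, so a task preempted here and never rescheduled by
		 * the failing BPF scheduler would leave the teardown work
		 * unqueued and wedge the system.
		 */
		guard(preempt)();
		scx_claim_exit(sch, kind);
		kthread_queue_work(sch->helper, &sch->disable_work);
	}
	rcu_read_unlock();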
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/ext.c	12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 153a6cd661f3..4e3f06c19ab4 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4066,10 +4066,19 @@ done:
 	scx_bypass(false);
 }
 
+/*
+ * Claim the exit on @sch. The caller must ensure that the helper kthread work
+ * is kicked before the current task can be preempted. Once exit_kind is
+ * claimed, scx_error() can no longer trigger, so if the current task gets
+ * preempted and the BPF scheduler fails to schedule it back, the helper work
+ * will never be kicked and the whole system can wedge.
+ */
 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
 {
 	int none = SCX_EXIT_NONE;
 
+	lockdep_assert_preemption_disabled();
+
 	if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
 		return false;
 
@@ -4092,6 +4101,7 @@ static void scx_disable(enum scx_exit_kind kind)
 	rcu_read_lock();
 	sch = rcu_dereference(scx_root);
 	if (sch) {
+		guard(preempt)();
 		scx_claim_exit(sch, kind);
 		kthread_queue_work(sch->helper, &sch->disable_work);
 	}
@@ -4414,6 +4424,8 @@ static void scx_vexit(struct scx_sched *sch,
 {
 	struct scx_exit_info *ei = sch->exit_info;
 
+	guard(preempt)();
+
 	if (!scx_claim_exit(sch, kind))
 		return;
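For readers unfamiliar with the scope-based guards: guard(preempt)() is built
on the kernel's <linux/cleanup.h> guard infrastructure and disables preemption
when declared, re-enabling it automatically when the enclosing scope is left.
As a rough, open-coded equivalent of the scx_vexit() change above (the rest of
scx_vexit() is not shown in the truncated hunk), the guard behaves like:

	preempt_disable();
	if (!scx_claim_exit(sch, kind)) {
		preempt_enable();
		return;
	}
	/* ... record exit info and kick the helper work as before ... */
	preempt_enable();

The guard form avoids having to pair every early return with an explicit
preempt_enable(), which is why the fix uses it rather than open-coding the
disable/enable calls.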