commit b0391a8a07ef5d88b14071badc306708a6d6e788
Author:     Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate: 2025-11-27 15:47:23 +0100
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: 2025-12-18 14:00:15 +0100
tree   088c2f2acbb791ddfd70a18dcd4b4fa035c1b9ae
parent 09efe7cfbf919c4d763bc425473fcfee0dc98356
cpu: Make atomic hotplug callbacks run with interrupts disabled on UP
[ Upstream commit c94291914b200e10c72cef23c8e4c67eb4fdbcd9 ]

On SMP systems the CPU hotplug callbacks in the "starting" range are
invoked while the CPU is brought up and interrupts are still disabled.
Callbacks which are added later are invoked via the hotplug thread on
the target CPU, and interrupts are explicitly disabled.

In the UP case, callbacks which are added later are invoked directly,
without the thread indirection. This is in principle okay since there
is just one CPU, but those callbacks are invoked with interrupts
enabled. That's incorrect, as those callbacks assume interrupt
disabled context.

Disable interrupts before invoking the callbacks on UP if the state is
atomic and interrupts are expected to be disabled. The "save" part is
required because this path is also taken early in the boot process
while interrupts are disabled and must not be enabled prematurely.

Fixes: 06ddd17521bf1 ("sched/smp: Always define is_percpu_thread() and scheduler_ipi()")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://patch.msgid.link/20251127144723.ev9DuXXR@linutronix.de
Signed-off-by: Sasha Levin <sashal@kernel.org>
 kernel/cpu.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)
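To illustrate the class of callbacks this affects, here is a minimal
sketch of a driver registering in the atomic ("starting") range. The
CPUHP_AP_MYDRV_STARTING entry and the mydrv_* functions are
hypothetical; cpuhp_setup_state() is the real registration API. Both
callbacks run on the hotplugged CPU with interrupts disabled, which is
exactly the assumption the UP path was violating:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Hypothetical "starting" callback: runs on the CPU coming up, IRQs off. */
static int mydrv_starting_cpu(unsigned int cpu)
{
	/* Safe to program per-CPU hardware here without further locking. */
	return 0;
}

/* Hypothetical "dying" callback: runs on the CPU going down, IRQs off. */
static int mydrv_dying_cpu(unsigned int cpu)
{
	/* Teardown in the atomic range must not fail. */
	return 0;
}

static int __init mydrv_init(void)
{
	/*
	 * CPUHP_AP_MYDRV_STARTING is a made-up state; a real driver adds an
	 * entry between CPUHP_AP_IDLE_DEAD and CPUHP_AP_ONLINE in
	 * enum cpuhp_state, the range cpuhp_is_atomic_state() tests for.
	 */
	return cpuhp_setup_state(CPUHP_AP_MYDRV_STARTING, "mydrv:starting",
				 mydrv_starting_cpu, mydrv_dying_cpu);
}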
diff --git a/kernel/cpu.c b/kernel/cpu.c
index db9f6c539b28..15000c7abc65 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -249,6 +249,14 @@ err:
 	return ret;
 }
 
+/*
+ * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
+ */
+static bool cpuhp_is_atomic_state(enum cpuhp_state state)
+{
+	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
+}
+
 #ifdef CONFIG_SMP
 static bool cpuhp_is_ap_state(enum cpuhp_state state)
 {
@@ -271,14 +279,6 @@ static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 	complete(done);
 }
 
-/*
- * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
- */
-static bool cpuhp_is_atomic_state(enum cpuhp_state state)
-{
-	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
-}
-
 /* Synchronization state management */
 enum cpuhp_sync_state {
 	SYNC_STATE_DEAD,
@@ -2364,7 +2364,14 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
 	else
 		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 #else
-	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+	if (cpuhp_is_atomic_state(state)) {
+		guard(irqsave)();
+		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+		/* STARTING/DYING must not fail! */
+		WARN_ON_ONCE(ret);
+	} else {
+		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+	}
 #endif
 	BUG_ON(ret && !bringup);
 	return ret;
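For readers unfamiliar with the scoped guards from <linux/cleanup.h>:
guard(irqsave)() saves the current interrupt state, disables
interrupts, and automatically restores the saved state when the
enclosing scope is left. A rough open-coded equivalent of the new UP
branch, as a sketch rather than the literal macro expansion:

	unsigned long flags;

	if (cpuhp_is_atomic_state(state)) {
		/* Save and disable; works even before IRQs are first enabled at boot. */
		local_irq_save(flags);
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
		/* STARTING/DYING must not fail! */
		WARN_ON_ONCE(ret);
		/* Restore the prior state; never enables IRQs prematurely. */
		local_irq_restore(flags);
	} else {
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
	}

This restore-rather-than-enable behaviour is the "save" part the
commit message calls out: early in boot this path runs with interrupts
disabled, and local_irq_restore() leaves them disabled, whereas an
unconditional local_irq_enable() would switch them on prematurely.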