Author:    Linus Torvalds <torvalds@linux-foundation.org>  2025-07-20 11:08:51 -0700
Committer: Linus Torvalds <torvalds@linux-foundation.org>  2025-07-20 11:08:51 -0700
commit     62347e279092ae704877467abdc8533e914f945e (patch)
tree       225fbeee4e7d84d37da91ddbb5039711dd857569
parent     5f054ef2e0f1ca7d32ac48e275d08e2ac29d84f3 (diff)
parent     36569780b0d64de283f9d6c2195fd1a43e221ee8 (diff)
Merge tag 'sched-urgent-2025-07-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:
 "A single fix for the scheduler.

  A recent commit changed the runqueue counter nr_uninterruptible to an
  unsigned int. Because the counters are not updated when an
  uninterruptible task is migrated to a different CPU, a per-CPU counter
  can exceed INT_MAX. The counter is cast to long in the load average
  calculation, so values above INT_MAX expand into negative space and
  result in bogus load average values.

  Convert it back to unsigned long to fix this."

* tag 'sched-urgent-2025-07-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Change nr_uninterruptible type to unsigned long
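To make the failure mode concrete, here is a minimal userspace C sketch
(not kernel code; the counter value is made up) of how a counter that
has passed INT_MAX goes negative through the old (int) cast, while the
unsigned long counter with a (long) cast keeps its value on 64-bit.
The unsigned-to-int conversion is implementation-defined in ISO C but
modular on the compilers the kernel supports:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* Hypothetical per-CPU counter that exceeded INT_MAX. */
	unsigned int  nr_uninterruptible_32 = (unsigned int)INT_MAX + 5U;
	unsigned long nr_uninterruptible_64 = (unsigned long)INT_MAX + 5UL;

	/* Old code: narrowing through (int) turns the wrapped value
	 * into a large negative long, corrupting the load average. */
	long old_nr_active = (int)nr_uninterruptible_32;

	/* Fixed code: the counter is unsigned long and the (long)
	 * cast preserves the value (it is far below LONG_MAX). */
	long new_nr_active = (long)nr_uninterruptible_64;

	printf("old: %ld\n", old_nr_active);	/* -2147483644 */
	printf("new: %ld\n", new_nr_active);	/*  2147483652 */
	return 0;
}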
-rw-r--r--  kernel/sched/loadavg.c | 2 +-
-rw-r--r--  kernel/sched/sched.h   | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index c48900b856a2..52ca8e268cfc 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
 	long nr_active, delta = 0;
 
 	nr_active = this_rq->nr_running - adjust;
-	nr_active += (int)this_rq->nr_uninterruptible;
+	nr_active += (long)this_rq->nr_uninterruptible;
 
 	if (nr_active != this_rq->calc_load_active) {
 		delta = nr_active - this_rq->calc_load_active;
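For context, a standalone sketch of the folding logic this hunk
patches, with the fixed cast applied. struct rq_sketch is a stand-in
for the real struct rq, and the tail of the function, which the hunk
truncates, is completed here under the assumption that it updates
calc_load_active and returns the delta, as in the upstream file:

#include <stdio.h>

struct rq_sketch {
	unsigned long nr_running;
	unsigned long nr_uninterruptible;
	long calc_load_active;
};

/* Mirrors the patched kernel function: compare the current active
 * count against the last folded value and hand back only the delta
 * to be added into the global load-average input. */
static long calc_load_fold_active(struct rq_sketch *this_rq, long adjust)
{
	long nr_active, delta = 0;

	nr_active = this_rq->nr_running - adjust;
	nr_active += (long)this_rq->nr_uninterruptible;	/* fixed cast */

	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
	}

	return delta;
}

int main(void)
{
	struct rq_sketch rq = { .nr_running = 3, .nr_uninterruptible = 2,
				.calc_load_active = 4 };

	/* 3 running + 2 uninterruptible = 5 active; the last fold saw
	 * 4, so one new task is folded into the global load input. */
	printf("delta = %ld\n", calc_load_fold_active(&rq, 0));	/* 1 */
	return 0;
}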
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 475bb5998295..83e3aa917142 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1149,7 +1149,7 @@ struct rq {
 	 * one CPU and if it got migrated afterwards it may decrease
 	 * it on another CPU. Always updated under the runqueue lock:
 	 */
-	unsigned int		nr_uninterruptible;
+	unsigned long		nr_uninterruptible;
 
 	union {
 		struct task_struct __rcu *donor;	/* Scheduler context */
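The comment in the hunk above is the crux of the bug: sleep and wakeup
may account on different CPUs, so an individual rq counter can wrap
while the sum over all CPUs stays correct. A plain C sketch of that
invariant with a hypothetical two-CPU setup (the unsigned-to-long
conversion is implementation-defined in ISO C but modular on the
kernel's compilers):

#include <stdio.h>

#define NR_CPUS 2

static unsigned long nr_uninterruptible[NR_CPUS];

int main(void)
{
	/* A task goes to sleep on CPU 0 ... */
	nr_uninterruptible[0]++;

	/* ... and is woken after being migrated to CPU 1: the decrement
	 * lands on the other CPU's counter, which wraps to ULONG_MAX. */
	nr_uninterruptible[1]--;

	long sum = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += (long)nr_uninterruptible[cpu];

	/* Individually bogus, collectively correct: the sum is 0. */
	printf("cpu0=%lu cpu1=%lu sum=%ld\n",
	       nr_uninterruptible[0], nr_uninterruptible[1], sum);
	return 0;
}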