author		Peter Zijlstra <peterz@infradead.org>	2024-08-14 00:25:53 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2024-09-03 15:26:31 +0200
commit		fd03c5b8585562d60f8b597b4332d28f48abfe7d (patch)
tree		11b3007a752affe6beaca37c057ca9a5c0983048 /kernel/sched/core.c
parent		260598f142c34811d226fdde5ab0346b48181439 (diff)
sched: Rework pick_next_task()
The current rule is that:

	pick_next_task() := pick_task() + set_next_task(.first = true)

And many classes implement it directly as such. Change things around to make
pick_next_task() optional while also changing the definition to:

	pick_next_task(prev) := pick_task() + put_prev_task() + set_next_task(.first = true)

The reason is that sched_ext would like to have a 'final' call that knows the
next task. By placing put_prev_task() right next to set_next_task() (as it
already is for sched_core) this becomes trivial.

As a bonus, this is a nice cleanup on its own.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224016.051225657@infradead.org
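To make the new composition concrete, here is a minimal sketch of the reworked
pick loop after this patch. It is illustrative only, not the patch itself, and
assumes kernel internals (struct rq, struct sched_class, for_each_class(),
put_prev_task(), set_next_task_first()); the function name sketch_pick_next is
invented:

	/*
	 * Sketch: ->pick_next_task() is now optional.  When a class omits it,
	 * the core composes the equivalent sequence from the simpler hooks,
	 * with put_prev_task() placed immediately before
	 * set_next_task(.first = true).
	 */
	static struct task_struct *
	sketch_pick_next(struct rq *rq, struct task_struct *prev)
	{
		const struct sched_class *class;
		struct task_struct *p;

		for_each_class(class) {
			if (class->pick_next_task) {
				/* The class implements the full combined operation. */
				p = class->pick_next_task(rq, prev);
				if (p)
					return p;
			} else {
				/* Fallback: pick_task() + put_prev_task() + set_next_task(). */
				p = class->pick_task(rq);
				if (p) {
					put_prev_task(rq, prev);
					set_next_task_first(rq, p);
					return p;
				}
			}
		}
		BUG(); /* The idle class should always have a runnable task. */
	}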
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 36f9bc509ff2..b9429eb5dbbe 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5893,8 +5893,9 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		/* Assume the next prioritized class is idle_sched_class */
 		if (!p) {
+			p = pick_task_idle(rq);
 			put_prev_task(rq, prev);
-			p = pick_next_task_idle(rq);
+			set_next_task_first(rq, p);
 		}
 
 		/*
@@ -5916,12 +5917,20 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 restart:
 	prev_balance(rq, prev, rf);
-	put_prev_task(rq, prev);
 
 	for_each_class(class) {
-		p = class->pick_next_task(rq);
-		if (p)
-			return p;
+		if (class->pick_next_task) {
+			p = class->pick_next_task(rq, prev);
+			if (p)
+				return p;
+		} else {
+			p = class->pick_task(rq);
+			if (p) {
+				put_prev_task(rq, prev);
+				set_next_task_first(rq, p);
+				return p;
+			}
+		}
 	}
 
 	BUG(); /* The idle class should always have a runnable task. */
@@ -6017,7 +6026,6 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 	prev_balance(rq, prev, rf);
-	put_prev_task(rq, prev);
 
 	smt_mask = cpu_smt_mask(cpu);
 	need_sync = !!rq->core->core_cookie;
@@ -6184,6 +6192,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
+	put_prev_task(rq, prev);
 	set_next_task_first(rq, next);
 out:
 	if (rq->core->core_forceidle_count && next == rq->idle)
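For the class-side view, a class can now rely on the fallback and supply only
the simpler hooks. A hypothetical sketch (the dummy_* helpers are invented for
illustration and are not in the tree):

	/*
	 * Hypothetical class with no .pick_next_task: the core calls
	 * .pick_task() and then applies put_prev_task() + set_next_task()
	 * back to back -- the adjacency that gives a class such as
	 * sched_ext a 'final' call site that knows the next task.
	 */
	DEFINE_SCHED_CLASS(dummy) = {
		.pick_task	= dummy_pick_task,
		.put_prev_task	= dummy_put_prev_task,
		.set_next_task	= dummy_set_next_task,
		/* .pick_next_task intentionally absent */
	};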