author		Peter Zijlstra <peterz@infradead.org>	2024-08-14 00:25:53 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2024-09-03 15:26:31 +0200
commit		fd03c5b8585562d60f8b597b4332d28f48abfe7d (patch)
tree		11b3007a752affe6beaca37c057ca9a5c0983048 /kernel/sched/sched.h
parent		260598f142c34811d226fdde5ab0346b48181439 (diff)
sched: Rework pick_next_task()
The current rule is that:

  pick_next_task() := pick_task() + set_next_task(.first = true)

And many classes implement it directly as such. Change things around to make
pick_next_task() optional while also changing the definition to:

  pick_next_task(prev) := pick_task() + put_prev_task() + set_next_task(.first = true)

The reason is that sched_ext would like to have a 'final' call that knows the
next task. By placing put_prev_task() right next to set_next_task() (as it
already is for sched_core) this becomes trivial.

As a bonus, this is a nice cleanup on its own.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224016.051225657@infradead.org
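To make the new contract concrete, here is a minimal, compilable sketch of the
composition rule above. Everything in it (the trimmed-down types, the
core_pick_next() helper, the toy class) is a simplified stand-in invented for
illustration, not the kernel's own code; in particular, the real core pick
loop dispatches put_prev_task() through prev's own class, while this sketch
uses a single class to stay small.

	/* sketch.c - illustrative model of the reworked rule; build with
	 * `cc -o sketch sketch.c`. All names here are hypothetical stand-ins. */
	#include <stdbool.h>
	#include <stdio.h>

	struct task_struct { const char *comm; };
	struct rq { struct task_struct *queued; };

	struct sched_class {
		/* Mandatory: pick a candidate without touching rq state. */
		struct task_struct *(*pick_task)(struct rq *rq);
		/* Optional: a class may fuse the whole sequence itself. */
		struct task_struct *(*pick_next_task)(struct rq *rq,
						      struct task_struct *prev);
		void (*put_prev_task)(struct rq *rq, struct task_struct *p);
		void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
	};

	/*
	 * The composition rule from the commit message:
	 *   pick_next_task(prev) := pick_task() + put_prev_task()
	 *                           + set_next_task(.first = true)
	 * A class providing its own pick_next_task() takes over the sequence;
	 * otherwise the core composes it from the mandatory hooks.
	 */
	static struct task_struct *
	core_pick_next(struct rq *rq, struct task_struct *prev,
		       const struct sched_class *class)
	{
		struct task_struct *next;

		if (class->pick_next_task)
			return class->pick_next_task(rq, prev);

		next = class->pick_task(rq);
		if (next) {
			class->put_prev_task(rq, prev);
			class->set_next_task(rq, next, true);
		}
		return next;
	}

	/* A toy class implementing only the mandatory hooks. */
	static struct task_struct *toy_pick_task(struct rq *rq)
	{
		return rq->queued;
	}

	static void toy_put_prev(struct rq *rq, struct task_struct *p)
	{
		(void)rq;
		printf("put_prev_task(%s)\n", p->comm);
	}

	static void toy_set_next(struct rq *rq, struct task_struct *p, bool first)
	{
		(void)rq;
		printf("set_next_task(%s, first=%d)\n", p->comm, first);
	}

	int main(void)
	{
		struct task_struct prev = { "prev" }, woken = { "woken" };
		struct rq rq = { &woken };
		const struct sched_class toy = {
			.pick_task	= toy_pick_task,
			.put_prev_task	= toy_put_prev,
			.set_next_task	= toy_set_next,
			/* .pick_next_task left NULL: the core composes the steps. */
		};
		struct task_struct *next = core_pick_next(&rq, &prev, &toy);

		printf("next = %s\n", next ? next->comm : "(none)");
		return 0;
	}

Because pick_next_task() now receives prev, a class that does implement it
becomes the 'final' call that already knows the next task, which is the hook
the commit message says sched_ext is after.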
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 80605e2da483..64a4ed758ba1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2300,7 +2300,17 @@ struct sched_class {
 
 	void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
 
-	struct task_struct *(*pick_next_task)(struct rq *rq);
+	struct task_struct *(*pick_task)(struct rq *rq);
+	/*
+	 * Optional! When implemented pick_next_task() should be equivalent to:
+	 *
+	 *   next = pick_task();
+	 *   if (next) {
+	 *       put_prev_task(prev);
+	 *       set_next_task_first(next);
+	 *   }
+	 */
+	struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
 
 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
 	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
@@ -2309,8 +2319,6 @@ struct sched_class {
 	int  (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
 
-	struct task_struct * (*pick_task)(struct rq *rq);
-
 	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
@@ -2421,7 +2429,7 @@ static inline bool sched_fair_runnable(struct rq *rq)
 }
 
 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-extern struct task_struct *pick_next_task_idle(struct rq *rq);
+extern struct task_struct *pick_task_idle(struct rq *rq);
 
 #define SCA_CHECK		0x01
 #define SCA_MIGRATE_DISABLE	0x02