author		Tejun Heo <tj@kernel.org>	2024-09-03 09:15:42 -1000
committer	Tejun Heo <tj@kernel.org>	2024-09-03 12:49:18 -1000
commit		d7b01aef9dbd50f190c2c340deaf324806d09885
tree		f6ddedc4cffb48f67d7f2279b0c9e45e7d2ee827
parent		62607d033bb8dc417c2fd06f37f433d468023e66
parent		b2d70222dbf2a2ff7a972a685d249a5d75afa87f
Merge branch 'tip/sched/core' into for-6.12
- Resolve trivial context conflicts from dl_server clearing being moved
  around.

- Add @next to put_prev_task_scx() and @prev to pick_next_task_scx() to
  match sched/core.

- Merge sched_class->switch_class() addition from sched_ext with
  tip/sched/core changes in __pick_next_task().

- Make pick_next_task_scx() call put_prev_task_scx() to emulate the
  previous behavior where sched_class->put_prev_task() was called before
  sched_class->pick_next_task(). While this makes sched_ext build and
  function, the behavior is not in line with other sched classes. The
  follow-up patches will address the discrepancies and remove
  sched_class->switch_class().

Signed-off-by: Tejun Heo <tj@kernel.org>
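The last bullet is the subtle one and is easiest to see as code. The sketch
below only illustrates the ordering being emulated; it is not the actual
sched_ext pick path. The @prev and @next parameters are the ones this merge
adds, while first_local_task() and set_next_task_scx() are assumed helper
names for illustration:

	static struct task_struct *pick_next_task_scx(struct rq *rq,
						      struct task_struct *prev)
	{
		struct task_struct *p;

		/*
		 * Emulate the pre-merge core behavior: @prev is put back
		 * before a new task is picked, because that is the ordering
		 * sched_ext and its BPF schedulers still rely on. @next is
		 * not known yet at this point, hence NULL -- exactly the
		 * discrepancy the follow-up patches address.
		 */
		put_prev_task_scx(rq, prev, NULL);

		p = first_local_task(rq);	/* assumption: first queued task */
		if (p)
			set_next_task_scx(rq, p, true);	/* assumption */

		return p;
	}

Note how, in the __pick_next_task() hunk below, the class->pick_next_task()
branch deliberately skips put_prev_set_next_task(): a class providing that
method is trusted to have done both halves itself, which is what this
emulation provides.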
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 89
 1 file changed, 42 insertions(+), 47 deletions(-)
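Before reading the hunks, it helps to know the two runqueue fields they keep
clearing. Both are declared in kernel/sched/sched.h, outside this diffstat,
so the declarations below are an inference from the call sites in this diff,
not part of the patch:

	struct rq {
		/* ... */
		struct sched_dl_entity	*dl_server;	/* (re)set by the pick
							   path, cleared on entry
							   to __pick_next_task() */
	#ifdef CONFIG_SCHED_CORE
		struct sched_dl_entity	*core_dl_server; /* dl_server saved
							    alongside core_pick
							    for core scheduling */
	#endif
		/* ... */
	};

Moving this bookkeeping from the task to the runqueue is also why the first
hunk can drop the per-task p->dl_server clearing in ttwu_do_activate().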
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b0cec06bb1fa..91bedf5d9f89 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3690,8 +3690,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
rq->idle_stamp = 0;
}
#endif
-
- p->dl_server = NULL;
}
/*
@@ -5895,8 +5893,8 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
schedstat_inc(this_rq()->sched_count);
}
-static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
- struct rq_flags *rf)
+static void prev_balance(struct rq *rq, struct task_struct *prev,
+ struct rq_flags *rf)
{
const struct sched_class *start_class = prev->sched_class;
const struct sched_class *class;
@@ -5923,16 +5921,6 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
if (class->balance && class->balance(rq, prev, rf))
break;
}
-
- put_prev_task(rq, prev);
-
- /*
- * We've updated @prev and no longer need the server link, clear it.
- * Must be done before ->pick_next_task() because that can (re)set
- * ->dl_server.
- */
- if (prev->dl_server)
- prev->dl_server = NULL;
}
/*
@@ -5944,6 +5932,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
const struct sched_class *class;
struct task_struct *p;
+ rq->dl_server = NULL;
+
if (scx_enabled())
goto restart;
@@ -5962,38 +5952,37 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
/* Assume the next prioritized class is idle_sched_class */
if (!p) {
- put_prev_task(rq, prev);
- p = pick_next_task_idle(rq);
+ p = pick_task_idle(rq);
+ put_prev_set_next_task(rq, prev, p);
}
- /*
- * This is a normal CFS pick, but the previous could be a DL pick.
- * Clear it as previous is no longer picked.
- */
- if (prev->dl_server)
- prev->dl_server = NULL;
-
- /*
- * This is the fast path; it cannot be a DL server pick;
- * therefore even if @p == @prev, ->dl_server must be NULL.
- */
- if (p->dl_server)
- p->dl_server = NULL;
-
return p;
}
restart:
- put_prev_task_balance(rq, prev, rf);
+ prev_balance(rq, prev, rf);
for_each_active_class(class) {
- p = class->pick_next_task(rq);
- if (p) {
- const struct sched_class *prev_class = prev->sched_class;
+ if (class->pick_next_task) {
+ p = class->pick_next_task(rq, prev);
+ if (p) {
+ const struct sched_class *prev_class = prev->sched_class;
+
+ if (class != prev_class && prev_class->switch_class)
+ prev_class->switch_class(rq, p);
+ return p;
+ }
+ } else {
+ p = class->pick_task(rq);
+ if (p) {
+ const struct sched_class *prev_class = prev->sched_class;
- if (class != prev_class && prev_class->switch_class)
- prev_class->switch_class(rq, p);
- return p;
+ put_prev_set_next_task(rq, prev, p);
+
+ if (class != prev_class && prev_class->switch_class)
+ prev_class->switch_class(rq, p);
+ return p;
+ }
}
}
@@ -6024,6 +6013,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
const struct sched_class *class;
struct task_struct *p;
+ rq->dl_server = NULL;
+
for_each_active_class(class) {
p = class->pick_task(rq);
if (p)
@@ -6062,6 +6053,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* another cpu during offline.
*/
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
return __pick_next_task(rq, prev, rf);
}
@@ -6080,16 +6072,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
next = rq->core_pick;
- if (next != prev) {
- put_prev_task(rq, prev);
- set_next_task(rq, next);
- }
-
+ rq->dl_server = rq->core_dl_server;
rq->core_pick = NULL;
- goto out;
+ rq->core_dl_server = NULL;
+ goto out_set_next;
}
- put_prev_task_balance(rq, prev, rf);
+ prev_balance(rq, prev, rf);
smt_mask = cpu_smt_mask(cpu);
need_sync = !!rq->core->core_cookie;
@@ -6130,6 +6119,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
next = pick_task(rq);
if (!next->core_cookie) {
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
/*
* For robustness, update the min_vruntime_fi for
* unconstrained picks as well.
@@ -6157,7 +6147,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (i != cpu && (rq_i != rq->core || !core_clock_updated))
update_rq_clock(rq_i);
- p = rq_i->core_pick = pick_task(rq_i);
+ rq_i->core_pick = p = pick_task(rq_i);
+ rq_i->core_dl_server = rq_i->dl_server;
+
if (!max || prio_less(max, p, fi_before))
max = p;
}
@@ -6181,6 +6173,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
rq_i->core_pick = p;
+ rq_i->core_dl_server = NULL;
if (p == rq_i->idle) {
if (rq_i->nr_running) {
@@ -6241,6 +6234,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (i == cpu) {
rq_i->core_pick = NULL;
+ rq_i->core_dl_server = NULL;
continue;
}
@@ -6249,6 +6243,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (rq_i->curr == rq_i->core_pick) {
rq_i->core_pick = NULL;
+ rq_i->core_dl_server = NULL;
continue;
}
@@ -6256,8 +6251,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
out_set_next:
- set_next_task(rq, next);
-out:
+ put_prev_set_next_task(rq, prev, next);
if (rq->core->core_forceidle_count && next == rq->idle)
queue_core_balance(rq);
@@ -8487,6 +8481,7 @@ void __init sched_init(void)
#ifdef CONFIG_SCHED_CORE
rq->core = rq;
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
rq->core_enabled = 0;
rq->core_tree = RB_ROOT;
rq->core_forceidle_count = 0;
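Finally, the put_prev_set_next_task() helper that replaces the separate
put_prev_task()/set_next_task() pairs throughout this diff is also defined
outside this file. Inferred from its call sites here, and from the
`next != prev` guard it absorbs in the core-sched hunk, it plausibly amounts
to the following; the real helper lives elsewhere and may do more:

	static inline void put_prev_set_next_task(struct rq *rq,
						  struct task_struct *prev,
						  struct task_struct *next)
	{
		/* Sketch inferred from the call sites above, not the
		 * patch's definition. */
		if (next == prev)
			return;

		prev->sched_class->put_prev_task(rq, prev, next);
		next->sched_class->set_next_task(rq, next, true);
	}

Folding the pair into one helper is what lets @next reach put_prev_task(),
the same signature change the commit message describes for
put_prev_task_scx().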