author		Yang Yingliang <yangyingliang@huawei.com>	2024-07-03 11:16:09 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-08-14 13:59:00 +0200
commit		4c15b20c26a40058145b42da8b7820a0ffe34ef4 (patch)
tree		d042f0c31a6423718718fcaa206098a74c57dfcd /kernel
parent		65727331b60197b742089855ac09464c22b96f66 (diff)
sched/core: Introduce sched_set_rq_on/offline() helper
commit 2f027354122f58ee846468a6f6b48672fff92e9b upstream.

Introduce the sched_set_rq_on/offline() helpers so the runqueue
online/offline transition can be performed just as easily from an
error path as from the normal path.

No functional change.

Cc: stable@kernel.org
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240703031610.587047-4-yangyingliang@huaweicloud.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
 kernel/sched/core.c | 40 ++++++++++++++++++++++++++--------------
 1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4e10b79b882a..f17bb1738a95 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9596,6 +9596,30 @@ void set_rq_offline(struct rq *rq)
 	}
 }
 
+static inline void sched_set_rq_online(struct rq *rq, int cpu)
+{
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+}
+
+static inline void sched_set_rq_offline(struct rq *rq, int cpu)
+{
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+}
+
 /*
  * used to mark begin/end of suspend/resume:
  */
@@ -9665,7 +9689,6 @@ static inline void sched_smt_present_dec(int cpu)
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 
 	/*
 	 * Clear the balance_push callback and prepare to schedule
@@ -9694,12 +9717,7 @@ int sched_cpu_activate(unsigned int cpu)
 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
 	 *    domains.
 	 */
-	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_online(rq);
-	}
-	rq_unlock_irqrestore(rq, &rf);
+	sched_set_rq_online(rq, cpu);
 
 	return 0;
 }
@@ -9707,7 +9725,6 @@ int sched_cpu_activate(unsigned int cpu)
 int sched_cpu_deactivate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 	int ret;
 
 	/*
@@ -9738,12 +9755,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 */
 	synchronize_rcu();
 
-	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_offline(rq);
-	}
-	rq_unlock_irqrestore(rq, &rf);
+	sched_set_rq_offline(rq, cpu);
 
 	/*
 	 * When going down, decrement the number of cores with SMT present.
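
The commit message's point about being callable from "normal or error path" is easiest to see in use. Below is a minimal, illustrative sketch (not part of this commit) of how a late failure in sched_cpu_deactivate() could be unwound with a single call instead of a second copy of the open-coded lock/check/unlock sequence; the cpuset_cpu_inactive() failure shown here is an assumed example of such a late error, and the surrounding teardown work is elided.

/*
 * Illustrative sketch only -- not part of this commit. With the new
 * helpers, undoing set_rq_offline() after a late failure is one call.
 */
int sched_cpu_deactivate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	int ret;

	/* ... balance_push setup, synchronize_rcu(), etc. ... */

	sched_set_rq_offline(rq, cpu);

	ret = cpuset_cpu_inactive(cpu);	/* assumed to fail at runtime */
	if (ret) {
		/* Roll back: put the runqueue back online and bail out. */
		sched_set_rq_online(rq, cpu);
		return ret;
	}

	return 0;
}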