Skip to content

Commit 5a1263d

Browse files
Yang Yingliang authored and gregkh committed
sched/core: Introduce sched_set_rq_on/offline() helper
commit 2f02735 upstream. Introduce sched_set_rq_on/offline() helper, so it can be called in normal or error path simply. No functional change. Cc: stable@kernel.org Signed-off-by: Yang Yingliang <yangyingliang@huawei.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20240703031610.587047-4-yangyingliang@huaweicloud.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent d0c87a3 commit 5a1263d

1 file changed

Lines changed: 26 additions & 14 deletions

File tree

kernel/sched/core.c

Lines changed: 26 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -9604,6 +9604,30 @@ void set_rq_offline(struct rq *rq)
96049604
}
96059605
}
96069606

9607+
static inline void sched_set_rq_online(struct rq *rq, int cpu)
9608+
{
9609+
struct rq_flags rf;
9610+
9611+
rq_lock_irqsave(rq, &rf);
9612+
if (rq->rd) {
9613+
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9614+
set_rq_online(rq);
9615+
}
9616+
rq_unlock_irqrestore(rq, &rf);
9617+
}
9618+
9619+
static inline void sched_set_rq_offline(struct rq *rq, int cpu)
9620+
{
9621+
struct rq_flags rf;
9622+
9623+
rq_lock_irqsave(rq, &rf);
9624+
if (rq->rd) {
9625+
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9626+
set_rq_offline(rq);
9627+
}
9628+
rq_unlock_irqrestore(rq, &rf);
9629+
}
9630+
96079631
/*
96089632
* used to mark begin/end of suspend/resume:
96099633
*/
@@ -9673,7 +9697,6 @@ static inline void sched_smt_present_dec(int cpu)
96739697
int sched_cpu_activate(unsigned int cpu)
96749698
{
96759699
struct rq *rq = cpu_rq(cpu);
9676-
struct rq_flags rf;
96779700

96789701
/*
96799702
* Clear the balance_push callback and prepare to schedule
@@ -9702,20 +9725,14 @@ int sched_cpu_activate(unsigned int cpu)
97029725
* 2) At runtime, if cpuset_cpu_active() fails to rebuild the
97039726
* domains.
97049727
*/
9705-
rq_lock_irqsave(rq, &rf);
9706-
if (rq->rd) {
9707-
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9708-
set_rq_online(rq);
9709-
}
9710-
rq_unlock_irqrestore(rq, &rf);
9728+
sched_set_rq_online(rq, cpu);
97119729

97129730
return 0;
97139731
}
97149732

97159733
int sched_cpu_deactivate(unsigned int cpu)
97169734
{
97179735
struct rq *rq = cpu_rq(cpu);
9718-
struct rq_flags rf;
97199736
int ret;
97209737

97219738
/*
@@ -9746,12 +9763,7 @@ int sched_cpu_deactivate(unsigned int cpu)
97469763
*/
97479764
synchronize_rcu();
97489765

9749-
rq_lock_irqsave(rq, &rf);
9750-
if (rq->rd) {
9751-
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9752-
set_rq_offline(rq);
9753-
}
9754-
rq_unlock_irqrestore(rq, &rf);
9766+
sched_set_rq_offline(rq, cpu);
97559767

97569768
/*
97579769
* When going down, decrement the number of cores with SMT present.

0 commit comments

Comments
 (0)