sched: Simplify sched_core_cpu_{starting,deactivate}()

Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lore.kernel.org/r/20230801211812.371787909@infradead.org
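The guard() used below comes from the kernel's scope-based cleanup infrastructure (include/linux/cleanup.h): the unlock is attached to a local variable and runs automatically on every return path, which is what lets the unlock: labels and gotos go away. As a rough illustration only, here is a minimal userspace sketch of the same idea built on the GCC/Clang cleanup attribute and a pthread mutex; the mutex_guard names are invented for this sketch and are not the kernel API.

/*
 * Minimal userspace sketch of the scope-guard idea, assuming GCC/Clang's
 * __attribute__((cleanup)). Illustrative only; the kernel's real helpers
 * are DEFINE_LOCK_GUARD_1()/guard() in include/linux/cleanup.h.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct mutex_guard {
        pthread_mutex_t *lock;
};

static inline struct mutex_guard mutex_guard_init(pthread_mutex_t *m)
{
        pthread_mutex_lock(m);          /* "constructor": take the lock */
        return (struct mutex_guard){ .lock = m };
}

static inline void mutex_guard_exit(struct mutex_guard *g)
{
        pthread_mutex_unlock(g->lock);  /* runs automatically at scope exit */
}

static int critical_section(int early)
{
        struct mutex_guard g __attribute__((cleanup(mutex_guard_exit))) =
                mutex_guard_init(&lock);

        if (early)
                return -1;      /* no goto/unlock label needed: cleanup runs here */

        printf("doing work under the lock\n");
        return 0;               /* ... and here */
}

int main(void)
{
        critical_section(1);
        critical_section(0);
        return 0;
}

In the patch itself, DEFINE_LOCK_GUARD_1(core_lock, ...) generates the equivalent of the struct/init/exit trio above for sched_core_lock()/sched_core_unlock(), and guard(core_lock)(&cpu) declares the attributed local variable.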
commit 7170509cad
parent b4e1fa1e14
Author: Peter Zijlstra
Date: 2023-08-01 22:41:30 +02:00


@@ -6400,20 +6400,24 @@ static void queue_core_balance(struct rq *rq)
 	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
 }
 
+DEFINE_LOCK_GUARD_1(core_lock, int,
+		    sched_core_lock(*_T->lock, &_T->flags),
+		    sched_core_unlock(*_T->lock, &_T->flags),
+		    unsigned long flags)
+
 static void sched_core_cpu_starting(unsigned int cpu)
 {
 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
-	unsigned long flags;
 	int t;
 
-	sched_core_lock(cpu, &flags);
+	guard(core_lock)(&cpu);
 
 	WARN_ON_ONCE(rq->core != rq);
 
 	/* if we're the first, we'll be our own leader */
 	if (cpumask_weight(smt_mask) == 1)
-		goto unlock;
+		return;
 
 	/* find the leader */
 	for_each_cpu(t, smt_mask) {
@@ -6427,7 +6431,7 @@ static void sched_core_cpu_starting(unsigned int cpu)
 	}
 
 	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
-		goto unlock;
+		return;
 
 	/* install and validate core_rq */
 	for_each_cpu(t, smt_mask) {
@@ -6438,29 +6442,25 @@ static void sched_core_cpu_starting(unsigned int cpu)
 
 		WARN_ON_ONCE(rq->core != core_rq);
 	}
-
-unlock:
-	sched_core_unlock(cpu, &flags);
 }
 
 static void sched_core_cpu_deactivate(unsigned int cpu)
 {
 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
-	unsigned long flags;
 	int t;
 
-	sched_core_lock(cpu, &flags);
+	guard(core_lock)(&cpu);
 
 	/* if we're the last man standing, nothing to do */
 	if (cpumask_weight(smt_mask) == 1) {
 		WARN_ON_ONCE(rq->core != rq);
-		goto unlock;
+		return;
 	}
 
 	/* if we're not the leader, nothing to do */
 	if (rq->core != rq)
-		goto unlock;
+		return;
 
 	/* find a new leader */
 	for_each_cpu(t, smt_mask) {
@@ -6471,7 +6471,7 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
 	}
 
 	if (WARN_ON_ONCE(!core_rq)) /* impossible */
-		goto unlock;
+		return;
 
 	/* copy the shared state to the new leader */
 	core_rq->core_task_seq = rq->core_task_seq;
@@ -6493,9 +6493,6 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
 		rq = cpu_rq(t);
 		rq->core = core_rq;
 	}
-
-unlock:
-	sched_core_unlock(cpu, &flags);
 }
 
 static inline void sched_core_cpu_dying(unsigned int cpu)