sched/fair: Rename check_preempt_curr() to wakeup_preempt()

The name is a bit opaque - make it clear that this is about wakeup
preemption.

Also rename the ->check_preempt_curr() methods similarly.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
This commit is contained in:
Ingo Molnar 2023-09-19 10:38:21 +02:00
parent 82845683ca
commit e23edc86b0
7 changed files with 26 additions and 26 deletions

View File

@@ -2211,10 +2211,10 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio); p->sched_class->prio_changed(rq, p, oldprio);
} }
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
{ {
if (p->sched_class == rq->curr->sched_class) if (p->sched_class == rq->curr->sched_class)
rq->curr->sched_class->check_preempt_curr(rq, p, flags); rq->curr->sched_class->wakeup_preempt(rq, p, flags);
else if (sched_class_above(p->sched_class, rq->curr->sched_class)) else if (sched_class_above(p->sched_class, rq->curr->sched_class))
resched_curr(rq); resched_curr(rq);
@@ -2508,7 +2508,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
rq_lock(rq, rf); rq_lock(rq, rf);
WARN_ON_ONCE(task_cpu(p) != new_cpu); WARN_ON_ONCE(task_cpu(p) != new_cpu);
activate_task(rq, p, 0); activate_task(rq, p, 0);
check_preempt_curr(rq, p, 0); wakeup_preempt(rq, p, 0);
return rq; return rq;
} }
@@ -3390,7 +3390,7 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
deactivate_task(src_rq, p, 0); deactivate_task(src_rq, p, 0);
set_task_cpu(p, cpu); set_task_cpu(p, cpu);
activate_task(dst_rq, p, 0); activate_task(dst_rq, p, 0);
check_preempt_curr(dst_rq, p, 0); wakeup_preempt(dst_rq, p, 0);
rq_unpin_lock(dst_rq, &drf); rq_unpin_lock(dst_rq, &drf);
rq_unpin_lock(src_rq, &srf); rq_unpin_lock(src_rq, &srf);
@@ -3764,7 +3764,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
} }
activate_task(rq, p, en_flags); activate_task(rq, p, en_flags);
check_preempt_curr(rq, p, wake_flags); wakeup_preempt(rq, p, wake_flags);
ttwu_do_wakeup(p); ttwu_do_wakeup(p);
@@ -3835,7 +3835,7 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
* it should preempt the task that is current now. * it should preempt the task that is current now.
*/ */
update_rq_clock(rq); update_rq_clock(rq);
check_preempt_curr(rq, p, wake_flags); wakeup_preempt(rq, p, wake_flags);
} }
ttwu_do_wakeup(p); ttwu_do_wakeup(p);
ret = 1; ret = 1;
@@ -4854,7 +4854,7 @@ void wake_up_new_task(struct task_struct *p)
activate_task(rq, p, ENQUEUE_NOCLOCK); activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p); trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK); wakeup_preempt(rq, p, WF_FORK);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (p->sched_class->task_woken) { if (p->sched_class->task_woken) {
/* /*

View File

@@ -763,7 +763,7 @@ static inline void deadline_queue_pull_task(struct rq *rq)
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags); static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags); static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se, static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
struct rq *rq) struct rq *rq)
@@ -1175,7 +1175,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
if (dl_task(rq->curr)) if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0); wakeup_preempt_dl(rq, p, 0);
else else
resched_curr(rq); resched_curr(rq);
@@ -1939,7 +1939,7 @@ static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
* Only called when both the current and waking task are -deadline * Only called when both the current and waking task are -deadline
* tasks. * tasks.
*/ */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
int flags) int flags)
{ {
if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
@@ -2652,7 +2652,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
deadline_queue_push_tasks(rq); deadline_queue_push_tasks(rq);
#endif #endif
if (dl_task(rq->curr)) if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0); wakeup_preempt_dl(rq, p, 0);
else else
resched_curr(rq); resched_curr(rq);
} else { } else {
@@ -2721,7 +2721,7 @@ DEFINE_SCHED_CLASS(dl) = {
.dequeue_task = dequeue_task_dl, .dequeue_task = dequeue_task_dl,
.yield_task = yield_task_dl, .yield_task = yield_task_dl,
.check_preempt_curr = check_preempt_curr_dl, .wakeup_preempt = wakeup_preempt_dl,
.pick_next_task = pick_next_task_dl, .pick_next_task = pick_next_task_dl,
.put_prev_task = put_prev_task_dl, .put_prev_task = put_prev_task_dl,

View File

@@ -8007,7 +8007,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
/* /*
* This is possible from callers such as attach_tasks(), in which we * This is possible from callers such as attach_tasks(), in which we
* unconditionally check_preempt_curr() after an enqueue (which may have * unconditionally wakeup_preempt() after an enqueue (which may have
* lead to a throttle). This both saves work and prevents false * lead to a throttle). This both saves work and prevents false
* next-buddy nomination below. * next-buddy nomination below.
*/ */
@@ -8914,7 +8914,7 @@ static void attach_task(struct rq *rq, struct task_struct *p)
WARN_ON_ONCE(task_rq(p) != rq); WARN_ON_ONCE(task_rq(p) != rq);
activate_task(rq, p, ENQUEUE_NOCLOCK); activate_task(rq, p, ENQUEUE_NOCLOCK);
check_preempt_curr(rq, p, 0); wakeup_preempt(rq, p, 0);
} }
/* /*
@@ -12369,7 +12369,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
if (p->prio > oldprio) if (p->prio > oldprio)
resched_curr(rq); resched_curr(rq);
} else } else
check_preempt_curr(rq, p, 0); wakeup_preempt(rq, p, 0);
} }
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -12471,7 +12471,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
if (task_current(rq, p)) if (task_current(rq, p))
resched_curr(rq); resched_curr(rq);
else else
check_preempt_curr(rq, p, 0); wakeup_preempt(rq, p, 0);
} }
} }
@@ -12830,7 +12830,7 @@ DEFINE_SCHED_CLASS(fair) = {
.yield_task = yield_task_fair, .yield_task = yield_task_fair,
.yield_to_task = yield_to_task_fair, .yield_to_task = yield_to_task_fair,
.check_preempt_curr = check_preempt_wakeup_fair, .wakeup_preempt = check_preempt_wakeup_fair,
.pick_next_task = __pick_next_task_fair, .pick_next_task = __pick_next_task_fair,
.put_prev_task = put_prev_task_fair, .put_prev_task = put_prev_task_fair,

View File

@@ -400,7 +400,7 @@ balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
/* /*
* Idle tasks are unconditionally rescheduled: * Idle tasks are unconditionally rescheduled:
*/ */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags) static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
{ {
resched_curr(rq); resched_curr(rq);
} }
@@ -481,7 +481,7 @@ DEFINE_SCHED_CLASS(idle) = {
/* dequeue is not valid, we print a debug message there: */ /* dequeue is not valid, we print a debug message there: */
.dequeue_task = dequeue_task_idle, .dequeue_task = dequeue_task_idle,
.check_preempt_curr = check_preempt_curr_idle, .wakeup_preempt = wakeup_preempt_idle,
.pick_next_task = pick_next_task_idle, .pick_next_task = pick_next_task_idle,
.put_prev_task = put_prev_task_idle, .put_prev_task = put_prev_task_idle,

View File

@@ -953,7 +953,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
/* /*
* When we're idle and a woken (rt) task is * When we're idle and a woken (rt) task is
* throttled check_preempt_curr() will set * throttled wakeup_preempt() will set
* skip_update and the time between the wakeup * skip_update and the time between the wakeup
* and this unthrottle will get accounted as * and this unthrottle will get accounted as
* 'runtime'. * 'runtime'.
@@ -1715,7 +1715,7 @@ static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
/* /*
* Preempt the current task with a newly woken task if needed: * Preempt the current task with a newly woken task if needed:
*/ */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
{ {
if (p->prio < rq->curr->prio) { if (p->prio < rq->curr->prio) {
resched_curr(rq); resched_curr(rq);
@@ -2702,7 +2702,7 @@ DEFINE_SCHED_CLASS(rt) = {
.dequeue_task = dequeue_task_rt, .dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt, .yield_task = yield_task_rt,
.check_preempt_curr = check_preempt_curr_rt, .wakeup_preempt = wakeup_preempt_rt,
.pick_next_task = pick_next_task_rt, .pick_next_task = pick_next_task_rt,
.put_prev_task = put_prev_task_rt, .put_prev_task = put_prev_task_rt,

View File

@@ -2236,7 +2236,7 @@ struct sched_class {
void (*yield_task) (struct rq *rq); void (*yield_task) (struct rq *rq);
bool (*yield_to_task)(struct rq *rq, struct task_struct *p); bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
struct task_struct *(*pick_next_task)(struct rq *rq); struct task_struct *(*pick_next_task)(struct rq *rq);
@@ -2510,7 +2510,7 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
extern void activate_task(struct rq *rq, struct task_struct *p, int flags); extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
#ifdef CONFIG_PREEMPT_RT #ifdef CONFIG_PREEMPT_RT
#define SCHED_NR_MIGRATE_BREAK 8 #define SCHED_NR_MIGRATE_BREAK 8

View File

@@ -23,7 +23,7 @@ balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
static void static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags)
{ {
/* we're never preempted */ /* we're never preempted */
} }
@@ -120,7 +120,7 @@ DEFINE_SCHED_CLASS(stop) = {
.dequeue_task = dequeue_task_stop, .dequeue_task = dequeue_task_stop,
.yield_task = yield_task_stop, .yield_task = yield_task_stop,
.check_preempt_curr = check_preempt_curr_stop, .wakeup_preempt = wakeup_preempt_stop,
.pick_next_task = pick_next_task_stop, .pick_next_task = pick_next_task_stop,
.put_prev_task = put_prev_task_stop, .put_prev_task = put_prev_task_stop,