mirror of https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 12:28:41 +08:00
sched: rework enqueue/dequeue_entity() to get rid of set_curr_task()
Rework enqueue/dequeue_entity() to get rid of sched_class::set_curr_task().
This simplifies sched_setscheduler(), rt_mutex_setprio() and
sched_move_task().

   text    data     bss     dec     hex filename
  24330    2734      20   27084    69cc sched.o.before
  24233    2730      20   26983    6967 sched.o.after

Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent 4530d7ab0f
commit f6b53205e1
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -871,7 +871,6 @@ struct sched_class {
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *all_pinned, int *this_best_prio);
 
-	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
 };
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3915,8 +3915,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, on_rq, running;
 	unsigned long flags;
+	int oldprio, on_rq;
 	struct rq *rq;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -3926,12 +3926,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, p, 0);
-		if (running)
-			p->sched_class->put_prev_task(rq, p);
-	}
 
 	if (rt_prio(prio))
 		p->sched_class = &rt_sched_class;
@@ -3941,15 +3937,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	p->prio = prio;
 
 	if (on_rq) {
-		if (running)
-			p->sched_class->set_curr_task(rq);
 		enqueue_task(rq, p, 0);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
 		 * this runqueue and our priority is higher than the current's
 		 */
-		if (running) {
+		if (task_running(rq, p)) {
 			if (p->prio > oldprio)
 				resched_task(rq->curr);
 		} else {
@@ -4155,7 +4149,7 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 int sched_setscheduler(struct task_struct *p, int policy,
 		       struct sched_param *param)
 {
-	int retval, oldprio, oldpolicy = -1, on_rq, running;
+	int retval, oldprio, oldpolicy = -1, on_rq;
 	unsigned long flags;
 	struct rq *rq;
 
@@ -4237,24 +4231,20 @@ recheck:
 	}
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
-	if (on_rq) {
+	if (on_rq)
 		deactivate_task(rq, p, 0);
-		if (running)
-			p->sched_class->put_prev_task(rq, p);
-	}
+
 	oldprio = p->prio;
 	__setscheduler(rq, p, policy, param->sched_priority);
+
 	if (on_rq) {
-		if (running)
-			p->sched_class->set_curr_task(rq);
 		activate_task(rq, p, 0);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
 		 * this runqueue and our priority is higher than the current's
 		 */
-		if (running) {
+		if (task_running(rq, p)) {
 			if (p->prio > oldprio)
 				resched_task(rq->curr);
 		} else {
@@ -6855,19 +6845,13 @@ static void sched_move_task(struct container_subsys *ss, struct container *cont,
 	running = task_running(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, tsk, 0);
-		if (unlikely(running))
-			tsk->sched_class->put_prev_task(rq, tsk);
-	}
 
 	set_task_cfs_rq(tsk);
 
-	if (on_rq) {
-		if (unlikely(running))
-			tsk->sched_class->set_curr_task(rq);
+	if (on_rq)
 		enqueue_task(rq, tsk, 0);
-	}
 
 done:
 	task_rq_unlock(rq, &flags);
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -472,8 +472,19 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 }
 
 static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+	       int wakeup, int set_curr)
 {
+	/*
+	 * In case of the 'current'.
+	 */
+	if (unlikely(set_curr)) {
+		update_stats_curr_start(cfs_rq, se);
+		cfs_rq->curr = se;
+		account_entity_enqueue(cfs_rq, se);
+		return;
+	}
+
 	/*
 	 * Update the fair clock.
 	 */
@@ -485,8 +496,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
-	if (se != cfs_rq->curr)
-		__enqueue_entity(cfs_rq, se);
+	__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 }
 
@@ -506,8 +516,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 		}
 	}
 #endif
-	if (se != cfs_rq->curr)
+	if (likely(se != cfs_rq->curr))
 		__dequeue_entity(cfs_rq, se);
+	else {
+		update_stats_curr_end(cfs_rq, se);
+		cfs_rq->curr = NULL;
+	}
 	account_entity_dequeue(cfs_rq, se);
 }
 
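Taken together, the two hunks above encode a single invariant: the running
entity is never kept in the rb-tree; it is tracked only through cfs_rq->curr.
Enqueueing the running task installs it as curr and returns early; dequeueing
it clears the pointer instead of touching the tree. A minimal userspace model
of that bookkeeping (all names and types here are illustrative stand-ins, not
kernel code; the rb-tree is modeled as a singly linked list):

#include <assert.h>
#include <stddef.h>

struct entity { struct entity *next; };

struct cfs_rq {
	struct entity *tree;	/* stand-in for the rb-tree of queued entities */
	struct entity *curr;	/* the running entity, kept out of the tree */
};

static void enqueue_entity(struct cfs_rq *rq, struct entity *se, int set_curr)
{
	if (set_curr) {			/* enqueuing the task that is running */
		rq->curr = se;
		return;
	}
	se->next = rq->tree;		/* normal path: link into the queue */
	rq->tree = se;
}

static void dequeue_entity(struct cfs_rq *rq, struct entity *se)
{
	if (se != rq->curr) {		/* normal path: unlink from the queue */
		struct entity **pp = &rq->tree;
		while (*pp && *pp != se)
			pp = &(*pp)->next;
		if (*pp)
			*pp = se->next;
	} else {
		rq->curr = NULL;	/* running entity: only clear the link */
	}
}

int main(void)
{
	struct cfs_rq rq = { NULL, NULL };
	struct entity a = { NULL }, b = { NULL };

	enqueue_entity(&rq, &a, 1);	/* 'a' is current: never enters the queue */
	enqueue_entity(&rq, &b, 0);	/* 'b' waits in the queue */
	assert(rq.curr == &a && rq.tree == &b);

	dequeue_entity(&rq, &a);	/* dequeueing curr just clears the pointer */
	assert(rq.curr == NULL && rq.tree == &b);
	return 0;
}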
@@ -689,12 +703,17 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+	int set_curr = 0;
+
+	/* Are we enqueuing the current task? */
+	if (unlikely(task_running(rq, p)))
+		set_curr = 1;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
 			break;
 		cfs_rq = cfs_rq_of(se);
-		enqueue_entity(cfs_rq, se, wakeup);
+		enqueue_entity(cfs_rq, se, wakeup, set_curr);
 	}
 }
@@ -742,7 +761,7 @@ static void yield_task_fair(struct rq *rq)
 	 * position within the tree:
 	 */
 	dequeue_entity(cfs_rq, se, 0);
-	enqueue_entity(cfs_rq, se, 0);
+	enqueue_entity(cfs_rq, se, 0, 1);
 
 	return;
 }
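One detail worth noting in the two hunks above: the call sites establish the
new set_curr argument differently. enqueue_task_fair() has to test whether the
task being enqueued is the one on the CPU, whereas yield_task_fair() can pass
1 unconditionally, because the yielding task is by definition the current one.
Side by side (excerpted from the hunks above):

	/* enqueue_task_fair(): the task may or may not be running */
	if (unlikely(task_running(rq, p)))
		set_curr = 1;
	...
	enqueue_entity(cfs_rq, se, wakeup, set_curr);

	/* yield_task_fair(): re-queueing 'current', so set_curr == 1 */
	dequeue_entity(cfs_rq, se, 0);
	enqueue_entity(cfs_rq, se, 0, 1);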
@@ -985,29 +1004,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	resched_task(rq->curr);
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_curr_task_fair(struct rq *rq)
-{
-	struct sched_entity *se = &rq->curr->se;
-
-	for_each_sched_entity(se)
-		set_next_entity(cfs_rq_of(se), se);
-}
-#else
-static void set_curr_task_fair(struct rq *rq)
-{
-	struct sched_entity *se = &rq->curr->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
-	cfs_rq->curr = se;
-}
-#endif
-
 /*
  * All the scheduling class methods:
  */
@@ -1023,7 +1019,6 @@ struct sched_class fair_sched_class __read_mostly = {
 
 	.load_balance		= load_balance_fair,
 
-	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
 	.task_new		= task_new_fair,
 };
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -50,10 +50,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
 }
 
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -70,7 +66,6 @@ static struct sched_class idle_sched_class __read_mostly = {
 
 	.load_balance		= load_balance_idle,
 
-	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
 	/* no .task_new for idle tasks */
 };
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -218,10 +218,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	}
 }
 
-static void set_curr_task_rt(struct rq *rq)
-{
-}
-
 static struct sched_class rt_sched_class __read_mostly = {
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
@@ -234,6 +230,5 @@ static struct sched_class rt_sched_class __read_mostly = {
 
 	.load_balance		= load_balance_rt,
 
-	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,
 };