[PATCH] sched: ->task_new cleanup
Make sched_class.task_new == NULL a 'default method'; this allows the removal of task_new_rt.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent: 4e6f96f313
commit: cad60d93e1
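The cleanup relies on the caller treating a NULL ->task_new pointer as "no special handling": wake_up_new_task() now checks the pointer and falls back to plain activate_task(), so the RT class can drop its trivial task_new_rt() stub entirely. Below is a minimal standalone sketch of that "NULL method == default method" pattern; all names (sched_class_demo, wake_up_new_task_demo, etc.) are hypothetical stand-ins, not kernel code.

/*
 * Standalone sketch (hypothetical names, not kernel code) of the
 * "NULL method pointer == default method" pattern used by this patch.
 */
#include <stdio.h>

struct rq_demo { int nr_running; };
struct task_demo { const char *name; };

struct sched_class_demo {
	const char *name;
	/* NULL means: no special new-task handling, use the default */
	void (*task_new)(struct rq_demo *rq, struct task_demo *p);
};

static void task_new_fair_demo(struct rq_demo *rq, struct task_demo *p)
{
	printf("fair: start-up management for %s\n", p->name);
	rq->nr_running++;
}

/* The fair class supplies the hook ... */
static struct sched_class_demo fair_class_demo = {
	.name		= "fair",
	.task_new	= task_new_fair_demo,
};

/* ... the RT class simply omits it; no empty stub needed. */
static struct sched_class_demo rt_class_demo = {
	.name		= "rt",
};

static void wake_up_new_task_demo(struct sched_class_demo *class,
				  struct rq_demo *rq, struct task_demo *p)
{
	if (!class->task_new) {
		/* default method: plain activation */
		printf("%s: default activation for %s\n", class->name, p->name);
		rq->nr_running++;
	} else {
		class->task_new(rq, p);
	}
}

int main(void)
{
	struct rq_demo rq = { 0 };
	struct task_demo a = { "a" }, b = { "b" };

	wake_up_new_task_demo(&fair_class_demo, &rq, &a); /* hook path    */
	wake_up_new_task_demo(&rt_class_demo, &rq, &b);   /* default path */
	return 0;
}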
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -874,7 +874,7 @@ struct sched_class {
 
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p, u64 now);
 };
 
 struct load_weight {
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1641,22 +1641,27 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	unsigned long flags;
 	struct rq *rq;
 	int this_cpu;
+	u64 now;
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	this_cpu = smp_processor_id(); /* parent's CPU */
+	now = rq_clock(rq);
 
 	p->prio = effective_prio(p);
 
-	if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
-			task_cpu(p) != this_cpu || !current->se.on_rq) {
+	if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
+			(clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
+			!current->se.on_rq) {
+
 		activate_task(rq, p, 0);
 	} else {
 		/*
 		 * Let the scheduling class do new task startup
 		 * management (if any):
 		 */
-		p->sched_class->task_new(rq, p);
+		p->sched_class->task_new(rq, p, now);
+		inc_nr_running(p, rq, now);
 	}
 	check_preempt_curr(rq, p);
 	task_rq_unlock(rq, &flags);
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1041,11 +1041,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
  * monopolize the CPU. Note: the parent runqueue is locked,
  * the child is not running yet.
  */
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 	struct sched_entity *se = &p->se;
-	u64 now = rq_clock(rq);
 
 	sched_info_queued(p);
 
@@ -1072,7 +1071,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	p->se.wait_runtime = -(sysctl_sched_granularity / 2);
 
 	__enqueue_entity(cfs_rq, se);
-	inc_nr_running(p, rq, now);
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
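A second detail in this hunk: task_new_fair() no longer reads rq_clock() itself. The caller samples the clock once in wake_up_new_task() and passes now down, so ->task_new() and inc_nr_running() act on the same timestamp. A minimal sketch of that hoisting pattern follows; clock_ns_demo, enqueue_demo, and account_demo are hypothetical names, not kernel code.

/* Sketch (hypothetical names): read a clock once in the caller and
 * thread the value through, so all callees act on one consistent
 * timestamp instead of each re-reading a possibly advanced clock. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t clock_ns_demo(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void enqueue_demo(uint64_t now)
{
	printf("enqueue at %llu ns\n", (unsigned long long)now);
}

static void account_demo(uint64_t now)
{
	printf("account at %llu ns\n", (unsigned long long)now);
}

int main(void)
{
	uint64_t now = clock_ns_demo();	/* sampled once ...            */
	enqueue_demo(now);		/* ... both callees operate on */
	account_demo(now);		/* the same timestamp          */
	return 0;
}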
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -229,15 +229,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 		requeue_task_rt(rq, p);
 }
 
-/*
- * No parent/child timeslice management necessary for RT tasks,
- * just activate them:
- */
-static void task_new_rt(struct rq *rq, struct task_struct *p)
-{
-	activate_task(rq, p, 1);
-}
-
 static struct sched_class rt_sched_class __read_mostly = {
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
@@ -251,5 +242,4 @@ static struct sched_class rt_sched_class __read_mostly = {
 	.load_balance		= load_balance_rt,
 
 	.task_tick		= task_tick_rt,
-	.task_new		= task_new_rt,
 };
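Why deleting the .task_new line is enough: with C designated initializers, members that are not explicitly named are zero-initialized, so rt_sched_class ends up with a NULL ->task_new, which the new check in wake_up_new_task() treats as "use the default activation path". A tiny illustration (ops_demo is a hypothetical stand-in for sched_class):

#include <assert.h>
#include <stddef.h>

struct ops_demo {			/* hypothetical stand-in */
	void (*task_new)(void);
	void (*task_tick)(void);
};

static void tick_demo(void) { }

/* .task_new omitted: C zero-initializes unnamed members to NULL */
static struct ops_demo rt_ops_demo = {
	.task_tick = tick_demo,
};

int main(void)
{
	assert(rt_ops_demo.task_new == NULL);
	return 0;
}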