Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: release buddies on yield
  fix for account_group_exec_runtime(), make sure ->signal can't be freed under rq->lock
  sched: clean up debug info
commit 2f96cb57cd
include/linux/sched.h

@@ -247,6 +247,7 @@ extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(void);
+extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
kernel/exit.c

@@ -141,6 +141,11 @@ static void __exit_signal(struct task_struct *tsk)
 	if (sig) {
 		flush_sigqueue(&sig->shared_pending);
 		taskstats_tgid_free(sig);
+		/*
+		 * Make sure ->signal can't go away under rq->lock,
+		 * see account_group_exec_runtime().
+		 */
+		task_rq_unlock_wait(tsk);
 		__cleanup_signal(sig);
 	}
 }
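Note on ordering: earlier in __exit_signal(), tsk->signal is cleared under the sighand lock, but a timer tick on another CPU may already have sampled the pointer and can still be charging exec runtime to the signal_struct under its rq->lock. Waiting for that rq->lock to fall free before __cleanup_signal() frees the structure is what closes the use-after-free window.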
kernel/sched.c

@@ -399,7 +399,7 @@ struct cfs_rq {
 	 */
 	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
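nr_spread_over is a schedstats-style debug counter, so an unsigned int is plenty; the companion hunk in kernel/sched_debug.c below switches its SEQ_printf() format from "%ld" to "%d" to match the new type.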
@@ -969,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
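The helper pairs an explicit smp_mb() with spin_unlock_wait(), which only spins until the current holder of rq->lock drops the lock and is not itself a full barrier. As a rough userspace analogue of the overall pattern -- wait out any holder of a lock before freeing state that holders may dereference -- here is a minimal sketch in plain C with pthreads. All names are invented for illustration, and a lock+unlock pair stands in for spin_unlock_wait(), which pthreads does not provide:

/*
 * Userspace sketch of the task_rq_unlock_wait() pattern (NOT kernel
 * code): a reaper must not free shared state while a reader may still
 * be using it under a spinlock taken in a different locking domain.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static pthread_spinlock_t lock;		/* plays the role of rq->lock */
static int * _Atomic shared_stats;	/* plays the role of tsk->signal */

/* Reader: dereferences the pointer only while holding the lock,
 * as account_group_exec_runtime() does under rq->lock. */
static void *tick_thread(void *arg)
{
	(void)arg;
	pthread_spin_lock(&lock);
	int *stats = atomic_load(&shared_stats);
	if (stats)			/* bail out if already detached */
		(*stats)++;
	pthread_spin_unlock(&lock);
	return NULL;
}

/* Writer: detach the pointer, then wait out any current lock holder
 * before freeing -- the job task_rq_unlock_wait() does in the kernel. */
static void reap(void)
{
	int *dead = atomic_exchange(&shared_stats, NULL);

	/* Emulate spin_unlock_wait(): once we can take and drop the
	 * lock, no critical section that saw the old pointer is live. */
	pthread_spin_lock(&lock);
	pthread_spin_unlock(&lock);
	free(dead);			/* now safe: no reader holds it */
}

int main(void)
{
	pthread_t t;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	atomic_store(&shared_stats, (int *)calloc(1, sizeof(int)));
	pthread_create(&t, NULL, tick_thread, NULL);
	reap();
	pthread_join(&t, NULL);
	pthread_spin_destroy(&lock);
	return 0;
}

Because the lock/unlock emulation is a full acquire/release pair, the userspace version needs no separate memory barrier, unlike the kernel helper's smp_mb().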
kernel/sched_debug.c

@@ -144,7 +144,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	last = __pick_last_entity(cfs_rq);
 	if (last)
 		max_vruntime = last->vruntime;
-	min_vruntime = rq->cfs.min_vruntime;
+	min_vruntime = cfs_rq->min_vruntime;
 	rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
 	spin_unlock_irqrestore(&rq->lock, flags);
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
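Under CONFIG_FAIR_GROUP_SCHED each CPU has one cfs_rq per task group and print_cfs_rq() runs for every one of them; reading rq->cfs.min_vruntime reported the root runqueue's value for each group, whereas cfs_rq->min_vruntime is the value of the runqueue actually being dumped.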
@@ -161,26 +161,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			SPLIT_NS(spread0));
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SCHEDSTATS
-#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
-
-	P(yld_exp_empty);
-	P(yld_act_empty);
-	P(yld_both_empty);
-	P(yld_count);
-
-	P(sched_switch);
-	P(sched_count);
-	P(sched_goidle);
-
-	P(ttwu_count);
-	P(ttwu_local);
-
-	P(bkl_count);
-
-#undef P
-#endif
-	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
+	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
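Everything behind these P() entries lives on struct rq, not struct cfs_rq -- note the macro expands to rq->n. Printing per-CPU counters once per cfs_rq duplicated them for every task group, so the next hunk moves the block, unchanged, into print_cpu(), which runs once per CPU.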
@@ -260,6 +242,25 @@ static void print_cpu(struct seq_file *m, int cpu)
 #undef P
 #undef PN
 
+#ifdef CONFIG_SCHEDSTATS
+#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
+
+	P(yld_exp_empty);
+	P(yld_act_empty);
+	P(yld_both_empty);
+	P(yld_count);
+
+	P(sched_switch);
+	P(sched_count);
+	P(sched_goidle);
+
+	P(ttwu_count);
+	P(ttwu_local);
+
+	P(bkl_count);
+
+#undef P
+#endif
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
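The P() macro leans on the preprocessor's stringizing operator: #n turns the member name into the printed label while n itself selects the struct field. A standalone illustration of the same trick, using an invented rq_stats struct rather than the kernel's:

#include <stdio.h>

/* Invented stand-in for a few of the rq schedstat fields. */
struct rq_stats {
	int yld_count;
	int sched_count;
	int ttwu_count;
};

/* Same shape as the kernel's P(): #n stringizes the member name. */
#define P(n) printf("  .%-30s: %d\n", #n, rq->n)

int main(void)
{
	struct rq_stats stats = { 3, 42, 17 };
	struct rq_stats *rq = &stats;

	P(yld_count);	/* prints "  .yld_count ... : 3" */
	P(sched_count);
	P(ttwu_count);

	return 0;
}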
kernel/sched_fair.c

@@ -716,6 +716,15 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 		__enqueue_entity(cfs_rq, se);
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	if (cfs_rq->last == se)
+		cfs_rq->last = NULL;
+
+	if (cfs_rq->next == se)
+		cfs_rq->next = NULL;
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
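For context, pick_next_entity() prefers the cfs_rq->next and cfs_rq->last buddy hints over the leftmost entity in the rbtree, so a stale hint can hand the CPU straight back to an entity that just asked to give it up. A toy model of that interaction -- heavily simplified, invented types, not the kernel's code:

#include <stdio.h>
#include <stddef.h>

struct entity {
	const char *name;
	int on_rq;			/* still queued? */
};

struct toy_cfs_rq {
	struct entity *fair_pick;	/* stand-in for the leftmost entity */
	struct entity *next, *last;	/* buddy hints */
};

/* Prefer the buddies over the fair pick, like pick_next_entity(). */
static struct entity *pick(struct toy_cfs_rq *cfs_rq)
{
	if (cfs_rq->next && cfs_rq->next->on_rq)
		return cfs_rq->next;
	if (cfs_rq->last && cfs_rq->last->on_rq)
		return cfs_rq->last;
	return cfs_rq->fair_pick;
}

/* Same logic as the new kernel helper, on the toy types. */
static void clear_buddies(struct toy_cfs_rq *cfs_rq, struct entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;
	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

int main(void)
{
	struct entity yielder = { "yielder", 1 };
	struct entity other   = { "other",   1 };
	struct toy_cfs_rq cfs_rq = { &other, NULL, &yielder };

	/* Stale "last" hint: the yielding task is picked right back. */
	printf("before clear_buddies: %s\n", pick(&cfs_rq)->name);

	/* The fix: drop the hints so yield actually gives up the CPU. */
	clear_buddies(&cfs_rq, &yielder);
	printf("after  clear_buddies: %s\n", pick(&cfs_rq)->name);
	return 0;
}

Run, the toy picks "yielder" before the clear and "other" after it.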
@@ -738,11 +747,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 #endif
 	}
 
-	if (cfs_rq->last == se)
-		cfs_rq->last = NULL;
-
-	if (cfs_rq->next == se)
-		cfs_rq->next = NULL;
+	clear_buddies(cfs_rq, se);
 
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
@@ -977,6 +982,8 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
+	clear_buddies(cfs_rq, se);
+
 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		update_rq_clock(rq);
 		/*
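This is the "release buddies on yield" part of the merge: yield_task_fair() moves the task toward the right of the tree, but without dropping cfs_rq->next/last a stale buddy hint could make pick_next_entity() choose the yielding task again immediately, turning sched_yield() into a no-op. dequeue_entity() previously cleared the hints inline; factoring that into clear_buddies() lets the yield path reuse it.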