sched: Get rid of lock_depth
Neil Brown pointed out that lock_depth somehow escaped the BKL removal work. Let's get rid of it now.

Note that the perf scripting utilities still have a bunch of code for dealing with common_lock_depth in tracepoints; I have left that in place in case anybody wants to use that code with older kernels.

Suggested-by: Neil Brown <neilb@suse.de>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110422111910.456c0e84@bike.lwn.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent d3bf52e998
commit 625f2a378e
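For background on the field being deleted: lock_depth was the per-task Big Kernel Lock (BKL) nesting counter. It was initialized to -1 meaning "no lock" (see the copy_process() hunk below) and tested with lock_depth >= 0 to detect BKL ownership (see the mutex spin-loop hunk). The standalone sketch below merely restates that convention; the struct and helper names (legacy_task, holds_bkl) are illustrative stand-ins, not kernel code.

#include <stdio.h>

/*
 * Illustrative sketch only: lock_depth encoded BKL ownership as a
 * nesting depth, with -1 meaning "this task does not hold the BKL",
 * mirroring the p->lock_depth = -1 initialization removed below.
 */
struct legacy_task {
        int lock_depth;         /* -1 = no lock, >= 0 = BKL held */
};

static int holds_bkl(const struct legacy_task *t)
{
        /* Same test the removed __mutex_lock_common() check performed */
        return t->lock_depth >= 0;
}

int main(void)
{
        struct legacy_task t = { .lock_depth = -1 };    /* copy_process() default */

        printf("holds BKL: %s\n", holds_bkl(&t) ? "yes" : "no");
        return 0;
}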
@@ -120,7 +120,6 @@ format:
         field:unsigned char common_flags; offset:2; size:1; signed:0;
         field:unsigned char common_preempt_count; offset:3; size:1;signed:0;
         field:int common_pid; offset:4; size:4; signed:1;
-        field:int common_lock_depth; offset:8; size:4; signed:1;

         field:unsigned long __probe_ip; offset:12; size:4; signed:0;
         field:int __probe_nargs; offset:16; size:4; signed:1;
@@ -134,7 +134,6 @@ extern struct cred init_cred;
         .stack = &init_thread_info, \
         .usage = ATOMIC_INIT(2), \
         .flags = PF_KTHREAD, \
-        .lock_depth = -1, \
         .prio = MAX_PRIO-20, \
         .static_prio = MAX_PRIO-20, \
         .normal_prio = MAX_PRIO-20, \
@@ -731,10 +731,6 @@ struct sched_info {
         /* timestamps */
         unsigned long long last_arrival,/* when we last ran on a cpu */
                            last_queued; /* when we were last queued to run */
-#ifdef CONFIG_SCHEDSTATS
-        /* BKL stats */
-        unsigned int bkl_count;
-#endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

@@ -1190,8 +1186,6 @@ struct task_struct {
         unsigned int flags; /* per process flags, defined below */
         unsigned int ptrace;

-        int lock_depth; /* BKL lock depth */
-
 #ifdef CONFIG_SMP
         struct task_struct *wake_entry;
         int on_cpu;
@@ -1103,7 +1103,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,

         posix_cpu_timers_init(p);

-        p->lock_depth = -1; /* -1 = no lock */
         do_posix_clock_monotonic_gettime(&p->start_time);
         p->real_start_time = p->start_time;
         monotonic_to_bootbased(&p->real_start_time);
@@ -162,13 +162,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         for (;;) {
                 struct task_struct *owner;

-                /*
-                 * If we own the BKL, then don't spin. The owner of
-                 * the mutex might be waiting on us to release the BKL.
-                 */
-                if (unlikely(current->lock_depth >= 0))
-                        break;
-
                 /*
                  * If there's an owner, wait for it to either
                  * release the lock or go to sleep.
@@ -4121,12 +4121,6 @@ static inline void schedule_debug(struct task_struct *prev)
         profile_hit(SCHED_PROFILING, __builtin_return_address(0));

         schedstat_inc(this_rq(), sched_count);
-#ifdef CONFIG_SCHEDSTATS
-        if (unlikely(prev->lock_depth >= 0)) {
-                schedstat_inc(this_rq(), rq_sched_info.bkl_count);
-                schedstat_inc(prev, sched_info.bkl_count);
-        }
-#endif
 }

 static void put_prev_task(struct rq *rq, struct task_struct *prev)
@@ -5852,11 +5846,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         raw_spin_unlock_irqrestore(&rq->lock, flags);

         /* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT)
-        task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
         task_thread_info(idle)->preempt_count = 0;
-#endif
+
         /*
          * The idle tasks have their own, simple scheduling class:
          */
@@ -296,9 +296,6 @@ static void print_cpu(struct seq_file *m, int cpu)
         P(ttwu_count);
         P(ttwu_local);

-        SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
-                   rq->rq_sched_info.bkl_count);
-
 #undef P
 #undef P64
 #endif
@@ -441,7 +438,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
         P(se.statistics.wait_count);
         PN(se.statistics.iowait_sum);
         P(se.statistics.iowait_count);
-        P(sched_info.bkl_count);
         P(se.nr_migrations);
         P(se.statistics.nr_migrations_cold);
         P(se.statistics.nr_failed_migrations_affine);
@@ -53,7 +53,6 @@ const char *reserved_field_names[] = {
         "common_preempt_count",
         "common_pid",
         "common_tgid",
-        "common_lock_depth",
         FIELD_STRING_IP,
         FIELD_STRING_RETIP,
         FIELD_STRING_FUNC,
@@ -63,7 +63,6 @@ The format file for the sched_wakep event defines the following fields
         field:unsigned char common_flags;
         field:unsigned char common_preempt_count;
         field:int common_pid;
-        field:int common_lock_depth;

         field:char comm[TASK_COMM_LEN];
         field:pid_t pid;
@@ -463,7 +463,6 @@ The format file for the sched_wakep event defines the following fields
         field:unsigned char common_flags;
         field:unsigned char common_preempt_count;
         field:int common_pid;
-        field:int common_lock_depth;

         field:char comm[TASK_COMM_LEN];
         field:pid_t pid;