blktrace: switch trace spinlock to a raw spinlock
The running_trace_lock protects running_trace_list and is acquired within the tracepoint, which implies disabled preemption. A spinlock_t cannot be acquired with preemption disabled on PREEMPT_RT because it becomes a sleeping lock. The runtime of the tracepoint depends on the number of entries in running_trace_list and has no upper bound. The blk-tracer is considered debug code, and higher latencies here are acceptable.

Make running_trace_lock a raw_spinlock_t.

Signed-off-by: Wander Lairson Costa <wander@redhat.com>
Link: https://lore.kernel.org/r/20211220192827.38297-1-wander@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
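As a minimal sketch of the pattern this change applies (not part of the patch; the list, lock, and function names below are illustrative, not kernel symbols): a raw_spinlock_t keeps its spinning semantics on PREEMPT_RT, so it may be taken from a preemption-disabled context such as a tracepoint, where a spinlock_t would turn into a sleeping lock.

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static LIST_HEAD(example_list);
    static DEFINE_RAW_SPINLOCK(example_lock);

    struct example_item {
            struct list_head node;
    };

    /* May be called with preemption disabled, e.g. from a tracepoint. */
    static void walk_example_list(void)
    {
            struct example_item *it;
            unsigned long flags;

            /* A raw spinlock never sleeps, so this is safe on PREEMPT_RT. */
            raw_spin_lock_irqsave(&example_lock, flags);
            list_for_each_entry(it, &example_list, node) {
                    /* per-entry work; runtime grows with the list length */
            }
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }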
This commit is contained in:
parent 5ef1630586
commit 361c81dbc5
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -34,7 +34,7 @@ static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
 static LIST_HEAD(running_trace_list);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
 
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC	0x1
@@ -121,12 +121,12 @@ static void trace_note_tsk(struct task_struct *tsk)
 	struct blk_trace *bt;
 
 	tsk->btrace_seq = blktrace_seq;
-	spin_lock_irqsave(&running_trace_lock, flags);
+	raw_spin_lock_irqsave(&running_trace_lock, flags);
 	list_for_each_entry(bt, &running_trace_list, running_list) {
 		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 			   sizeof(tsk->comm), 0);
 	}
-	spin_unlock_irqrestore(&running_trace_lock, flags);
+	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -666,9 +666,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
-			spin_lock_irq(&running_trace_lock);
+			raw_spin_lock_irq(&running_trace_lock);
 			list_add(&bt->running_list, &running_trace_list);
-			spin_unlock_irq(&running_trace_lock);
+			raw_spin_unlock_irq(&running_trace_lock);
 
 			trace_note_time(bt);
 			ret = 0;
@@ -676,9 +676,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 	} else {
 		if (bt->trace_state == Blktrace_running) {
 			bt->trace_state = Blktrace_stopped;
-			spin_lock_irq(&running_trace_lock);
+			raw_spin_lock_irq(&running_trace_lock);
 			list_del_init(&bt->running_list);
-			spin_unlock_irq(&running_trace_lock);
+			raw_spin_unlock_irq(&running_trace_lock);
 			relay_flush(bt->rchan);
 			ret = 0;
 		}
@@ -1608,9 +1608,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
 
 	if (bt->trace_state == Blktrace_running) {
 		bt->trace_state = Blktrace_stopped;
-		spin_lock_irq(&running_trace_lock);
+		raw_spin_lock_irq(&running_trace_lock);
 		list_del_init(&bt->running_list);
-		spin_unlock_irq(&running_trace_lock);
+		raw_spin_unlock_irq(&running_trace_lock);
 		relay_flush(bt->rchan);
 	}
 