mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-06 02:24:14 +08:00
media: rc: fix race condition in ir_raw_event_store_edge() handling
There is a possible race condition between the IR timeout being generated from the timer, and new IR arriving. This could result in the timeout being added to the kfifo after new IR arrives. On top of that, there is concurrent write access to the kfifo from ir_raw_event_store_edge() and the timer. Signed-off-by: Sean Young <sean@mess.org> Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
This commit is contained in:
parent
5817b3d15e
commit
e3e389f931
@ -50,8 +50,9 @@ struct ir_raw_event_ctrl {
|
||||
DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE);
|
||||
ktime_t last_event; /* when last event occurred */
|
||||
struct rc_dev *dev; /* pointer to the parent rc_dev */
|
||||
/* edge driver */
|
||||
struct timer_list edge_handle;
|
||||
/* handle delayed ir_raw_event_store_edge processing */
|
||||
spinlock_t edge_spinlock;
|
||||
struct timer_list edge_handle;
|
||||
|
||||
/* raw decoder state follows */
|
||||
struct ir_raw_event prev_ev;
|
||||
|
@ -101,6 +101,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
|
||||
ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
|
||||
ev.pulse = !pulse;
|
||||
|
||||
spin_lock(&dev->raw->edge_spinlock);
|
||||
rc = ir_raw_event_store(dev, &ev);
|
||||
|
||||
dev->raw->last_event = now;
|
||||
@ -112,6 +113,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
|
||||
mod_timer(&dev->raw->edge_handle,
|
||||
jiffies + msecs_to_jiffies(15));
|
||||
}
|
||||
spin_unlock(&dev->raw->edge_spinlock);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -462,12 +464,26 @@ int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
|
||||
}
|
||||
EXPORT_SYMBOL(ir_raw_encode_scancode);
|
||||
|
||||
static void edge_handle(struct timer_list *t)
|
||||
/**
|
||||
* ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
|
||||
*
|
||||
* @t: timer_list
|
||||
*
|
||||
* This callback is armed by ir_raw_event_store_edge(). It does two things:
|
||||
* first of all, rather than calling ir_raw_event_handle() for each
|
||||
* edge and waking up the rc thread, 15 ms after the first edge
|
||||
* ir_raw_event_handle() is called. Secondly, generate a timeout event
|
||||
* if no more IR is received after the rc_dev timeout.
|
||||
*/
|
||||
static void ir_raw_edge_handle(struct timer_list *t)
|
||||
{
|
||||
struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
|
||||
struct rc_dev *dev = raw->dev;
|
||||
ktime_t interval = ktime_sub(ktime_get(), dev->raw->last_event);
|
||||
unsigned long flags;
|
||||
ktime_t interval;
|
||||
|
||||
spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
|
||||
interval = ktime_sub(ktime_get(), dev->raw->last_event);
|
||||
if (ktime_to_ns(interval) >= dev->timeout) {
|
||||
DEFINE_IR_RAW_EVENT(ev);
|
||||
|
||||
@ -480,6 +496,7 @@ static void edge_handle(struct timer_list *t)
|
||||
jiffies + nsecs_to_jiffies(dev->timeout -
|
||||
ktime_to_ns(interval)));
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);
|
||||
|
||||
ir_raw_event_handle(dev);
|
||||
}
|
||||
@ -528,7 +545,8 @@ int ir_raw_event_prepare(struct rc_dev *dev)
|
||||
|
||||
dev->raw->dev = dev;
|
||||
dev->change_protocol = change_protocol;
|
||||
timer_setup(&dev->raw->edge_handle, edge_handle, 0);
|
||||
spin_lock_init(&dev->raw->edge_spinlock);
|
||||
timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
|
||||
INIT_KFIFO(dev->raw->kfifo);
|
||||
|
||||
return 0;
|
||||
|
Loading…
Reference in New Issue
Block a user