
wlcore: move handling from hardirq to the irq thread function

Spin locks and completions are expensive in hard IRQ context and cause
problems with RT kernels, where both can schedule() and therefore must
not be used in hard irq context.

Move handling code into the irq thread function to avoid that.

Reported-by: Gregoire Gentil <gregoire@alwaysinnovating.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
commit 97236a0656
parent da7aa28004
Author: Luciano Coelho <coelho@ti.com>
Date:   2013-03-08 09:41:53 +02:00


@@ -651,6 +651,25 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
 	unsigned long flags;
 	struct wl1271 *wl = cookie;
 
+	/* complete the ELP completion */
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+	if (wl->elp_compl) {
+		complete(wl->elp_compl);
+		wl->elp_compl = NULL;
+	}
+
+	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
+		/* don't enqueue a work right now. mark it as pending */
+		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
+		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+		disable_irq_nosync(wl->irq);
+		pm_wakeup_event(wl->dev, 0);
+		spin_unlock_irqrestore(&wl->wl_lock, flags);
+		return IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+
 	/* TX might be handled here, avoid redundant work */
 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
 	cancel_work_sync(&wl->tx_work);
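
For context, wl->elp_compl is the completion that the power-save wakeup
path sleeps on while waiting for the chip to leave ELP. A minimal sketch
of that waiter side — simplified from the driver's wl1271_ps_elp_wakeup(),
with the wakeup register write elided and elp_wakeup_sketch() as an
illustrative name — looks roughly like this:

	/* Waiter side (sketch): arm the completion, wake the chip, then
	 * sleep until the IRQ thread above calls complete() on it. */
	static int elp_wakeup_sketch(struct wl1271 *wl)
	{
		DECLARE_COMPLETION_ONSTACK(compl);
		unsigned long flags;

		spin_lock_irqsave(&wl->wl_lock, flags);
		wl->elp_compl = &compl;		/* seen by wlcore_irq() */
		spin_unlock_irqrestore(&wl->wl_lock, flags);

		/* ... write ELPCTRL_WAKE_UP to the ELP control register ... */

		if (!wait_for_completion_timeout(&compl,
				msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)))
			return -ETIMEDOUT;
		return 0;
	}

Completing this from the IRQ thread rather than the hard IRQ handler is
exactly the point of the patch: complete() may take sleeping locks on RT.
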
@@ -5998,35 +6017,6 @@ int wlcore_free_hw(struct wl1271 *wl)
 }
 EXPORT_SYMBOL_GPL(wlcore_free_hw);
 
-static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
-{
-	struct wl1271 *wl = cookie;
-	unsigned long flags;
-
-	wl1271_debug(DEBUG_IRQ, "IRQ");
-
-	/* complete the ELP completion */
-	spin_lock_irqsave(&wl->wl_lock, flags);
-	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-	if (wl->elp_compl) {
-		complete(wl->elp_compl);
-		wl->elp_compl = NULL;
-	}
-
-	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
-		/* don't enqueue a work right now. mark it as pending */
-		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
-		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
-		disable_irq_nosync(wl->irq);
-		pm_wakeup_event(wl->dev, 0);
-		spin_unlock_irqrestore(&wl->wl_lock, flags);
-		return IRQ_HANDLED;
-	}
-	spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-	return IRQ_WAKE_THREAD;
-}
-
 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 {
 	struct wl1271 *wl = context;
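
Deleting wl12xx_hardirq() entirely is safe because request_threaded_irq()
installs a default primary handler when passed NULL, and in mainline that
handler does nothing but wake the IRQ thread:

	/* From kernel/irq/manage.c: used when the primary handler passed
	 * to request_threaded_irq() is NULL. */
	static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
	{
		return IRQ_WAKE_THREAD;
	}

So nothing driver-specific needs to run in hard IRQ context at all; the
ELP and suspend handling simply moves to the top of wlcore_irq().
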
@@ -6068,9 +6058,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 	else
 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
 
-	ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
-				   irqflags,
-				   pdev->name, wl);
+	ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
+				   irqflags, pdev->name, wl);
 	if (ret < 0) {
 		wl1271_error("request_irq() failed: %d", ret);
 		goto out_free_nvs;
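
One caveat: the core only accepts a NULL primary handler together with
IRQF_ONESHOT (otherwise it has no safe point at which to unmask a
level-triggered line and __setup_irq() rejects the request), which the
surrounding code already sets. The resulting registration pattern, with
my_thread_fn and priv as placeholder names, is simply:

	/* Generic threaded-IRQ pattern (sketch): no hard-IRQ handler, all
	 * handling in the thread; the line stays masked until it returns. */
	ret = request_threaded_irq(irq, NULL, my_thread_fn,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   dev_name(dev), priv);
	if (ret)
		return ret;
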