spi: Go back to immediate teardown

Commit 412e603732 ("spi: core: avoid waking pump thread from spi_sync instead run teardown delayed") introduced regressions on some boards, apparently connected to spi_mem not triggering shutdown properly any more. Since we've thus far been unable to figure out exactly where the breakage is, revert the optimisation for now.

Reported-by: Jon Hunter <jonathanh@nvidia.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Cc: kernel@martin.sperl.org
commit f0125f1a55
parent 51eea52d26
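As background for the diff below, here is a minimal, self-contained sketch of the kthread_worker pattern involved. It is not the SPI core code; the demo_* names and the module wrapper are invented for illustration. The reverted commit armed a kthread_delayed_work so that controller teardown ran roughly one second after the queue went idle, whereas this revert goes back to tearing the controller down directly from the message-pump work item as soon as the queue is found empty.

// SPDX-License-Identifier: GPL-2.0
/*
 * Illustrative only: a tiny module showing the kthread_worker pattern the
 * reverted commit relied on.  The demo_* names are made up for this sketch;
 * this is not the SPI core code.
 */
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/module.h>

struct demo_ctlr {
	struct kthread_worker		kworker;
	struct task_struct		*kworker_task;
	struct kthread_work		pump_messages;
	struct kthread_delayed_work	idle_teardown;
};

static struct demo_ctlr demo;

static void demo_pump(struct kthread_work *work)
{
	pr_info("demo: pump ran in worker context\n");

	/*
	 * Delayed-teardown scheme (what 412e603732 did): instead of tearing
	 * the hardware down here, (re)arm a delayed work one second out so
	 * back-to-back messages never see a teardown/setup cycle.
	 */
	kthread_mod_delayed_work(&demo.kworker, &demo.idle_teardown, HZ);
}

static void demo_teardown(struct kthread_work *work)
{
	struct demo_ctlr *ctlr =
		container_of(work, struct demo_ctlr, idle_teardown.work);

	/* In a real driver this is where the hardware would be unprepared */
	pr_info("demo: idle teardown for %p\n", ctlr);
}

static int __init demo_init(void)
{
	kthread_init_worker(&demo.kworker);
	demo.kworker_task = kthread_run(kthread_worker_fn, &demo.kworker,
					"demo-pump");
	if (IS_ERR(demo.kworker_task))
		return PTR_ERR(demo.kworker_task);

	kthread_init_work(&demo.pump_messages, demo_pump);
	kthread_init_delayed_work(&demo.idle_teardown, demo_teardown);

	/* Queue the pump immediately, as the SPI core does */
	kthread_queue_work(&demo.kworker, &demo.pump_messages);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_flush_worker(&demo.kworker);
	kthread_stop(demo.kworker_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The delayed variant was meant to spare closely spaced messages a full unprepare/re-prepare cycle; the immediate variant is simpler, and this revert restores it until the spi_mem breakage is understood.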
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1225,7 +1225,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		return;
 	}
 
-	/* If another context is idling the device then defer to kthread */
+	/* If another context is idling the device then defer */
 	if (ctlr->idling) {
 		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
@@ -1239,10 +1239,34 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 			return;
 		}
 
-		/* schedule idle teardown with a delay of 1 second */
-		kthread_mod_delayed_work(&ctlr->kworker,
-					 &ctlr->pump_idle_teardown,
-					 HZ);
+		/* Only do teardown in the thread */
+		if (!in_kthread) {
+			kthread_queue_work(&ctlr->kworker,
+					   &ctlr->pump_messages);
+			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+			return;
+		}
+
+		ctlr->busy = false;
+		ctlr->idling = true;
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+		kfree(ctlr->dummy_rx);
+		ctlr->dummy_rx = NULL;
+		kfree(ctlr->dummy_tx);
+		ctlr->dummy_tx = NULL;
+		if (ctlr->unprepare_transfer_hardware &&
+		    ctlr->unprepare_transfer_hardware(ctlr))
+			dev_err(&ctlr->dev,
+				"failed to unprepare transfer hardware\n");
+		if (ctlr->auto_runtime_pm) {
+			pm_runtime_mark_last_busy(ctlr->dev.parent);
+			pm_runtime_put_autosuspend(ctlr->dev.parent);
+		}
+		trace_spi_controller_idle(ctlr);
+
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
+		ctlr->idling = false;
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
@@ -1335,77 +1359,6 @@ static void spi_pump_messages(struct kthread_work *work)
 	__spi_pump_messages(ctlr, true);
 }
 
-/**
- * spi_pump_idle_teardown - kthread delayed work function which tears down
- *                          the controller settings after some delay
- * @work: pointer to kthread work struct contained in the controller struct
- */
-static void spi_pump_idle_teardown(struct kthread_work *work)
-{
-	struct spi_controller *ctlr =
-		container_of(work, struct spi_controller,
-			     pump_idle_teardown.work);
-	unsigned long flags;
-
-	/* Lock queue */
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
-
-	/* Make sure we are not already running a message */
-	if (ctlr->cur_msg)
-		goto out;
-
-	/* if there is anything in the list then exit */
-	if (!list_empty(&ctlr->queue))
-		goto out;
-
-	/* if the controller is running then exit */
-	if (ctlr->running)
-		goto out;
-
-	/* if the controller is busy then exit */
-	if (ctlr->busy)
-		goto out;
-
-	/* if the controller is idling then exit
-	 * this is actually a bit strange and would indicate that
-	 * this function is scheduled twice, which should not happen
-	 */
-	if (ctlr->idling)
-		goto out;
-
-	/* set up the initial states */
-	ctlr->busy = false;
-	ctlr->idling = true;
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-
-	/* free dummy receive buffers */
-	kfree(ctlr->dummy_rx);
-	ctlr->dummy_rx = NULL;
-	kfree(ctlr->dummy_tx);
-	ctlr->dummy_tx = NULL;
-
-	/* unprepare hardware */
-	if (ctlr->unprepare_transfer_hardware &&
-	    ctlr->unprepare_transfer_hardware(ctlr))
-		dev_err(&ctlr->dev,
-			"failed to unprepare transfer hardware\n");
-	/* handle pm */
-	if (ctlr->auto_runtime_pm) {
-		pm_runtime_mark_last_busy(ctlr->dev.parent);
-		pm_runtime_put_autosuspend(ctlr->dev.parent);
-	}
-
-	/* mark controller as idle */
-	trace_spi_controller_idle(ctlr);
-
-	/* finally put us from idling into stopped */
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
-	ctlr->idling = false;
-
-out:
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-}
-
 static int spi_init_queue(struct spi_controller *ctlr)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
@@ -1421,8 +1374,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
 		return PTR_ERR(ctlr->kworker_task);
 	}
 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
-	kthread_init_delayed_work(&ctlr->pump_idle_teardown,
-				  spi_pump_idle_teardown);
+
 	/*
 	 * Controller config will indicate if this controller should run the
 	 * message pump with high (realtime) priority to reduce the transfer
@@ -1494,16 +1446,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
 	ctlr->cur_msg = NULL;
 	ctlr->cur_msg_prepared = false;
-
-	/* if there is something queued, then wake the queue */
-	if (!list_empty(&ctlr->queue))
-		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
-	else
-		/* otherwise schedule delayed teardown */
-		kthread_mod_delayed_work(&ctlr->kworker,
-					 &ctlr->pump_idle_teardown,
-					 HZ);
-
+	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	trace_spi_message_done(mesg);
@@ -1608,7 +1551,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 	msg->status = -EINPROGRESS;
 
 	list_add_tail(&msg->queue, &ctlr->queue);
-	if (need_pump)
+	if (!ctlr->busy && need_pump)
 		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
@@ -3783,3 +3726,4 @@ err0:
  * include needing to have boardinfo data structures be much more public.
  */
 postcore_initcall(spi_init);
+
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -334,7 +334,6 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @kworker: thread struct for message pump
  * @kworker_task: pointer to task for message pump kworker thread
  * @pump_messages: work struct for scheduling work to the message pump
- * @pump_idle_teardown: work structure for scheduling a teardown delayed
  * @queue_lock: spinlock to syncronise access to message queue
  * @queue: message queue
  * @idling: the device is entering idle state
@@ -533,7 +532,6 @@ struct spi_controller {
 	struct kthread_worker		kworker;
 	struct task_struct		*kworker_task;
 	struct kthread_work		pump_messages;
-	struct kthread_delayed_work	pump_idle_teardown;
 	spinlock_t			queue_lock;
 	struct list_head		queue;
 	struct spi_message		*cur_msg;