mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-26 22:24:09 +08:00
net: ipa: have ipa_clock_get() return a value
We currently assume no errors occur when enabling or disabling the IPA core clock and interconnects. And although this commit exposes errors that could occur, we generally assume they won't happen in practice. This commit changes ipa_clock_get() and ipa_clock_put() so each returns a value. The values returned are meant to mimic what the runtime power management functions return, so we can set up error handling here before we make the switch. Have ipa_clock_get() increment the reference count even if it returns an error, to match the behavior of pm_runtime_get(). More details follow. When taking a reference in ipa_clock_get(), return 0 for the first reference, 1 for subsequent references, or a negative error code if an error occurs. Note that if ipa_clock_get() returns an error, we must not touch hardware; in some cases such errors now cause entire blocks of code to be skipped. When dropping a reference in ipa_clock_put(), we return 0 or an error code. The error would come from ipa_clock_disable(), which now returns what ipa_interconnect_disable() returns (either 0 or a negative error code). For now, callers ignore the return value; if an error occurs, a message will have already been logged, and little more can actually be done to improve the situation. Signed-off-by: Alex Elder <elder@linaro.org> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
6f45933dfe
commit
7ebd168c3b
@ -223,10 +223,11 @@ static int ipa_clock_enable(struct ipa *ipa)
|
||||
}
|
||||
|
||||
/* Inverse of ipa_clock_enable() */
|
||||
static void ipa_clock_disable(struct ipa *ipa)
|
||||
static int ipa_clock_disable(struct ipa *ipa)
|
||||
{
|
||||
clk_disable_unprepare(ipa->clock->core);
|
||||
(void)ipa_interconnect_disable(ipa);
|
||||
|
||||
return ipa_interconnect_disable(ipa);
|
||||
}
|
||||
|
||||
/* Get an IPA clock reference, but only if the reference count is
|
||||
@ -246,43 +247,51 @@ bool ipa_clock_get_additional(struct ipa *ipa)
|
||||
* Incrementing the reference count is intentionally deferred until
|
||||
* after the clock is running and endpoints are resumed.
|
||||
*/
|
||||
void ipa_clock_get(struct ipa *ipa)
|
||||
int ipa_clock_get(struct ipa *ipa)
|
||||
{
|
||||
struct ipa_clock *clock = ipa->clock;
|
||||
int ret;
|
||||
|
||||
/* If the clock is running, just bump the reference count */
|
||||
if (ipa_clock_get_additional(ipa))
|
||||
return;
|
||||
return 1;
|
||||
|
||||
/* Otherwise get the mutex and check again */
|
||||
mutex_lock(&clock->mutex);
|
||||
|
||||
/* A reference might have been added before we got the mutex. */
|
||||
if (ipa_clock_get_additional(ipa))
|
||||
if (ipa_clock_get_additional(ipa)) {
|
||||
ret = 1;
|
||||
goto out_mutex_unlock;
|
||||
}
|
||||
|
||||
ret = ipa_clock_enable(ipa);
|
||||
if (!ret)
|
||||
refcount_set(&clock->count, 1);
|
||||
|
||||
refcount_set(&clock->count, 1);
|
||||
|
||||
out_mutex_unlock:
|
||||
mutex_unlock(&clock->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Attempt to remove an IPA clock reference. If this represents the
|
||||
* last reference, disable the IPA clock under protection of the mutex.
|
||||
*/
|
||||
void ipa_clock_put(struct ipa *ipa)
|
||||
int ipa_clock_put(struct ipa *ipa)
|
||||
{
|
||||
struct ipa_clock *clock = ipa->clock;
|
||||
int ret;
|
||||
|
||||
/* If this is not the last reference there's nothing more to do */
|
||||
if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
|
||||
return;
|
||||
return 0;
|
||||
|
||||
ipa_clock_disable(ipa);
|
||||
ret = ipa_clock_disable(ipa);
|
||||
|
||||
mutex_unlock(&clock->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Return the current IPA core clock rate */
|
||||
@ -388,7 +397,7 @@ void ipa_clock_exit(struct ipa_clock *clock)
|
||||
* ipa_suspend() - Power management system suspend callback
|
||||
* @dev: IPA device structure
|
||||
*
|
||||
* Return: Always returns zero
|
||||
* Return: 0 on success, or a negative error code
|
||||
*
|
||||
* Called by the PM framework when a system suspend operation is invoked.
|
||||
* Suspends endpoints and releases the clock reference held to keep
|
||||
@ -405,16 +414,14 @@ static int ipa_suspend(struct device *dev)
|
||||
gsi_suspend(&ipa->gsi);
|
||||
}
|
||||
|
||||
ipa_clock_put(ipa);
|
||||
|
||||
return 0;
|
||||
return ipa_clock_put(ipa);
|
||||
}
|
||||
|
||||
/**
|
||||
* ipa_resume() - Power management system resume callback
|
||||
* @dev: IPA device structure
|
||||
*
|
||||
* Return: Always returns 0
|
||||
* Return: 0 on success, or a negative error code
|
||||
*
|
||||
* Called by the PM framework when a system resume operation is invoked.
|
||||
* Takes an IPA clock reference to keep the clock running until suspend,
|
||||
@ -423,11 +430,16 @@ static int ipa_suspend(struct device *dev)
|
||||
static int ipa_resume(struct device *dev)
|
||||
{
|
||||
struct ipa *ipa = dev_get_drvdata(dev);
|
||||
int ret;
|
||||
|
||||
/* This clock reference will keep the IPA out of suspend
|
||||
* until we get a power management suspend request.
|
||||
*/
|
||||
ipa_clock_get(ipa);
|
||||
ret = ipa_clock_get(ipa);
|
||||
if (WARN_ON(ret < 0)) {
|
||||
(void)ipa_clock_put(ipa);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Endpoints aren't usable until setup is complete */
|
||||
if (ipa->setup_complete) {
|
||||
|
@ -54,14 +54,20 @@ void ipa_clock_exit(struct ipa_clock *clock);
|
||||
* ipa_clock_get() - Get an IPA clock reference
|
||||
* @ipa: IPA pointer
|
||||
*
|
||||
* This call blocks if this is the first reference.
|
||||
* Return: 0 if clock started, 1 if clock already running, or a negative
|
||||
* error code
|
||||
*
|
||||
* This call blocks if this is the first reference. A reference is
|
||||
* taken even if an error occurs starting the IPA clock.
|
||||
*/
|
||||
void ipa_clock_get(struct ipa *ipa);
|
||||
int ipa_clock_get(struct ipa *ipa);
|
||||
|
||||
/**
|
||||
* ipa_clock_get_additional() - Get an IPA clock reference if not first
|
||||
* @ipa: IPA pointer
|
||||
*
|
||||
* Return: true if reference taken, false otherwise
|
||||
*
|
||||
* This returns immediately, and only takes a reference if not the first
|
||||
*/
|
||||
bool ipa_clock_get_additional(struct ipa *ipa);
|
||||
@ -70,10 +76,12 @@ bool ipa_clock_get_additional(struct ipa *ipa);
|
||||
* ipa_clock_put() - Drop an IPA clock reference
|
||||
* @ipa: IPA pointer
|
||||
*
|
||||
* Return: 0 if successful, or a negative error code
|
||||
*
|
||||
* This drops a clock reference. If the last reference is being dropped,
|
||||
* the clock is stopped and RX endpoints are suspended. This call will
|
||||
* not block unless the last reference is dropped.
|
||||
*/
|
||||
void ipa_clock_put(struct ipa *ipa);
|
||||
int ipa_clock_put(struct ipa *ipa);
|
||||
|
||||
#endif /* _IPA_CLOCK_H_ */
|
||||
|
@ -83,8 +83,11 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
|
||||
u32 pending;
|
||||
u32 offset;
|
||||
u32 mask;
|
||||
int ret;
|
||||
|
||||
ipa_clock_get(ipa);
|
||||
ret = ipa_clock_get(ipa);
|
||||
if (WARN_ON(ret < 0))
|
||||
goto out_clock_put;
|
||||
|
||||
/* The status register indicates which conditions are present,
|
||||
* including conditions whose interrupt is not enabled. Handle
|
||||
@ -112,8 +115,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
|
||||
offset = ipa_reg_irq_clr_offset(ipa->version);
|
||||
iowrite32(pending, ipa->reg_virt + offset);
|
||||
}
|
||||
|
||||
ipa_clock_put(ipa);
|
||||
out_clock_put:
|
||||
(void)ipa_clock_put(ipa);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -431,7 +431,9 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
|
||||
* is held after initialization completes, and won't get dropped
|
||||
* unless/until a system suspend request arrives.
|
||||
*/
|
||||
ipa_clock_get(ipa);
|
||||
ret = ipa_clock_get(ipa);
|
||||
if (WARN_ON(ret < 0))
|
||||
goto err_clock_put;
|
||||
|
||||
ipa_hardware_config(ipa, data);
|
||||
|
||||
@ -475,7 +477,8 @@ err_mem_deconfig:
|
||||
ipa_mem_deconfig(ipa);
|
||||
err_hardware_deconfig:
|
||||
ipa_hardware_deconfig(ipa);
|
||||
ipa_clock_put(ipa);
|
||||
err_clock_put:
|
||||
(void)ipa_clock_put(ipa);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -493,7 +496,7 @@ static void ipa_deconfig(struct ipa *ipa)
|
||||
ipa->interrupt = NULL;
|
||||
ipa_mem_deconfig(ipa);
|
||||
ipa_hardware_deconfig(ipa);
|
||||
ipa_clock_put(ipa);
|
||||
(void)ipa_clock_put(ipa);
|
||||
}
|
||||
|
||||
static int ipa_firmware_load(struct device *dev)
|
||||
@ -750,20 +753,22 @@ static int ipa_probe(struct platform_device *pdev)
|
||||
goto err_table_exit;
|
||||
|
||||
/* The clock needs to be active for config and setup */
|
||||
ipa_clock_get(ipa);
|
||||
ret = ipa_clock_get(ipa);
|
||||
if (WARN_ON(ret < 0))
|
||||
goto err_clock_put;
|
||||
|
||||
ret = ipa_config(ipa, data);
|
||||
if (ret)
|
||||
goto err_clock_put; /* Error */
|
||||
goto err_clock_put;
|
||||
|
||||
dev_info(dev, "IPA driver initialized");
|
||||
|
||||
/* If the modem is doing early initialization, it will trigger a
|
||||
* call to ipa_setup() call when it has finished. In that case
|
||||
* we're done here.
|
||||
* call to ipa_setup() when it has finished. In that case we're
|
||||
* done here.
|
||||
*/
|
||||
if (modem_init)
|
||||
goto out_clock_put; /* Done; no error */
|
||||
goto done;
|
||||
|
||||
/* Otherwise we need to load the firmware and have Trust Zone validate
|
||||
* and install it. If that succeeds we can proceed with setup.
|
||||
@ -775,16 +780,15 @@ static int ipa_probe(struct platform_device *pdev)
|
||||
ret = ipa_setup(ipa);
|
||||
if (ret)
|
||||
goto err_deconfig;
|
||||
|
||||
out_clock_put:
|
||||
ipa_clock_put(ipa);
|
||||
done:
|
||||
(void)ipa_clock_put(ipa);
|
||||
|
||||
return 0;
|
||||
|
||||
err_deconfig:
|
||||
ipa_deconfig(ipa);
|
||||
err_clock_put:
|
||||
ipa_clock_put(ipa);
|
||||
(void)ipa_clock_put(ipa);
|
||||
ipa_modem_exit(ipa);
|
||||
err_table_exit:
|
||||
ipa_table_exit(ipa);
|
||||
@ -810,7 +814,9 @@ static int ipa_remove(struct platform_device *pdev)
|
||||
struct ipa_clock *clock = ipa->clock;
|
||||
int ret;
|
||||
|
||||
ipa_clock_get(ipa);
|
||||
ret = ipa_clock_get(ipa);
|
||||
if (WARN_ON(ret < 0))
|
||||
goto out_clock_put;
|
||||
|
||||
if (ipa->setup_complete) {
|
||||
ret = ipa_modem_stop(ipa);
|
||||
@ -826,8 +832,8 @@ static int ipa_remove(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
ipa_deconfig(ipa);
|
||||
|
||||
ipa_clock_put(ipa);
|
||||
out_clock_put:
|
||||
(void)ipa_clock_put(ipa);
|
||||
|
||||
ipa_modem_exit(ipa);
|
||||
ipa_table_exit(ipa);
|
||||
|
@ -45,7 +45,9 @@ static int ipa_open(struct net_device *netdev)
|
||||
struct ipa *ipa = priv->ipa;
|
||||
int ret;
|
||||
|
||||
ipa_clock_get(ipa);
|
||||
ret = ipa_clock_get(ipa);
|
||||
if (WARN_ON(ret < 0))
|
||||
goto err_clock_put;
|
||||
|
||||
ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
|
||||
if (ret)
|
||||
@ -62,7 +64,7 @@ static int ipa_open(struct net_device *netdev)
|
||||
err_disable_tx:
|
||||
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
|
||||
err_clock_put:
|
||||
ipa_clock_put(ipa);
|
||||
(void)ipa_clock_put(ipa);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -78,7 +80,7 @@ static int ipa_stop(struct net_device *netdev)
|
||||
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
|
||||
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
|
||||
|
||||
ipa_clock_put(ipa);
|
||||
(void)ipa_clock_put(ipa);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -297,7 +299,9 @@ static void ipa_modem_crashed(struct ipa *ipa)
|
||||
struct device *dev = &ipa->pdev->dev;
|
||||
int ret;
|
||||
|
||||
ipa_clock_get(ipa);
|
||||
ret = ipa_clock_get(ipa);
|
||||
if (WARN_ON(ret < 0))
|
||||
goto out_clock_put;
|
||||
|
||||
ipa_endpoint_modem_pause_all(ipa, true);
|
||||
|
||||
@ -324,7 +328,8 @@ static void ipa_modem_crashed(struct ipa *ipa)
|
||||
if (ret)
|
||||
dev_err(dev, "error %d zeroing modem memory regions\n", ret);
|
||||
|
||||
ipa_clock_put(ipa);
|
||||
out_clock_put:
|
||||
(void)ipa_clock_put(ipa);
|
||||
}
|
||||
|
||||
static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
|
||||
|
@ -150,24 +150,26 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
|
||||
static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
|
||||
{
|
||||
struct ipa_smp2p *smp2p = dev_id;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&smp2p->mutex);
|
||||
|
||||
if (!smp2p->disabled) {
|
||||
int ret;
|
||||
if (smp2p->disabled)
|
||||
goto out_mutex_unlock;
|
||||
smp2p->disabled = true; /* If any others arrive, ignore them */
|
||||
|
||||
/* The clock needs to be active for setup */
|
||||
ipa_clock_get(smp2p->ipa);
|
||||
/* The clock needs to be active for setup */
|
||||
ret = ipa_clock_get(smp2p->ipa);
|
||||
if (WARN_ON(ret < 0))
|
||||
goto out_clock_put;
|
||||
|
||||
ret = ipa_setup(smp2p->ipa);
|
||||
if (ret)
|
||||
dev_err(&smp2p->ipa->pdev->dev,
|
||||
"error %d from ipa_setup()\n", ret);
|
||||
smp2p->disabled = true;
|
||||
|
||||
ipa_clock_put(smp2p->ipa);
|
||||
}
|
||||
/* An error here won't cause driver shutdown, so warn if one occurs */
|
||||
ret = ipa_setup(smp2p->ipa);
|
||||
WARN(ret != 0, "error %d from ipa_setup()\n", ret);
|
||||
|
||||
out_clock_put:
|
||||
(void)ipa_clock_put(smp2p->ipa);
|
||||
out_mutex_unlock:
|
||||
mutex_unlock(&smp2p->mutex);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@ -206,7 +208,7 @@ static void ipa_smp2p_clock_release(struct ipa *ipa)
|
||||
if (!ipa->smp2p->clock_on)
|
||||
return;
|
||||
|
||||
ipa_clock_put(ipa);
|
||||
(void)ipa_clock_put(ipa);
|
||||
ipa->smp2p->clock_on = false;
|
||||
}
|
||||
|
||||
|
@ -154,7 +154,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
|
||||
case IPA_UC_RESPONSE_INIT_COMPLETED:
|
||||
if (ipa->uc_clocked) {
|
||||
ipa->uc_loaded = true;
|
||||
ipa_clock_put(ipa);
|
||||
(void)ipa_clock_put(ipa);
|
||||
ipa->uc_clocked = false;
|
||||
} else {
|
||||
dev_warn(dev, "unexpected init_completed response\n");
|
||||
@ -182,21 +182,25 @@ void ipa_uc_deconfig(struct ipa *ipa)
|
||||
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
|
||||
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
|
||||
if (ipa->uc_clocked)
|
||||
ipa_clock_put(ipa);
|
||||
(void)ipa_clock_put(ipa);
|
||||
}
|
||||
|
||||
/* Take a proxy clock reference for the microcontroller */
|
||||
void ipa_uc_clock(struct ipa *ipa)
|
||||
{
|
||||
static bool already;
|
||||
int ret;
|
||||
|
||||
if (already)
|
||||
return;
|
||||
already = true; /* Only do this on first boot */
|
||||
|
||||
/* This clock reference dropped in ipa_uc_response_hdlr() above */
|
||||
ipa_clock_get(ipa);
|
||||
ipa->uc_clocked = true;
|
||||
ret = ipa_clock_get(ipa);
|
||||
if (WARN(ret < 0, "error %d getting proxy clock\n", ret))
|
||||
(void)ipa_clock_put(ipa);
|
||||
|
||||
ipa->uc_clocked = ret >= 0;
|
||||
}
|
||||
|
||||
/* Send a command to the microcontroller */
|
||||
|
Loading…
Reference in New Issue
Block a user