
scsi: ufs: ufs-qcom: Remove unnecessary goto statements

A goto statement in an error path is useful if the function needs to do
cleanup other than returning the error code. But in this driver, goto
statements are used in many places just to return the error code, which
makes the code harder to read.

Get rid of those goto statements and just return the error code directly.
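
To make the pattern concrete, here is a minimal sketch of the two styles,
using hypothetical helpers (foo_alloc_buffers(), foo_enable_clocks()) rather
than functions from the driver. The first version jumps to a label that does
nothing but return; the second returns directly, keeping goto only for paths
that need real cleanup:

  /* Hypothetical helpers, for illustration only. */
  struct foo;
  int foo_alloc_buffers(struct foo *f);
  int foo_enable_clocks(struct foo *f);

  /* Before: the "out" label only returns, so the gotos add nothing. */
  static int foo_init_goto(struct foo *f)
  {
          int err;

          err = foo_alloc_buffers(f);
          if (err)
                  goto out;

          err = foo_enable_clocks(f);
  out:
          return err;
  }

  /*
   * After: return directly at each failure point. A goto remains the
   * right tool only when the error path must undo earlier work first.
   */
  static int foo_init_direct(struct foo *f)
  {
          int err;

          err = foo_alloc_buffers(f);
          if (err)
                  return err;

          return foo_enable_clocks(f);
  }

This is the transformation applied in the diff below: error paths that only
returned an error code lose their goto, while cleanup labels such as the
clock-teardown ladder in ufs_qcom_enable_lane_clks() are kept because they
perform real unwinding.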

Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Asutosh Das <quic_asutoshd@quicinc.com>
Tested-by: Andrew Halaney <ahalaney@redhat.com> # Qdrive3/sa8540p-ride
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Manivannan Sadhasivam 2022-12-22 19:39:51 +05:30 committed by Martin K. Petersen
parent 2d95c6deb6
commit 031312dbc6

drivers/ufs/host/ufs-qcom.c

@@ -101,7 +101,7 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
 {
-        int err = 0;
+        int err;
         struct device *dev = host->hba->dev;
 
         if (host->is_lane_clks_enabled)
@@ -110,7 +110,7 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
         err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
                 host->rx_l0_sync_clk);
         if (err)
-                goto out;
+                return err;
 
         err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
                 host->tx_l0_sync_clk);
@@ -128,7 +128,8 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
                 goto disable_rx_l1;
 
         host->is_lane_clks_enabled = true;
-        goto out;
+
+        return 0;
 
 disable_rx_l1:
         clk_disable_unprepare(host->rx_l1_sync_clk);
@@ -136,7 +137,7 @@ disable_tx_l0:
         clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
         clk_disable_unprepare(host->rx_l0_sync_clk);
-out:
+
         return err;
 }
@@ -151,25 +152,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
         err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
                         &host->rx_l0_sync_clk, false);
         if (err)
-                goto out;
+                return err;
 
         err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
                         &host->tx_l0_sync_clk, false);
         if (err)
-                goto out;
+                return err;
 
         /* In case of single lane per direction, don't read lane1 clocks */
         if (host->hba->lanes_per_direction > 1) {
                 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
                         &host->rx_l1_sync_clk, false);
                 if (err)
-                        goto out;
+                        return err;
 
                 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
                         &host->tx_l1_sync_clk, true);
         }
-out:
-        return err;
+
+        return 0;
 }
 
 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -232,7 +233,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
 
         if (!host->core_reset) {
                 dev_warn(hba->dev, "%s: reset control not set\n", __func__);
-                goto out;
+                return 0;
         }
 
         reenable_intr = hba->is_irq_enabled;
@@ -243,7 +244,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
         if (ret) {
                 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
                                 __func__, ret);
-                goto out;
+                return ret;
         }
 
         /*
@@ -265,15 +266,14 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
                 hba->is_irq_enabled = true;
         }
 
-out:
-        return ret;
+        return 0;
 }
 
 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 {
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
         struct phy *phy = host->generic_phy;
-        int ret = 0;
+        int ret;
         bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
 
         /* Reset UFS Host Controller and PHY */
@@ -290,7 +290,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
         if (ret) {
                 dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
                         __func__, ret);
-                goto out;
+                return ret;
         }
 
         /* power on phy - start serdes and phy's power and clocks */
@@ -307,7 +307,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 
 out_disable_phy:
         phy_exit(phy);
-out:
+
         return ret;
 }
@@ -365,7 +365,6 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                                u32 hs, u32 rate, bool update_link_startup_timer)
 {
-        int ret = 0;
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
         struct ufs_clk_info *clki;
         u32 core_clk_period_in_ns;
@@ -400,11 +399,11 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
          * Aggregation logic.
          */
         if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
-                goto out;
+                return 0;
 
         if (gear == 0) {
                 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
-                goto out_error;
+                return -EINVAL;
         }
 
         list_for_each_entry(clki, &hba->clk_list_head, list) {
@@ -427,7 +426,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
         }
 
         if (ufs_qcom_cap_qunipro(host))
-                goto out;
+                return 0;
 
         core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
         core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
@@ -442,7 +441,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                                         "%s: index %d exceeds table size %zu\n",
                                         __func__, gear,
                                         ARRAY_SIZE(hs_fr_table_rA));
-                        goto out_error;
+                        return -EINVAL;
                 }
                 tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
         } else if (rate == PA_HS_MODE_B) {
@@ -451,13 +450,13 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                                         "%s: index %d exceeds table size %zu\n",
                                         __func__, gear,
                                         ARRAY_SIZE(hs_fr_table_rB));
-                        goto out_error;
+                        return -EINVAL;
                 }
                 tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
         } else {
                 dev_err(hba->dev, "%s: invalid rate = %d\n",
                                 __func__, rate);
-                goto out_error;
+                return -EINVAL;
         }
         break;
 case SLOWAUTO_MODE:
@@ -467,14 +466,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                                         "%s: index %d exceeds table size %zu\n",
                                         __func__, gear,
                                         ARRAY_SIZE(pwm_fr_table));
-                        goto out_error;
+                        return -EINVAL;
                 }
                 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
                 break;
         case UNCHANGED:
         default:
                 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
-                goto out_error;
+                return -EINVAL;
         }
 
         if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
@@ -498,12 +497,8 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                  */
                 mb();
         }
-        goto out;
 
-out_error:
-        ret = -EINVAL;
-out:
-        return ret;
+        return 0;
 }
 
 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
@@ -518,8 +513,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                                         0, true)) {
                         dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                 __func__);
-                        err = -EINVAL;
-                        goto out;
+                        return -EINVAL;
                 }
 
                 if (ufs_qcom_cap_qunipro(host))
@@ -545,7 +539,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                 break;
         }
 
-out:
         return err;
 }
@@ -680,6 +673,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
         struct ufs_dev_params ufs_qcom_cap;
         int ret = 0;
 
+        if (!dev_req_params) {
+                pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+                return -EINVAL;
+        }
+
         switch (status) {
         case PRE_CHANGE:
                 ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
@@ -705,7 +703,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                 if (ret) {
                         dev_err(hba->dev, "%s: failed to determine capabilities\n",
                                         __func__);
-                        goto out;
+                        return ret;
                 }
 
                 /* enable the device ref clock before changing to HS mode */
@@ -746,7 +744,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                 ret = -EINVAL;
                 break;
         }
-out:
+
         return ret;
 }
@@ -758,14 +756,11 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
         err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                              &pa_vs_config_reg1);
         if (err)
-                goto out;
+                return err;
 
         /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
-        err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+        return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                             (pa_vs_config_reg1 | (1 << 12)));
-
-out:
-        return err;
 }
 
 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
@@ -942,9 +937,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 
         host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
         if (!host) {
-                err = -ENOMEM;
                 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
-                goto out;
+                return -ENOMEM;
         }
 
         /* Make a two way bind between the qcom host and the hba */
@@ -965,10 +959,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
         host->rcdev.owner = dev->driver->owner;
         host->rcdev.nr_resets = 1;
         err = devm_reset_controller_register(dev, &host->rcdev);
-        if (err) {
+        if (err)
                 dev_warn(dev, "Failed to register reset controller\n");
-                err = 0;
-        }
 
         if (!has_acpi_companion(dev)) {
                 host->generic_phy = devm_phy_get(dev, "ufsphy");
@@ -1033,17 +1025,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
         ufs_qcom_get_default_testbus_cfg(host);
         err = ufs_qcom_testbus_config(host);
-        if (err) {
+        if (err)
+                /* Failure is non-fatal */
                 dev_warn(dev, "%s: failed to configure the testbus %d\n",
                                 __func__, err);
-                err = 0;
-        }
 
-        goto out;
+        return 0;
 
 out_variant_clear:
         ufshcd_set_variant(hba, NULL);
-out:
+
         return err;
 }
@@ -1069,7 +1060,7 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                         UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                         &core_clk_ctrl_reg);
         if (err)
-                goto out;
+                return err;
 
         core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
         core_clk_ctrl_reg |= clk_cycles;
@@ -1077,11 +1068,9 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
 
         /* Clear CORE_CLK_DIV_EN */
         core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
-        err = ufshcd_dme_set(hba,
+        return ufshcd_dme_set(hba,
                         UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                         core_clk_ctrl_reg);
-
-out:
-        return err;
 }
 
 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
@@ -1164,7 +1153,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
 
                 if (err || !dev_req_params) {
                         ufshcd_uic_hibern8_exit(hba);
-                        goto out;
+                        return err;
                 }
 
                 ufs_qcom_cfg_timers(hba,
@@ -1175,8 +1164,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                 ufshcd_uic_hibern8_exit(hba);
         }
 
-out:
-        return err;
+        return 0;
 }
 
 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)