Merge tag 'u-boot-ufs-next-20241014' of https://source.denx.de/u-boot/custodians/u-boot-ufs

- Set of fixes/updates for the UFS core
 - Fix dcache flush/invalidate handling
 - Update & sync quirks
 - Backport fixes from Linux
 - Add missing memory barriers
 - Remove link_startup_again logic
 - Add Neil to UFS maintainers
- Add UFS DesignWare controller driver for AMD Versal Gen 2 platforms
- Add UFS Qualcomm controller driver
Tom Rini, 2024-10-14 08:15:15 -06:00 (commit c7aafb20ce)
13 changed files with 1850 additions and 64 deletions

MAINTAINERS

@@ -1714,6 +1714,7 @@ T: git https://source.denx.de/u-boot/custodians/u-boot-ubi.git
F: drivers/mtd/ubi/
UFS
M: Neil Armstrong <neil.armstrong@linaro.org>
M: Bhupesh Sharma <bhupesh.linux@gmail.com>
M: Neha Malcom Francis <n-francis@ti.com>
S: Maintained


@@ -146,7 +146,7 @@ CONFIG_USB_GADGET_PRODUCT_NUM=0x0300
CONFIG_USB_GADGET_DOWNLOAD=y
CONFIG_USB_FUNCTION_THOR=y
CONFIG_UFS=y
CONFIG_CADENCE_UFS=y
CONFIG_UFS_AMD_VERSAL2=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_BLK=y

drivers/ufs/Kconfig

@@ -26,6 +26,13 @@ config UFS_PCI
If unsure, say N.
config QCOM_UFS
bool "Qualcomm Host Controller driver for UFS"
depends on UFS && ARCH_SNAPDRAGON
help
This selects the platform driver for the UFS host
controller present on Qualcomm Snapdragon SoCs.
config TI_J721E_UFS
bool "Glue Layer driver for UFS on TI J721E devices"
help
@@ -41,4 +48,12 @@ config UFS_RENESAS
UFS host on Renesas needs some vendor specific configuration before
accessing the hardware.
config UFS_AMD_VERSAL2
bool "AMD Versal Gen 2 UFS controller platform driver"
depends on UFS && ZYNQMP_FIRMWARE
help
This selects the AMD specific additions to UFSHCD platform driver.
UFS host on AMD needs some vendor specific configuration before accessing
the hardware.
endmenu

drivers/ufs/Makefile

@@ -5,6 +5,8 @@
obj-$(CONFIG_UFS) += ufs.o ufs-uclass.o
obj-$(CONFIG_CADENCE_UFS) += cdns-platform.o
obj-$(CONFIG_QCOM_UFS) += ufs-qcom.o
obj-$(CONFIG_TI_J721E_UFS) += ti-j721e-ufs.o
obj-$(CONFIG_UFS_PCI) += ufs-pci.o
obj-$(CONFIG_UFS_RENESAS) += ufs-renesas.o
obj-$(CONFIG_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o

drivers/ufs/ufs-amd-versal2.c (new file, 501 lines)

@@ -0,0 +1,501 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024 Advanced Micro Devices, Inc.
*/
#include <clk.h>
#include <dm.h>
#include <ufs.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <zynqmp_firmware.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/time.h>
#include <reset.h>
#include "ufs.h"
#include "ufshcd-dwc.h"
#include "ufshci-dwc.h"
#define VERSAL2_UFS_DEVICE_ID 4
#define SRAM_CSR_INIT_DONE_MASK BIT(0)
#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1)
#define SRAM_CSR_BYPASS_MASK BIT(2)
#define MPHY_FAST_RX_AFE_CAL BIT(2)
#define MPHY_FW_CALIB_CFG_VAL BIT(8)
#define TX_RX_CFG_RDY_MASK GENMASK(3, 0)
#define TIMEOUT_MICROSEC 1000000L
#define IOCTL_UFS_TXRX_CFGRDY_GET 40
#define IOCTL_UFS_SRAM_CSR_SEL 41
#define PM_UFS_SRAM_CSR_WRITE 0
#define PM_UFS_SRAM_CSR_READ 1
struct ufs_versal2_priv {
struct ufs_hba *hba;
struct reset_ctl *rstc;
struct reset_ctl *rstphy;
u32 phy_mode;
u32 host_clk;
u32 pd_dev_id;
u8 attcompval0;
u8 attcompval1;
u8 ctlecompval0;
u8 ctlecompval1;
};
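/*
* Note: the DWC M-PHY is accessed indirectly via DME attributes: the 16-bit
* register address and value are split into CBCREGADDR/CBCREGWR LSB/MSB
* pairs, CBCREGRDWRSEL selects write (1) or read (0), and VS_MPHYCFGUPDT
* commits the access.
*/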
static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val)
{
static struct ufshcd_dme_attr_val phy_write_attrs[] = {
{ UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
phy_write_attrs[0].mib_val = (u8)addr;
phy_write_attrs[1].mib_val = (u8)(addr >> 8);
phy_write_attrs[2].mib_val = (u8)val;
phy_write_attrs[3].mib_val = (u8)(val >> 8);
return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs));
}
static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val)
{
u32 mib_val;
int ret;
static struct ufshcd_dme_attr_val phy_read_attrs[] = {
{ UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
phy_read_attrs[0].mib_val = (u8)addr;
phy_read_attrs[1].mib_val = (u8)(addr >> 8);
ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs));
if (ret)
return ret;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val);
if (ret)
return ret;
*val = mib_val;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val);
if (ret)
return ret;
*val |= (mib_val << 8);
return 0;
}
int versal2_pm_ufs_get_txrx_cfgrdy(u32 node_id, u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!value)
return -EINVAL;
ret = xilinx_pm_request(PM_IOCTL, node_id, IOCTL_UFS_TXRX_CFGRDY_GET,
0, 0, ret_payload);
*value = ret_payload[1];
return ret;
}
int versal2_pm_ufs_sram_csr_sel(u32 node_id, u32 type, u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!value)
return -EINVAL;
if (type == PM_UFS_SRAM_CSR_READ) {
ret = xilinx_pm_request(PM_IOCTL, node_id, IOCTL_UFS_SRAM_CSR_SEL,
type, 0, ret_payload);
*value = ret_payload[1];
} else {
ret = xilinx_pm_request(PM_IOCTL, node_id, IOCTL_UFS_SRAM_CSR_SEL,
type, *value, 0);
}
return ret;
}
static int ufs_versal2_enable_phy(struct ufs_hba *hba)
{
u32 offset, reg;
int ret;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0);
if (ret)
return ret;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
if (ret)
return ret;
/* Check Tx/Rx FSM states */
for (offset = 0; offset < 2; offset++) {
u32 time_left, mibsel;
time_left = TIMEOUT_MICROSEC;
mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset));
do {
ret = ufshcd_dme_get(hba, mibsel, &reg);
if (ret)
return ret;
if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP ||
reg == TX_STATE_LSBURST)
break;
time_left--;
mdelay(5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Invalid Tx FSM state.\n");
return -ETIMEDOUT;
}
time_left = TIMEOUT_MICROSEC;
mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset));
do {
ret = ufshcd_dme_get(hba, mibsel, &reg);
if (ret)
return ret;
if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP ||
reg == RX_STATE_LSBURST)
break;
time_left--;
mdelay(5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Invalid Rx FSM state.\n");
return -ETIMEDOUT;
}
}
return 0;
}
static int ufs_versal2_setup_phy(struct ufs_hba *hba)
{
struct ufs_versal2_priv *priv = dev_get_priv(hba->dev);
int ret;
u32 reg;
/* Bypass RX-AFE offset calibrations (ATT/CTLE) */
ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), &reg);
if (ret)
return ret;
reg |= MPHY_FAST_RX_AFE_CAL;
ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg);
if (ret)
return ret;
ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), &reg);
if (ret)
return ret;
reg |= MPHY_FAST_RX_AFE_CAL;
ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg);
if (ret)
return ret;
/* Program ATT and CTLE compensation values */
if (priv->attcompval0) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), priv->attcompval0);
if (ret)
return ret;
}
if (priv->attcompval1) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), priv->attcompval1);
if (ret)
return ret;
}
if (priv->ctlecompval0) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), priv->ctlecompval0);
if (ret)
return ret;
}
if (priv->ctlecompval1) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), priv->ctlecompval1);
if (ret)
return ret;
}
ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), &reg);
if (ret)
return ret;
reg |= MPHY_FW_CALIB_CFG_VAL;
ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg);
if (ret)
return ret;
ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), &reg);
if (ret)
return ret;
reg |= MPHY_FW_CALIB_CFG_VAL;
return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg);
}
static int ufs_versal2_phy_init(struct ufs_hba *hba)
{
struct ufs_versal2_priv *priv = dev_get_priv(hba->dev);
u32 reg, time_left;
int ret;
static const struct ufshcd_dme_attr_val rmmi_attrs[] = {
{ UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL },
{ UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL },
{ UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
/* Wait for Tx/Rx config_rdy */
time_left = TIMEOUT_MICROSEC;
do {
time_left--;
ret = versal2_pm_ufs_get_txrx_cfgrdy(priv->pd_dev_id, &reg);
if (ret)
return ret;
reg &= TX_RX_CFG_RDY_MASK;
if (!reg)
break;
mdelay(5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Tx/Rx configuration signal busy.\n");
return -ETIMEDOUT;
}
ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs));
if (ret)
return ret;
/* De-assert PHY reset */
ret = reset_deassert(priv->rstphy);
if (ret) {
dev_err(hba->dev, "ufsphy reset deassert failed\n");
return ret;
}
/* Wait for SRAM init done */
time_left = TIMEOUT_MICROSEC;
do {
time_left--;
ret = versal2_pm_ufs_sram_csr_sel(priv->pd_dev_id,
PM_UFS_SRAM_CSR_READ, &reg);
if (ret)
return ret;
reg &= SRAM_CSR_INIT_DONE_MASK;
if (reg)
break;
mdelay(5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "SRAM initialization failed.\n");
return -ETIMEDOUT;
}
ret = ufs_versal2_setup_phy(hba);
if (ret)
return ret;
return ufs_versal2_enable_phy(hba);
}
static int ufs_versal2_init(struct ufs_hba *hba)
{
struct ufs_versal2_priv *priv = dev_get_priv(hba->dev);
struct clk clk;
unsigned long core_clk_rate = 0;
int ret = 0;
priv->phy_mode = UFSHCD_DWC_PHY_MODE_ROM;
priv->pd_dev_id = VERSAL2_UFS_DEVICE_ID;
ret = clk_get_by_name(hba->dev, "core_clk", &clk);
if (ret) {
dev_err(hba->dev, "failed to get core_clk clock\n");
return ret;
}
core_clk_rate = clk_get_rate(&clk);
if (IS_ERR_VALUE(core_clk_rate)) {
dev_err(hba->dev, "%s: unable to find core_clk rate\n",
__func__);
return core_clk_rate;
}
priv->host_clk = core_clk_rate;
priv->rstc = devm_reset_control_get(hba->dev, "ufshc-rst");
if (IS_ERR(priv->rstc)) {
dev_err(hba->dev, "failed to get reset ctl: ufshc-rst\n");
return PTR_ERR(priv->rstc);
}
priv->rstphy = devm_reset_control_get(hba->dev, "ufsphy-rst");
if (IS_ERR(priv->rstphy)) {
dev_err(hba->dev, "failed to get reset ctl: ufsphy-rst\n");
return PTR_ERR(priv->rstphy);
}
return ret;
}
static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_versal2_priv *priv = dev_get_priv(hba->dev);
u32 sram_csr;
int ret;
switch (status) {
case PRE_CHANGE:
/* Assert RST_UFS Reset for UFS block in PMX_IOU */
ret = reset_assert(priv->rstc);
if (ret) {
dev_err(hba->dev, "ufshc reset assert failed, err = %d\n", ret);
return ret;
}
/* Assert PHY reset */
ret = reset_assert(priv->rstphy);
if (ret) {
dev_err(hba->dev, "ufsphy reset assert failed, err = %d\n", ret);
return ret;
}
ret = versal2_pm_ufs_sram_csr_sel(priv->pd_dev_id,
PM_UFS_SRAM_CSR_READ, &sram_csr);
if (ret)
return ret;
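/* phy_mode 0 is UFSHCD_DWC_PHY_MODE_ROM: run the PHY from its ROM code, skip the external firmware load and bypass the SRAM */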
if (!priv->phy_mode) {
sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK;
sram_csr |= SRAM_CSR_BYPASS_MASK;
} else {
dev_err(hba->dev, "Invalid phy-mode %d.\n", priv->phy_mode);
return -EINVAL;
}
ret = versal2_pm_ufs_sram_csr_sel(priv->pd_dev_id,
PM_UFS_SRAM_CSR_WRITE, &sram_csr);
if (ret)
return ret;
/* De-assert RST_UFS reset for the UFS block in PMX_IOU */
ret = reset_deassert(priv->rstc);
if (ret)
dev_err(hba->dev, "ufshc reset deassert failed, err = %d\n", ret);
break;
case POST_CHANGE:
ret = ufs_versal2_phy_init(hba);
if (ret)
dev_err(hba->dev, "Phy init failed (%d)\n", ret);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int ufs_versal2_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_versal2_priv *priv = dev_get_priv(hba->dev);
int ret = 0;
switch (status) {
case PRE_CHANGE:
if (priv->host_clk) {
u32 core_clk_div = priv->host_clk / TIMEOUT_MICROSEC;
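/*
* host_clk is in Hz, so dividing by 1000000 (TIMEOUT_MICROSEC reused as a
* Hz-to-MHz divisor) yields the divider in MHz; e.g. a 125 MHz core clock
* gives 125 == 0x7d, matching DWC_UFS_REG_HCLKDIV_DIV_125.
*/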
ufshcd_writel(hba, core_clk_div, DWC_UFS_REG_HCLKDIV);
}
break;
case POST_CHANGE:
ret = ufshcd_dwc_link_startup_notify(hba, status);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static struct ufs_hba_ops ufs_versal2_hba_ops = {
.init = ufs_versal2_init,
.link_startup_notify = ufs_versal2_link_startup_notify,
.hce_enable_notify = ufs_versal2_hce_enable_notify,
};
static int ufs_versal2_probe(struct udevice *dev)
{
int ret;
/* Perform generic probe */
ret = ufshcd_probe(dev, &ufs_versal2_hba_ops);
if (ret)
dev_err(dev, "ufshcd_probe() failed %d\n", ret);
return ret;
}
static int ufs_versal2_bind(struct udevice *dev)
{
struct udevice *scsi_dev;
return ufs_scsi_bind(dev, &scsi_dev);
}
static const struct udevice_id ufs_versal2_ids[] = {
{
.compatible = "amd,versal2-ufs",
},
{},
};
U_BOOT_DRIVER(ufs_versal2_pltfm) = {
.name = "ufs-versal2-pltfm",
.id = UCLASS_UFS,
.of_match = ufs_versal2_ids,
.probe = ufs_versal2_probe,
.bind = ufs_versal2_bind,
};

drivers/ufs/ufs-qcom.c (new file, 670 lines)

@@ -0,0 +1,670 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
* Copyright (C) 2023-2024 Linaro Limited
* Authors:
* - Bhupesh Sharma <bhupesh.sharma@linaro.org>
* - Neil Armstrong <neil.armstrong@linaro.org>
*
* Based on Linux driver
*/
#include <asm/io.h>
#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <generic-phy.h>
#include <ufs.h>
#include <asm/gpio.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include "ufs.h"
#include "ufs-qcom.h"
#define ceil(freq, div) ((freq) % (div) == 0 ? ((freq) / (div)) : ((freq) / (div) + 1))
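/* e.g. ceil(201500000, 1000000) == 202: rounds a UniPro clock in Hz up to whole MHz */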
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_hba *hba, bool enable);
static int ufs_qcom_enable_clks(struct ufs_qcom_priv *priv)
{
int err;
if (priv->is_clks_enabled)
return 0;
err = clk_enable_bulk(&priv->clks);
if (err)
return err;
priv->is_clks_enabled = true;
return 0;
}
static int ufs_qcom_init_clks(struct ufs_qcom_priv *priv)
{
int err;
struct udevice *dev = priv->hba->dev;
err = clk_get_bulk(dev, &priv->clks);
if (err)
return err;
return 0;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
int err, retry_count = 50;
u32 tx_fsm_val = 0;
do {
err = ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
&tx_fsm_val);
if (err || tx_fsm_val == TX_FSM_HIBERN8)
break;
/* max. 200us */
udelay(200);
retry_count--;
} while (retry_count != 0);
/* Check the state again */
err = ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
&tx_fsm_val);
if (err) {
dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
__func__, err);
} else if (tx_fsm_val != TX_FSM_HIBERN8) {
err = tx_fsm_val;
dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
__func__, err);
}
return err;
}
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_priv *priv)
{
ufshcd_rmwl(priv->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
if (priv->hw_ver.major >= 0x05)
ufshcd_rmwl(priv->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
}
/*
* ufs_qcom_reset - reset host controller and PHY
*/
static int ufs_qcom_reset(struct ufs_hba *hba)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
int ret;
ret = reset_assert(&priv->core_reset);
if (ret) {
dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
__func__, ret);
return ret;
}
/*
* The hardware requirement for delay between assert/deassert
* is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
* ~125us (4/32768). To be on the safe side add 200us delay.
*/
udelay(210);
ret = reset_deassert(&priv->core_reset);
if (ret)
dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
__func__, ret);
udelay(1100);
return 0;
}
/**
* ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
* @hba: host controller instance
*
* The QCOM UFS host controller may exhibit some non-standard behaviours
* (quirks) beyond what the UFSHCI specification mandates. Advertise all such
* quirks to the standard UFS host controller driver so that it takes them
* into account.
*/
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
if (priv->hw_ver.major == 0x2)
hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
if (priv->hw_ver.major > 0x3)
hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
}
/**
* ufs_qcom_setup_clocks - enables/disable clocks
* @hba: host controller instance
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
* Returns 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
switch (status) {
case PRE_CHANGE:
if (!on)
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(hba, false);
break;
case POST_CHANGE:
if (on)
/* enable the device ref clock for HS mode */
ufs_qcom_dev_ref_clk_ctrl(hba, true);
break;
}
return 0;
}
static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
/*
* TOFIX: v4 controllers *should* be able to support HS Gear 4,
* but so far the pwr_mode switch fails on v4 controllers with HS Gear 4.
* Only enable HS Gear > 3 for controller major version 5 and later.
*/
if (priv->hw_ver.major > 0x4)
return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
/* Default is HS-G3 */
return UFS_HS_G3;
}
static int ufs_get_max_pwr_mode(struct ufs_hba *hba,
struct ufs_pwr_mode_info *max_pwr_info)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
u32 max_gear = ufs_qcom_get_hs_gear(hba);
max_pwr_info->info.gear_rx = min(max_pwr_info->info.gear_rx, max_gear);
/* Qualcomm UFS only supports symmetric gears */
max_pwr_info->info.gear_tx = max_pwr_info->info.gear_rx;
if (priv->hw_ver.major >= 0x4 && max_pwr_info->info.gear_rx > UFS_HS_G3)
ufshcd_dme_set(hba,
UIC_ARG_MIB(PA_TXHSADAPTTYPE),
PA_INITIAL_ADAPT);
dev_info(hba->dev, "Max HS Gear: %d\n", max_pwr_info->info.gear_rx);
return 0;
}
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
struct phy phy;
int ret;
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_reset(hba);
if (ret)
dev_warn(hba->dev, "%s: host reset returned %d\n",
__func__, ret);
/* get phy */
ret = generic_phy_get_by_name(hba->dev, "ufsphy", &phy);
if (ret) {
dev_warn(hba->dev, "%s: Unable to get QMP ufs phy, ret = %d\n",
__func__, ret);
return ret;
}
/* phy initialization */
ret = generic_phy_init(&phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
return ret;
}
/* power on phy */
ret = generic_phy_power_on(&phy);
if (ret) {
dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
__func__, ret);
goto out_disable_phy;
}
ufs_qcom_select_unipro_mode(priv);
return 0;
out_disable_phy:
generic_phy_exit(&phy);
return ret;
}
/*
* The UTP controller has a number of internal clock gating cells (CGCs).
* Internal hardware sub-modules within the UTP controller control the CGCs.
* Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
* in a specific operation. UTP controller CGCs are disabled by default, and
* this function enables them (after every UFS link startup) to save some
* power leakage.
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
ufshcd_readl(hba, REG_UFS_CFG2);
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
int err;
switch (status) {
case PRE_CHANGE:
ufs_qcom_power_up_sequence(hba);
/*
* The PHY PLL output is the source of tx/rx lane symbol
* clocks, hence, enable the lane clocks only after PHY
* is initialized.
*/
err = ufs_qcom_enable_clks(priv);
break;
case POST_CHANGE:
/* check if UFS PHY moved from DISABLED to HIBERN8 */
err = ufs_qcom_check_hibern8(hba);
ufs_qcom_enable_hw_clk_gating(hba);
break;
default:
dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
err = -EINVAL;
break;
}
return err;
}
/* Look for the maximum core_clk_unipro clock value */
static u32 ufs_qcom_get_core_clk_unipro_max_freq(struct ufs_hba *hba)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
ofnode node = dev_ofnode(priv->hba->dev);
struct ofnode_phandle_args opp_table;
int pos, ret;
u32 clk = 0;
/* Get core_clk_unipro clock index */
pos = ofnode_stringlist_search(node, "clock-names", "core_clk_unipro");
if (pos < 0)
goto fallback;
/* Try parsing the opps */
if (!ofnode_parse_phandle_with_args(node, "required-opps",
NULL, 0, 0, &opp_table) &&
ofnode_device_is_compatible(opp_table.node, "operating-points-v2")) {
ofnode opp_node;
ofnode_for_each_subnode(opp_node, opp_table.node) {
u64 opp_clk;
/* opp-hz contains the OPP frequency */
ret = ofnode_read_u64_index(opp_node, "opp-hz", pos, &opp_clk);
if (ret)
continue;
/* We don't handle larger clock values, ignore */
if (opp_clk > U32_MAX)
continue;
/* Only keep the largest value */
if (opp_clk > clk)
clk = opp_clk;
}
/* If we got a valid clock, return it; otherwise check the legacy property */
if (clk)
return clk;
}
/* Legacy freq-table-hz has a pair of u32 per clock entry, min then max */
if (!ofnode_read_u32_index(node, "freq-table-hz", pos * 2 + 1, &clk) &&
clk > 0)
return clk;
fallback:
/* default for backwards compatibility */
return UNIPRO_CORE_CLK_FREQ_150_MHZ * 1000 * 1000;
}
static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
u32 cycles_in_1us)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
u32 cycles_in_40ns;
int err;
u32 reg;
/*
* UFS host controllers V4.0.0 onwards need to program the
* PA_VS_CORE_CLK_40NS_CYCLES attribute according to the programmed
* frequency of the UniPro core clock of the UFS host controller.
*/
if (priv->hw_ver.major < 4)
return 0;
/*
* The generic formula cycles_in_40ns = (freq_unipro / 25) does not hold
* for all frequencies. For example, ceil(37.5 MHz / 25) is 2 and
* ceil(403 MHz / 25) is 17, whereas the hardware specification expects 16.
* Hence use the exact spec-mandated value for cycles_in_40ns instead of
* calculating it with the generic formula.
*/
switch (cycles_in_1us) {
case UNIPRO_CORE_CLK_FREQ_403_MHZ:
cycles_in_40ns = 16;
break;
case UNIPRO_CORE_CLK_FREQ_300_MHZ:
cycles_in_40ns = 12;
break;
case UNIPRO_CORE_CLK_FREQ_201_5_MHZ:
cycles_in_40ns = 8;
break;
case UNIPRO_CORE_CLK_FREQ_150_MHZ:
cycles_in_40ns = 6;
break;
case UNIPRO_CORE_CLK_FREQ_100_MHZ:
cycles_in_40ns = 4;
break;
case UNIPRO_CORE_CLK_FREQ_75_MHZ:
cycles_in_40ns = 3;
break;
case UNIPRO_CORE_CLK_FREQ_37_5_MHZ:
cycles_in_40ns = 2;
break;
default:
dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n",
cycles_in_1us);
return -EINVAL;
}
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), &reg);
if (err)
return err;
reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK;
reg |= cycles_in_40ns;
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
u32 core_clk_ctrl_reg;
u32 cycles_in_1us;
int err;
cycles_in_1us = ceil(ufs_qcom_get_core_clk_unipro_max_freq(hba),
(1000 * 1000));
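/* cycles_in_1us is the UniPro core clock in MHz rounded up, i.e. clock cycles per microsecond */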
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
if (err)
return err;
/* Bit mask is different for UFS host controller V4.0.0 onwards */
if (priv->hw_ver.major >= 4) {
core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4;
core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
} else {
core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK;
core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us);
}
/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
err = ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
if (err)
return err;
/* Configure unipro core clk 40ns attribute */
return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
}
static u32 ufs_qcom_get_local_unipro_ver(struct ufs_hba *hba)
{
/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
switch (hba->version) {
case UFSHCI_VERSION_10:
case UFSHCI_VERSION_11:
return UFS_UNIPRO_VER_1_41;
case UFSHCI_VERSION_20:
case UFSHCI_VERSION_21:
default:
return UFS_UNIPRO_VER_1_6;
}
}
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
switch (status) {
case PRE_CHANGE:
err = ufs_qcom_set_core_clk_ctrl(hba);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
* Some UFS devices (and maybe the host) have issues if LCC is
* enabled. So we are setting PA_Local_TX_LCC_Enable to 0
* before link startup which will make sure that both host
* and device TX LCC are disabled once link startup is
* completed.
*/
if (ufs_qcom_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
break;
default:
break;
}
return err;
}
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_hba *hba, bool enable)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
if (enable ^ priv->is_dev_ref_clk_enabled) {
u32 temp = readl_relaxed(hba->mmio_base + REG_UFS_CFG1);
if (enable)
temp |= BIT(26);
else
temp &= ~BIT(26);
/*
* If we are here to disable this clock, it might be immediately
* after entering hibern8, in which case we need to make sure that
* the device ref_clk is active for a specific time after hibern8
* entry.
*/
if (!enable)
udelay(10);
writel_relaxed(temp, hba->mmio_base + REG_UFS_CFG1);
/*
* Make sure the write to ref_clk reaches the destination and is
* not stored in a Write Buffer (WB).
*/
readl(hba->mmio_base + REG_UFS_CFG1);
/*
* If we call hibern8 exit after this, we need to make sure that
* device ref_clk is stable for at least 1us before the hibern8
* exit command.
*/
if (enable)
udelay(1);
priv->is_dev_ref_clk_enabled = enable;
}
}
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
*
* Powers up PHY enabling clocks and regulators.
*
* Returns -EPROBE_DEFER if binding fails, returns negative error
* on phy power up failure and returns zero on success.
*/
static int ufs_qcom_init(struct ufs_hba *hba)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
int err;
priv->hba = hba;
/* setup clocks */
ufs_qcom_setup_clocks(hba, true, PRE_CHANGE);
if (priv->hw_ver.major >= 0x4)
ufshcd_dme_set(hba,
UIC_ARG_MIB(PA_TXHSADAPTTYPE),
PA_NO_ADAPT);
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
ufs_qcom_get_controller_revision(hba, &priv->hw_ver.major,
&priv->hw_ver.minor,
&priv->hw_ver.step);
dev_info(hba->dev, "Qcom UFS HC version: %d.%d.%d\n",
priv->hw_ver.major,
priv->hw_ver.minor,
priv->hw_ver.step);
err = ufs_qcom_init_clks(priv);
if (err) {
dev_err(hba->dev, "failed to initialize clocks, err:%d\n", err);
return err;
}
ufs_qcom_advertise_quirks(hba);
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
return 0;
}
/**
* ufs_qcom_device_reset() - toggle the (optional) device reset line
* @hba: per-adapter instance
*
* Toggles the (optional) reset line to reset the attached device.
*/
static int ufs_qcom_device_reset(struct ufs_hba *hba)
{
struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
if (!dm_gpio_is_valid(&priv->reset))
return 0;
/*
* The UFS device shall detect reset pulses of 1us, sleep for 10us to
* be on the safe side.
*/
dm_gpio_set_value(&priv->reset, true);
udelay(10);
dm_gpio_set_value(&priv->reset, false);
udelay(10);
return 0;
}
static struct ufs_hba_ops ufs_qcom_hba_ops = {
.init = ufs_qcom_init,
.get_max_pwr_mode = ufs_get_max_pwr_mode,
.hce_enable_notify = ufs_qcom_hce_enable_notify,
.link_startup_notify = ufs_qcom_link_startup_notify,
.device_reset = ufs_qcom_device_reset,
};
static int ufs_qcom_probe(struct udevice *dev)
{
struct ufs_qcom_priv *priv = dev_get_priv(dev);
int ret;
/* get resets */
ret = reset_get_by_name(dev, "rst", &priv->core_reset);
if (ret) {
dev_err(dev, "failed to get reset, ret:%d\n", ret);
return ret;
}
ret = gpio_request_by_name(dev, "reset-gpios", 0, &priv->reset, GPIOD_IS_OUT);
if (ret) {
dev_err(dev, "Warning: cannot get reset GPIO\n");
}
ret = ufshcd_probe(dev, &ufs_qcom_hba_ops);
if (ret) {
dev_err(dev, "ufshcd_probe() failed, ret:%d\n", ret);
return ret;
}
return 0;
}
static int ufs_qcom_bind(struct udevice *dev)
{
struct udevice *scsi_dev;
return ufs_scsi_bind(dev, &scsi_dev);
}
static const struct udevice_id ufs_qcom_ids[] = {
{ .compatible = "qcom,ufshc" },
{},
};
U_BOOT_DRIVER(qcom_ufshcd) = {
.name = "qcom-ufshcd",
.id = UCLASS_UFS,
.of_match = ufs_qcom_ids,
.probe = ufs_qcom_probe,
.bind = ufs_qcom_bind,
.priv_auto = sizeof(struct ufs_qcom_priv),
};

drivers/ufs/ufs-qcom.h (new file, 147 lines)

@@ -0,0 +1,147 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
*/
#ifndef UFS_QCOM_H_
#define UFS_QCOM_H_
#include <reset.h>
#include <linux/bitfield.h>
#define MPHY_TX_FSM_STATE 0x41
#define TX_FSM_HIBERN8 0x1
#define DEFAULT_CLK_RATE_HZ 1000000
#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
#define UFS_HW_VER_STEP_MASK GENMASK(15, 0)
/* QCOM UFS host controller vendor specific registers */
enum {
REG_UFS_SYS1CLK_1US = 0xC0,
REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
REG_UFS_PA_ERR_CODE = 0xCC,
/* On older UFS revisions, this register is called "RETRY_TIMER_REG" */
REG_UFS_PARAM0 = 0xD0,
/* On older UFS revisions, this register is called "REG_UFS_PA_LINK_STARTUP_TIMER" */
REG_UFS_CFG0 = 0xD8,
REG_UFS_CFG1 = 0xDC,
REG_UFS_CFG2 = 0xE0,
REG_UFS_HW_VERSION = 0xE4,
UFS_TEST_BUS = 0xE8,
UFS_TEST_BUS_CTRL_0 = 0xEC,
UFS_TEST_BUS_CTRL_1 = 0xF0,
UFS_TEST_BUS_CTRL_2 = 0xF4,
UFS_UNIPRO_CFG = 0xF8,
/*
* QCOM UFS host controller vendor specific registers
* added in HW Version 3.0.0
*/
UFS_AH8_CFG = 0xFC,
REG_UFS_CFG3 = 0x271C,
};
/* bit definitions for REG_UFS_CFG0 register */
#define QUNIPRO_G4_SEL BIT(5)
/* bit definitions for REG_UFS_CFG1 register */
#define QUNIPRO_SEL BIT(0)
#define UFS_PHY_SOFT_RESET BIT(1)
#define UTP_DBG_RAMS_EN BIT(17)
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
#define UFS_PHY_RESET_ENABLE 1
#define UFS_PHY_RESET_DISABLE 0
/* bit definitions for REG_UFS_CFG2 register */
#define UAWM_HW_CGC_EN BIT(0)
#define UARM_HW_CGC_EN BIT(1)
#define TXUC_HW_CGC_EN BIT(2)
#define RXUC_HW_CGC_EN BIT(3)
#define DFC_HW_CGC_EN BIT(4)
#define TRLUT_HW_CGC_EN BIT(5)
#define TMRLUT_HW_CGC_EN BIT(6)
#define OCSC_HW_CGC_EN BIT(7)
/* bit definitions for REG_UFS_PARAM0 */
#define MAX_HS_GEAR_MASK GENMASK(6, 4)
#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x))
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* bit offset */
#define OFFSET_CLK_NS_REG 0xa
/* bit masks */
#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0)
#define MASK_CLK_NS_REG GENMASK(23, 10)
/* QUniPro Vendor specific attributes */
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16)
#define CLK_1US_CYCLES_MASK GENMASK(7, 0)
#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
#define PA_VS_CORE_CLK_40NS_CYCLES 0x9007
#define PA_VS_CORE_CLK_40NS_CYCLES_MASK GENMASK(6, 0)
/* QCOM UFS host controller core clk frequencies */
#define UNIPRO_CORE_CLK_FREQ_37_5_MHZ 38
#define UNIPRO_CORE_CLK_FREQ_75_MHZ 75
#define UNIPRO_CORE_CLK_FREQ_100_MHZ 100
#define UNIPRO_CORE_CLK_FREQ_150_MHZ 150
#define UNIPRO_CORE_CLK_FREQ_300_MHZ 300
#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202
#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403
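/*
* These values are the UniPro core clock in MHz rounded up (hence 38 for
* 37.5 MHz and 202 for 201.5 MHz); they double as the cycles-per-microsecond
* count programmed into DME_VS_CORE_CLK_CTRL.
*/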
static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
u8 *major, u16 *minor, u16 *step)
{
u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
*major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, ver);
*minor = FIELD_GET(UFS_HW_VER_MINOR_MASK, ver);
*step = FIELD_GET(UFS_HW_VER_STEP_MASK, ver);
}
/* Host controller hardware version: major.minor.step */
struct ufs_hw_version {
u16 step;
u16 minor;
u8 major;
};
struct gpio_desc;
struct ufs_qcom_priv {
struct phy *generic_phy;
struct ufs_hba *hba;
struct clk_bulk clks;
bool is_clks_enabled;
struct ufs_hw_version hw_ver;
/* Reset control of HCI */
struct reset_ctl core_reset;
struct gpio_desc reset;
bool is_dev_ref_clk_enabled;
};
#endif /* UFS_QCOM_H_ */

drivers/ufs/ufs.c

@@ -125,6 +125,11 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
static void ufshcd_device_reset(struct ufs_hba *hba)
{
ufshcd_vops_device_reset(hba);
}
/**
* ufshcd_ready_for_uic_cmd - Check if controller is ready
* to accept UIC commands
@@ -432,6 +437,12 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
* Make sure base address and interrupt setup are updated before
* enabling the run/stop registers below.
*/
wmb();
/*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
*/
@@ -456,9 +467,7 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
bool link_startup_again = true;
link_startup:
do {
ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);
@@ -484,12 +493,6 @@ link_startup:
/* failed to get the link up... retire */
goto out;
if (link_startup_again) {
link_startup_again = false;
retries = DME_LINKSTARTUP_RETRIES;
goto link_startup;
}
/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
ufshcd_init_pwr_info(hba);
@@ -504,6 +507,8 @@ link_startup:
if (ret)
goto out;
/* Clear UECPA once, due to a LINERESET that may have happened during LINK_STARTUP */
ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
ret = ufshcd_make_hba_operational(hba);
out:
if (ret)
@@ -633,7 +638,9 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate one Transfer Request Descriptor
* Should be aligned to 1k boundary.
*/
hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
hba->utrdl = memalign(1024,
ALIGN(sizeof(struct utp_transfer_req_desc),
ARCH_DMA_MINALIGN));
if (!hba->utrdl) {
dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
return -ENOMEM;
@@ -642,7 +649,9 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate one Command Descriptor
* Should be aligned to 1k boundary.
*/
hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
hba->ucdl = memalign(1024,
ALIGN(sizeof(struct utp_transfer_cmd_desc),
ARCH_DMA_MINALIGN));
if (!hba->ucdl) {
dev_err(hba->dev, "Command descriptor memory allocation failed\n");
return -ENOMEM;
@@ -692,18 +701,29 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
}
/**
* ufshcd_cache_flush_and_invalidate - Flush and invalidate cache
* ufshcd_cache_flush - Flush cache
*
* Flush and invalidate cache in aligned address..address+size range.
* The invalidation is in place to avoid stale data in cache.
* Flush cache in aligned address..address+size range.
*/
static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size)
static void ufshcd_cache_flush(void *addr, unsigned long size)
{
uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);
uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
flush_dcache_range(aaddr, aaddr + asize);
invalidate_dcache_range(aaddr, aaddr + asize);
flush_dcache_range(start_addr, end_addr);
}
/**
* ufshcd_cache_invalidate - Invalidate cache
*
* Invalidate cache in aligned address..address+size range.
*/
static void ufshcd_cache_invalidate(void *addr, unsigned long size)
{
uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
invalidate_dcache_range(start_addr, end_addr);
}
/**
@@ -750,7 +770,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
req_desc->prd_table_length = 0;
ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
ufshcd_cache_flush(req_desc, sizeof(*req_desc));
}
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
@@ -781,13 +801,13 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
memcpy(ucd_req_ptr + 1, query->descriptor, len);
ufshcd_cache_flush_and_invalidate(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
ufshcd_cache_flush(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
} else {
ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
}
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
@@ -805,8 +825,8 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
/**
@@ -844,6 +864,9 @@ static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure doorbell reg is updated before reading interrupt status */
wmb();
start = get_timer(0);
do {
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -873,6 +896,8 @@ static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
*/
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
ufshcd_cache_invalidate(ucd_rsp_ptr, sizeof(*ucd_rsp_ptr));
return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
@@ -884,6 +909,8 @@ static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
struct utp_transfer_req_desc *req_desc = hba->utrdl;
ufshcd_cache_invalidate(req_desc, sizeof(*req_desc));
return le32_to_cpu(req_desc->header.dword_2) & MASK_OCS;
}
@@ -1433,8 +1460,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
ufshcd_cache_flush(ucd_req_ptr, sizeof(*ucd_req_ptr));
ufshcd_cache_flush(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
@@ -1449,7 +1476,6 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
struct utp_transfer_req_desc *req_desc = hba->utrdl;
struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1);
ulong datalen = pccb->datalen;
int table_length;
u8 *buf;
@@ -1457,19 +1483,10 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
if (!datalen) {
req_desc->prd_table_length = 0;
ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
ufshcd_cache_flush(req_desc, sizeof(*req_desc));
return;
}
if (pccb->dma_dir == DMA_TO_DEVICE) { /* Write to device */
flush_dcache_range(aaddr, aaddr +
ALIGN(datalen, ARCH_DMA_MINALIGN));
}
/* In any case, invalidate cache to avoid stale data in it. */
invalidate_dcache_range(aaddr, aaddr +
ALIGN(datalen, ARCH_DMA_MINALIGN));
table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
buf = pccb->pdata;
i = table_length;
@@ -1483,8 +1500,8 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);
req_desc->prd_table_length = table_length;
ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length);
ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
ufshcd_cache_flush(prd_table, sizeof(*prd_table) * table_length);
ufshcd_cache_flush(req_desc, sizeof(*req_desc));
}
static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
@@ -1498,8 +1515,12 @@ static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
prepare_prdt_table(hba, pccb);
ufshcd_cache_flush(pccb->pdata, pccb->datalen);
ufshcd_send_command(hba, TASK_TAG);
ufshcd_cache_invalidate(pccb->pdata, pccb->datalen);
ocs = ufshcd_get_tr_ocs(hba);
switch (ocs) {
case OCS_SUCCESS:
@@ -1723,7 +1744,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
}
hba->max_pwr_info.is_valid = true;
return 0;
return ufshcd_ops_get_max_pwr_mode(hba, &hba->max_pwr_info);
}
static int ufshcd_change_power_mode(struct ufs_hba *hba,
@@ -1901,7 +1922,7 @@ int ufs_start(struct ufs_hba *hba)
return ret;
}
printf("Device at %s up at:", hba->dev->name);
debug("UFS Device %s is up!\n", hba->dev->name);
ufshcd_print_pwr_info(hba);
}
@@ -1953,7 +1974,8 @@ int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
hba->version != UFSHCI_VERSION_20 &&
hba->version != UFSHCI_VERSION_21 &&
hba->version != UFSHCI_VERSION_30 &&
hba->version != UFSHCI_VERSION_31)
hba->version != UFSHCI_VERSION_31 &&
hba->version != UFSHCI_VERSION_40)
dev_err(hba->dev, "invalid UFS version 0x%x\n",
hba->version);
@@ -1979,6 +2001,11 @@ int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
REG_INTERRUPT_STATUS);
ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
mb();
/* Reset the attached device */
ufshcd_device_reset(hba);
err = ufshcd_hba_enable(hba);
if (err) {
dev_err(hba->dev, "Host controller enable failed\n");

drivers/ufs/ufs.h

@@ -3,6 +3,7 @@
#define __UFS_H
#include <linux/types.h>
#include <asm/io.h>
#include "unipro.h"
struct udevice;
@@ -695,11 +696,177 @@ struct ufs_dev_cmd {
struct ufs_hba_ops {
int (*init)(struct ufs_hba *hba);
int (*get_max_pwr_mode)(struct ufs_hba *hba,
struct ufs_pwr_mode_info *max_pwr_info);
int (*hce_enable_notify)(struct ufs_hba *hba,
enum ufs_notify_change_status);
int (*link_startup_notify)(struct ufs_hba *hba,
enum ufs_notify_change_status);
int (*phy_initialization)(struct ufs_hba *hba);
int (*device_reset)(struct ufs_hba *hba);
};
enum ufshcd_quirks {
/* Interrupt aggregation support is broken */
UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
/*
* delay before each dme command is required as the unipro
* layer has shown instabilities
*/
UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
/*
* If UFS host controller is having issue in processing LCC (Line
* Control Command) coming from device then enable this quirk.
* When this quirk is enabled, host controller driver should disable
* the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
* attribute of device to 0).
*/
UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
/*
* The attribute PA_RXHSUNTERMCAP specifies whether or not the
* inbound Link supports unterminated line in HS mode. Setting this
* attribute to 1 fixes moving to HS gear.
*/
UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
/*
* This quirk needs to be enabled if the host controller only allows
* accessing the peer dme attributes in AUTO mode (FAST AUTO or
* SLOW AUTO).
*/
UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
/*
* This quirk needs to be enabled if the host controller doesn't
* advertise the correct version in UFS_VER register. If this quirk
* is enabled, standard UFS host driver will call the vendor specific
* ops (get_ufs_hci_version) to get the correct version.
*/
UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
/*
* Clear handling for the transfer/task request list is inverted.
*/
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
/*
* This quirk needs to be enabled if the host controller doesn't allow
* the interrupt aggregation timer and counter to be reset by s/w.
*/
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
/*
* This quirk needs to be enabled if the host controller cannot be
* enabled via HCE register.
*/
UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
/*
* This quirk needs to be enabled if the host controller interprets
* the PRDTO and PRDTL values in the UTRD with byte granularity.
*/
UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
/*
* This quirk needs to be enabled if the host controller reports
* OCS FATAL ERROR with device error through sense data
*/
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
/*
* This quirk needs to be enabled if the host controller has
* auto-hibernate capability but it doesn't work.
*/
UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
/*
* This quirk needs to disable manual flush for write booster
*/
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
/*
* This quirk needs to disable unipro timeout values
* before power mode change
*/
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
/*
* This quirk needs to be enabled if the host controller does not
* support UIC command
*/
UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
/*
* This quirk needs to be enabled if the host controller cannot
* support physical host configuration.
*/
UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
/*
* This quirk needs to be enabled if the host controller has
* 64-bit addressing supported capability but it doesn't work.
*/
UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17,
/*
* This quirk needs to be enabled if the host controller has
* auto-hibernate capability but it's FASTAUTO only.
*/
UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
/*
* This quirk needs to be enabled if the host controller needs
* to reinit the device after switching to maximum gear.
*/
UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19,
/*
* Some hosts raise a per-queue interrupt in addition to
* CQES (traditional) when ESI is disabled.
* Enabling this quirk will disable CQES and use per-queue interrupts.
*/
UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1 << 20,
/*
* Some hosts do not implement the SQ Run Time Command (SQRTC) register,
* so this quirk is needed to skip the related flow.
*/
UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21,
/*
* This quirk needs to be enabled if the host controller supports inline
* encryption but it needs to initialize the crypto capabilities in a
* nonstandard way and/or needs to override blk_crypto_ll_ops. If
* enabled, the standard code won't initialize the blk_crypto_profile;
* ufs_hba_variant_ops::init() must do it instead.
*/
UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 22,
/*
* This quirk needs to be enabled if the host controller supports inline
* encryption but does not support the CRYPTO_GENERAL_ENABLE bit, i.e.
* host controller initialization fails if that bit is set.
*/
UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 23,
/*
* This quirk needs to be enabled if the host controller driver copies
* cryptographic keys into the PRDT in order to send them to hardware,
* and therefore the PRDT should be zeroized after each request (as per
* the standard best practice for managing keys).
*/
UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24,
/*
* This quirk indicates that the controller reports the value 1 (not
* supported) in the Legacy Single DoorBell Support (LSDBS) bit of the
* Controller Capabilities register although it supports the legacy
* single doorbell mode.
*/
UFSHCD_QUIRK_BROKEN_LSDBS_CAP = 1 << 25,
};
struct ufs_hba {
@@ -710,27 +877,7 @@ struct ufs_hba {
u32 capabilities;
u32 version;
u32 intr_mask;
u32 quirks;
/*
* If UFS host controller is having issue in processing LCC (Line
* Control Command) coming from device then enable this quirk.
* When this quirk is enabled, host controller driver should disable
* the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
* attribute of device to 0).
*/
#define UFSHCD_QUIRK_BROKEN_LCC BIT(0)
/*
* This quirk needs to be enabled if the host controller has
* 64-bit addressing supported capability but it doesn't work.
*/
#define UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS BIT(1)
/*
* This quirk needs to be enabled if the host controller has
* auto-hibernate capability but it's FASTAUTO only.
*/
#define UFSHCD_QUIRK_HIBERN_FASTAUTO BIT(2)
enum ufshcd_quirks quirks;
/* Virtual memory reference */
struct utp_transfer_cmd_desc *ucdl;
@@ -758,6 +905,15 @@ static inline int ufshcd_ops_init(struct ufs_hba *hba)
return 0;
}
static inline int ufshcd_ops_get_max_pwr_mode(struct ufs_hba *hba,
struct ufs_pwr_mode_info *max_pwr_info)
{
if (hba->ops && hba->ops->get_max_pwr_mode)
return hba->ops->get_max_pwr_mode(hba, max_pwr_info);
return 0;
}
static inline int ufshcd_ops_hce_enable_notify(struct ufs_hba *hba,
bool status)
{
@@ -776,6 +932,14 @@ static inline int ufshcd_ops_link_startup_notify(struct ufs_hba *hba,
return 0;
}
static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
{
if (hba->ops && hba->ops->device_reset)
return hba->ops->device_reset(hba);
return 0;
}
/* Controller UFSHCI version */
enum {
UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
@@ -784,6 +948,7 @@ enum {
UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
UFSHCI_VERSION_30 = 0x00000300, /* 3.0 */
UFSHCI_VERSION_31 = 0x00000310, /* 3.1 */
UFSHCI_VERSION_40 = 0x00000400, /* 4.0 */
};
/* Interrupt disable masks */
@@ -921,6 +1086,23 @@ enum {
#define ufshcd_readl(hba, reg) \
readl((hba)->mmio_base + (reg))
/**
* ufshcd_rmwl - perform read/modify/write for a controller register
* @hba: per adapter instance
* @mask: mask to apply on read value
* @val: actual value to write
* @reg: register address
*/
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
u32 tmp;
tmp = ufshcd_readl(hba, reg);
tmp &= ~mask;
tmp |= (val & mask);
ufshcd_writel(hba, tmp, reg);
}
/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
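For reference, the Qualcomm driver added in this series uses the new ufshcd_rmwl() helper to flip single mode bits without disturbing the rest of a vendor register, e.g.:

/* set QUNIPRO_SEL (bit 0) in REG_UFS_CFG1, leaving all other bits intact */
ufshcd_rmwl(hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);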

drivers/ufs/ufshcd-dwc.c (new file, 133 lines)

@@ -0,0 +1,133 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* UFS Host driver for Synopsys Designware Core
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
*/
#include <clk.h>
#include <dm.h>
#include <ufs.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/time.h>
#include "ufs.h"
#include "ufshci-dwc.h"
#include "ufshcd-dwc.h"
int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
const struct ufshcd_dme_attr_val *v, int n)
{
int ret = 0;
int attr_node = 0;
for (attr_node = 0; attr_node < n; attr_node++) {
ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel,
ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
if (ret)
return ret;
}
return 0;
}
/**
* ufshcd_dwc_program_clk_div() - program clock divider.
* @hba: Private Structure pointer
* @divider_val: clock divider value to be programmed
*
*/
static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
{
ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV);
}
/**
* ufshcd_dwc_link_is_up() - check if link is up.
* @hba: private structure pointer
*
* Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
{
int dme_result = 0;
ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result);
if (dme_result == UFSHCD_LINK_IS_UP)
return 0;
return 1;
}
/**
* ufshcd_dwc_connection_setup() - configure unipro attributes.
* @hba: pointer to drivers private data
*
* This function configures both the local side (host) and the peer side
* (device) unipro attributes to establish the connection to application/
* cport.
* This function is not required if the hardware is properly configured to
* have this connection setup on reset. But invoking this function does no
* harm and should be fine even when working with any UFS device.
*
* Return: 0 on success non-zero value on failure.
*/
static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
{
static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
{ UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL },
{ UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL },
{ UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL },
{ UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL },
{ UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL },
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL },
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER },
{ UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER },
{ UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER },
{ UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER },
{ UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER },
{ UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER },
{ UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER },
{ UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER },
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER }
};
return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs));
}
/**
* ufshcd_dwc_link_startup_notify() - DWC-specific link startup sequence.
* @hba: private structure pointer
* @status: Callback notify status
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
if (status == PRE_CHANGE) {
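/* 0x7d == 125, i.e. this default assumes a 125 MHz hclk; glue drivers such as ufs-amd-versal2 program their own divider in PRE_CHANGE instead */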
ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125);
} else { /* POST_CHANGE */
err = ufshcd_dwc_link_is_up(hba);
if (err) {
dev_err(hba->dev, "Link is not up\n");
return err;
}
err = ufshcd_dwc_connection_setup(hba);
if (err)
dev_err(hba->dev, "Connection setup failed (%d)\n",
err);
}
return err;
}

drivers/ufs/ufshcd-dwc.h (new file, 69 lines)

@@ -0,0 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* UFS Host driver for Synopsys Designware Core
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
* Authors: Joao Pinto <jpinto@synopsys.com>
*/
#ifndef _UFSHCD_DWC_H
#define _UFSHCD_DWC_H
/* PHY modes */
#define UFSHCD_DWC_PHY_MODE_ROM 0
/* RMMI Attributes */
#define CBREFCLKCTRL2 0x8132
#define CBCRCTRL 0x811F
#define CBC10DIRECTCONF2 0x810E
#define CBCREGADDRLSB 0x8116
#define CBCREGADDRMSB 0x8117
#define CBCREGWRLSB 0x8118
#define CBCREGWRMSB 0x8119
#define CBCREGRDLSB 0x811A
#define CBCREGRDMSB 0x811B
#define CBCREGRDWRSEL 0x811C
#define CBREFREFCLK_GATE_OVR_EN BIT(7)
/* M-PHY Attributes */
#define MTX_FSM_STATE 0x41
#define MRX_FSM_STATE 0xC1
/* M-PHY registers */
#define FAST_FLAGS(n) (0x401C + ((n) * 0x100))
#define RX_AFE_ATT_IDAC(n) (0x4000 + ((n) * 0x100))
#define RX_AFE_CTLE_IDAC(n) (0x4001 + ((n) * 0x100))
#define FW_CALIB_CCFG(n) (0x404D + ((n) * 0x100))
/* Tx/Rx FSM state */
enum rx_fsm_state {
RX_STATE_DISABLED = 0,
RX_STATE_HIBERN8 = 1,
RX_STATE_SLEEP = 2,
RX_STATE_STALL = 3,
RX_STATE_LSBURST = 4,
RX_STATE_HSBURST = 5,
};
enum tx_fsm_state {
TX_STATE_DISABLED = 0,
TX_STATE_HIBERN8 = 1,
TX_STATE_SLEEP = 2,
TX_STATE_STALL = 3,
TX_STATE_LSBURST = 4,
TX_STATE_HSBURST = 5,
};
struct ufshcd_dme_attr_val {
u32 attr_sel;
u32 mib_val;
u8 peer;
};
int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status);
int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
const struct ufshcd_dme_attr_val *v, int n);
#endif /* End of Header */

drivers/ufs/ufshci-dwc.h (new file, 32 lines)

@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* UFS Host driver for Synopsys Designware Core
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
* Authors: Joao Pinto <jpinto@synopsys.com>
*/
#ifndef _UFSHCI_DWC_H
#define _UFSHCI_DWC_H
/* DWC HC UFSHCI specific Registers */
enum dwc_specific_registers {
DWC_UFS_REG_HCLKDIV = 0xFC,
};
/* Clock Divider Values: Hex equivalent of frequency in MHz */
enum clk_div_values {
DWC_UFS_REG_HCLKDIV_DIV_62_5 = 0x3e,
DWC_UFS_REG_HCLKDIV_DIV_125 = 0x7d,
DWC_UFS_REG_HCLKDIV_DIV_200 = 0xc8,
};
/* Selector Index */
enum selector_index {
SELIND_LN0_TX = 0x00,
SELIND_LN1_TX = 0x01,
SELIND_LN0_RX = 0x04,
SELIND_LN1_RX = 0x05,
};
#endif

drivers/ufs/unipro.h

@@ -140,6 +140,12 @@
#define PA_SLEEPNOCONFIGTIME 0x15A2
#define PA_STALLNOCONFIGTIME 0x15A3
#define PA_SAVECONFIGTIME 0x15A4
#define PA_TXHSADAPTTYPE 0x15D4
/* Adapt type for PA_TXHSADAPTTYPE attribute */
#define PA_REFRESH_ADAPT 0x00
#define PA_INITIAL_ADAPT 0x01
#define PA_NO_ADAPT 0x03
#define PA_TACTIVATE_TIME_UNIT_US 10
#define PA_HIBERN8_TIME_UNIT_US 100
@@ -148,6 +154,7 @@
#define VS_MPHYCFGUPDT 0xD085
#define VS_DEBUGOMC 0xD09E
#define VS_POWERSTATE 0xD083
#define VS_MPHYDISABLE 0xD0C1
#define PA_GRANULARITY_MIN_VAL 1
#define PA_GRANULARITY_MAX_VAL 6