
Merge remote-tracking branch 'mkp-scsi/4.6/scsi-queue' into misc

James Bottomley 2016-03-15 15:35:03 -07:00
commit b86c75fda6
16 changed files with 1143 additions and 135 deletions

View File

@ -38,6 +38,9 @@ Optional properties:
defined or a value in the array is "0" then it is assumed
that the frequency is set by the parent clock or a
fixed rate clock source.
- lanes-per-direction	: number of lanes available per direction - either 1 or 2.
			  Note that it is assumed that the same number of lanes is used in
			  both directions at once. If not specified, the default is 2 lanes per direction.
Note: If the above properties are not defined, it can be assumed that the supply
regulators or clocks are always on.
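For reference, the UFS platform glue added later in this commit consumes this property by reading it and falling back to 2 lanes when it is absent. A condensed sketch, mirroring the ufshcd_init_lanes_per_dir() hunk further down (UFSHCD_DEFAULT_LANES_PER_DIRECTION is defined there as 2):

	/* Read the optional "lanes-per-direction" property; default to 2 lanes. */
	if (of_property_read_u32(dev->of_node, "lanes-per-direction",
				 &hba->lanes_per_direction))
		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;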

View File

@ -332,7 +332,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
{
int rel_port = -1, group_id;
struct alua_port_group *pg, *old_pg = NULL;
bool pg_updated;
bool pg_updated = false;
unsigned long flags;
group_id = scsi_vpd_tpg_id(sdev, &rel_port);

View File

@ -2097,7 +2097,7 @@ struct megasas_instance {
u8 UnevenSpanSupport;
u8 supportmax256vd;
u8 allow_fw_scan;
u8 pd_list_not_supported;
u16 fw_supported_vd_count;
u16 fw_supported_pd_count;

View File

@ -1838,7 +1838,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
struct megasas_instance *instance;
instance = megasas_lookup_instance(sdev->host->host_no);
if (instance->allow_fw_scan) {
if (instance->pd_list_not_supported) {
if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
sdev->type == TYPE_DISK) {
pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
@ -1874,7 +1874,8 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
pd_index =
(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
sdev->id;
if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
if ((instance->pd_list_not_supported ||
instance->pd_list[pd_index].driveState ==
MR_PD_STATE_SYSTEM)) {
goto scan_target;
}
@ -4087,7 +4088,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
switch (ret) {
case DCMD_FAILED:
megaraid_sas_kill_hba(instance);
dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
"failed/not supported by firmware\n");
if (instance->ctrl_context)
megaraid_sas_kill_hba(instance);
else
instance->pd_list_not_supported = 1;
break;
case DCMD_TIMEOUT:
@ -5034,7 +5041,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
case PCI_DEVICE_ID_DELL_PERC5:
default:
instance->instancet = &megasas_instance_template_xscale;
instance->allow_fw_scan = 1;
break;
}

View File

@ -4,7 +4,7 @@
#define SAS_HOST_ATTRS 0
#define SAS_PHY_ATTRS 17
#define SAS_PORT_ATTRS 1
#define SAS_RPORT_ATTRS 7
#define SAS_RPORT_ATTRS 8
#define SAS_END_DEV_ATTRS 5
#define SAS_EXPANDER_ATTRS 7

View File

@ -1105,7 +1105,7 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
return 0;
if (attr == &dev_attr_vpd_pg83 && sdev->vpd_pg83)
if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
return 0;
return S_IRUGO;

View File

@ -1286,6 +1286,7 @@ sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols);
sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
unsigned long long);
sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32);
/* only need 8 bytes of data plus header (4 or 8) */
#define BUF_SIZE 64
@ -1886,6 +1887,7 @@ sas_attach_transport(struct sas_function_template *ft)
SETUP_RPORT_ATTRIBUTE(rphy_device_type);
SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id);
SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier,
get_enclosure_identifier);
SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
* Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -16,8 +16,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
@ -58,6 +58,12 @@ static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
len * 4, false);
}
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
char *prefix, void *priv)
{
ufs_qcom_dump_regs(hba, offset, len, prefix);
}
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
int err = 0;
@ -106,9 +112,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
if (!host->is_lane_clks_enabled)
return;
clk_disable_unprepare(host->tx_l1_sync_clk);
if (host->hba->lanes_per_direction > 1)
clk_disable_unprepare(host->tx_l1_sync_clk);
clk_disable_unprepare(host->tx_l0_sync_clk);
clk_disable_unprepare(host->rx_l1_sync_clk);
if (host->hba->lanes_per_direction > 1)
clk_disable_unprepare(host->rx_l1_sync_clk);
clk_disable_unprepare(host->rx_l0_sync_clk);
host->is_lane_clks_enabled = false;
@ -132,21 +140,24 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
if (err)
goto disable_rx_l0;
err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
host->rx_l1_sync_clk);
if (err)
goto disable_tx_l0;
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
host->rx_l1_sync_clk);
if (err)
goto disable_tx_l0;
err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
host->tx_l1_sync_clk);
if (err)
goto disable_rx_l1;
err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
host->tx_l1_sync_clk);
if (err)
goto disable_rx_l1;
}
host->is_lane_clks_enabled = true;
goto out;
disable_rx_l1:
clk_disable_unprepare(host->rx_l1_sync_clk);
if (host->hba->lanes_per_direction > 1)
clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
@ -170,14 +181,16 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
if (err)
goto out;
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk);
if (err)
goto out;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
&host->tx_l1_sync_clk);
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk);
if (err)
goto out;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
&host->tx_l1_sync_clk);
}
out:
return err;
}
@ -267,9 +280,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
if (ret) {
dev_err(hba->dev,
"%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
__func__, ret);
dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
__func__, ret);
goto out;
}
@ -519,6 +531,18 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
150);
/*
* Some UFS devices (and maybe the host) have issues if LCC is
* enabled. So we set PA_Local_TX_LCC_Enable to 0 before link
* startup, which makes sure that both host and device TX LCC
* are disabled once link startup is completed.
*/
if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
err = ufshcd_dme_set(hba,
UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
0);
break;
case POST_CHANGE:
ufs_qcom_link_startup_post_change(hba);
@ -962,6 +986,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
goto out;
}
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
ufs_qcom_dev_ref_clk_ctrl(host, true);
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
@ -989,6 +1017,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
ufs_qcom_update_bus_bw_vote(host);
/* disable the device ref clock if entered PWM mode */
if (ufshcd_is_hs_mode(&hba->pwr_info) &&
!ufshcd_is_hs_mode(dev_req_params))
ufs_qcom_dev_ref_clk_ctrl(host, false);
break;
default:
ret = -EINVAL;
@ -1090,6 +1123,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
ufs_qcom_phy_disable_iface_clk(host->generic_phy);
goto out;
}
/* enable the device ref clock for HS mode */
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
vote = host->bus_vote.saved_vote;
if (vote == host->bus_vote.min_bw_vote)
ufs_qcom_update_bus_bw_vote(host);
@ -1367,6 +1403,74 @@ out:
return err;
}
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
void *priv, void (*print_fn)(struct ufs_hba *hba,
int offset, int num_regs, char *str, void *priv))
{
u32 reg;
struct ufs_qcom_host *host;
if (unlikely(!hba)) {
pr_err("%s: hba is NULL\n", __func__);
return;
}
if (unlikely(!print_fn)) {
dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
return;
}
host = ufshcd_get_variant(hba);
if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
return;
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
reg = ufshcd_readl(hba, REG_UFS_CFG1);
reg |= UFS_BIT(17);
ufshcd_writel(hba, reg, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
else
ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
/* provide a legal default configuration */
@ -1475,6 +1579,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
ufshcd_rmwl(host->hba, mask,
(u32)host->testbus.select_minor << offset,
reg);
ufs_qcom_enable_test_bus(host);
ufshcd_release(host->hba);
pm_runtime_put_sync(host->hba->dev);
@ -1491,8 +1596,10 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
"HCI Vendor Specific Registers ");
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
ufs_qcom_testbus_read(hba);
}
/**
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@ -1537,7 +1644,7 @@ static int ufs_qcom_probe(struct platform_device *pdev)
* ufs_qcom_remove - set driver_data of the device to NULL
* @pdev: pointer to platform device handle
*
* Always return 0
* Always returns 0
*/
static int ufs_qcom_remove(struct platform_device *pdev)
{

View File

@ -241,6 +241,15 @@ struct ufs_qcom_host {
struct ufs_qcom_testbus testbus;
};
static inline u32
ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
{
if (host->hw_ver.major <= 0x02)
return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
};
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)

View File

@ -43,6 +43,7 @@
#define GENERAL_UPIU_REQUEST_SIZE 32
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
@ -195,6 +196,37 @@ enum unit_desc_param {
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
};
/* Device descriptor parameters offsets in bytes */
enum device_desc_param {
DEVICE_DESC_PARAM_LEN = 0x0,
DEVICE_DESC_PARAM_TYPE = 0x1,
DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
DEVICE_DESC_PARAM_PRTCL = 0x5,
DEVICE_DESC_PARAM_NUM_LU = 0x6,
DEVICE_DESC_PARAM_NUM_WLU = 0x7,
DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
DEVICE_DESC_PARAM_SEC_LU = 0xD,
DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
DEVICE_DESC_PARAM_SPEC_VER = 0x10,
DEVICE_DESC_PARAM_MANF_DATE = 0x12,
DEVICE_DESC_PARAM_MANF_NAME = 0x14,
DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
DEVICE_DESC_PARAM_SN = 0x16,
DEVICE_DESC_PARAM_OEM_ID = 0x17,
DEVICE_DESC_PARAM_MANF_ID = 0x18,
DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
DEVICE_DESC_PARAM_UD_LEN = 0x1B,
DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
};
/*
* Logical Unit Write Protect
* 00h: LU not write protected
@ -469,6 +501,7 @@ struct ufs_vreg {
struct regulator *reg;
const char *name;
bool enabled;
bool unused;
int min_uV;
int max_uV;
int min_uA;

View File

@ -0,0 +1,151 @@
/*
* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UFS_QUIRKS_H_
#define _UFS_QUIRKS_H_
/* return true if s1 is a prefix of s2 */
#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL "ANY_MODEL"
#define MAX_MODEL_LEN 16
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
/**
* ufs_device_info - ufs device details
* @wmanufacturerid: card details
* @model: card model
*/
struct ufs_device_info {
u16 wmanufacturerid;
char model[MAX_MODEL_LEN + 1];
};
/**
* ufs_dev_fix - ufs device quirk info
* @card: ufs card details
* @quirk: device quirk
*/
struct ufs_dev_fix {
struct ufs_device_info card;
unsigned int quirk;
};
#define END_FIX { { 0 }, 0 }
/* add specific device quirk */
#define UFS_FIX(_vendor, _model, _quirk) \
{ \
.card.wmanufacturerid = (_vendor),\
.card.model = (_model), \
.quirk = (_quirk), \
}
/*
* If the UFS device has issues processing LCC (Line Control
* Command) frames coming from the UFS host controller, enable this
* quirk. When this quirk is enabled, the host controller driver
* should disable LCC transmission on the UFS host controller (by
* clearing the host's TX_LCC_ENABLE attribute to 0).
*/
#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)
/*
* Some UFS devices don't need the VCCQ rail for device operations. Enabling
* this quirk for such devices will make sure that the VCCQ rail is not voted for.
*/
#define UFS_DEVICE_NO_VCCQ (1 << 1)
/*
* Some vendors' UFS devices send back-to-back NACs for the DL data frames,
* causing the host controller to raise the DFES error status. Sometimes
* such UFS devices send back-to-back NACs without waiting for a new
* retransmitted DL frame from the host, and in such cases it is possible
* that the host UniPro stack goes into a bad state without raising the
* DFES error interrupt. If this happens, all the pending commands would
* time out only after their respective SW command timeout (which is generally too large).
*
* We can work around such device behaviour like this:
* - As soon as SW sees the DL NAC error, it should schedule the error handler.
* - The error handler would sleep for 50ms to see if there are any fatal errors
*   raised by the UFS controller.
* - If there are fatal errors then SW does normal error recovery.
* - If there are no fatal errors then SW sends the NOP command to the device
*   to check if the link is alive.
* - If the NOP command times out, SW does normal error recovery.
* - If the NOP command succeeds, skip the error handling.
*
* If the DL NAC error is seen multiple times with some vendors' UFS devices,
* enable this quirk to initiate quick error recovery and also silence related
* error logs to reduce spamming of the kernel log.
*/
#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
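The recovery flow described in the comment above could be outlined roughly as below. This is an illustrative sketch only; the helper names are hypothetical placeholders, not actual kernel symbols, and the real handling in the UFS core driver is not reproduced here.

	/* Hypothetical sketch of the DL NAC recovery flow; placeholder helpers. */
	static bool example_has_fatal_errors(struct ufs_hba *hba);	/* placeholder */
	static int example_send_nop_cmd(struct ufs_hba *hba);		/* placeholder */
	static void example_do_error_recovery(struct ufs_hba *hba);	/* placeholder */

	static void example_dl_nac_err_handler(struct ufs_hba *hba)
	{
		/* Scheduled from the interrupt path as soon as a DL NAC error is seen. */

		/* Give the controller 50ms to raise any fatal error. */
		msleep(50);

		if (example_has_fatal_errors(hba)) {
			example_do_error_recovery(hba);
			return;
		}

		/* No fatal errors: probe the link with a NOP command. */
		if (example_send_nop_cmd(hba))
			example_do_error_recovery(hba);	/* NOP timed out */
		/* else: the link is alive, skip the error handling. */
	}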
/*
* Some UFS devices may not work properly after resume if the link was kept
* in the off state during suspend. Enabling this quirk will not allow the
* link to be kept in the off state during suspend.
*/
#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3)
/*
* A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
* 600us, which may not be enough for a reliable hibern8 exit hardware sequence
* on the UFS device side.
* To work around this issue, the host should set its PA_TACTIVATE time to 1ms
* even if the device advertises an RX_MIN_ACTIVATETIME_CAPABILITY of less than 1ms.
*/
#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
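For illustration, the tuning implied by this quirk might look like the sketch below. The helper name is hypothetical, and programming the host PA_TACTIVATE directly in units of PA_TACTIVATE_TIME_UNIT_US is a simplifying assumption of this sketch; the real driver logic is not reproduced here.

	/* Hypothetical sketch: raise host PA_TACTIVATE to 1ms if the device
	 * advertises a minimum activate time below 1ms. */
	static int example_tune_pa_tactivate(struct ufs_hba *hba)
	{
		u32 peer_rx_min_activatetime;	/* in units of 100us */
		int ret;

		ret = ufshcd_dme_peer_get(hba,
				UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAPABILITY,
						UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				&peer_rx_min_activatetime);
		if (ret)
			return ret;

		if (peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US < 1000)
			/* Assumed unit conversion: 1ms / PA_TACTIVATE_TIME_UNIT_US. */
			ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					     1000 / PA_TACTIVATE_TIME_UNIT_US);

		return ret;
	}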
/*
* Some UFS memory devices may have really low read/write throughput in
* FAST AUTO mode; enable this quirk to make sure that FAST AUTO mode is
* never enabled for such devices.
*/
#define UFS_DEVICE_NO_FASTAUTO (1 << 5)
/*
* It seems some UFS devices may keep drawing more than the sleep current
* (at least for 500us) from the UFS rails (especially from the VCCQ rail).
* To avoid this situation, add a 2ms delay before putting these UFS
* rails into LPM mode.
*/
#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_NO_FASTAUTO),
UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
END_FIX
};
#endif /* _UFS_QUIRKS_H_ */

View File

@ -40,6 +40,8 @@
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
int ret = 0;
@ -277,6 +279,21 @@ void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
int ret;
ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
&hba->lanes_per_direction);
if (ret) {
dev_dbg(hba->dev,
"%s: failed to read lanes-per-direction, ret=%d\n",
__func__, ret);
hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
}
}
/**
* ufshcd_pltfrm_init - probe routine of the driver
* @pdev: pointer to Platform device handle
@ -331,6 +348,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ufshcd_init_lanes_per_dir(hba);
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err(dev, "Initialization failed\n");

File diff suppressed because it is too large.

View File

@ -54,6 +54,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include "unipro.h"
#include <asm/irq.h>
#include <asm/byteorder.h>
@ -383,6 +384,9 @@ struct ufs_init_prefetch {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device's max valid power mode
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
*/
struct ufs_hba {
void __iomem *mmio_base;
@ -470,6 +474,9 @@ struct ufs_hba {
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
unsigned int dev_quirks;
wait_queue_head_t tm_wq;
wait_queue_head_t tm_tag_wq;
unsigned long tm_condition;
@ -509,6 +516,8 @@ struct ufs_hba {
bool wlun_dev_clr_ua;
/* Number of lanes available (1 or 2) for Rx/Tx */
u32 lanes_per_direction;
struct ufs_pa_layer_attr pwr_info;
struct ufs_pwr_mode_info max_pwr_info;
@ -533,6 +542,9 @@ struct ufs_hba {
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
bool is_sys_suspended;
enum bkops_status urgent_bkops_lvl;
bool is_urgent_bkops_lvl_checked;
};
/* Returns true if clocks can be gated. Otherwise false */
@ -588,15 +600,9 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
void ufshcd_remove(struct ufs_hba *);
/**
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
*/
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
}
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep);
static inline void check_upiu_size(void)
{
@ -682,11 +688,27 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
return (pwr_info->pwr_rx == FAST_MODE ||
pwr_info->pwr_rx == FASTAUTO_MODE) &&
(pwr_info->pwr_tx == FAST_MODE ||
pwr_info->pwr_tx == FASTAUTO_MODE);
}
#define ASCII_STD true
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
u32 size, bool ascii);
/* Expose Query-Request API */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)

View File

@ -92,6 +92,7 @@ enum {
UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
};
/*
@ -170,6 +171,8 @@ enum {
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
@ -209,6 +212,7 @@ enum {
/* GenSelectorIndex calculation macros for M-PHY attributes */
#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
((sel) & 0xFFFF))

View File

@ -15,6 +15,7 @@
/*
* M-TX Configuration Attributes
*/
#define TX_HIBERN8TIME_CAPABILITY 0x000F
#define TX_MODE 0x0021
#define TX_HSRATE_SERIES 0x0022
#define TX_HSGEAR 0x0023
@ -48,8 +49,12 @@
#define RX_ENTER_HIBERN8 0x00A7
#define RX_BYPASS_8B10B_ENABLE 0x00A8
#define RX_TERMINATION_FORCE_ENABLE 0x0089
#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
#define RX_HIBERN8TIME_CAPABILITY 0x0092
#define is_mphy_tx_attr(attr) (attr < RX_MODE)
#define RX_MIN_ACTIVATETIME_UNIT_US 100
#define HIBERN8TIME_UNIT_US 100
/*
* PHY Adapter attributes
*/
@ -70,6 +75,7 @@
#define PA_MAXRXSPEEDFAST 0x1541
#define PA_MAXRXSPEEDSLOW 0x1542
#define PA_TXLINKSTARTUPHS 0x1544
#define PA_LOCAL_TX_LCC_ENABLE 0x155E
#define PA_TXSPEEDFAST 0x1565
#define PA_TXSPEEDSLOW 0x1566
#define PA_REMOTEVERINFO 0x15A0
@ -110,6 +116,12 @@
#define PA_STALLNOCONFIGTIME 0x15A3
#define PA_SAVECONFIGTIME 0x15A4
#define PA_TACTIVATE_TIME_UNIT_US 10
#define PA_HIBERN8_TIME_UNIT_US 100
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
/* PA power modes */
enum {
FAST_MODE = 1,
@ -143,6 +155,16 @@ enum ufs_hs_gear_tag {
UFS_HS_G3, /* HS Gear 3 */
};
enum ufs_unipro_ver {
UFS_UNIPRO_VER_RESERVED = 0,
UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
/* UniPro version field mask in PA_LOCALVERINFO */
UFS_UNIPRO_VER_MASK = 0xF,
};
/*
* Data Link Layer Attributes
*/