I deprecate here -8.ucode since -9 has been published long ago.

Along with that, I have a new activity: we now have better
 infrastructure for firmware debugging. This will allow us to
 have configurable probes inside the firmware.
 Luca continues his work on NetDetect; this feature is now
 complete. All the rest is minor fixes here and there.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJUffZ1AAoJEC0Llv5uNjIBk/wP/3YYPkkEk7nWwNtClChHnWgL
 7iGIqACxskGFu49Y8cfdpLv0/msJv6EhUPxhi+t8IApFNkvaVunc7F3nXQA07nyf
 XcF4/TLtqaxrKZSNuBZa2ipPiyWQtpJEPpxS6L+9ElS9MnD8lNhGIRb4T5Lsm73R
 sfvx00xJxzknyMcJx5YUasWvWq9QfjbxzzK4G+FwM4bmYoe7vwILv+sylRyEo+ED
 iS4axCJhrq/sE7qEqQa2GhCTO6gap2ecyisxByY/ycwoCCf7iQo5P5fmb4AoNBAf
 7mLAYjpoPLi7ZqNxcapwVZe0GBuZWnhncc+PQ8WXO4iLnGfYoXWr9RlnWVjZ/tAo
 hRmAiQS0lO5esC68Re34+3sDMf7TfQub1LZfXCds0SLD2PrAV8kei5tdGGmdLlxl
 8hqNFftkYCO80vkWypsJU0QGBsQcTWV9VRLQ2pgBbpCv80LA0HOaR82my0vsBx5s
 zEaqN9YbLefiqNS/zh09aqa4HR+SfNg6Dtek1Tt/sM4jUfDbFuGwCzNNZYIU1G4I
 vqxBdNtaWMjVhjOh+aqX4VmiDuU0TgOwyxqbuZAdyR/VBwP9wnNgdRFkOXjBuseB
 wasXYf0KBm+4zgCHdQL1OKsgSahW02Yl8Z8oln82dPG37td9rSV9cX+A2Szx4Uzd
 r3XHXYsz05aHeNGMQA2c
 =rVs8
 -----END PGP SIGNATURE-----

Merge tag 'iwlwifi-next-for-john-2014-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

"Grumbach, Emmanuel" <emmanuel.grumbach@intel.com> says:

"I deprecate here -8.ucode since -9 has been published long ago.
Along with that, I have a new activity: we now have better
infrastructure for firmware debugging. This will allow us to
have configurable probes inside the firmware.
Luca continues his work on NetDetect; this feature is now
complete. All the rest is minor fixes here and there."

Signed-off-by: John W. Linville <linville@tuxdriver.com>
John W. Linville 2014-12-02 14:50:31 -05:00
commit ea37511701
26 changed files with 1253 additions and 466 deletions
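
The diffs below raise the minimum supported ucode API (dropping -8.ucode) and gate command formats on the API field of the firmware version word. As a standalone, purely illustrative sketch (the version value is made up) of how the IWL_UCODE_MAJOR/MINOR/API/SERIAL macros that appear later in this series unpack that word:

#include <stdio.h>

/* Mirrors the byte layout used by the IWL_UCODE_* version macros:
 * Major | Minor | API | Serial, one byte each, MSB first. */
#define UCODE_MAJOR(ver)  (((ver) & 0xFF000000u) >> 24)
#define UCODE_MINOR(ver)  (((ver) & 0x00FF0000u) >> 16)
#define UCODE_API(ver)    (((ver) & 0x0000FF00u) >> 8)
#define UCODE_SERIAL(ver) ((ver) & 0x000000FFu)

int main(void)
{
	unsigned int ver = 0x0A090201;	/* hypothetical version word */

	printf("major=%u minor=%u api=%u serial=%u\n",
	       UCODE_MAJOR(ver), UCODE_MINOR(ver),
	       UCODE_API(ver), UCODE_SERIAL(ver));
	/* prints: major=10 minor=9 api=2 serial=1; iwlagn_txfifo_flush()
	 * below checks the API field in the same way to pick the v3 vs
	 * v2 flush command layout */
	return 0;
}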


@ -966,21 +966,21 @@ struct iwl_rem_sta_cmd {
/* WiFi queues mask */
#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0))
#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1))
#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2))
#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3))
#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3))
#define IWL_SCD_BK_MSK BIT(0)
#define IWL_SCD_BE_MSK BIT(1)
#define IWL_SCD_VI_MSK BIT(2)
#define IWL_SCD_VO_MSK BIT(3)
#define IWL_SCD_MGMT_MSK BIT(3)
/* PAN queues mask */
#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4))
#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5))
#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6))
#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7))
#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7))
#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8))
#define IWL_PAN_SCD_BK_MSK BIT(4)
#define IWL_PAN_SCD_BE_MSK BIT(5)
#define IWL_PAN_SCD_VI_MSK BIT(6)
#define IWL_PAN_SCD_VO_MSK BIT(7)
#define IWL_PAN_SCD_MGMT_MSK BIT(7)
#define IWL_PAN_SCD_MULTICAST_MSK BIT(8)
#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
#define IWL_AGG_TX_QUEUE_MSK 0xffc00
#define IWL_DROP_ALL BIT(1)
@ -1005,12 +1005,17 @@ struct iwl_rem_sta_cmd {
* 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
* 2: Dump all FIFO
*/
struct iwl_txfifo_flush_cmd {
struct iwl_txfifo_flush_cmd_v3 {
__le32 queue_control;
__le16 flush_control;
__le16 reserved;
} __packed;
struct iwl_txfifo_flush_cmd_v2 {
__le16 queue_control;
__le16 flush_control;
} __packed;
/*
* REPLY_WEP_KEY = 0x20
*/


@ -137,37 +137,38 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
*/
int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
{
struct iwl_txfifo_flush_cmd flush_cmd;
struct iwl_host_cmd cmd = {
.id = REPLY_TXFIFO_FLUSH,
.len = { sizeof(struct iwl_txfifo_flush_cmd), },
.data = { &flush_cmd, },
struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = {
.flush_control = cpu_to_le16(IWL_DROP_ALL),
};
struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = {
.flush_control = cpu_to_le16(IWL_DROP_ALL),
};
memset(&flush_cmd, 0, sizeof(flush_cmd));
u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK;
flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
IWL_SCD_MGMT_MSK;
if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
IWL_PAN_SCD_VI_MSK |
IWL_PAN_SCD_BE_MSK |
IWL_PAN_SCD_BK_MSK |
IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK |
IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK |
IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
if (priv->nvm_data->sku_cap_11n_enable)
flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
queue_control |= IWL_AGG_TX_QUEUE_MSK;
if (scd_q_msk)
flush_cmd.queue_control = cpu_to_le32(scd_q_msk);
queue_control = scd_q_msk;
IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
flush_cmd.queue_control);
flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control);
flush_cmd_v3.queue_control = cpu_to_le32(queue_control);
flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control);
return iwl_dvm_send_cmd(priv, &cmd);
if (IWL_UCODE_API(priv->fw->ucode_ver) > 2)
return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
sizeof(flush_cmd_v3),
&flush_cmd_v3);
return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
sizeof(flush_cmd_v2), &flush_cmd_v2);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)


@ -73,12 +73,12 @@
#define IWL3160_UCODE_API_MAX 10
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 9
#define IWL3160_UCODE_API_OK 9
#define IWL7260_UCODE_API_OK 10
#define IWL3160_UCODE_API_OK 10
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 8
#define IWL3160_UCODE_API_MIN 8
#define IWL7260_UCODE_API_MIN 9
#define IWL3160_UCODE_API_MIN 9
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@ -89,6 +89,8 @@
#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265_NVM_VERSION 0x0a1d
#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265D_NVM_VERSION 0x0c11
#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7260_FW_PRE "iwlwifi-7260-"
#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@ -275,7 +277,7 @@ const struct iwl_cfg iwl7265d_2ac_cfg = {
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
@ -285,7 +287,7 @@ const struct iwl_cfg iwl7265d_2n_cfg = {
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
@ -295,7 +297,7 @@ const struct iwl_cfg iwl7265d_n_cfg = {
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};


@ -72,10 +72,10 @@
#define IWL8000_UCODE_API_MAX 10
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 8
#define IWL8000_UCODE_API_OK 10
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 8
#define IWL8000_UCODE_API_MIN 9
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
@ -159,8 +159,8 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
const struct iwl_cfg iwl4265_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 4265",
const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 4165",
.fw_name_pre = IWL8000_FW_PRE,
IWL_DEVICE_8000,
.ht_params = &iwl8000_ht_params,


@ -371,6 +371,7 @@ extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */


@ -78,9 +78,6 @@
#include "iwl-config.h"
#include "iwl-modparams.h"
/* private includes */
#include "iwl-fw-file.h"
/******************************************************************************
*
* module boiler plate
@ -187,6 +184,11 @@ static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
static void iwl_dealloc_ucode(struct iwl_drv *drv)
{
int i;
kfree(drv->fw.dbg_dest_tlv);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
kfree(drv->fw.dbg_conf_tlv[i]);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
}
@ -248,6 +250,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
/*
* Starting 8000B - FW name format has changed. This overwrites the
* previous name and uses the new format.
*
* TODO:
* Once there is only one supported step for 8000 family - delete this!
*/
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
char rev_step[2] = {
@ -258,6 +263,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP)
rev_step[0] = 0;
/*
* If hw_rev wasn't set yet - default as B-step. If it IS A-step
* we'll reload that FW later instead.
*/
if (drv->trans->hw_rev == 0)
rev_step[0] = 'B';
snprintf(drv->firmware_name, sizeof(drv->firmware_name),
"%s%s-%s.ucode", name_pre, rev_step, tag);
}
@ -301,6 +313,11 @@ struct iwl_firmware_pieces {
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
/* FW debug data parsed for driver usage */
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
size_t dbg_conf_tlv_len[FW_DBG_MAX];
};
/*
@ -574,6 +591,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
char buildstr[25];
u32 build;
int num_of_cpus;
bool usniffer_images = false;
bool usniffer_req = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@ -846,12 +865,79 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->n_scan_channels =
le32_to_cpup((__le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_DBG_DEST: {
struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
if (pieces->dbg_dest_tlv) {
IWL_ERR(drv,
"dbg destination ignored, already exists\n");
break;
}
pieces->dbg_dest_tlv = dest;
IWL_INFO(drv, "Found debug destination: %s\n",
get_fw_dbg_mode_string(dest->monitor_mode));
drv->fw.dbg_dest_reg_num =
tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv,
reg_ops);
drv->fw.dbg_dest_reg_num /=
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
break;
}
case IWL_UCODE_TLV_FW_DBG_CONF: {
struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
if (!pieces->dbg_dest_tlv) {
IWL_ERR(drv,
"Ignore dbg config %d - no destination configured\n",
conf->id);
break;
}
if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
IWL_ERR(drv,
"Skip unknown configuration: %d\n",
conf->id);
break;
}
if (pieces->dbg_conf_tlv[conf->id]) {
IWL_ERR(drv,
"Ignore duplicate dbg config %d\n",
conf->id);
break;
}
if (conf->usniffer)
usniffer_req = true;
IWL_INFO(drv, "Found debug configuration: %d\n",
conf->id);
pieces->dbg_conf_tlv[conf->id] = conf;
pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
break;
}
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
usniffer_images = true;
iwl_store_ucode_sec(pieces, tlv_data,
IWL_UCODE_REGULAR_USNIFFER,
tlv_len);
break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
}
}
if (usniffer_req && !usniffer_images) {
IWL_ERR(drv,
"user selected to work with usniffer but usniffer image isn't available in ucode package\n");
return -EINVAL;
}
if (len) {
IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
@ -989,13 +1075,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
struct iwl_ucode_header *ucode;
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces pieces;
struct iwl_firmware_pieces *pieces;
const unsigned int api_max = drv->cfg->ucode_api_max;
unsigned int api_ok = drv->cfg->ucode_api_ok;
const unsigned int api_min = drv->cfg->ucode_api_min;
u32 api_ver;
int i;
bool load_module = false;
u32 hw_rev = drv->trans->hw_rev;
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
@ -1005,7 +1092,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (!api_ok)
api_ok = api_max;
memset(&pieces, 0, sizeof(pieces));
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
return;
if (!ucode_raw) {
if (drv->fw_index <= api_ok)
@ -1028,10 +1117,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
ucode = (struct iwl_ucode_header *)ucode_raw->data;
if (ucode->ver)
err = iwl_parse_v1_v2_firmware(drv, ucode_raw, &pieces);
err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
else
err = iwl_parse_tlv_firmware(drv, ucode_raw, &pieces,
&fw->ucode_capa);
err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces,
&fw->ucode_capa);
if (err)
goto try_again;
@ -1071,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* In mvm uCode there is no difference between data and instructions
* sections.
*/
if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg))
if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg))
goto try_again;
/* Allocate ucode buffers for card's bus-master loading ... */
@ -1080,9 +1169,33 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
if (iwl_alloc_ucode(drv, &pieces, i))
if (iwl_alloc_ucode(drv, pieces, i))
goto out_free_fw;
if (pieces->dbg_dest_tlv) {
drv->fw.dbg_dest_tlv =
kmemdup(pieces->dbg_dest_tlv,
sizeof(*pieces->dbg_dest_tlv) +
sizeof(pieces->dbg_dest_tlv->reg_ops[0]) *
drv->fw.dbg_dest_reg_num, GFP_KERNEL);
if (!drv->fw.dbg_dest_tlv)
goto out_free_fw;
}
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
if (pieces->dbg_conf_tlv[i]) {
drv->fw.dbg_conf_tlv_len[i] =
pieces->dbg_conf_tlv_len[i];
drv->fw.dbg_conf_tlv[i] =
kmemdup(pieces->dbg_conf_tlv[i],
drv->fw.dbg_conf_tlv_len[i],
GFP_KERNEL);
if (!drv->fw.dbg_conf_tlv[i])
goto out_free_fw;
}
}
/* Now that we can no longer fail, copy information */
/*
@ -1090,20 +1203,20 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* for each event, which is of mode 1 (including timestamp) for all
* new microcodes that include this information.
*/
fw->init_evtlog_ptr = pieces.init_evtlog_ptr;
if (pieces.init_evtlog_size)
fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
fw->init_evtlog_ptr = pieces->init_evtlog_ptr;
if (pieces->init_evtlog_size)
fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
else
fw->init_evtlog_size =
drv->cfg->base_params->max_event_log_size;
fw->init_errlog_ptr = pieces.init_errlog_ptr;
fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
if (pieces.inst_evtlog_size)
fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
fw->init_errlog_ptr = pieces->init_errlog_ptr;
fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
if (pieces->inst_evtlog_size)
fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
else
fw->inst_evtlog_size =
drv->cfg->base_params->max_event_log_size;
fw->inst_errlog_ptr = pieces.inst_errlog_ptr;
fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
/*
* figure out the offset of chain noise reset and gain commands
@ -1162,10 +1275,55 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
op->name, err);
#endif
}
/*
* We may have loaded the wrong FW file in 8000 HW family if it is an
* A-step card, and if drv->trans->hw_rev wasn't properly read when
* the FW file had been loaded. (This might happen in SDIO.) In such a
* case - unload and reload the correct file.
*
* TODO:
* Once there is only one supported step for 8000 family - delete this!
*/
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP &&
drv->trans->hw_rev != hw_rev) {
char firmware_name[32];
/* Free previous FW resources */
if (drv->op_mode)
_iwl_op_mode_stop(drv);
iwl_dealloc_ucode(drv);
/* Build name of correct-step FW */
snprintf(firmware_name, sizeof(firmware_name),
strrchr(drv->firmware_name, '-'));
snprintf(drv->firmware_name, sizeof(drv->firmware_name),
"%s%s", drv->cfg->fw_name_pre, firmware_name);
/* Clear data before loading correct FW */
list_del(&drv->list);
/* Request correct FW file this time */
IWL_DEBUG_INFO(drv, "attempting to load A-step FW %s\n",
drv->firmware_name);
err = request_firmware(&ucode_raw, drv->firmware_name,
drv->trans->dev);
if (err) {
IWL_ERR(drv, "Failed swapping FW!\n");
goto out_unbind;
}
/* Redo callback function - this time with right FW */
iwl_req_fw_callback(ucode_raw, context);
}
kfree(pieces);
return;
try_again:
/* try next, if any */
kfree(pieces);
release_firmware(ucode_raw);
if (iwl_request_firmware(drv, false))
goto out_unbind;
@ -1176,6 +1334,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
out_unbind:
kfree(pieces);
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
}
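
The iwl-drv.c changes above teach the TLV firmware parser about the new debug destination/configuration and usniffer-image TLVs. The following is a rough, self-contained sketch of the type/length/value walk such a parser performs, with simplified types and a fabricated little-endian input buffer; it is not the driver's real iwl_ucode_tlv handling:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Simplified TLV header: 32-bit type and length followed by `len`
 * payload bytes, each entry padded to a 4-byte boundary (the real
 * parser also converts these fields from little-endian). */
struct tlv_hdr {
	uint32_t type;
	uint32_t len;
};

static void walk_tlvs(const uint8_t *data, size_t len)
{
	while (len >= sizeof(struct tlv_hdr)) {
		struct tlv_hdr hdr;
		size_t padded;

		memcpy(&hdr, data, sizeof(hdr));	/* avoid unaligned loads */
		data += sizeof(hdr);
		len -= sizeof(hdr);

		if (hdr.len > len) {
			printf("truncated TLV %u\n", (unsigned)hdr.type);
			return;
		}
		printf("TLV type %u, %u payload bytes\n",
		       (unsigned)hdr.type, (unsigned)hdr.len);
		/* a real parser dispatches on hdr.type here, e.g. saving
		 * debug destination/configuration TLVs for later use */

		padded = (hdr.len + 3) & ~(size_t)3;
		if (padded > len)
			padded = len;
		data += padded;
		len -= padded;
	}
}

int main(void)
{
	/* one fake TLV (assuming a little-endian host for this buffer):
	 * type 39, 5 payload bytes, padded to 8 */
	uint8_t buf[8 + 8] = { 39, 0, 0, 0, 5, 0, 0, 0, 'h', 'e', 'l', 'l', 'o' };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}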


@ -81,6 +81,7 @@
* @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
*/
enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_SRAM = 0,
@ -90,6 +91,8 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4,
IWL_FW_ERROR_DUMP_FW_MONITOR = 5,
IWL_FW_ERROR_DUMP_PRPH = 6,
IWL_FW_ERROR_DUMP_TXF = 7,
IWL_FW_ERROR_DUMP_FH_REGS = 8,
IWL_FW_ERROR_DUMP_MAX,
};


@ -131,6 +131,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_API_CHANGES_SET = 29,
IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
};
struct iwl_ucode_tlv {
@ -179,4 +182,309 @@ struct iwl_ucode_capa {
__le32 api_capa;
} __packed;
/**
* enum iwl_ucode_tlv_flag - ucode API flags
* @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
* was a separate TLV but moved here to save space.
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
* @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
* from the probe request template.
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
* @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
* @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
* @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
* @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
};
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
* action frame
* @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
* channel in DS parameter set element in probe requests.
* @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
* probe requests.
* @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
* @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
};
/* The default calibrate table size if not specified by firmware file */
#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
/* The default max probe length if not specified by the firmware file */
#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
/*
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
/* uCode version contains 4 values: Major/Minor/API/Serial */
#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
/*
* Calibration control struct.
* Sent as part of the phy configuration command.
* @flow_trigger: bitmap for which calibrations to perform according to
* flow triggers.
* @event_trigger: bitmap for which calibrations to perform according to
* event triggers.
*/
struct iwl_tlv_calib_ctrl {
__le32 flow_trigger;
__le32 event_trigger;
} __packed;
enum iwl_fw_phy_cfg {
FW_PHY_CFG_RADIO_TYPE_POS = 0,
FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
FW_PHY_CFG_RADIO_STEP_POS = 2,
FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
FW_PHY_CFG_RADIO_DASH_POS = 4,
FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
FW_PHY_CFG_TX_CHAIN_POS = 16,
FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
FW_PHY_CFG_RX_CHAIN_POS = 20,
FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
};
#define IWL_UCODE_MAX_CS 1
/**
* struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
* @cipher: a cipher suite selector
* @flags: cipher scheme flags (currently reserved for a future use)
* @hdr_len: a size of MPDU security header
* @pn_len: a size of PN
* @pn_off: an offset of pn from the beginning of the security header
* @key_idx_off: an offset of key index byte in the security header
* @key_idx_mask: a bit mask of key_idx bits
* @key_idx_shift: bit shift needed to get key_idx
* @mic_len: mic length in bytes
* @hw_cipher: a HW cipher index used in host commands
*/
struct iwl_fw_cipher_scheme {
__le32 cipher;
u8 flags;
u8 hdr_len;
u8 pn_len;
u8 pn_off;
u8 key_idx_off;
u8 key_idx_mask;
u8 key_idx_shift;
u8 mic_len;
u8 hw_cipher;
} __packed;
enum iwl_fw_dbg_reg_operator {
CSR_ASSIGN,
CSR_SETBIT,
CSR_CLEARBIT,
PRPH_ASSIGN,
PRPH_SETBIT,
PRPH_CLEARBIT,
};
/**
* struct iwl_fw_dbg_reg_op - an operation on a register
*
* @op: %enum iwl_fw_dbg_reg_operator
* @addr: offset of the register
* @val: value
*/
struct iwl_fw_dbg_reg_op {
u8 op;
u8 reserved[3];
__le32 addr;
__le32 val;
} __packed;
/**
* enum iwl_fw_dbg_monitor_mode - available monitor recording modes
*
* @SMEM_MODE: monitor stores the data in SMEM
* @EXTERNAL_MODE: monitor stores the data in allocated DRAM
* @MARBH_MODE: monitor stores the data in MARBH buffer
*/
enum iwl_fw_dbg_monitor_mode {
SMEM_MODE = 0,
EXTERNAL_MODE = 1,
MARBH_MODE = 2,
};
/**
* struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
*
* @version: version of the TLV - currently 0
* @monitor_mode: %enum iwl_fw_dbg_monitor_mode
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
* @wrap_count: the addr of the reg of the wrap_count
* @base_shift: shift right of the base addr reg
* @end_shift: shift right of the end addr reg
* @reg_ops: array of registers operations
*
* This parses IWL_UCODE_TLV_FW_DBG_DEST
*/
struct iwl_fw_dbg_dest_tlv {
u8 version;
u8 monitor_mode;
u8 reserved[2];
__le32 base_reg;
__le32 end_reg;
__le32 write_ptr_reg;
__le32 wrap_count;
u8 base_shift;
u8 end_shift;
struct iwl_fw_dbg_reg_op reg_ops[0];
} __packed;
struct iwl_fw_dbg_conf_hcmd {
u8 id;
u8 reserved;
__le16 len;
u8 data[0];
} __packed;
/**
* struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration
*
* @enabled: is this trigger enabled
* @reserved:
* @len: length, in bytes, of the %trigger field
* @trigger: pointer to a trigger struct
*/
struct iwl_fw_dbg_trigger {
u8 enabled;
u8 reserved;
u8 len;
u8 trigger[0];
} __packed;
/**
* enum iwl_fw_dbg_conf - configurations available
*
* @FW_DBG_CUSTOM: take this configuration from alive
* Note that the trigger is NO-OP for this configuration
*/
enum iwl_fw_dbg_conf {
FW_DBG_CUSTOM = 0,
/* must be last */
FW_DBG_MAX,
FW_DBG_INVALID = 0xff,
};
/**
* struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration
*
* @id: %enum iwl_fw_dbg_conf
* @usniffer: should the uSniffer image be used
* @num_of_hcmds: how many HCMDs to send are present here
* @hcmd: a variable length host command to be sent to apply the configuration.
* If there is more than one HCMD to send, they will appear one after the
* other and be sent in the order that they appear in.
* This parses IWL_UCODE_TLV_FW_DBG_CONF
*/
struct iwl_fw_dbg_conf_tlv {
u8 id;
u8 usniffer;
u8 reserved;
u8 num_of_hcmds;
struct iwl_fw_dbg_conf_hcmd hcmd;
/* struct iwl_fw_dbg_trigger sits after all variable length hcmds */
} __packed;
#endif /* __iwl_fw_file_h__ */


@ -69,116 +69,6 @@
#include "iwl-fw-file.h"
/**
* enum iwl_ucode_tlv_flag - ucode API flags
* @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
* was a separate TLV but moved here to save space.
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
* @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
* from the probe request template.
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
* @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
* @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
* @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
* @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
};
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
* action frame
* @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
* channel in DS parameter set element in probe requests.
* @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
* probe requests.
* @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
* @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
};
/* The default calibrate table size if not specified by firmware file */
#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
/* The default max probe length if not specified by the firmware file */
#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
/**
* enum iwl_ucode_type
*
@ -187,11 +77,13 @@ enum iwl_ucode_tlv_capa {
* @IWL_UCODE_REGULAR: Normal runtime ucode
* @IWL_UCODE_INIT: Initial ucode
* @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
* @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image
*/
enum iwl_ucode_type {
IWL_UCODE_REGULAR,
IWL_UCODE_INIT,
IWL_UCODE_WOWLAN,
IWL_UCODE_REGULAR_USNIFFER,
IWL_UCODE_TYPE_MAX,
};
@ -206,14 +98,6 @@ enum iwl_ucode_sec {
IWL_UCODE_SECTION_DATA,
IWL_UCODE_SECTION_INST,
};
/*
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
struct iwl_ucode_capabilities {
u32 max_probe_length;
@ -241,66 +125,6 @@ struct iwl_sf_region {
u32 size;
};
/* uCode version contains 4 values: Major/Minor/API/Serial */
#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
/*
* Calibration control struct.
* Sent as part of the phy configuration command.
* @flow_trigger: bitmap for which calibrations to perform according to
* flow triggers.
* @event_trigger: bitmap for which calibrations to perform according to
* event triggers.
*/
struct iwl_tlv_calib_ctrl {
__le32 flow_trigger;
__le32 event_trigger;
} __packed;
enum iwl_fw_phy_cfg {
FW_PHY_CFG_RADIO_TYPE_POS = 0,
FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
FW_PHY_CFG_RADIO_STEP_POS = 2,
FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
FW_PHY_CFG_RADIO_DASH_POS = 4,
FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
FW_PHY_CFG_TX_CHAIN_POS = 16,
FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
FW_PHY_CFG_RX_CHAIN_POS = 20,
FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
};
#define IWL_UCODE_MAX_CS 1
/**
* struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
* @cipher: a cipher suite selector
* @flags: cipher scheme flags (currently reserved for a future use)
* @hdr_len: a size of MPDU security header
* @pn_len: a size of PN
* @pn_off: an offset of pn from the beginning of the security header
* @key_idx_off: an offset of key index byte in the security header
* @key_idx_mask: a bit mask of key_idx bits
* @key_idx_shift: bit shift needed to get key_idx
* @mic_len: mic length in bytes
* @hw_cipher: a HW cipher index used in host commands
*/
struct iwl_fw_cipher_scheme {
__le32 cipher;
u8 flags;
u8 hdr_len;
u8 pn_len;
u8 pn_off;
u8 key_idx_off;
u8 key_idx_mask;
u8 key_idx_shift;
u8 mic_len;
u8 hw_cipher;
} __packed;
/**
* struct iwl_fw_cscheme_list - a cipher scheme list
* @size: a number of entries
@ -327,6 +151,11 @@ struct iwl_fw_cscheme_list {
* @inst_errlog_ptr: error log offset for runtime ucode.
* @mvm_fw: indicates this is MVM firmware
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_fw {
u32 ucode_ver;
@ -351,6 +180,68 @@ struct iwl_fw {
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
size_t dbg_conf_tlv_len[FW_DBG_MAX];
u8 dbg_dest_reg_num;
};
static inline const char *get_fw_dbg_mode_string(int mode)
{
switch (mode) {
case SMEM_MODE:
return "SMEM";
case EXTERNAL_MODE:
return "EXTERNAL_DRAM";
case MARBH_MODE:
return "MARBH";
default:
return "UNKNOWN";
}
}
static inline const struct iwl_fw_dbg_trigger *
iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id)
{
const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
u8 *ptr;
int i;
if (!conf_tlv)
return NULL;
ptr = (void *)&conf_tlv->hcmd;
for (i = 0; i < conf_tlv->num_of_hcmds; i++) {
ptr += sizeof(conf_tlv->hcmd);
ptr += le16_to_cpu(conf_tlv->hcmd.len);
}
return (const struct iwl_fw_dbg_trigger *)ptr;
}
static inline bool
iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id)
{
const struct iwl_fw_dbg_trigger *trigger =
iwl_fw_dbg_conf_get_trigger(fw, id);
if (!trigger)
return false;
return trigger->enabled;
}
static inline bool
iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
{
const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
if (!conf_tlv)
return false;
return conf_tlv->usniffer;
}
#endif /* __iwl_fw_h__ */


@ -322,6 +322,7 @@ enum secure_boot_config_reg {
LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
};
#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0 (0xA01E30)
#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
enum secure_boot_status_reg {
@ -333,6 +334,7 @@ enum secure_boot_status_reg {
LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
};
#define FH_UCODE_LOAD_STATUS (0x1AF0)
#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
enum secure_load_status_reg {
LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
@ -349,10 +351,10 @@ enum secure_load_status_reg {
#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x404000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x405000)
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
#define LMPM_SECURE_TIME_OUT (50000) /* 5 msec */
#define LMPM_SECURE_TIME_OUT (100) /* 10 micro */
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)


@ -574,6 +574,9 @@ enum iwl_trans_state {
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
* @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@ -605,6 +608,10 @@ struct iwl_trans {
u64 dflt_pwr_limit;
const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
u8 dbg_dest_reg_num;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));


@ -94,8 +94,8 @@
#define IWL_MVM_BT_COEX_MPLUT 1
#define IWL_MVM_BT_COEX_RRC 1
#define IWL_MVM_BT_COEX_TTC 1
#define IWL_MVM_BT_COEX_MPLUT_REG0 0x2e402280
#define IWL_MVM_BT_COEX_MPLUT_REG1 0x7711a751
#define IWL_MVM_BT_COEX_MPLUT_REG0 0x28412201
#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0


@ -785,33 +785,19 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
return iwl_mvm_load_d3_fw(mvm);
}
static int
iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
const struct iwl_wowlan_config_cmd_v3 *cmd)
{
/* start only with the v2 part of the command */
u16 cmd_len = sizeof(cmd->common);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
cmd_len = sizeof(*cmd);
return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
cmd_len, cmd);
}
static int
iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd_v3 *wowlan_config_cmd,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
struct ieee80211_sta *ap_sta)
{
int ret;
struct iwl_mvm_sta *mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
/* TODO: wowlan_config_cmd->common.wowlan_ba_teardown_tids */
/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
wowlan_config_cmd->common.is_11n_connection =
wowlan_config_cmd->is_11n_connection =
ap_sta->ht_cap.ht_supported;
/* Query the last used seqno and set it */
@ -819,32 +805,32 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
if (ret < 0)
return ret;
wowlan_config_cmd->common.non_qos_seq = cpu_to_le16(ret);
wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd->common);
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
if (wowlan->disconnect)
wowlan_config_cmd->common.wakeup_filter |=
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
if (wowlan->magic_pkt)
wowlan_config_cmd->common.wakeup_filter |=
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
if (wowlan->gtk_rekey_failure)
wowlan_config_cmd->common.wakeup_filter |=
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
if (wowlan->eap_identity_req)
wowlan_config_cmd->common.wakeup_filter |=
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
if (wowlan->four_way_handshake)
wowlan_config_cmd->common.wakeup_filter |=
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
if (wowlan->n_patterns)
wowlan_config_cmd->common.wakeup_filter |=
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
if (wowlan->rfkill_release)
wowlan_config_cmd->common.wakeup_filter |=
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
if (wowlan->tcp) {
@ -852,7 +838,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
* Set the "link change" (really "link lost") flag as well
* since that implies losing the TCP connection.
*/
wowlan_config_cmd->common.wakeup_filter |=
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
@ -865,7 +851,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
static int
iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd_v3 *wowlan_config_cmd,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
struct ieee80211_sta *ap_sta)
{
@ -947,7 +933,9 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
}
}
ret = iwl_mvm_send_wowlan_config_cmd(mvm, wowlan_config_cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
sizeof(*wowlan_config_cmd),
wowlan_config_cmd);
if (ret)
goto out;
@ -972,7 +960,7 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *nd_config,
struct ieee80211_vif *vif)
{
struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
int ret;
ret = iwl_mvm_switch_to_d3(mvm);
@ -981,16 +969,51 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
/* rfkill release can be either for wowlan or netdetect */
if (wowlan->rfkill_release)
wowlan_config_cmd.common.wakeup_filter |=
wowlan_config_cmd.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
sizeof(wowlan_config_cmd),
&wowlan_config_cmd);
if (ret)
return ret;
ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
if (ret)
return ret;
return ret;
if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
return -EBUSY;
/* save the sched scan matchsets... */
if (nd_config->n_match_sets) {
mvm->nd_match_sets = kmemdup(nd_config->match_sets,
sizeof(*nd_config->match_sets) *
nd_config->n_match_sets,
GFP_KERNEL);
if (mvm->nd_match_sets)
mvm->n_nd_match_sets = nd_config->n_match_sets;
}
/* ...and the sched scan channels for later reporting */
mvm->nd_channels = kmemdup(nd_config->channels,
sizeof(*nd_config->channels) *
nd_config->n_channels,
GFP_KERNEL);
if (mvm->nd_channels)
mvm->n_nd_channels = nd_config->n_channels;
return 0;
}
static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
{
kfree(mvm->nd_match_sets);
mvm->nd_match_sets = NULL;
mvm->n_nd_match_sets = 0;
kfree(mvm->nd_channels);
mvm->nd_channels = NULL;
mvm->n_nd_channels = 0;
}
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
@ -1051,7 +1074,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->net_detect = true;
} else {
struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
ap_sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
@ -1104,8 +1127,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
if (ret < 0)
if (ret < 0) {
ieee80211_restart_hw(mvm->hw);
iwl_mvm_free_nd(mvm);
}
out_noreset:
mutex_unlock(&mvm->mutex);
@ -1626,15 +1651,64 @@ out_unlock:
return false;
}
struct iwl_mvm_nd_query_results {
u32 matched_profiles;
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
};
static int
iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
struct iwl_mvm_nd_query_results *results)
{
struct iwl_scan_offload_profiles_query *query;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
.flags = CMD_WANT_SKB,
};
int ret, len;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
return ret;
}
/* RF-kill already asserted again... */
if (!cmd.resp_pkt) {
ret = -ERFKILL;
goto out_free_resp;
}
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < sizeof(*query)) {
IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
ret = -EIO;
goto out_free_resp;
}
query = (void *)cmd.resp_pkt->data;
results->matched_profiles = le32_to_cpu(query->matched_profiles);
memcpy(results->matches, query->matches, sizeof(results->matches));
out_free_resp:
iwl_free_resp(&cmd);
return ret;
}
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct cfg80211_wowlan_nd_info *net_detect = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
struct iwl_mvm_nd_query_results query;
struct iwl_wowlan_status *fw_status;
unsigned long matched_profiles;
u32 reasons = 0;
int i, j, n_matches, ret;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
if (!IS_ERR_OR_NULL(fw_status))
@ -1643,13 +1717,73 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
wakeup.rfkill_release = true;
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
/* TODO: read and check if it was netdetect */
if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
goto out;
ret = iwl_mvm_netdetect_query_results(mvm, &query);
if (ret || !query.matched_profiles) {
wakeup_report = NULL;
goto out;
}
matched_profiles = query.matched_profiles;
if (mvm->n_nd_match_sets) {
n_matches = hweight_long(matched_profiles);
} else {
IWL_ERR(mvm, "no net detect match information available\n");
n_matches = 0;
}
net_detect = kzalloc(sizeof(*net_detect) +
(n_matches * sizeof(net_detect->matches[0])),
GFP_KERNEL);
if (!net_detect || !n_matches)
goto out_report_nd;
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
struct iwl_scan_offload_profile_match *fw_match;
struct cfg80211_wowlan_nd_match *match;
int n_channels = 0;
fw_match = &query.matches[i];
for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
n_channels += hweight8(fw_match->matching_channels[j]);
match = kzalloc(sizeof(*match) +
(n_channels * sizeof(*match->channels)),
GFP_KERNEL);
if (!match)
goto out_report_nd;
net_detect->matches[net_detect->n_matches++] = match;
match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
match->ssid.ssid_len);
if (mvm->n_nd_channels < n_channels)
continue;
for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
match->channels[match->n_channels++] =
mvm->nd_channels[j]->center_freq;
}
out_report_nd:
wakeup.net_detect = net_detect;
out:
iwl_mvm_free_nd(mvm);
mutex_unlock(&mvm->mutex);
ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
if (net_detect) {
for (i = 0; i < net_detect->n_matches; i++)
kfree(net_detect->matches[i]);
kfree(net_detect);
}
}
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
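
The net-detect reporting code above walks fw_match->matching_channels, a byte array used as a bitmap (least-significant bit of each byte first), to recover the matched channel indices. A tiny standalone illustration of that indexing, with made-up bitmap contents:

#include <stdio.h>
#include <stdint.h>

#define MATCHING_CHANNELS_LEN 5	/* mirrors SCAN_OFFLOAD_MATCHING_CHANNELS_LEN */

int main(void)
{
	/* hypothetical bitmap: channel indices 0, 3 and 9 matched */
	uint8_t matching_channels[MATCHING_CHANNELS_LEN] = { 0x09, 0x02, 0, 0, 0 };
	int j;

	for (j = 0; j < MATCHING_CHANNELS_LEN * 8; j++)
		if (matching_channels[j / 8] & (1u << (j % 8)))
			printf("channel index %d matched\n", j);
	/* prints indices 0, 3 and 9; the same j is used to look up
	 * mvm->nd_channels[j]->center_freq in the diff above */
	return 0;
}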


@ -1339,6 +1339,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_NMI);
PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}


@ -241,16 +241,12 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
struct iwl_wowlan_config_cmd_v2 {
struct iwl_wowlan_config_cmd {
__le32 wakeup_filter;
__le16 non_qos_seq;
__le16 qos_seq[8];
u8 wowlan_ba_teardown_tids;
u8 is_11n_connection;
} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
struct iwl_wowlan_config_cmd_v3 {
struct iwl_wowlan_config_cmd_v2 common;
u8 offloading_tid;
u8 reserved[3];
} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */


@ -1047,4 +1047,48 @@ struct iwl_umac_scan_complete {
__le32 reserved;
} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
/**
* struct iwl_scan_offload_profile_match - match information
* @bssid: matched bssid
* @channel: channel where the match occurred
* @energy:
* @matching_feature:
* @matching_channels: bitmap of channels that matched, referencing
* the channels passed in the scan offload request
*/
struct iwl_scan_offload_profile_match {
u8 bssid[ETH_ALEN];
__le16 reserved;
u8 channel;
u8 energy;
u8 matching_feature;
u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
/**
* struct iwl_scan_offload_profiles_query - match results query response
* @matched_profiles: bitmap of matched profiles, referencing the
* matches passed in the scan offload request
* @last_scan_age: age of the last offloaded scan
* @n_scans_done: number of offloaded scans done
* @gp2_d0u: GP2 when D0U occurred
* @gp2_invoked: GP2 when scan offload was invoked
* @resume_while_scanning: not used
* @self_recovery: obsolete
* @reserved: reserved
* @matches: array of match information, one for each match
*/
struct iwl_scan_offload_profiles_query {
__le32 matched_profiles;
__le32 last_scan_age;
__le32 n_scans_done;
__le32 gp2_d0u;
__le32 gp2_invoked;
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
#endif


@ -249,11 +249,9 @@ enum {
WOWLAN_TX_POWER_PER_DB = 0xe6,
/* and for NetDetect */
NET_DETECT_CONFIG_CMD = 0x54,
NET_DETECT_PROFILES_QUERY_CMD = 0x56,
NET_DETECT_PROFILES_CMD = 0x57,
NET_DETECT_HOTSPOTS_CMD = 0x58,
NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59,
SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58,
SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59,
REPLY_MAX = 0xff,
};
@ -1617,7 +1615,7 @@ enum iwl_sf_scenario {
#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
/* smart FIFO default values */
#define SF_W_MARK_SISO 4096
#define SF_W_MARK_SISO 6144
#define SF_W_MARK_MIMO2 8192
#define SF_W_MARK_MIMO3 6144
#define SF_W_MARK_LEGACY 4096


@ -186,7 +186,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
static const u8 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
fw = iwl_get_ucode_image(mvm, ucode_type);
if (ucode_type == IWL_UCODE_REGULAR &&
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) &&
iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM))
fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
else
fw = iwl_get_ucode_image(mvm, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
@ -394,6 +399,42 @@ out:
return ret;
}
static int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm,
enum iwl_fw_dbg_conf conf_id)
{
u8 *ptr;
int ret;
int i;
if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
"Invalid configuration %d\n", conf_id))
return -EINVAL;
if (!mvm->fw->dbg_conf_tlv[conf_id])
return -EINVAL;
if (mvm->fw_dbg_conf != FW_DBG_INVALID)
IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
mvm->fw_dbg_conf);
/* Send all HCMDs for configuring the FW debug */
ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
le16_to_cpu(cmd->len), cmd->data);
if (ret)
return ret;
ptr += sizeof(*cmd);
ptr += le16_to_cpu(cmd->len);
}
mvm->fw_dbg_conf = conf_id;
return ret;
}
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@ -445,6 +486,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
mvm->fw_dbg_conf = FW_DBG_INVALID;
iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;


@ -1304,31 +1304,22 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
struct iwl_mvm_tx_resp *beacon_notify_hdr;
struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif;
u64 tsf;
u16 status;
lockdep_assert_held(&mvm->mutex);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_CAPA_EXTENDED_BEACON) {
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
beacon_notify_hdr = &beacon->beacon_notify_hdr;
tsf = le64_to_cpu(beacon->tsf);
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
} else {
struct iwl_beacon_notif *beacon = (void *)pkt->data;
beacon_notify_hdr = &beacon->beacon_notify_hdr;
tsf = le64_to_cpu(beacon->tsf);
}
beacon_notify_hdr = &beacon->beacon_notify_hdr;
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
status, beacon_notify_hdr->failure_frame, tsf,
status, beacon_notify_hdr->failure_frame,
le64_to_cpu(beacon->tsf),
mvm->ap_last_beacon_gp2,
le32_to_cpu(beacon_notify_hdr->initial_rate));


@ -337,7 +337,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 &&
!iwlwifi_mod_params.uapsd_disable) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
@ -370,8 +369,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW)
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
@ -2577,9 +2575,15 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
/* Use aux roc framework (HS20) */
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
vif, duration);
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
/* Use aux roc framework (HS20) */
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
vif, duration);
goto out_unlock;
}
IWL_ERR(mvm, "hotspot not supported\n");
ret = -EINVAL;
goto out_unlock;
case NL80211_IFTYPE_P2P_DEVICE:
/* handle below */

View File

@ -295,7 +295,6 @@ enum iwl_bt_force_ant_mode {
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
* @ba_enabled: indicated if beacon abort is enabled
* @last_beacon_signal: last beacon rssi signal in dbm
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
* @bt_coex_min_thold: minimum threshold for BT coex
@ -671,6 +670,7 @@ struct iwl_mvm {
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
struct work_struct fw_error_dump_wk;
enum iwl_fw_dbg_conf fw_dbg_conf;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@ -685,6 +685,10 @@ struct iwl_mvm {
/* sched scan settings for net detect */
struct cfg80211_sched_scan_request *nd_config;
struct ieee80211_scan_ies nd_ies;
struct cfg80211_match_set *nd_match_sets;
int n_nd_match_sets;
struct ieee80211_channel **nd_channels;
int n_nd_channels;
bool net_detect;
#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
@ -1143,7 +1147,7 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd_v2 *cmd);
struct iwl_wowlan_config_cmd *cmd);
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,

View File

@ -339,11 +339,15 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
} *file_sec;
const u8 *eof, *temp;
int max_section_size;
const __le32 *dword_buff;
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
#define NVM_HEADER_0 (0x2A504C54)
#define NVM_HEADER_1 (0x4E564D2A)
#define NVM_HEADER_SIZE (4 * sizeof(u32))
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
@ -372,12 +376,6 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
mvm->nvm_file_name, fw_entry->size);
if (fw_entry->size < sizeof(*file_sec)) {
IWL_ERR(mvm, "NVM file too small\n");
ret = -EINVAL;
goto out;
}
if (fw_entry->size > MAX_NVM_FILE_LEN) {
IWL_ERR(mvm, "NVM file too large\n");
ret = -EINVAL;
@ -385,8 +383,25 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
}
eof = fw_entry->data + fw_entry->size;
dword_buff = (__le32 *)fw_entry->data;
file_sec = (void *)fw_entry->data;
/* some NVM files contain a header.
* The header is identified by 2 dwords, as follows:
* dword[0] = 0x2A504C54
* dword[1] = 0x4E564D2A
*
* This header must be skipped when providing the NVM data to the FW.
*/
if (fw_entry->size > NVM_HEADER_SIZE &&
dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
le32_to_cpu(dword_buff[3]));
} else {
file_sec = (void *)fw_entry->data;
}
while (true) {
if (file_sec->data > eof) {

View File
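The nvm.c hunks above teach the external-NVM loader to recognize an optional file header: when the first two little-endian dwords are 0x2A504C54 and 0x4E564D2A, four dwords are skipped before section parsing starts, and dwords 2 and 3 are logged as the NVM version and manufacturing date. A minimal sketch of that detection on a raw byte buffer follows; the helper names and the sample bytes in main() are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NVM_HEADER_0	0x2A504C54u
#define NVM_HEADER_1	0x4E564D2Au
#define NVM_HEADER_SIZE	(4 * sizeof(uint32_t))

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/*
 * Return the offset of the first NVM section in the file image:
 * NVM_HEADER_SIZE when the optional magic header is present, 0 otherwise.
 */
static size_t nvm_payload_offset(const uint8_t *data, size_t size)
{
	if (size > NVM_HEADER_SIZE &&
	    get_le32(data) == NVM_HEADER_0 &&
	    get_le32(data + 4) == NVM_HEADER_1) {
		printf("NVM Version %08X\n", (unsigned)get_le32(data + 8));
		printf("NVM Manufacturing date %08X\n",
		       (unsigned)get_le32(data + 12));
		return NVM_HEADER_SIZE;
	}
	return 0;	/* headerless file: sections start at byte 0 */
}

int main(void)
{
	uint8_t img[32] = {
		0x54, 0x4C, 0x50, 0x2A,		/* 0x2A504C54, little-endian */
		0x2A, 0x4D, 0x56, 0x4E,		/* 0x4E564D2A */
		0x01, 0x00, 0x00, 0x00,		/* fake version */
		0x19, 0x11, 0x14, 0x20,		/* fake date */
	};

	printf("sections start at offset %zu\n",
	       nvm_payload_offset(img, sizeof(img)));
	return 0;
}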

@ -67,7 +67,7 @@
#include "mvm.h"
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd_v2 *cmd)
struct iwl_wowlan_config_cmd *cmd)
{
int i;

View File

@ -325,11 +325,9 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(WOWLAN_KEK_KCK_MATERIAL),
CMD(WOWLAN_GET_STATUSES),
CMD(WOWLAN_TX_POWER_PER_DB),
CMD(NET_DETECT_CONFIG_CMD),
CMD(NET_DETECT_PROFILES_QUERY_CMD),
CMD(NET_DETECT_PROFILES_CMD),
CMD(NET_DETECT_HOTSPOTS_CMD),
CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD),
CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD),
CMD(CARD_STATE_NOTIFICATION),
CMD(MISSED_BEACONS_NOTIFICATION),
CMD(BT_COEX_PRIO_TABLE),
@ -498,6 +496,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
sizeof(trans->dbg_conf_tlv));
/* set up notification wait support */
iwl_notification_wait_init(&mvm->notif_wait);
@ -1002,7 +1004,7 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
}
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
struct iwl_wowlan_config_cmd_v3 *cmd,
struct iwl_wowlan_config_cmd *cmd,
struct iwl_d0i3_iter_data *iter_data)
{
struct ieee80211_sta *ap_sta;
@ -1018,14 +1020,14 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
goto out;
mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
cmd->offloading_tid = iter_data->offloading_tid;
/*
* The d0i3 uCode takes care of the nonqos counters,
* so configure only the qos seq ones.
*/
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
rcu_read_unlock();
}
@ -1037,14 +1039,11 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
struct iwl_d0i3_iter_data d0i3_iter_data = {
.mvm = mvm,
};
struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
.common = {
.wakeup_filter =
cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE |
IWL_WOWLAN_WAKEUP_BCN_FILTERING),
},
struct iwl_wowlan_config_cmd wowlan_config_cmd = {
.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE |
IWL_WOWLAN_WAKEUP_BCN_FILTERING),
};
struct iwl_d3_manager_config d3_cfg_cmd = {
.min_sleep_time = cpu_to_le32(1000),

View File

@ -78,6 +78,7 @@
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fh.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
@ -665,14 +666,14 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
const struct fw_img *image,
int cpu,
int *first_ucode_section)
static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
const struct fw_img *image,
int cpu,
int *first_ucode_section)
{
int shift_param;
int i, ret = 0;
u32 last_read_idx = 0;
int i, ret = 0, sec_num = 0x1;
u32 val, last_read_idx = 0;
if (cpu == 1) {
shift_param = 0;
@ -693,21 +694,16 @@ static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
break;
}
if (i == (*first_ucode_section) + 1)
/* set CPU to started */
iwl_set_bits_prph(trans,
CSR_UCODE_LOAD_STATUS_ADDR,
LMPM_CPU_HDRS_LOADING_COMPLETED
<< shift_param);
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
/* Notify the ucode of the loaded section number and status */
val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
val = val | (sec_num << shift_param);
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
sec_num = (sec_num << 1) | 0x1;
}
/* image loading complete */
iwl_set_bits_prph(trans,
CSR_UCODE_LOAD_STATUS_ADDR,
LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
*first_ucode_section = last_read_idx;
@ -760,6 +756,64 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
int i;
if (dest->version)
IWL_ERR(trans,
"DBG DEST version is %d - expect issues\n",
dest->version);
IWL_INFO(trans, "Applying debug destination %s\n",
get_fw_dbg_mode_string(dest->monitor_mode));
if (dest->monitor_mode == EXTERNAL_MODE)
iwl_pcie_alloc_fw_monitor(trans);
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
for (i = 0; i < trans->dbg_dest_reg_num; i++) {
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
u32 val = le32_to_cpu(dest->reg_ops[i].val);
switch (dest->reg_ops[i].op) {
case CSR_ASSIGN:
iwl_write32(trans, addr, val);
break;
case CSR_SETBIT:
iwl_set_bit(trans, addr, BIT(val));
break;
case CSR_CLEARBIT:
iwl_clear_bit(trans, addr, BIT(val));
break;
case PRPH_ASSIGN:
iwl_write_prph(trans, addr, val);
break;
case PRPH_SETBIT:
iwl_set_bits_prph(trans, addr, BIT(val));
break;
case PRPH_CLEARBIT:
iwl_clear_bits_prph(trans, addr, BIT(val));
break;
default:
IWL_ERR(trans, "FW debug - unknown OP %d\n",
dest->reg_ops[i].op);
break;
}
}
if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
trans_pcie->fw_mon_phys >> dest->base_shift);
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(trans_pcie->fw_mon_phys +
trans_pcie->fw_mon_size) >> dest->end_shift);
}
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
@ -767,39 +821,13 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
int ret = 0;
int first_ucode_section;
IWL_DEBUG_FW(trans,
"working with %s CPU\n",
IWL_DEBUG_FW(trans, "working with %s CPU\n",
image->is_dual_cpus ? "Dual" : "Single");
/* configure the ucode to be ready to get the secured image */
if (iwl_has_secure_boot(trans->hw_rev, trans->cfg->device_family)) {
/* set secure boot inspector addresses */
iwl_write_prph(trans,
LMPM_SECURE_INSPECTOR_CODE_ADDR,
LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
iwl_write_prph(trans,
LMPM_SECURE_INSPECTOR_DATA_ADDR,
LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
/* set CPU1 header address */
iwl_write_prph(trans,
LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
LMPM_SECURE_CPU1_HDR_MEM_SPACE);
/* load to FW the binary Secured sections of CPU1 */
ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
&first_ucode_section);
if (ret)
return ret;
} else {
/* load to FW the binary Non secured sections of CPU1 */
ret = iwl_pcie_load_cpu_sections(trans, image, 1,
&first_ucode_section);
if (ret)
return ret;
}
/* load to FW the binary non secured sections of CPU1 */
ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
if (ret)
return ret;
if (image->is_dual_cpus) {
/* set CPU2 header address */
@ -808,14 +836,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
LMPM_SECURE_CPU2_HDR_MEM_SPACE);
/* load to FW the binary sections of CPU2 */
if (iwl_has_secure_boot(trans->hw_rev,
trans->cfg->device_family))
ret = iwl_pcie_load_cpu_secured_sections(
trans, image, 2,
&first_ucode_section);
else
ret = iwl_pcie_load_cpu_sections(trans, image, 2,
&first_ucode_section);
ret = iwl_pcie_load_cpu_sections(trans, image, 2,
&first_ucode_section);
if (ret)
return ret;
}
@ -832,6 +854,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
(trans_pcie->fw_mon_phys +
trans_pcie->fw_mon_size) >> 4);
}
} else if (trans->dbg_dest_tlv) {
iwl_pcie_apply_destination(trans);
}
/* release CPU reset */
@ -840,18 +864,50 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
else
iwl_write32(trans, CSR_RESET, 0);
if (iwl_has_secure_boot(trans->hw_rev, trans->cfg->device_family)) {
/* wait for image verification to complete */
ret = iwl_poll_prph_bit(trans,
LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
LMPM_SECURE_BOOT_STATUS_SUCCESS,
LMPM_SECURE_BOOT_STATUS_SUCCESS,
LMPM_SECURE_TIME_OUT);
return 0;
}
if (ret < 0) {
IWL_ERR(trans, "Time out on secure boot process\n");
return ret;
}
static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
const struct fw_img *image)
{
int ret = 0;
int first_ucode_section;
u32 reg;
IWL_DEBUG_FW(trans, "working with %s CPU\n",
image->is_dual_cpus ? "Dual" : "Single");
/* configure the ucode to be ready to get the secured image */
/* release CPU reset */
iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
/* load to FW the binary Secured sections of CPU1 */
ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 1,
&first_ucode_section);
if (ret)
return ret;
/* load to FW the binary sections of CPU2 */
ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 2,
&first_ucode_section);
if (ret)
return ret;
/* Notify FW loading is done */
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
/* wait for image verification to complete */
ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
LMPM_SECURE_BOOT_STATUS_SUCCESS,
LMPM_SECURE_BOOT_STATUS_SUCCESS,
LMPM_SECURE_TIME_OUT);
if (ret < 0) {
reg = iwl_read_prph(trans,
LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0);
IWL_ERR(trans, "Timeout on secure boot process, reg = %x\n",
reg);
return ret;
}
return 0;
@ -903,7 +959,11 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
return iwl_pcie_load_given_ucode(trans, fw);
if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
(CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP))
return iwl_pcie_load_given_ucode_8000b(trans, fw);
else
return iwl_pcie_load_given_ucode(trans, fw);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@ -994,6 +1054,9 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
clear_bit(STATUS_RFKILL, &trans->status);
if (hw_rfkill != was_hw_rfkill)
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
/* re-take ownership to prevent other users from stealing the device */
iwl_pcie_prepare_card_hw(trans);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
@ -1968,6 +2031,31 @@ static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
return csr_len;
}
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data)
{
u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
unsigned long flags;
__le32 *val;
int i;
if (!iwl_trans_grab_nic_access(trans, false, &flags))
return 0;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
(*data)->len = cpu_to_le32(fh_regs_len);
val = (void *)(*data)->data;
for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
iwl_trans_release_nic_access(trans, &flags);
*data = iwl_fw_error_next_data(*data);
return sizeof(**data) + fh_regs_len;
}
static
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
@ -1977,6 +2065,7 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len;
u32 monitor_len;
int i, ptr;
/* transport dump header */
@ -1999,10 +2088,34 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
num_bytes_in_chunk;
}
/* FH registers */
len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
/* FW monitor */
if (trans_pcie->fw_mon_page)
if (trans_pcie->fw_mon_page) {
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
trans_pcie->fw_mon_size;
trans_pcie->fw_mon_size;
monitor_len = trans_pcie->fw_mon_size;
} else if (trans->dbg_dest_tlv) {
u32 base, end;
base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
base = iwl_read_prph(trans, base) <<
trans->dbg_dest_tlv->base_shift;
end = iwl_read_prph(trans, end) <<
trans->dbg_dest_tlv->end_shift;
/* Make "end" point to the actual end */
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
end += (1 << trans->dbg_dest_tlv->end_shift);
monitor_len = end - base;
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
monitor_len;
} else {
monitor_len = 0;
}
dump_data = vzalloc(len);
if (!dump_data)
@ -2039,36 +2152,71 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
len += iwl_trans_pcie_dump_prph(trans, &data);
len += iwl_trans_pcie_dump_csr(trans, &data);
len += iwl_trans_pcie_fh_regs_dump(trans, &data);
/* data is already pointing to the next section */
if (trans_pcie->fw_mon_page) {
if ((trans_pcie->fw_mon_page &&
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
trans->dbg_dest_tlv) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
u32 base, write_ptr, wrap_cnt;
/* If there was a dest TLV - use the values from there */
if (trans->dbg_dest_tlv) {
write_ptr =
le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
} else {
base = MON_BUFF_BASE_ADDR;
write_ptr = MON_BUFF_WRPTR;
wrap_cnt = MON_BUFF_CYCLE_CNT;
}
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
data->len = cpu_to_le32(trans_pcie->fw_mon_size +
sizeof(*fw_mon_data));
fw_mon_data = (void *)data->data;
fw_mon_data->fw_mon_wr_ptr =
cpu_to_le32(iwl_read_prph(trans, MON_BUFF_WRPTR));
cpu_to_le32(iwl_read_prph(trans, write_ptr));
fw_mon_data->fw_mon_cycle_cnt =
cpu_to_le32(iwl_read_prph(trans, MON_BUFF_CYCLE_CNT));
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
cpu_to_le32(iwl_read_prph(trans, MON_BUFF_BASE_ADDR));
cpu_to_le32(iwl_read_prph(trans, base));
/*
* The firmware is now asserted, it won't write anything to
* the buffer. CPU can take ownership to fetch the data.
* The buffer will be handed back to the device before the
* firmware will be restarted.
*/
dma_sync_single_for_cpu(trans->dev, trans_pcie->fw_mon_phys,
trans_pcie->fw_mon_size,
DMA_FROM_DEVICE);
memcpy(fw_mon_data->data, page_address(trans_pcie->fw_mon_page),
trans_pcie->fw_mon_size);
len += sizeof(*data) + sizeof(*fw_mon_data);
if (trans_pcie->fw_mon_page) {
data->len = cpu_to_le32(trans_pcie->fw_mon_size +
sizeof(*fw_mon_data));
len += sizeof(*data) + sizeof(*fw_mon_data) +
trans_pcie->fw_mon_size;
/*
* The firmware is now asserted, it won't write anything
* to the buffer. CPU can take ownership to fetch the
* data. The buffer will be handed back to the device
* before the firmware will be restarted.
*/
dma_sync_single_for_cpu(trans->dev,
trans_pcie->fw_mon_phys,
trans_pcie->fw_mon_size,
DMA_FROM_DEVICE);
memcpy(fw_mon_data->data,
page_address(trans_pcie->fw_mon_page),
trans_pcie->fw_mon_size);
len += trans_pcie->fw_mon_size;
} else {
/* If we are here then the buffer is internal */
/*
* Update pointers to reflect actual values after
* shifting
*/
base = iwl_read_prph(trans, base) <<
trans->dbg_dest_tlv->base_shift;
iwl_trans_read_mem(trans, base, fw_mon_data->data,
monitor_len / sizeof(u32));
data->len = cpu_to_le32(sizeof(*fw_mon_data) +
monitor_len);
len += monitor_len;
}
}
dump_data->len = len;

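In the dump path above, when there is no driver-allocated monitor page but a debug destination TLV exists, the monitor length is derived from the base and end registers named in the TLV: both raw values are shifted left by base_shift/end_shift, and on family-8000 parts one extra shifted unit is added so that end points past the last chunk. A small sketch of that arithmetic follows, with the PRPH reads stubbed out and the example values invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a PRPH register read: here the "register" is just a value. */
static uint32_t read_reg_stub(uint32_t raw_value)
{
	return raw_value;
}

/*
 * Compute the firmware-monitor region from dest-TLV style parameters:
 * the registers hold shifted addresses, so both ends are scaled back up,
 * and for the 8000 family "end" is rounded up past the last chunk.
 */
static uint32_t monitor_len(uint32_t base_raw, uint32_t end_raw,
			    uint8_t base_shift, uint8_t end_shift,
			    bool family_8000)
{
	uint32_t base = read_reg_stub(base_raw) << base_shift;
	uint32_t end = read_reg_stub(end_raw) << end_shift;

	if (family_8000)
		end += 1u << end_shift;

	return end - base;
}

int main(void)
{
	/* Example: a 256 KiB window whose registers hold addresses >> 8. */
	uint32_t len = monitor_len(0x1000, 0x13ff, 8, 8, true);

	printf("monitor length: %u bytes (0x%x)\n", (unsigned)len, (unsigned)len);
	return 0;
}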
View File

@ -989,6 +989,65 @@ out:
spin_unlock_bh(&txq->lock);
}
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
lockdep_assert_held(&trans_pcie->reg_lock);
if (trans_pcie->cmd_in_flight)
return 0;
trans_pcie->cmd_in_flight = true;
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set.
*/
if (trans->cfg->base_params->apmg_wake_up_wa) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
trans_pcie->cmd_in_flight = false;
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
}
return 0;
}
static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
lockdep_assert_held(&trans_pcie->reg_lock);
if (WARN_ON(!trans_pcie->cmd_in_flight))
return 0;
trans_pcie->cmd_in_flight = false;
if (trans->cfg->base_params->apmg_wake_up_wa)
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
return 0;
}
/*
* iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
*
@ -1024,14 +1083,9 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
}
}
if (trans->cfg->base_params->apmg_wake_up_wa &&
q->read_ptr == q->write_ptr) {
if (q->read_ptr == q->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
WARN_ON(!trans_pcie->cmd_in_flight);
trans_pcie->cmd_in_flight = false;
__iwl_trans_pcie_clear_bit(trans,
CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
@ -1419,35 +1473,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set.
*/
if (trans->cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_in_flight) {
trans_pcie->cmd_in_flight = true;
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
trans_pcie->cmd_in_flight = false;
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
idx = -EIO;
goto out;
}
ret = iwl_pcie_set_cmd_in_flight(trans);
if (ret < 0) {
idx = ret;
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
goto out;
}
/* Increment and update queue's write index */
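The tx.c hunks above factor the "keep the NIC awake while a host command is in flight" logic out of iwl_pcie_enqueue_hcmd() and the reclaim path into a shared iwl_pcie_set_cmd_in_flight()/iwl_pcie_clear_cmd_in_flight() pair guarded by a single flag. The sketch below shows only the shape of that pairing in plain C, with the register access reduced to an in-memory flag; all names are illustrative, and the real helpers additionally poll for MAC clock readiness under reg_lock.

#include <stdbool.h>
#include <stdio.h>

struct fake_trans {
	bool cmd_in_flight;	/* mirrors trans_pcie->cmd_in_flight */
	bool mac_access_req;	/* stands in for the CSR_GP_CNTRL request bit */
};

/* Called before queuing a host command: the first caller asserts the wake request. */
static int set_cmd_in_flight(struct fake_trans *t)
{
	if (t->cmd_in_flight)
		return 0;	/* already awake for an earlier command */
	t->cmd_in_flight = true;
	t->mac_access_req = true;	/* real code then polls for MAC clock ready */
	return 0;
}

/* Called when the command queue drains: drop the wake request exactly once. */
static int clear_cmd_in_flight(struct fake_trans *t)
{
	if (!t->cmd_in_flight)
		return 0;
	t->cmd_in_flight = false;
	t->mac_access_req = false;
	return 0;
}

int main(void)
{
	struct fake_trans t = { 0 };

	/* two commands enqueued back to back: wake is requested only once */
	set_cmd_in_flight(&t);
	set_cmd_in_flight(&t);

	/* command queue drained (read_ptr == write_ptr): release the NIC */
	clear_cmd_in_flight(&t);

	printf("in flight: %d, access req: %d\n", t.cmd_in_flight, t.mac_access_req);
	return 0;
}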