

Merge tag 'iwlwifi-next-for-kalle-2017-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

More iwlwifi patches for 4.13

* Some changes in suspend/resume handling to support new FWs;
* A bunch of RF-kill related fixes;
* Continued work towards the A000 family;
* Support for a new version of the TX flush FW API;
* Some fixes in monitor interfaces;
* A few fixes in the recovery flows;
* Johannes' documentation fixes and FW API struct cleanups continue;
* Remove some noise from the kernel logs;
* Some other small improvements, fixes and cleanups;
Kalle Valo committed 2017-06-28 18:55:55 +03:00
commit b90a16854d
39 changed files with 845 additions and 450 deletions

View File

@@ -1513,7 +1513,7 @@ out_destroy_workqueue:
 out_free_eeprom_blob:
 	kfree(priv->eeprom_blob);
 out_free_eeprom:
-	iwl_free_nvm_data(priv->nvm_data);
+	kfree(priv->nvm_data);
 out_free_hw:
 	ieee80211_free_hw(priv->hw);
 out:
@@ -1532,7 +1532,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
 	iwl_tt_exit(priv);
 	kfree(priv->eeprom_blob);
-	iwl_free_nvm_data(priv->nvm_data);
+	kfree(priv->nvm_data);
 	/*netif_stop_queue(dev); */
 	flush_workqueue(priv->workqueue);

View File

@@ -74,7 +74,7 @@
 #define IWL_A000_JF_FW_PRE	"iwlwifi-Qu-a0-jf-b0-"
 #define IWL_A000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
-#define IWL_A000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-a0-hrcdb-a0-"
+#define IWL_A000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-z0-hrcdb-a0-"
 #define IWL_A000_HR_MODULE_FIRMWARE(api) \
 	IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"

View File

@@ -131,6 +131,7 @@ enum iwl_led_mode {
 /* Antenna presence definitions */
 #define	ANT_NONE	0x0
+#define	ANT_INVALID	0xff
 #define	ANT_A		BIT(0)
 #define	ANT_B		BIT(1)
 #define ANT_C		BIT(2)

View File

@@ -153,6 +153,10 @@
 /* GIO Chicken Bits (PCI Express bus link power management) */
 #define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
 
+/* host chicken bits */
+#define CSR_HOST_CHICKEN	(CSR_BASE + 0x204)
+#define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME	BIT(19)
+
 /* Analog phase-lock-loop configuration */
 #define CSR_ANA_PLL_CFG         (CSR_BASE+0x20c)

View File

@@ -35,19 +35,20 @@
 TRACE_EVENT(iwlwifi_dev_tx_data,
 	TP_PROTO(const struct device *dev,
-		 struct sk_buff *skb,
-		 u8 hdr_len, size_t data_len),
-	TP_ARGS(dev, skb, hdr_len, data_len),
+		 struct sk_buff *skb, u8 hdr_len),
+	TP_ARGS(dev, skb, hdr_len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
-		__dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
+		__dynamic_array(u8, data,
+				iwl_trace_data(skb) ? skb->len - hdr_len : 0)
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
 		if (iwl_trace_data(skb))
 			skb_copy_bits(skb, hdr_len,
-				      __get_dynamic_array(data), data_len);
+				      __get_dynamic_array(data),
+				      skb->len - hdr_len);
 	),
 	TP_printk("[%s] TX frame data", __get_str(dev))
 );

View File

@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -91,8 +91,8 @@ TRACE_EVENT(iwlwifi_dev_tx,
 	TP_PROTO(const struct device *dev, struct sk_buff *skb,
 		 void *tfd, size_t tfdlen,
 		 void *buf0, size_t buf0_len,
-		 void *buf1, size_t buf1_len),
-	TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+		 int hdr_len),
+	TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
@@ -105,15 +105,20 @@ TRACE_EVENT(iwlwifi_dev_tx,
 	 * for the possible padding).
 	 */
 	__dynamic_array(u8, buf0, buf0_len)
-	__dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
+	__dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ?
+				  0 : skb->len - hdr_len)
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
-		__entry->framelen = buf0_len + buf1_len;
+		__entry->framelen = buf0_len;
+		if (hdr_len > 0)
+			__entry->framelen += skb->len - hdr_len;
 		memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
 		memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
-		if (!iwl_trace_data(skb))
-			memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+		if (hdr_len > 0 && !iwl_trace_data(skb))
+			skb_copy_bits(skb, hdr_len,
+				      __get_dynamic_array(buf1),
+				      skb->len - hdr_len);
 	),
 	TP_printk("[%s] TX %.2x (%zu bytes)",
 		  __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],

View File

@@ -121,15 +121,6 @@ struct iwl_nvm_data *
 iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
 		      const u8 *eeprom, size_t eeprom_size);
 
-/**
- * iwl_free_nvm_data - free NVM data
- * @data: the data to free
- */
-static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
-{
-	kfree(data);
-}
-
 int iwl_nvm_check_version(struct iwl_nvm_data *data,
 			  struct iwl_trans *trans);

View File

@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,7 @@
 #define __iwl_fh_h__
 
 #include <linux/types.h>
+#include <linux/bitfield.h>
 
 /****************************/
 /* Flow Handler Definitions */
@@ -478,13 +479,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
 #define RFH_GEN_CFG	0xA09800
 #define RFH_GEN_CFG_SERVICE_DMA_SNOOP	BIT(0)
 #define RFH_GEN_CFG_RFH_DMA_SNOOP	BIT(1)
-#define RFH_GEN_CFG_RB_CHUNK_SIZE_POS	4
+#define RFH_GEN_CFG_RB_CHUNK_SIZE	BIT(4)
 #define RFH_GEN_CFG_RB_CHUNK_SIZE_128	1
 #define RFH_GEN_CFG_RB_CHUNK_SIZE_64	0
-#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK	0xF00
-#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS	8
-
-#define DEFAULT_RXQ_NUM	0
+/* the driver assumes everywhere that the default RXQ is 0 */
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM	0xF00
+#define RFH_GEN_CFG_VAL(_n, _v)	FIELD_PREP(RFH_GEN_CFG_ ## _n, _v)
 
 /* end of 9000 rx series registers */
@@ -655,6 +655,17 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
 {
 	return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
 }
+
+/**
+ * enum iwl_tfd_tb_hi_n_len - TB hi_n_len bits
+ * @TB_HI_N_LEN_ADDR_HI_MSK: high 4 bits (to make it 36) of DMA address
+ * @TB_HI_N_LEN_LEN_MSK: length of the TB
+ */
+enum iwl_tfd_tb_hi_n_len {
+	TB_HI_N_LEN_ADDR_HI_MSK	= 0xf,
+	TB_HI_N_LEN_LEN_MSK	= 0xfff0,
+};
+
 /**
  * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
@@ -662,8 +673,7 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
  *
  * @lo: low [31:0] portion of the dma address of TX buffer
  *	every even is unaligned on 16 bit boundary
- * @hi_n_len 0-3 [35:32] portion of dma
- *	4-15 length of the tx buffer
+ * @hi_n_len: &enum iwl_tfd_tb_hi_n_len
  */
 struct iwl_tfd_tb {
 	__le32 lo;

View File

@@ -353,6 +353,7 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_STA_PM_NOTIF			= (__force iwl_ucode_tlv_capa_t)38,
 	IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT		= (__force iwl_ucode_tlv_capa_t)39,
 	IWL_UCODE_TLV_CAPA_CDB_SUPPORT			= (__force iwl_ucode_tlv_capa_t)40,
+	IWL_UCODE_TLV_CAPA_D0I3_END_FIRST		= (__force iwl_ucode_tlv_capa_t)41,
 	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE		= (__force iwl_ucode_tlv_capa_t)64,
 	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS		= (__force iwl_ucode_tlv_capa_t)65,
 	IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)67,

View File

@@ -640,6 +640,8 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
 		return -EINVAL;
 	}
 
+	IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
+
 	return 0;
 }

View File

@@ -66,6 +66,7 @@
 #ifndef __iwl_prph_h__
 #define __iwl_prph_h__
+#include <linux/bitfield.h>
 
 /*
  * Registers in this file are internal, not PCI bus memory mapped.
@@ -247,14 +248,14 @@
 #define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN	(19)
 #define SCD_QUEUE_STTS_REG_MSK			(0x017F0000)
 
-#define SCD_QUEUE_CTX_REG1_CREDIT_POS		(8)
-#define SCD_QUEUE_CTX_REG1_CREDIT_MSK		(0x00FFFF00)
-#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS	(24)
-#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK	(0xFF000000)
+#define SCD_QUEUE_CTX_REG1_CREDIT		(0x00FFFF00)
+#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT		(0xFF000000)
+#define SCD_QUEUE_CTX_REG1_VAL(_n, _v)		FIELD_PREP(SCD_QUEUE_CTX_REG1_ ## _n, _v)
 
-#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS		(0)
-#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK		(0x0000007F)
-#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS	(16)
-#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK	(0x007F0000)
+#define SCD_QUEUE_CTX_REG2_WIN_SIZE		(0x0000007F)
+#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT		(0x007F0000)
+#define SCD_QUEUE_CTX_REG2_VAL(_n, _v)		FIELD_PREP(SCD_QUEUE_CTX_REG2_ ## _n, _v)
 
 #define SCD_GP_CTRL_ENABLE_31_QUEUES		BIT(0)
 #define SCD_GP_CTRL_AUTO_ACTIVE_MODE		BIT(18)
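
Both this hunk and the iwl-fh.h one above collapse hand-rolled *_POS/*_MSK macro pairs into a single mask plus a FIELD_PREP()-based helper, which is why the two headers now pull in linux/bitfield.h. The following is a minimal userspace sketch of the idiom; FIELD_PREP is re-implemented locally here for illustration, while the real kernel macro in linux/bitfield.h additionally range- and type-checks the mask at compile time:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's FIELD_PREP(): shift a value
 * into the position described by a contiguous bitmask, deriving the
 * shift from the mask itself instead of a separate _POS define. */
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define SCD_QUEUE_CTX_REG2_WIN_SIZE	0x0000007F
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT	0x007F0000

int main(void)
{
	/* One mask define replaces the old _POS/_MSK pair. */
	uint32_t reg = FIELD_PREP(SCD_QUEUE_CTX_REG2_WIN_SIZE, 64) |
		       FIELD_PREP(SCD_QUEUE_CTX_REG2_FRAME_LIMIT, 64);

	printf("0x%08x\n", reg);	/* prints 0x00400040 */
	return 0;
}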

View File

@@ -117,7 +117,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	int ret;
 
 	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-		     test_bit(STATUS_RFKILL, &trans->status)))
+		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
 		return -ERFKILL;
 
 	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
@@ -143,6 +143,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	if (!(cmd->flags & CMD_ASYNC))
 		lock_map_release(&trans->sync_cmd_lockdep_map);
 
+	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
+		return -EIO;
+
 	return ret;
 }
 IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
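
With this check in place, a zero return from iwl_trans_send_cmd() with CMD_WANT_SKB set guarantees a non-NULL resp_pkt, which is what lets later hunks in this series drop their "!cmd.resp_pkt" checks (in d3.c, nvm.c and ops.c). A hedged sketch of a caller relying on the invariant; EXAMPLE_QUERY_CMD and process_response() are hypothetical stand-ins, not driver APIs:

static int example_query(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = EXAMPLE_QUERY_CMD,	/* hypothetical command ID */
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;	/* includes the new -EIO case */

	/* Safe: resp_pkt is guaranteed non-NULL whenever ret == 0. */
	process_response(cmd.resp_pkt);	/* hypothetical helper */
	iwl_free_resp(&cmd);
	return 0;
}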

View File

@@ -322,7 +322,8 @@ enum iwl_d3_status {
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
-* @STATUS_RFKILL: the HW RFkill switch is in KILL position
+* @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
+* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
@@ -334,7 +335,8 @@ enum iwl_trans_status {
 	STATUS_DEVICE_ENABLED,
 	STATUS_TPOWER_PMI,
 	STATUS_INT_ENABLED,
-	STATUS_RFKILL,
+	STATUS_RFKILL_HW,
+	STATUS_RFKILL_OPMODE,
 	STATUS_FW_ERROR,
 	STATUS_TRANS_GOING_IDLE,
 	STATUS_TRANS_IDLE,
@@ -480,7 +482,9 @@ struct iwl_trans_txq_scd_cfg {
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
-*	configured. May sleep.
+*	configured. If true is returned, the operation mode needs to increment
+*	the sequence number of the packets routed to this queue because of a
+*	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
@@ -542,7 +546,7 @@ struct iwl_trans_ops {
 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
 			struct sk_buff_head *skbs);
 
-	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
+	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
 			   const struct iwl_trans_txq_scd_cfg *cfg,
 			   unsigned int queue_wdg_timeout);
 	void (*txq_disable)(struct iwl_trans *trans, int queue,
@@ -950,7 +954,7 @@ static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
 	trans->ops->txq_disable(trans, queue, configure_scd);
 }
 
-static inline void
+static inline bool
 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
 			 const struct iwl_trans_txq_scd_cfg *cfg,
 			 unsigned int queue_wdg_timeout)
@@ -959,10 +963,11 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
-		return;
+		return false;
 	}
 
-	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
+	return trans->ops->txq_enable(trans, queue, ssn,
+				      cfg, queue_wdg_timeout);
 }
 
 static inline void

View File

@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
-* Copyright(c) 2016 Intel Deutschland GmbH
+* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
-* Copyright(c) 2016 Intel Deutschland GmbH
+* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -1795,12 +1795,6 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		return ERR_PTR(ret);
 	}
 
-	/* RF-kill already asserted again... */
-	if (!cmd.resp_pkt) {
-		fw_status = ERR_PTR(-ERFKILL);
-		goto out_free_resp;
-	}
-
 	status_size = sizeof(*fw_status);
 
 	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
@@ -1925,12 +1919,6 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
 		return ret;
 	}
 
-	/* RF-kill already asserted again... */
-	if (!cmd.resp_pkt) {
-		ret = -ERFKILL;
-		goto out_free_resp;
-	}
-
 	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
 	if (len < sizeof(*query)) {
 		IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
@@ -2087,9 +2075,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	bool keep = false;
 	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
 					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-
-	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
-		    CMD_WAKE_UP_TRANS;
+	bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
+				      IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
 
 	mutex_lock(&mvm->mutex);
@@ -2110,6 +2097,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	/* query SRAM first in case we want event logging */
 	iwl_mvm_read_d3_sram(mvm);
 
+	if (d0i3_first) {
+		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+		if (ret < 0) {
+			IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
+				ret);
+			goto err;
+		}
+	}
+
 	/*
 	 * Query the current location and source from the D3 firmware so we
 	 * can play it back when we re-intiailize the D0 firmware
@@ -2155,9 +2151,14 @@ out_iterate:
 			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
 out:
+	/* no need to reset the device in unified images, if successful */
 	if (unified_image && !ret) {
-		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
-		if (!ret) /* D3 ended successfully - no need to reset device */
+		/* nothing else to do if we already sent D0I3_END_CMD */
+		if (d0i3_first)
+			return 0;
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+		if (!ret)
 			return 0;
 	}

View File

@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
-* Copyright(c) 2016 Intel Deutschland GmbH
+* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
-* Copyright(c) 2016 Intel Deutschland GmbH
+* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -1304,7 +1304,7 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
 	char buf[30] = {};
 	int len;
 
-	len = snprintf(buf, sizeof(buf) - 1,
+	len = scnprintf(buf, sizeof(buf) - 1,
 			"traffic=%d\ndbgfs=%d\nvcmd=%d\n",
 			mvmvif->low_latency_traffic,
 			mvmvif->low_latency_dbgfs,
@@ -1385,10 +1385,12 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
 	struct ieee80211_vif *vif = file->private_data;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	char buf[8];
+	int len;
 
-	snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
+	len = scnprintf(buf, sizeof(buf), "0x%04x\n",
+			mvmvif->mvm->dbgfs_rx_phyinfo);
 
-	return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
@@ -1439,7 +1441,7 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
 	char buf[10];
 	int len;
 
-	len = snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+	len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
 
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
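
The switch from snprintf() to scnprintf() matters here because snprintf() returns the length the output would have needed, which can exceed the buffer, while scnprintf() returns the number of bytes actually stored - the only value that is safe to hand to simple_read_from_buffer(). A self-contained userspace sketch of the difference; scnprintf_like() is a local stand-in for the kernel helper, not its real implementation:

#include <stdarg.h>
#include <stdio.h>

/* Mimics the kernel's scnprintf(): clamp the return value to what
 * actually fits in the buffer. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i;
}

int main(void)
{
	char buf[8];

	/* snprintf() reports the length the output *would* need... */
	int want = snprintf(buf, sizeof(buf), "%s", "0123456789");
	/* ...while the scnprintf()-style value is what was stored. */
	int got = scnprintf_like(buf, sizeof(buf), "%s", "0123456789");

	printf("snprintf=%d scnprintf=%d stored=\"%s\"\n", want, got, buf);
	/* prints: snprintf=10 scnprintf=7 stored="0123456" */
	return 0;
}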

View File

@@ -119,19 +119,30 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
 					size_t count, loff_t *ppos)
 {
 	int ret;
-	u32 scd_q_msk;
+	u32 flush_arg;
 
 	if (!iwl_mvm_firmware_running(mvm) ||
 	    mvm->cur_ucode != IWL_UCODE_REGULAR)
 		return -EIO;
 
-	if (sscanf(buf, "%x", &scd_q_msk) != 1)
+	if (kstrtou32(buf, 0, &flush_arg))
 		return -EINVAL;
 
-	IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "FLUSHING all tids queues on sta_id = %d\n",
+				    flush_arg);
+		mutex_lock(&mvm->mutex);
+		ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ? : count;
+		mutex_unlock(&mvm->mutex);
+		return ret;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n",
+			    flush_arg);
 
 	mutex_lock(&mvm->mutex);
-	ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
+	ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ? : count;
 	mutex_unlock(&mvm->mutex);
 
 	return ret;
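
kstrtou32() is stricter than the sscanf("%x") it replaces: with base 0 it derives the base from the prefix (so the old hex queue masks and the new decimal station IDs both parse), and it rejects trailing garbage outright. A userspace sketch of the same semantics, with strict_parse_u32() as a local stand-in rather than the kernel API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Roughly mimics kstrtou32(s, 0, &res): base inferred from the
 * prefix, and anything but an optional trailing newline after the
 * number is an error. Overflow handling omitted in this sketch. */
static int strict_parse_u32(const char *s, unsigned int *res)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 0);
	if (errno || end == s || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	*res = (unsigned int)val;
	return 0;
}

int main(void)
{
	unsigned int v;

	printf("%d\n", strict_parse_u32("0x17", &v));   /* 0, v = 0x17 */
	printf("%d\n", strict_parse_u32("23 foo", &v)); /* -EINVAL */
	return 0;
}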

View File

@@ -199,6 +199,7 @@ struct iwl_mac_data_ibss {
 * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
 * @listen_interval: in beacon intervals, applicable only when associated
 * @assoc_id: unique ID assigned by the AP during association
+* @assoc_beacon_arrive_time: TSF of first beacon after association
 */
 struct iwl_mac_data_sta {
 	__le32 is_assoc;

View File

@@ -769,7 +769,7 @@ struct iwl_scan_offload_profiles_query {
 * @last_channel: last channel that was scanned
 * @start_tsf: TSF timer in usecs of the scan start time for the mac specified
 *	in &struct iwl_scan_req_umac.
-* @results: array of scan results, only "scanned_channels" of them are valid
+* @results: array of scan results, length in @scanned_channels
 */
 struct iwl_umac_scan_iter_complete_notif {
 	__le32 uid;

View File

@@ -488,7 +488,7 @@ enum iwl_tx_agg_status {
 
 /**
  * struct agg_tx_status - per packet TX aggregation status
- * @status: enum iwl_tx_agg_status
+ * @status: See &enum iwl_tx_agg_status
  * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
  */
 struct agg_tx_status {
@@ -512,6 +512,64 @@ struct agg_tx_status {
 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
 
+/**
+ * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ *	Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ *	for agg: RTS + CTS + aggregation tx time + block-ack time.
+ *	in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
+ * @status: for non-agg: frame status TX_STATUS_*
+ *	for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
+ *	follow this one, up to frame_count. Length in @frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp_v3 {
+	u8 frame_count;
+	u8 bt_kill_count;
+	u8 failure_rts;
+	u8 failure_frame;
+	__le32 initial_rate;
+	__le16 wireless_media_time;
+	u8 pa_status;
+	u8 pa_integ_res_a[3];
+	u8 pa_integ_res_b[3];
+	u8 pa_integ_res_c[3];
+	__le16 measurement_req_id;
+	u8 reduced_tpc;
+	u8 reserved;
+	__le32 tfd_info;
+	__le16 seq_ctl;
+	__le16 byte_cnt;
+	u8 tlc_info;
+	u8 ra_tid;
+	__le16 frame_ctrl;
+	struct agg_tx_status status[];
+} __packed; /* TX_RSP_API_S_VER_3 */
+
 /**
  * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
  * ( REPLY_TX = 0x1c )
@@ -539,8 +597,6 @@ struct agg_tx_status {
  * @frame_ctrl: frame control
  * @tx_queue: TX queue for this response
  * @status: for non-agg: frame status TX_STATUS_*
- *	for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
- *	follow this one, up to frame_count.
  *	For version 6 TX response isn't received for aggregation at all.
  *
  * After the array of statuses comes the SSN of the SCD. Look at
@@ -568,16 +624,9 @@ struct iwl_mvm_tx_resp {
 	u8 tlc_info;
 	u8 ra_tid;
 	__le16 frame_ctrl;
-	union {
-		struct {
-			struct agg_tx_status status;
-		} v3;/* TX_RSP_API_S_VER_3 */
-		struct {
-			__le16 tx_queue;
-			__le16 reserved2;
-			struct agg_tx_status status;
-		} v6;
-	};
+	__le16 tx_queue;
+	__le16 reserved2;
+	struct agg_tx_status status;
 } __packed; /* TX_RSP_API_S_VER_6 */
@@ -797,12 +846,24 @@ enum iwl_dump_control {
 * @flush_ctl: control flags
 * @reserved: reserved
 */
-struct iwl_tx_path_flush_cmd {
+struct iwl_tx_path_flush_cmd_v1 {
 	__le32 queues_ctl;
 	__le16 flush_ctl;
 	__le16 reserved;
 } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
 
+/**
+ * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+	__le32 sta_id;
+	__le16 tid_mask;
+	__le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
 /* Available options for the SCD_QUEUE_CFG HCMD */
 enum iwl_scd_cfg_actions {
 	SCD_CFG_DISABLE_QUEUE		= 0x0,
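
The VER_1 command flushes by TFD queue bitmap, while the new VER_2 layout addresses a station and a TID mask. A hedged sketch of how an op-mode might pick between the two layouts, mirroring the iwl_mvm_flush_tx_path() / iwl_mvm_flush_sta_tids() split used elsewhere in this series; example_flush() itself is a hypothetical wrapper, not driver code:

static int example_flush(struct iwl_mvm *mvm, u32 sta_or_queues, u16 tids)
{
	if (iwl_mvm_has_new_tx_api(mvm)) {
		/* VER_2: flush is addressed per station and TID mask */
		struct iwl_tx_path_flush_cmd cmd = {
			.sta_id = cpu_to_le32(sta_or_queues),
			.tid_mask = cpu_to_le16(tids),
		};

		return iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
					    sizeof(cmd), &cmd);
	} else {
		/* VER_1: flush is addressed per TFD queue bitmap */
		struct iwl_tx_path_flush_cmd_v1 cmd = {
			.queues_ctl = cpu_to_le32(sta_or_queues),
			.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
		};

		return iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
					    sizeof(cmd), &cmd);
	}
}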

View File

@@ -216,7 +216,9 @@ enum iwl_legacy_cmds {
 	FW_GET_ITEM_CMD = 0x1a,
 
 	/**
-	 * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2
+	 * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
+	 *	response in &struct iwl_mvm_tx_resp or
+	 *	&struct iwl_mvm_tx_resp_v3
 	 */
 	TX_CMD = 0x1c,
@@ -552,9 +554,26 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
 	NVM_GET_INFO = 0x2,
 };
 
+/**
+ * enum iwl_debug_cmds - debug commands
+ */
 enum iwl_debug_cmds {
+	/**
+	 * @LMAC_RD_WR:
+	 * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+	 * &struct iwl_dbg_mem_access_rsp
+	 */
 	LMAC_RD_WR = 0x0,
+	/**
+	 * @UMAC_RD_WR:
+	 * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+	 * &struct iwl_dbg_mem_access_rsp
+	 */
 	UMAC_RD_WR = 0x1,
+	/**
+	 * @MFU_ASSERT_DUMP_NTF:
+	 * &struct iwl_mfu_assert_dump_notif
+	 */
 	MFU_ASSERT_DUMP_NTF = 0xFE,
 };
@@ -2111,7 +2130,7 @@ struct ct_kill_notif {
 * enum ctdp_cmd_operation - CTDP command operations
 * @CTDP_CMD_OPERATION_START: update the current budget
 * @CTDP_CMD_OPERATION_STOP: stop ctdp
-* @CTDP_CMD_OPERATION_REPORT: get the avgerage budget
+* @CTDP_CMD_OPERATION_REPORT: get the average budget
 */
 enum iwl_mvm_ctdp_cmd_operation {
 	CTDP_CMD_OPERATION_START	= 0x1,

View File

@@ -319,9 +319,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
 
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
 {
-	if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert)
-		return;
-
-	kfree(mvm->fw_dump_desc);
+	if (mvm->fw_dump_desc != &iwl_mvm_dump_desc_assert)
+		kfree(mvm->fw_dump_desc);
 	mvm->fw_dump_desc = NULL;
 }
@@ -915,6 +913,10 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 	if (trigger)
 		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
 
+	if (WARN(mvm->trans->state == IWL_TRANS_NO_FW,
+		 "Can't collect dbg data when FW isn't alive\n"))
+		return -EIO;
+
 	if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
 		return -EBUSY;

View File

@@ -846,6 +846,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
 			cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
 		ctxt_sta->dtim_time =
 			cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
+		ctxt_sta->assoc_beacon_arrive_time =
+			cpu_to_le32(vif->bss_conf.sync_device_ts);
 
 		IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
 			       le64_to_cpu(ctxt_sta->dtim_tsf),
@@ -1457,6 +1459,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
 	beacon_notify_hdr = &beacon->beacon_notify_hdr;
 	mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+	mvm->ibss_manager = beacon->ibss_mgr_status != 0;
 
 	agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
 	status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;

View File

@@ -1988,14 +1988,32 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 			WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
 				  "Failed to update SF upon disassociation\n");
 
-			/* remove AP station now that the MAC is unassoc */
-			ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
-			if (ret)
-				IWL_ERR(mvm, "failed to remove AP station\n");
-
-			if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-				mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
-			mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+			/*
+			 * If we get an assert during the connection (after the
+			 * station has been added, but before the vif is set
+			 * to associated), mac80211 will re-add the station and
+			 * then configure the vif. Since the vif is not
+			 * associated, we would remove the station here and
+			 * this would fail the recovery.
+			 */
+			if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+				      &mvm->status)) {
+				/*
+				 * Remove AP station now that
+				 * the MAC is unassoc
+				 */
+				ret = iwl_mvm_rm_sta_id(mvm, vif,
+							mvmvif->ap_sta_id);
+				if (ret)
+					IWL_ERR(mvm,
+						"failed to remove AP station\n");
+
+				if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+					mvm->d0i3_ap_sta_id =
+						IWL_MVM_INVALID_STA;
+				mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+			}
+
 			/* remove quota for this interface */
 			ret = iwl_mvm_update_quotas(mvm, false, NULL);
 			if (ret)
@@ -2395,7 +2413,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 			__set_bit(tid_data->txq_id, &txqs);
 
-		if (iwl_mvm_tid_queued(tid_data) == 0)
+		if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
 			continue;
 
 		__set_bit(tid, &tids);
@@ -2883,6 +2901,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
-		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		if (!iwl_mvm_has_new_tx_api(mvm))
+			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
 		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
@@ -2929,9 +2948,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 				ret = -EOPNOTSUPP;
 			else
 				ret = 0;
-			key->hw_key_idx = STA_KEY_IDX_INVALID;
-			break;
-		}
+
+			if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
+			    key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
+			    !iwl_mvm_has_new_tx_api(mvm)) {
+				key->hw_key_idx = STA_KEY_IDX_INVALID;
+				break;
+			}
+		}
 
 		/* During FW restart, in order to restore the state as it was,
 		 * don't try to reprogram keys we previously failed for.
@@ -3731,6 +3755,13 @@ static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
 	return ret;
 }
 
+static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	return mvm->ibss_manager;
+}
+
 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
 			   struct ieee80211_sta *sta,
 			   bool set)
@@ -4264,11 +4295,13 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 		goto out;
 	}
 
-	if (notif->sync)
-		ret = wait_event_timeout(mvm->rx_sync_waitq,
-					 atomic_read(&mvm->queue_sync_counter) == 0,
-					 HZ);
-	WARN_ON_ONCE(!ret);
+	if (notif->sync) {
+		ret = wait_event_timeout(mvm->rx_sync_waitq,
+					 atomic_read(&mvm->queue_sync_counter) == 0 ||
+					 iwl_mvm_is_radio_killed(mvm),
+					 HZ);
+		WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
+	}
 
 out:
 	atomic_set(&mvm->queue_sync_counter, 0);
@@ -4332,6 +4365,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 	.join_ibss = iwl_mvm_start_ap_ibss,
 	.leave_ibss = iwl_mvm_stop_ap_ibss,
 
+	.tx_last_beacon = iwl_mvm_tx_last_beacon,
+
 	.set_tim = iwl_mvm_set_tim,
 
 	.channel_switch = iwl_mvm_channel_switch,

View File

@@ -1021,6 +1021,9 @@ struct iwl_mvm {
 	/* system time of last beacon (for AP/GO interface) */
 	u32 ap_last_beacon_gp2;
 
+	/* indicates that we transmitted the last beacon */
+	bool ibss_manager;
+
 	bool lar_regdom_set;
 	enum iwl_mcc_source mcc_src;
@@ -1078,6 +1081,18 @@ struct iwl_mvm {
 #define IWL_MAC80211_GET_MVM(_hw)	\
 	IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
 
+/**
+ * enum iwl_mvm_status - MVM status bits
+ * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
+ * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
+ * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
+ * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
+ * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
+ * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
+ * @IWL_MVM_STATUS_DUMPING_FW_LOG: FW log is being dumped
+ * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
+ */
 enum iwl_mvm_status {
 	IWL_MVM_STATUS_HW_RFKILL,
 	IWL_MVM_STATUS_HW_CTKILL,
@@ -1281,14 +1296,13 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
 			   IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
 }
 
-static inline struct agg_tx_status*
-iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
-		       struct iwl_mvm_tx_resp *tx_resp)
+static inline struct agg_tx_status *
+iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
 {
 	if (iwl_mvm_has_new_tx_api(mvm))
-		return &tx_resp->v6.status;
+		return &((struct iwl_mvm_tx_resp *)tx_resp)->status;
 	else
-		return &tx_resp->v3.status;
+		return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status;
 }
 
 static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
@@ -1370,7 +1384,9 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
 static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 #endif
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool int_sta, u32 flags);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+			   u16 tids, u32 flags);
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
@@ -1730,7 +1746,7 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 }
 
 /* hw scheduler queue config */
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
 			unsigned int wdg_timeout);
 int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,

View File

@@ -118,10 +118,6 @@ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
 		return ret;
 
 	pkt = cmd.resp_pkt;
-	if (!pkt) {
-		IWL_ERR(mvm, "Error in NVM_ACCESS response\n");
-		return -EINVAL;
-	}
 
 	/* Extract & check NVM write response */
 	nvm_resp = (void *)pkt->data;
 	if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
@@ -600,9 +596,11 @@ int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm)
 	if (!is_valid_ether_addr(mvm->nvm_data->hw_addr)) {
 		IWL_ERR(trans, "no valid mac address was found\n");
 		ret = -EINVAL;
-		goto out;
+		goto err_free;
 	}
 
+	IWL_INFO(trans, "base HW address: %pM\n", mvm->nvm_data->hw_addr);
+
 	/* Initialize general data */
 	mvm->nvm_data->nvm_version = le16_to_cpu(rsp->general.nvm_version);
@@ -632,7 +630,11 @@ int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm)
 			mvm->nvm_data->valid_rx_ant & mvm->fw->valid_rx_ant,
 			rsp->regulatory.lar_enabled && lar_fw_supported);
 
-	ret = 0;
+	iwl_free_resp(&hcmd);
+	return 0;
+
+err_free:
+	kfree(mvm->nvm_data);
 out:
 	iwl_free_resp(&hcmd);
 	return ret;
@@ -783,6 +785,10 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
 		resp_len = sizeof(struct iwl_mcc_update_resp) +
 			   n_channels * sizeof(__le32);
 		resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+		if (!resp_cp) {
+			resp_cp = ERR_PTR(-ENOMEM);
+			goto exit;
+		}
 	} else {
 		struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
@@ -790,8 +796,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
 		resp_len = sizeof(struct iwl_mcc_update_resp) +
 			   n_channels * sizeof(__le32);
 		resp_cp = kzalloc(resp_len, GFP_KERNEL);
+		if (!resp_cp) {
+			resp_cp = ERR_PTR(-ENOMEM);
+			goto exit;
+		}
 
-		if (resp_cp) {
-			resp_cp->status = mcc_resp_v1->status;
-			resp_cp->mcc = mcc_resp_v1->mcc;
-			resp_cp->cap = mcc_resp_v1->cap;
+		resp_cp->status = mcc_resp_v1->status;
+		resp_cp->mcc = mcc_resp_v1->mcc;
+		resp_cp->cap = mcc_resp_v1->cap;
@@ -800,12 +809,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
 		memcpy(resp_cp->channels, mcc_resp_v1->channels,
 		       n_channels * sizeof(__le32));
-		}
 	}
 
-	if (!resp_cp) {
-		ret = -ENOMEM;
-		goto exit;
-	}
-
 	status = le32_to_cpu(resp_cp->status);
@@ -824,8 +827,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
 
 exit:
 	iwl_free_resp(&cmd);
-	if (ret)
-		return ERR_PTR(ret);
 	return resp_cp;
 }
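
The reworked error paths above return ERR_PTR(-ENOMEM) directly instead of threading a separate ret variable through the exit label. A minimal userspace sketch of the kernel's ERR_PTR convention; the three macros are re-implemented locally here, while in the kernel they live in linux/err.h:

#include <errno.h>
#include <stdio.h>

/* Minimal re-implementation of the linux/err.h idiom: small negative
 * errno values are encoded into the pointer value itself. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *alloc_resp(int fail)
{
	static int resp = 42;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* no NULL-plus-ret dance */
	return &resp;
}

int main(void)
{
	void *p = alloc_resp(1);

	if (IS_ERR(p))
		printf("error: %ld\n", PTR_ERR(p));	/* error: -12 */
	else
		printf("ok: %d\n", *(int *)p);
	return 0;
}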

View File

@@ -849,7 +849,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	iwl_phy_db_free(mvm->phy_db);
 	mvm->phy_db = NULL;
 
-	iwl_free_nvm_data(mvm->nvm_data);
+	kfree(mvm->nvm_data);
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);
@@ -1094,6 +1094,16 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 	iwl_mvm_start_mac_queues(mvm, mq);
 }
 
+static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
+{
+	bool state = iwl_mvm_is_radio_killed(mvm);
+
+	if (state)
+		wake_up(&mvm->rx_sync_waitq);
+
+	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+}
+
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 {
 	if (state)
@@ -1101,7 +1111,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 	else
 		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
 
-	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+	iwl_mvm_set_rfkill_state(mvm);
 }
 
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
@@ -1114,7 +1124,7 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 	else
 		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
 
-	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+	iwl_mvm_set_rfkill_state(mvm);
 
 	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
 	if (calibrating)
@@ -1171,9 +1181,13 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
 		/* start recording again if the firmware is not crashed */
 		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
-		    mvm->fw->dbg_dest_tlv)
+		    mvm->fw->dbg_dest_tlv) {
 			iwl_clear_bits_prph(mvm->trans,
 					    MON_BUFF_SAMPLE_CTL, 0x100);
+			iwl_clear_bits_prph(mvm->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x1);
+			iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x1);
+		}
 	} else {
 		u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
 		u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
@@ -1313,7 +1327,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
 		 * for offloading in order to prevent reuse of the same
 		 * qos seq counters.
 		 */
-		if (iwl_mvm_tid_queued(tid_data))
+		if (iwl_mvm_tid_queued(mvm, tid_data))
 			continue;
 
 		if (tid_data->state != IWL_AGG_OFF)
@@ -1463,9 +1477,15 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
 	synchronize_net();
 
 	/* Flush the hw queues, in case something got queued during entry */
-	ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags);
-	if (ret)
-		return ret;
+	/* TODO new tx api */
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
+	} else {
+		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
+					    flags);
+		if (ret)
+			return ret;
+	}
 
 	/* configure wowlan configuration only if needed */
 	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
@@ -1611,9 +1631,6 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
 	if (ret)
 		goto out;
 
-	if (!get_status_cmd.resp_pkt)
-		goto out;
-
 	status = (void *)get_status_cmd.resp_pkt->data;
 	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
 	qos_seq = status->qos_seq_ctr;

View File

@@ -2836,7 +2836,11 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 	rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
 	rs_init_optimal_rate(mvm, sta, lq_sta);
 
-	WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
+	WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
+		  "ant: 0x%x, chains 0x%x, fw tx ant: 0x%x, nvm tx ant: 0x%x\n",
+		  rate->ant, lq_sta->pers.chains, mvm->fw->valid_tx_ant,
+		  mvm->nvm_data ? mvm->nvm_data->valid_tx_ant : ANT_INVALID);
 
 	tbl->column = rs_get_column_from_rate(rate);
 	rs_set_expected_tpt_table(lq_sta, tbl);

View File

@@ -502,7 +502,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
 			     buf->sta_id, sn);
 		iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
 		rcu_read_unlock();
-	} else if (buf->num_stored) {
+	} else {
 		/*
 		 * If no frame expired and there are stored frames, index is now
 		 * pointing to the first unexpired frame - modify timer

View File

@ -734,7 +734,6 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
spin_lock_bh(&mvmsta->lock); spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue; mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true; mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
spin_unlock_bh(&mvmsta->lock); spin_unlock_bh(&mvmsta->lock);
return 0; return 0;
@ -758,7 +757,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
bool using_inactive_queue = false, same_sta = false; bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0; unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state; enum iwl_mvm_agg_state queue_state;
bool shared_queue = false; bool shared_queue = false, inc_ssn;
int ssn; int ssn;
unsigned long tfd_queue_mask; unsigned long tfd_queue_mask;
int ret; int ret;
@ -885,8 +884,12 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
} }
ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg, inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
wdg_timeout); ssn, &cfg, wdg_timeout);
if (inc_ssn) {
ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
le16_add_cpu(&hdr->seq_ctrl, 0x10);
}
/* /*
* Mark queue as shared in transport if shared * Mark queue as shared in transport if shared
@ -898,6 +901,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
spin_lock_bh(&mvmsta->lock); spin_lock_bh(&mvmsta->lock);
/*
* This looks racy, but it is not. We have only one packet for
* this ra/tid in our Tx path since we stop the Qdisc when we
* need to allocate a new TFD queue.
*/
if (inc_ssn)
mvmsta->tid_data[tid].seq_number += 0x10;
mvmsta->tid_data[tid].txq_id = queue; mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true; mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue); mvmsta->tfd_queue_msk |= BIT(queue);
@@ -1993,8 +2003,6 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 			mvm->probe_queue = queue;
 		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
 			mvm->p2p_dev_queue = queue;
-
-		bsta->tfd_queue_msk |= BIT(queue);
 	}

 	return 0;
@@ -2004,29 +2012,32 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
 					  struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int queue;

 	lockdep_assert_held(&mvm->mutex);

 	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

-	if (vif->type == NL80211_IFTYPE_AP ||
-	    vif->type == NL80211_IFTYPE_ADHOC)
-		iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
-				    IWL_MAX_TID_COUNT, 0);
-
-	if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
-		iwl_mvm_disable_txq(mvm, mvm->probe_queue,
-				    vif->hw_queue[0], IWL_MAX_TID_COUNT,
-				    0);
-		mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
-	}
-
-	if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
-		iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
-				    vif->hw_queue[0], IWL_MAX_TID_COUNT,
-				    0);
-		mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
-	}
+	switch (vif->type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
+		queue = mvm->probe_queue;
+		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		queue = mvm->p2p_dev_queue;
+		break;
+	default:
+		WARN(1, "Can't free bcast queue on vif type %d\n",
+		     vif->type);
+		return;
+	}
+
+	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return;
+
+	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
+	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
 }

 /* Send the FW a request to remove the station from it's internal data
@@ -2529,6 +2540,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data;
+	u16 normalized_ssn;
 	int txq_id;
 	int ret;

@@ -2616,7 +2628,15 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
 			    tid_data->next_reclaimed);

-	if (tid_data->ssn == tid_data->next_reclaimed) {
+	/*
+	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * to align the wrap around of ssn so we compare relevant values.
+	 */
+	normalized_ssn = tid_data->ssn;
+	if (mvm->trans->cfg->gen2)
+		normalized_ssn &= 0xff;
+
+	if (normalized_ssn == tid_data->next_reclaimed) {
 		tid_data->state = IWL_AGG_STARTING;
 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 	} else {
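The normalization above only matters once the 12-bit SSN and the 8-bit next_reclaimed have wrapped a different number of times. A hedged user-space sketch of the comparison (field widths as stated in the comment; not driver code):

	#include <stdint.h>
	#include <stdio.h>

	static int drained(uint16_t ssn, uint16_t next_reclaimed, int gen2)
	{
		uint16_t norm = gen2 ? (ssn & 0xff) : ssn;	/* the added masking */

		return norm == next_reclaimed;
	}

	int main(void)
	{
		/* 0x105 and 0x05 name the same frame after the 8-bit index wrapped */
		printf("%d %d\n", drained(0x105, 0x05, 1),
		       drained(0x105, 0x05, 0));	/* prints: 1 0 */
		return 0;
	}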
@@ -2814,8 +2834,13 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    "ssn = %d, next_recl = %d\n",
 			    tid_data->ssn, tid_data->next_reclaimed);

-		/* There are still packets for this RA / TID in the HW */
-		if (tid_data->ssn != tid_data->next_reclaimed) {
+		/*
+		 * There are still packets for this RA / TID in the HW.
+		 * Not relevant for DQA mode, since there is no need to disable
+		 * the queue.
+		 */
+		if (!iwl_mvm_is_dqa_supported(mvm) &&
+		    tid_data->ssn != tid_data->next_reclaimed) {
 			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
 			err = 0;
 			break;
@@ -2888,14 +2913,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	if (old_state >= IWL_AGG_ON) {
 		iwl_mvm_drain_sta(mvm, mvmsta, true);

-		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
-			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
-
-		if (iwl_mvm_has_new_tx_api(mvm))
+		if (iwl_mvm_has_new_tx_api(mvm)) {
+			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
+						   BIT(tid), 0))
+				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
 			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
-		else
+		} else {
+			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
+				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
 			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
+		}

 		iwl_mvm_drain_sta(mvm, mvmsta, false);

@@ -2975,7 +3003,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
 }

 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
-				struct iwl_mvm_sta *mvm_sta,
+				u32 sta_id,
 				struct ieee80211_key_conf *key, bool mcast,
 				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
 				u8 key_offset)
@@ -2993,6 +3021,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
 	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
 				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

+	if (sta_id == IWL_MVM_INVALID_STA)
+		return -EINVAL;
+
 	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
 		 STA_KEY_FLG_KEYID_MSK;
 	key_flags = cpu_to_le16(keyidx);
@@ -3051,7 +3082,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
 	u.cmd.common.key_offset = key_offset;
 	u.cmd.common.key_flags = key_flags;
-	u.cmd.common.sta_id = mvm_sta->sta_id;
+	u.cmd.common.sta_id = sta_id;

 	if (new_api) {
 		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
@@ -3184,19 +3215,37 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 				 u8 key_offset,
 				 bool mcast)
 {
-	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	int ret;
 	const u8 *addr;
 	struct ieee80211_key_seq seq;
 	u16 p1k[5];
+	u32 sta_id;
+
+	if (sta) {
+		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+		sta_id = mvm_sta->sta_id;
+	} else if (vif->type == NL80211_IFTYPE_AP &&
+		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+		sta_id = mvmvif->mcast_sta.sta_id;
+	} else {
+		IWL_ERR(mvm, "Failed to find station id\n");
+		return -EINVAL;
+	}

 	switch (keyconf->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
+		if (vif->type == NL80211_IFTYPE_AP) {
+			ret = -EINVAL;
+			break;
+		}
 		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
 		/* get phase 1 key from mac80211 */
 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
 		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
-		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
 					   seq.tkip.iv32, p1k, 0, key_offset);
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
@@ -3204,11 +3253,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	case WLAN_CIPHER_SUITE_WEP104:
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
-		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
 					   0, NULL, 0, key_offset);
 		break;
 	default:
-		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
 					   0, NULL, 0, key_offset);
 	}
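The sta_id resolution introduced above reduces to a three-way decision: a real station wins, an AP group key falls back to the per-vif multicast station, and anything else is rejected. A hedged restatement with plain ints standing in for the driver structures (illustrative only):

	#include <stdio.h>

	/* -1 models the -EINVAL path */
	static int resolve_sta_id(int have_sta, int sta_id,
				  int vif_is_ap, int pairwise,
				  int mcast_sta_id)
	{
		if (have_sta)
			return sta_id;
		if (vif_is_ap && !pairwise)
			return mcast_sta_id;
		return -1;	/* "Failed to find station id" */
	}

	int main(void)
	{
		/* AP group key, no station: use the multicast station id */
		printf("%d\n", resolve_sta_id(0, 7, 1, 0, 2));	/* 2 */
		/* pairwise key without a station: error */
		printf("%d\n", resolve_sta_id(0, 7, 1, 1, 2));	/* -1 */
		return 0;
	}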
@@ -3229,6 +3278,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
 	int ret, size;
 	u32 status;

+	if (sta_id == IWL_MVM_INVALID_STA)
+		return -EINVAL;
+
 	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
 				STA_KEY_FLG_KEYID_MSK);
 	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
@@ -3272,12 +3324,14 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 {
 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
 	struct iwl_mvm_sta *mvm_sta;
-	u8 sta_id;
+	u8 sta_id = IWL_MVM_INVALID_STA;
 	int ret;
 	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

 	lockdep_assert_held(&mvm->mutex);

+	if (vif->type != NL80211_IFTYPE_AP ||
+	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
 		/* Get the station id from the mvm local station table */
 		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
 		if (!mvm_sta) {
@@ -3289,16 +3343,19 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 		if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
 		    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
 		    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
-			ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+			ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
+						    false);
 			goto end;
 		}

 		/*
 		 * It is possible that the 'sta' parameter is NULL, and thus
-		 * there is a need to retrieve the sta from the local station table.
+		 * there is a need to retrieve the sta from the local station
+		 * table.
 		 */
 		if (!sta) {
-			sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+			sta = rcu_dereference_protected(
+				mvm->fw_id_to_mac_id[sta_id],
 				lockdep_is_held(&mvm->mutex));
 			if (IS_ERR_OR_NULL(sta)) {
 				IWL_ERR(mvm, "Invalid station id\n");
@@ -3308,6 +3365,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,

 		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
 			return -EINVAL;
+	}

 	/* If the key_offset is not pre-assigned, we need to find a
 	 * new offset to use. In normal cases, the offset is not
@@ -3337,8 +3395,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	 * to the same key slot (offset).
 	 * If this fails, remove the original as well.
 	 */
-	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+	    sta) {
 		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
 					    key_offset, !mcast);
 		if (ret) {
@@ -3372,6 +3431,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
 	if (mvm_sta)
 		sta_id = mvm_sta->sta_id;
+	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
+		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
 		      keyconf->keyidx, sta_id);
@@ -3394,7 +3456,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 	}
 	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

-	if (!mvm_sta) {
+	if (sta && !mvm_sta) {
 		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
 		return 0;
 	}
@@ -3425,7 +3487,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
 	if (WARN_ON_ONCE(!mvm_sta))
 		goto unlock;
-	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
 			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
@@ -3501,7 +3563,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 			return;
 		}

-		n_queued = iwl_mvm_tid_queued(tid_data);
+		n_queued = iwl_mvm_tid_queued(mvm, tid_data);
 		if (n_queued > remaining) {
 			more_data = true;
 			remaining = 0;
@@ -3683,3 +3745,17 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

 	rcu_read_unlock();
 }
+
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
+{
+	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+	/*
+	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * to align the wrap around of ssn so we compare relevant values.
+	 */
+	if (mvm->trans->cfg->gen2)
+		sn &= 0xff;
+
+	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
+}
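ieee80211_sn_sub() is modular subtraction in the 12-bit SN space, so the new helper returns how many frames sit between the last-reclaimed index and the driver's next sequence number, after first narrowing to 8 bits on gen2. A sketch of the arithmetic (the 0xfff and 0xff widths come from the comment above; illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t sn_sub(uint16_t a, uint16_t b)
	{
		return (a - b) & 0xfff;		/* models ieee80211_sn_sub() */
	}

	static uint16_t tid_queued(uint16_t sn, uint16_t next_reclaimed, int gen2)
	{
		if (gen2)
			sn &= 0xff;		/* 8-bit next_reclaimed on A000 */
		return sn_sub(sn, next_reclaimed);
	}

	int main(void)
	{
		printf("%u %u\n", (unsigned)tid_queued(0x105, 0x01, 1),
		       (unsigned)tid_queued(0x105, 0x101, 0));	/* 4 4 */
		return 0;
	}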
@@ -341,12 +341,6 @@ struct iwl_mvm_tid_data {
 	bool is_tid_active;
 };

-static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
-{
-	return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
-				tid_data->next_reclaimed);
-}
-
 struct iwl_mvm_key_pn {
 	struct rcu_head rcu_head;
 	struct {
@@ -447,6 +441,8 @@ struct iwl_mvm_sta {
 	u8 avg_energy;
 };

+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
+
 static inline struct iwl_mvm_sta *
 iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
 {
@@ -473,7 +473,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (unlikely(!dev_cmd))
 		return NULL;

-	memset(dev_cmd, 0, sizeof(*dev_cmd));
+	/* Make sure we zero enough of dev_cmd */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+
+	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
 	dev_cmd->hdr.cmd = TX_CMD;

 	if (iwl_mvm_has_new_tx_api(mvm)) {
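The BUILD_BUG_ON is what keeps the shorter memset() honest: the command buffer is shared between the legacy and gen2 TX command layouts, so only the header plus the larger layout needs zeroing, and the compile-time check breaks the build if the gen2 layout ever outgrows the zeroed span. A stand-alone model of the pattern (simplified stand-in types and sizes, C11 static_assert in place of BUILD_BUG_ON; not the real iwlwifi structures):

	#include <assert.h>
	#include <string.h>

	struct hdr { unsigned char cmd, group_id, seq[2]; };
	struct tx_cmd { unsigned char payload[312]; };	/* region we zero */
	struct tx_cmd_gen2 { unsigned char payload[48]; };

	struct dev_cmd {
		struct hdr hdr;
		unsigned char payload[320];
	};

	static void prep(struct dev_cmd *c)
	{
		/* compile-time: gen2 layout must fit inside the zeroed span */
		static_assert(sizeof(struct tx_cmd_gen2) <=
			      sizeof(struct tx_cmd), "gen2 cmd too big");
		memset(c, 0, sizeof(c->hdr) + sizeof(struct tx_cmd));
	}

	int main(void)
	{
		struct dev_cmd c;

		prep(&c);
		return 0;
	}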
@@ -648,6 +651,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			   info.control.vif->type == NL80211_IFTYPE_STATION &&
 			   queue != mvm->aux_queue) {
 			queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+		} else if (iwl_mvm_is_dqa_supported(mvm) &&
+			   info.control.vif->type == NL80211_IFTYPE_MONITOR) {
+			queue = mvm->aux_queue;
 		}
 	}

@@ -1126,13 +1132,14 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 	struct ieee80211_vif *vif = mvmsta->vif;
+	u16 normalized_ssn;

 	lockdep_assert_held(&mvmsta->lock);

 	if ((tid_data->state == IWL_AGG_ON ||
 	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
 	     iwl_mvm_is_dqa_supported(mvm)) &&
-	    iwl_mvm_tid_queued(tid_data) == 0) {
+	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
 		/*
 		 * Now that this aggregation or DQA queue is empty tell
 		 * mac80211 so it knows we no longer have frames buffered for
@@ -1141,7 +1148,15 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 		ieee80211_sta_set_buffered(sta, tid, false);
 	}

-	if (tid_data->ssn != tid_data->next_reclaimed)
+	/*
+	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * to align the wrap around of ssn so we compare relevant values.
+	 */
+	normalized_ssn = tid_data->ssn;
+	if (mvm->trans->cfg->gen2)
+		normalized_ssn &= 0xff;
+
+	if (normalized_ssn != tid_data->next_reclaimed)
 		return;

 	switch (tid_data->state) {
@@ -1319,6 +1334,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	struct ieee80211_sta *sta;
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
+	/* struct iwl_mvm_tx_resp_v3 is almost the same */
 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
 	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
 	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
@@ -1336,7 +1352,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	__skb_queue_head_init(&skbs);

 	if (iwl_mvm_has_new_tx_api(mvm))
-		txq_id = le16_to_cpu(tx_resp->v6.tx_queue);
+		txq_id = le16_to_cpu(tx_resp->tx_queue);

 	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
@@ -1485,7 +1501,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			if (mvmsta->sleep_tx_count) {
 				mvmsta->sleep_tx_count--;
 				if (mvmsta->sleep_tx_count &&
-				    !iwl_mvm_tid_queued(tid_data)) {
+				    !iwl_mvm_tid_queued(mvm, tid_data)) {
 					/*
 					 * The number of frames in the queue
 					 * dropped to 0 even if we sent less
@@ -1889,11 +1905,13 @@ out:
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
 {
 	int ret;
-	struct iwl_tx_path_flush_cmd flush_cmd = {
+	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
 		.queues_ctl = cpu_to_le32(tfd_msk),
 		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
 	};

+	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
 	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
 				   sizeof(flush_cmd), &flush_cmd);
 	if (ret)
@@ -1901,19 +1919,41 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
 	return ret;
 }

-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool int_sta, u32 flags)
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+			   u16 tids, u32 flags)
 {
-	u32 mask;
+	int ret;
+	struct iwl_tx_path_flush_cmd flush_cmd = {
+		.sta_id = cpu_to_le32(sta_id),
+		.tid_mask = cpu_to_le16(tids),
+	};

-	if (int_sta) {
-		struct iwl_mvm_int_sta *int_sta = sta;
+	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

-		mask = int_sta->tfd_queue_msk;
-	} else {
-		struct iwl_mvm_sta *mvm_sta = sta;
+	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+				   sizeof(flush_cmd), &flush_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+	return ret;
+}

-		mask = mvm_sta->tfd_queue_msk;
-	}
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+{
+	struct iwl_mvm_int_sta *int_sta = sta;
+	struct iwl_mvm_sta *mvm_sta = sta;

-	return iwl_mvm_flush_tx_path(mvm, mask, flags);
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		if (internal)
+			return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id,
+						      BIT(IWL_MGMT_TID), flags);
+
+		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
+					      0xFF, flags);
+	}
+
+	if (internal)
+		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
+					     flags);
+
+	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
 }
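The net effect of the split above: firmware with the new TX API is flushed per station/TID, everything older is still flushed by TFD-queue bitmask. A small model of the dispatch, with stub "commands" that print what would be sent (names here are illustrative, not the driver API):

	#include <stdint.h>
	#include <stdio.h>

	enum { MGMT_TID = 15 };

	static int flush_sta_tids(uint32_t sta_id, uint16_t tids)
	{
		printf("TXPATH_FLUSH sta=%u tids=0x%04x\n", sta_id, (unsigned)tids);
		return 0;
	}

	static int flush_tx_path(uint32_t tfd_msk)
	{
		printf("TXPATH_FLUSH queues=0x%08x\n", tfd_msk);
		return 0;
	}

	static int flush_sta(int new_tx_api, int internal, uint32_t sta_id,
			     uint32_t tfd_queue_msk)
	{
		if (new_tx_api)
			return flush_sta_tids(sta_id,
					      internal ? 1u << MGMT_TID : 0xff);
		return flush_tx_path(tfd_queue_msk);	/* legacy path */
	}

	int main(void)
	{
		flush_sta(1, 1, 2, 0);		/* new API, internal station */
		flush_sta(0, 0, 2, 0x0c);	/* legacy, by queue mask */
		return 0;
	}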
@@ -69,6 +69,7 @@
 #include "iwl-debug.h"
 #include "iwl-io.h"
 #include "iwl-prph.h"
+#include "iwl-csr.h"
 #include "fw-dbg.h"
 #include "mvm.h"
 #include "fw-api-rs.h"
@@ -168,11 +169,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
 	}

 	pkt = cmd->resp_pkt;
-	/* Can happen if RFKILL is asserted */
-	if (!pkt) {
-		ret = 0;
-		goto out_free_resp;
-	}

 	resp_len = iwl_rx_packet_payload_len(pkt);
 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
@@ -502,6 +498,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 {
 	struct iwl_trans *trans = mvm->trans;
 	struct iwl_error_event_table table;
+	u32 val;

 	if (mvm->cur_ucode == IWL_UCODE_INIT) {
 		if (!base)
@@ -520,6 +517,36 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 		return;
 	}

+	/* check if there is a HW error */
+	val = iwl_trans_read_mem32(trans, base);
+	if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
+		int err;
+
+		IWL_ERR(trans, "HW error, resetting before reading\n");
+
+		/* reset the device */
+		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+		usleep_range(1000, 2000);
+
+		/* set INIT_DONE flag */
+		iwl_set_bit(trans, CSR_GP_CNTRL,
+			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+		/* and wait for clock stabilization */
+		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+			udelay(2);
+
+		err = iwl_poll_bit(trans, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+				   25000);
+		if (err < 0) {
+			IWL_DEBUG_INFO(trans,
+				       "Failed to reset the card for the dump\n");
+			return;
+		}
+	}
+
 	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
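The magic numbers in the new check are poison patterns: a wedged bus tends to read back as 0xa5a5a5a. or 0x5a5a5a5., so the low nibble is masked off before comparing and a reset is attempted before trusting the dump. A stand-alone restatement of just the test (illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	static int looks_wedged(uint32_t val)
	{
		return (val & ~0xfu) == 0xa5a5a5a0u ||
		       (val & ~0xfu) == 0x5a5a5a50u;
	}

	int main(void)
	{
		printf("%d %d %d\n", looks_wedged(0xa5a5a5a7),
		       looks_wedged(0x5a5a5a50), looks_wedged(0x12345678));
		/* prints: 1 1 0 */
		return 0;
	}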
@@ -730,16 +757,10 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
 	return queue;
 }

-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
 			unsigned int wdg_timeout)
 {
-	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
-		return;
-
-	/* Send the enabling command if we need to */
-	if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
-				       cfg->sta_id, cfg->tid)) {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
 		.action = SCD_CFG_ENABLE_QUEUE,
@@ -750,15 +771,25 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 		.aggregate = cfg->aggregate,
 		.tid = cfg->tid,
 	};
+	bool inc_ssn;

-	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
-				 wdg_timeout);
-	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
-				  sizeof(struct iwl_scd_txq_cfg_cmd),
-				  &cmd),
-	     "Failed to configure queue %d on FIFO %d\n", queue,
-	     cfg->fifo);
-	}
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return false;
+
+	/* Send the enabling command if we need to */
+	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+					cfg->sta_id, cfg->tid))
+		return false;
+
+	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+					   NULL, wdg_timeout);
+	if (inc_ssn)
+		le16_add_cpu(&cmd.ssn, 1);
+
+	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+	return inc_ssn;
 }

 int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
@@ -1186,7 +1217,11 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
 	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
 		/* If some TFDs are still queued - don't mark TID as inactive */
-		if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid]))
+		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+			tid_bitmap &= ~BIT(tid);
+
+		/* Don't mark as inactive any TID that has an active BA */
+		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
 			tid_bitmap &= ~BIT(tid);
 	}
@@ -766,7 +766,6 @@ static int iwl_pci_resume(struct device *device)
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct iwl_trans *trans = pci_get_drvdata(pdev);
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	bool hw_rfkill;

 	/* Before you put code here, think about WoWLAN. You cannot check here
 	 * whether WoWLAN is enabled or not, and your code will run even if
@@ -783,16 +782,13 @@ static int iwl_pci_resume(struct device *device)
 		return 0;

 	/*
-	 * Enable rfkill interrupt (in order to keep track of
-	 * the rfkill status). Must be locked to avoid processing
-	 * a possible rfkill interrupt between reading the state
-	 * and calling iwl_trans_pcie_rf_kill() with it.
+	 * Enable rfkill interrupt (in order to keep track of the rfkill
+	 * status). Must be locked to avoid processing a possible rfkill
+	 * interrupt while in iwl_trans_check_hw_rf_kill().
 	 */
 	mutex_lock(&trans_pcie->mutex);
 	iwl_enable_rfkill_int(trans);
-
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	iwl_trans_check_hw_rf_kill(trans);
 	mutex_unlock(&trans_pcie->mutex);

 	return 0;
@@ -403,7 +403,8 @@ struct iwl_trans_pcie {
 	dma_addr_t ict_tbl_dma;
 	int ict_index;
 	bool use_ict;
-	bool is_down;
+	bool is_down, opmode_down;
+	bool debug_rfkill;
 	struct isr_statistics isr_stats;

 	spinlock_t irq_lock;
@@ -515,7 +516,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
 int iwl_pcie_tx_stop(struct iwl_trans *trans);
 void iwl_pcie_tx_free(struct iwl_trans *trans);
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
 			       const struct iwl_trans_txq_scd_cfg *cfg,
 			       unsigned int wdg_timeout);
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
@@ -675,6 +676,8 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 	}
 }

+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
+
 static inline void iwl_wake_queue(struct iwl_trans *trans,
 				  struct iwl_txq *txq)
 {
@@ -713,7 +716,12 @@ static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)

 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
 {
-	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->mutex);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	if (trans_pcie->debug_rfkill)
+		return true;

 	return !(iwl_read32(trans, CSR_GP_CNTRL) &
 		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
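With the new debugfs knob in place, iwl_is_rfkill_set() reports "killed" whenever either the software override or the hardware GP_CNTRL bit says so. A hedged model of the decision (the bit position is a stand-in, not the real CSR layout):

	#include <stdint.h>

	#define HW_RF_KILL_SW	(1u << 27)	/* stand-in for the CSR flag */

	static int rfkill_set(int debug_rfkill, uint32_t gp_cntrl)
	{
		if (debug_rfkill)
			return 1;	/* debugfs override wins */
		/* the bit being set means the radio is enabled, hence the negation */
		return !(gp_cntrl & HW_RF_KILL_SW);
	}

	int main(void)
	{
		return rfkill_set(0, HW_RF_KILL_SW);	/* 0: radio enabled */
	}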
@@ -767,9 +775,11 @@ void iwl_pcie_apm_config(struct iwl_trans *trans);
 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
 bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
+void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+				       bool was_in_rfkill);
 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
 int iwl_queue_space(const struct iwl_txq *q);
-int iwl_pcie_apm_stop_master(struct iwl_trans *trans);
+void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 		      int slots_num, bool cmd_queue);
@@ -845,14 +845,14 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
 	 * Default queue is 0
 	 */
-	iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
-			       (DEFAULT_RXQ_NUM <<
-				RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
+			       RFH_GEN_CFG_RFH_DMA_SNOOP |
+			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
 			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
-			       (trans->cfg->integrated ?
+			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
+					       trans->cfg->integrated ?
 					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
-				RFH_GEN_CFG_RB_CHUNK_SIZE_128) <<
-			       RFH_GEN_CFG_RB_CHUNK_SIZE_POS);
+					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));

 	/* Enable the relevant rx queues */
 	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
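RFH_GEN_CFG_VAL() folds the hand-written "shift into position, then mask" dance into one helper, much like a field-prep macro: given a field mask, shift the value to the mask's lowest set bit. A minimal model of that idea (illustrative; __builtin_ctz is a GCC/Clang builtin, and the real macro operates on the RFH_GEN_CFG_* masks):

	#include <stdint.h>
	#include <stdio.h>

	#define FIELD_PREP_SKETCH(mask, val) \
		(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

	int main(void)
	{
		/* e.g. a 3-bit field occupying bits 20..22 of a register */
		uint32_t reg = FIELD_PREP_SKETCH(0x00700000u, 5);

		printf("0x%08x\n", reg);	/* prints 0x00500000 */
		return 0;
	}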
@@ -1413,18 +1413,16 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 		return;
 	}

-	local_bh_disable();
-	/* The STATUS_FW_ERROR bit is set in this function. This must happen
-	 * before we wake up the command caller, to ensure a proper cleanup. */
-	iwl_trans_fw_error(trans);
-	local_bh_enable();
-
 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
 		if (!trans_pcie->txq[i])
 			continue;
 		del_timer(&trans_pcie->txq[i]->stuck_timer);
 	}

+	/* The STATUS_FW_ERROR bit is set in this function. This must happen
+	 * before we wake up the command caller, to ensure a proper cleanup. */
+	iwl_trans_fw_error(trans);
+
 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 	wake_up(&trans_pcie->wait_command_queue);
 }
@@ -1509,6 +1507,46 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
 	return inta;
 }

+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+	bool hw_rfkill, prev, report;
+
+	mutex_lock(&trans_pcie->mutex);
+	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill) {
+		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+		set_bit(STATUS_RFKILL_HW, &trans->status);
+	}
+	if (trans_pcie->opmode_down)
+		report = hw_rfkill;
+	else
+		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+		 hw_rfkill ? "disable radio" : "enable radio");
+
+	isr_stats->rfkill++;
+
+	if (prev != report)
+		iwl_trans_pcie_rf_kill(trans, report);
+	mutex_unlock(&trans_pcie->mutex);
+
+	if (hw_rfkill) {
+		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+				       &trans->status))
+			IWL_DEBUG_RF_KILL(trans,
+					  "Rfkill while SYNC HCMD in flight\n");
+		wake_up(&trans_pcie->wait_command_queue);
+	} else {
+		clear_bit(STATUS_RFKILL_HW, &trans->status);
+		if (trans_pcie->opmode_down)
+			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	}
+}
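Splitting the old single STATUS_RFKILL into STATUS_RFKILL_HW and STATUS_RFKILL_OPMODE lets the op_mode keep seeing "rfkill" even after the physical switch is released, until opmode_down says nobody is using the device. A condensed user-space model of just the reporting decision (locking, stats and wake-ups elided; illustrative only):

	#include <stdio.h>

	struct rfkill_state {
		int hw;		/* STATUS_RFKILL_HW */
		int opmode;	/* STATUS_RFKILL_OPMODE */
		int opmode_down;
	};

	/* returns 1 if the op_mode must be notified with *report */
	static int rfkill_irq(struct rfkill_state *s, int hw_rfkill, int *report)
	{
		int prev = s->opmode;

		if (hw_rfkill)
			s->hw = s->opmode = 1;

		*report = s->opmode_down ? hw_rfkill : s->opmode;

		if (!hw_rfkill) {
			s->hw = 0;
			if (s->opmode_down)
				s->opmode = 0;	/* only drop when op_mode is gone */
		}
		return prev != *report;
	}

	int main(void)
	{
		struct rfkill_state s = { 0, 1, 0 };	/* opmode state still held */
		int report;

		/* switch released while op_mode is up: no notification */
		printf("%d\n", rfkill_irq(&s, 0, &report));	/* prints 0 */
		return 0;
	}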
 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 {
 	struct iwl_trans *trans = dev_id;
@@ -1632,30 +1670,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)

 	/* HW RF KILL switch toggled */
 	if (inta & CSR_INT_BIT_RF_KILL) {
-		bool hw_rfkill;
-
-		mutex_lock(&trans_pcie->mutex);
-		hw_rfkill = iwl_is_rfkill_set(trans);
-		if (hw_rfkill)
-			set_bit(STATUS_RFKILL, &trans->status);
-
-		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
-			 hw_rfkill ? "disable radio" : "enable radio");
-
-		isr_stats->rfkill++;
-
-		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-		mutex_unlock(&trans_pcie->mutex);
-		if (hw_rfkill) {
-			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
-					       &trans->status))
-				IWL_DEBUG_RF_KILL(trans,
-						  "Rfkill while SYNC HCMD in flight\n");
-			wake_up(&trans_pcie->wait_command_queue);
-		} else {
-			clear_bit(STATUS_RFKILL, &trans->status);
-		}
-
+		iwl_pcie_handle_rfkill_irq(trans);
 		handled |= CSR_INT_BIT_RF_KILL;
 	}
@@ -1982,31 +1997,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 	}

 	/* HW RF KILL switch toggled */
-	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
-		bool hw_rfkill;
-
-		mutex_lock(&trans_pcie->mutex);
-		hw_rfkill = iwl_is_rfkill_set(trans);
-		if (hw_rfkill)
-			set_bit(STATUS_RFKILL, &trans->status);
-
-		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
-			 hw_rfkill ? "disable radio" : "enable radio");
-
-		isr_stats->rfkill++;
-
-		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-		mutex_unlock(&trans_pcie->mutex);
-		if (hw_rfkill) {
-			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
-					       &trans->status))
-				IWL_DEBUG_RF_KILL(trans,
-						  "Rfkill while SYNC HCMD in flight\n");
-			wake_up(&trans_pcie->wait_command_queue);
-		} else {
-			clear_bit(STATUS_RFKILL, &trans->status);
-		}
-	}
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
+		iwl_pcie_handle_rfkill_irq(trans);

 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
 		IWL_ERR(trans,
@@ -150,7 +150,6 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
 void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	bool hw_rfkill, was_hw_rfkill;

 	lockdep_assert_held(&trans_pcie->mutex);

@@ -159,8 +158,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 	trans_pcie->is_down = true;

-	was_hw_rfkill = iwl_is_rfkill_set(trans);
-
 	/* tell the device to stop sending interrupts */
 	iwl_disable_interrupts(trans);

@@ -217,7 +214,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 	clear_bit(STATUS_INT_ENABLED, &trans->status);
 	clear_bit(STATUS_TPOWER_PMI, &trans->status);
-	clear_bit(STATUS_RFKILL, &trans->status);

 	/*
 	 * Even if we stop the HW, we still want the RF kill
@@ -225,26 +221,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 	 */
 	iwl_enable_rfkill_int(trans);

-	/*
-	 * Check again since the RF kill state may have changed while
-	 * all the interrupts were disabled, in this case we couldn't
-	 * receive the RF kill interrupt and update the state in the
-	 * op_mode.
-	 * Don't call the op_mode if the rkfill state hasn't changed.
-	 * This allows the op_mode to call stop_device from the rfkill
-	 * notification without endless recursion. Under very rare
-	 * circumstances, we might have a small recursion if the rfkill
-	 * state changed exactly now while we were called from stop_device.
-	 * This is very unlikely but can happen and is supported.
-	 */
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-	if (hw_rfkill != was_hw_rfkill)
-		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-
 	/* re-take ownership to prevent other users from stealing the device */
 	iwl_pcie_prepare_card_hw(trans);
 }

@@ -252,9 +228,13 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool was_in_rfkill;

 	mutex_lock(&trans_pcie->mutex);
+	trans_pcie->opmode_down = true;
+	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
 	_iwl_trans_pcie_gen2_stop_device(trans, low_power);
+	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
 	mutex_unlock(&trans_pcie->mutex);
 }
@@ -224,7 +224,7 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
 	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
 	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
-	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
+	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
 		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
 		 trans->ltr_enabled ? "En" : "Dis");
 }
@@ -448,9 +448,9 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
 			     ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
 }

-int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 {
-	int ret = 0;
+	int ret;

 	/* stop device's busmaster DMA activity */
 	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
@@ -462,8 +462,6 @@ int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

 	IWL_DEBUG_INFO(trans, "stop master\n");
-
-	return ret;
 }

 static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
@@ -996,14 +994,24 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
 bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill = iwl_is_rfkill_set(trans);
+	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	bool report;

-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
+	if (hw_rfkill) {
+		set_bit(STATUS_RFKILL_HW, &trans->status);
+		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	} else {
+		clear_bit(STATUS_RFKILL_HW, &trans->status);
+		if (trans_pcie->opmode_down)
+			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	}

-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+	if (prev != report)
+		iwl_trans_pcie_rf_kill(trans, report);

 	return hw_rfkill;
 }
@@ -1128,7 +1136,6 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
 static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	bool hw_rfkill, was_hw_rfkill;

 	lockdep_assert_held(&trans_pcie->mutex);

@@ -1137,8 +1144,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 	trans_pcie->is_down = true;

-	was_hw_rfkill = iwl_is_rfkill_set(trans);
-
 	/* tell the device to stop sending interrupts */
 	iwl_disable_interrupts(trans);

@@ -1199,7 +1204,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 	clear_bit(STATUS_INT_ENABLED, &trans->status);
 	clear_bit(STATUS_TPOWER_PMI, &trans->status);
-	clear_bit(STATUS_RFKILL, &trans->status);

 	/*
 	 * Even if we stop the HW, we still want the RF kill
@@ -1207,26 +1211,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 	 */
 	iwl_enable_rfkill_int(trans);

-	/*
-	 * Check again since the RF kill state may have changed while
-	 * all the interrupts were disabled, in this case we couldn't
-	 * receive the RF kill interrupt and update the state in the
-	 * op_mode.
-	 * Don't call the op_mode if the rkfill state hasn't changed.
-	 * This allows the op_mode to call stop_device from the rfkill
-	 * notification without endless recursion. Under very rare
-	 * circumstances, we might have a small recursion if the rfkill
-	 * state changed exactly now while we were called from stop_device.
-	 * This is very unlikely but can happen and is supported.
-	 */
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-	if (hw_rfkill != was_hw_rfkill)
-		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-
 	/* re-take ownership to prevent other users from stealing the device */
 	iwl_pcie_prepare_card_hw(trans);
 }
@@ -1339,12 +1323,45 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 	iwl_pcie_tx_start(trans, scd_addr);
 }

+void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+				       bool was_in_rfkill)
+{
+	bool hw_rfkill;
+
+	/*
+	 * Check again since the RF kill state may have changed while
+	 * all the interrupts were disabled, in this case we couldn't
+	 * receive the RF kill interrupt and update the state in the
+	 * op_mode.
+	 * Don't call the op_mode if the rkfill state hasn't changed.
+	 * This allows the op_mode to call stop_device from the rfkill
+	 * notification without endless recursion. Under very rare
+	 * circumstances, we might have a small recursion if the rfkill
+	 * state changed exactly now while we were called from stop_device.
+	 * This is very unlikely but can happen and is supported.
+	 */
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill) {
+		set_bit(STATUS_RFKILL_HW, &trans->status);
+		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	} else {
+		clear_bit(STATUS_RFKILL_HW, &trans->status);
+		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	}
+	if (hw_rfkill != was_in_rfkill)
+		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool was_in_rfkill;

 	mutex_lock(&trans_pcie->mutex);
+	trans_pcie->opmode_down = true;
+	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
 	_iwl_trans_pcie_stop_device(trans, low_power);
+	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
 	mutex_unlock(&trans_pcie->mutex);
 }

@@ -1355,6 +1372,8 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)

 	lockdep_assert_held(&trans_pcie->mutex);

+	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
+		 state ? "disabled" : "enabled");
+
 	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
 		if (trans->cfg->gen2)
 			_iwl_trans_pcie_gen2_stop_device(trans, true);
@@ -1646,6 +1665,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 	/* From now on, the op_mode will be kept updated about RF kill state */
 	iwl_enable_rfkill_int(trans);

+	trans_pcie->opmode_down = false;
+
 	/* Set is_down to false here so that...*/
 	trans_pcie->is_down = false;

@@ -2405,17 +2426,12 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
 	struct iwl_trans *trans = file->private_data;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
-	char buf[8];
-	int buf_size;
 	u32 reset_flag;
+	int ret;

-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%x", &reset_flag) != 1)
-		return -EFAULT;
+	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
+	if (ret)
+		return ret;
 	if (reset_flag == 0)
 		memset(isr_stats, 0, sizeof(*isr_stats));

@@ -2427,16 +2443,6 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
 				   size_t count, loff_t *ppos)
 {
 	struct iwl_trans *trans = file->private_data;
-	char buf[8];
-	int buf_size;
-	int csr;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &csr) != 1)
-		return -EFAULT;

 	iwl_pcie_dump_csr(trans);

@@ -2461,11 +2467,50 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
 	return ret;
 }

+static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	char buf[100];
+	int pos;
+
+	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
+			trans_pcie->debug_rfkill,
+			!(iwl_read32(trans, CSR_GP_CNTRL) &
+			  CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool old = trans_pcie->debug_rfkill;
+	int ret;
+
+	ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill);
+	if (ret)
+		return ret;
+	if (old == trans_pcie->debug_rfkill)
+		return count;
+	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
+		 old, trans_pcie->debug_rfkill);
+	iwl_pcie_handle_rfkill_irq(trans);
+
+	return count;
+}
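Writes to the new debugfs entry are parsed as a bool and only propagate when the value actually flips, re-running the rfkill IRQ logic so the override takes effect immediately. A rough model of that write path (parsing, locking and the warning elided; illustrative only):

	#include <stdio.h>

	static int handle_count;

	static void handle_rfkill_irq(void)
	{
		handle_count++;		/* stand-in for the real IRQ handler */
	}

	static long rfkill_write(int *debug_rfkill, int new_val, long count)
	{
		if (*debug_rfkill == new_val)
			return count;	/* no transition, nothing to re-evaluate */
		*debug_rfkill = new_val;
		handle_rfkill_irq();
		return count;
	}

	int main(void)
	{
		int dbg = 0;

		rfkill_write(&dbg, 1, 2);
		rfkill_write(&dbg, 1, 2);	/* second write is a no-op */
		printf("%d\n", handle_count);	/* prints 1 */
		return 0;
	}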
 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
 DEBUGFS_READ_FILE_OPS(fh_reg);
 DEBUGFS_READ_FILE_OPS(rx_queue);
 DEBUGFS_READ_FILE_OPS(tx_queue);
 DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_READ_WRITE_FILE_OPS(rfkill);

 /* Create the debugfs files and directories */
 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
@@ -2477,6 +2522,7 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
 	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
 	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
 	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+	DEBUGFS_ADD_FILE(rfkill, dir, S_IWUSR | S_IRUSR);

 	return 0;
 err:
@@ -2965,6 +3011,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	trans_pcie->trans = trans;
+	trans_pcie->opmode_down = true;
 	spin_lock_init(&trans_pcie->irq_lock);
 	spin_lock_init(&trans_pcie->reg_lock);
 	mutex_init(&trans_pcie->mutex);
@@ -3087,6 +3134,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}

+	/*
+	 * 9000-series integrated A-step has a problem with suspend/resume
+	 * and sometimes even causes the whole platform to get stuck. This
+	 * workaround makes the hardware not go into the problematic state.
+	 */
+	if (trans->cfg->integrated &&
+	    trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+	    CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)
+		iwl_set_bit(trans, CSR_HOST_CHICKEN,
+			    CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME);
+
 	trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);

 	iwl_pcie_set_interrupt_capa(pdev, trans);
@@ -249,7 +249,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 			IEEE80211_CCMP_HDR_LEN : 0;

 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
-			     &dev_cmd->hdr, start_len, NULL, 0);
+			     &dev_cmd->hdr, start_len, 0);

 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
@@ -467,10 +467,8 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 	}

 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
-			     IWL_FIRST_TB_SIZE + tb1_len,
-			     skb->data + hdr_len, tb2_len);
-	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len,
-				  skb->len - hdr_len);
+			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);

 	return tfd;
@@ -863,7 +861,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
 	}

 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-	    test_bit(STATUS_RFKILL, &trans->status)) {
+	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
 		ret = -ERFKILL;
 		goto cancel;
@@ -900,7 +898,7 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
 			      struct iwl_host_cmd *cmd)
 {
 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-	    test_bit(STATUS_RFKILL, &trans->status)) {
+	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
 				  cmd->id);
 		return -ERFKILL;
@@ -1076,7 +1074,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 	rsp = (void *)hcmd.resp_pkt->data;
 	qid = le16_to_cpu(rsp->queue_number);

-	if (qid > ARRAY_SIZE(trans_pcie->txq)) {
+	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
 		WARN_ONCE(1, "queue index %d unsupported", qid);
 		ret = -EIO;
 		goto error_free_resp;
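The one-character change above fixes a classic off-by-one: a queue id equal to ARRAY_SIZE(txq) already indexes one past the end, so the rejection test must be ">=", not ">". Stand-alone illustration:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	int main(void)
	{
		void *txq[32] = { 0 };
		unsigned qid = 32;	/* first invalid index */

		/* old test: 32 > 32 is false, so qid 32 slipped through */
		printf("old accepts: %d, new rejects: %d\n",
		       !(qid > ARRAY_SIZE(txq)), qid >= ARRAY_SIZE(txq));
		return 0;	/* prints: old accepts: 1, new rejects: 1 */
	}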
@@ -1277,13 +1277,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
  * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
 #define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 			       const struct iwl_trans_txq_scd_cfg *cfg,
 			       unsigned int wdg_timeout)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
 	int fifo = -1;
+	bool scd_bug = false;

 	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
@@ -1324,6 +1325,23 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 			ssn = txq->read_ptr;
 		}
+	} else {
+		/*
+		 * If we need to move the SCD write pointer by steps of
+		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let
+		 * the op_mode know by returning true later.
+		 * Do this only in case cfg is NULL since this trick can
+		 * be done only if we have DQA enabled which is true for mvm
+		 * only. And mvm never sets a cfg pointer.
+		 * This is really ugly, but this is the easiest way out for
+		 * this sad hardware issue.
+		 * This bug has been fixed on devices 9000 and up.
+		 */
+		scd_bug = !trans->cfg->mq_rx_supported &&
+			!((ssn - txq->write_ptr) & 0x3f) &&
+			(ssn != txq->write_ptr);
+		if (scd_bug)
+			ssn++;
 	}
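The erratum test reduces to the distance the write pointer would move: a non-zero multiple of 0x40 TFDs wedges the scheduler, so the driver nudges the SSN by one and reports it upward (that report is the inc_ssn plumbing seen earlier in this series). Stand-alone restatement (illustrative; the driver compares the raw values, here the difference is masked to the SN width for clarity):

	#include <stdint.h>
	#include <stdio.h>

	static int scd_bug(uint16_t ssn, uint16_t write_ptr, int mq_rx)
	{
		uint16_t diff = (ssn - write_ptr) & 0xfff;

		/* non-zero move that is a multiple of 0x40, on affected HW only */
		return !mq_rx && diff && !(diff & 0x3f);
	}

	int main(void)
	{
		printf("%d %d %d\n", scd_bug(0x40, 0, 0),
		       scd_bug(0x41, 0, 0), scd_bug(0, 0, 0));	/* 1 0 0 */
		return 0;
	}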
 	/* Place first TFD at index corresponding to start sequence number.
@@ -1344,10 +1362,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 		iwl_trans_write_mem32(trans,
 			trans_pcie->scd_base_addr +
 			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
-			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
-			 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
-			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-			 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
+			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

 		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
 		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
@@ -1369,6 +1385,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 				    "Activate queue %d WrPtr: %d\n",
 				    txq_id, ssn & 0xff);
 	}
+
+	return scd_bug;
 }

 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
@@ -1708,7 +1726,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-	u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id);
+	u8 group_id;
 	u32 cmd_id;
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -1734,6 +1752,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	cmd_index = get_cmd_index(txq, index);
 	cmd = txq->entries[cmd_index].cmd;
 	meta = &txq->entries[cmd_index].meta;
+	group_id = cmd->hdr.group_id;
 	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

 	iwl_pcie_tfd_unmap(trans, meta, txq, index);
@@ -1876,7 +1895,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 	}

 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-	    test_bit(STATUS_RFKILL, &trans->status)) {
+	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
 		ret = -ERFKILL;
 		goto cancel;
@@ -1913,7 +1932,7 @@ cancel:
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-	    test_bit(STATUS_RFKILL, &trans->status)) {
+	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
 				  cmd->id);
 		return -ERFKILL;
@@ -1980,9 +1999,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
 			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
-			     skb->data + hdr_len, tb2_len);
-	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  hdr_len, skb->len - hdr_len);
+			     hdr_len);
+	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);

 	return 0;
 }
@@ -2054,8 +2072,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	trace_iwlwifi_dev_tx(trans->dev, skb,
 			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
 			     trans_pcie->tfd_size,
-			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
-			     NULL, 0);
+			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);