commit bf4c69f7dd
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
@@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
iwl_trans_wait_tx_queue_empty(priv->trans);
iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
ieee80211_wake_queues(priv->hw);
mutex_unlock(&priv->mutex);
@@ -1119,7 +1119,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}
}
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
iwl_trans_wait_tx_queue_empty(priv->trans);
iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2053,6 +2053,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
return false;
}

static void iwl_napi_add(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
}

static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
@@ -2065,6 +2076,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.cmd_queue_full = iwl_cmd_queue_full,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
.napi_add = iwl_napi_add,
};

/*****************************************************************************
@@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 128,
.scd_chain_ext_wa = true,
};

static const struct iwl_ht_params iwl1000_ht_params = {
@@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
@@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};

static const struct iwl_ht_params iwl2000_ht_params = {
@@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
.scd_chain_ext_wa = true,
};

static const struct iwl_ht_params iwl5000_ht_params = {
@@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};

static const struct iwl_base_params iwl6050_base_params = {
@@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};

static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};

static const struct iwl_ht_params iwl6000_ht_params = {
@@ -71,12 +71,12 @@
#define IWL3160_UCODE_API_MAX 9

/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 8
#define IWL3160_UCODE_API_OK 8
#define IWL7260_UCODE_API_OK 9
#define IWL3160_UCODE_API_OK 9

/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 7
#define IWL3160_UCODE_API_MIN 7
#define IWL7260_UCODE_API_MIN 8
#define IWL3160_UCODE_API_MIN 8

/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
.apmg_wake_up_wa = true,
};

static const struct iwl_ht_params iwl7000_ht_params = {
@@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register support
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
*/
struct iwl_base_params {
int eeprom_size;
@@ -160,6 +163,8 @@ struct iwl_base_params {
u32 max_event_log_size;
const bool shadow_reg_enable;
const bool pcie_l1_allowed;
const bool apmg_wake_up_wa;
const bool scd_chain_ext_wa;
};

/*
@@ -77,26 +77,21 @@
* @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
* @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
* @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
* @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
* @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
* from the probe request template.
* @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
* connection when going back to D0
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
* @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
* @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
* containing CAM (Continuous Active Mode) indication.
* @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -104,22 +99,15 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = BIT(6),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
@@ -183,6 +171,7 @@ enum iwl_ucode_sec {
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC

struct iwl_ucode_capabilities {
u32 max_probe_length;
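
Note: the TLV flags above form a capability bitmask advertised by the firmware image, and driver code gates optional features by testing individual bits. The following standalone sketch (not part of the commit) shows that pattern; the two flag macros and the ucode_flags value are illustrative stand-ins, not the driver's real definitions.

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Illustrative subset; the real values live in the enum above. */
#define TLV_FLAGS_NEWBT_COEX  BIT(5)
#define TLV_FLAGS_EBS_SUPPORT BIT(25)

int main(void)
{
	unsigned int ucode_flags = TLV_FLAGS_NEWBT_COEX; /* pretend fw capabilities */

	if (ucode_flags & TLV_FLAGS_NEWBT_COEX)
		printf("new BT coex API available\n");
	if (!(ucode_flags & TLV_FLAGS_EBS_SUPPORT))
		printf("EBS not supported by this image\n");
	return 0;
}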
@@ -134,12 +134,13 @@ static const u8 iwl_nvm_channels_family_8000[] = {
149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
#define NUM_2GHZ_CHANNELS 14
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
#define LAST_5GHZ_HT 161
#define NUM_2GHZ_CHANNELS 14
#define NUM_2GHZ_CHANNELS_FAMILY_8000 13
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
#define LAST_5GHZ_HT 161

#define DEFAULT_MAX_TX_POWER 16
@@ -202,21 +203,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_channel *channel;
u16 ch_flags;
bool is_5ghz;
int num_of_ch;
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;

if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
num_of_ch = IWL_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS;
} else {
num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
nvm_chan = &iwl_nvm_channels_family_8000[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
}

for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);

if (ch_idx >= NUM_2GHZ_CHANNELS &&
if (ch_idx >= num_2ghz_channels &&
!data->sku_cap_band_52GHz_enable)
ch_flags &= ~NVM_CHANNEL_VALID;
@@ -225,7 +228,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
nvm_chan[ch_idx],
ch_flags,
(ch_idx >= NUM_2GHZ_CHANNELS) ?
(ch_idx >= num_2ghz_channels) ?
"5.2" : "2.4");
continue;
}
@@ -234,7 +237,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;

channel->hw_value = nvm_chan[ch_idx];
channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
channel->band = (ch_idx < num_2ghz_channels) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
@@ -242,7 +245,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,

/* TODO: Need to be dependent to the NVM */
channel->flags = IEEE80211_CHAN_NO_HT40;
if (ch_idx < NUM_2GHZ_CHANNELS &&
if (ch_idx < num_2ghz_channels &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -250,7 +253,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
} else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
if ((ch_idx - num_2ghz_channels) % 2 == 0)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
else
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
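
Note: the hunks above replace the fixed NUM_2GHZ_CHANNELS constant with a per-family num_2ghz_channels value (14 for the older NVM layout, 13 for family 8000), which drives the 2.4 GHz / 5 GHz split. A minimal standalone sketch (not part of the commit) of that band decision; the values and names here are stand-ins, not the driver's real NVM tables.

#include <stdio.h>

/* Hypothetical stand-ins for the per-family counts used in the hunk above. */
#define NUM_2GHZ_CHANNELS             14 /* pre-8000 families */
#define NUM_2GHZ_CHANNELS_FAMILY_8000 13 /* family 8000 */

static const char *band_for_index(int ch_idx, int num_2ghz_channels)
{
	/* Mirrors the diff: indices below the 2.4 GHz count map to 2.4 GHz,
	 * everything at or beyond it is treated as 5 GHz. */
	return (ch_idx < num_2ghz_channels) ? "2.4 GHz" : "5 GHz";
}

int main(void)
{
	printf("idx 12, family 8000: %s\n",
	       band_for_index(12, NUM_2GHZ_CHANNELS_FAMILY_8000));
	printf("idx 13, family 8000: %s\n",
	       band_for_index(13, NUM_2GHZ_CHANNELS_FAMILY_8000));
	printf("idx 13, older NVM:   %s\n",
	       band_for_index(13, NUM_2GHZ_CHANNELS));
	return 0;
}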
@@ -63,6 +63,7 @@
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__

#include <linux/netdevice.h>
#include <linux/debugfs.h>

struct iwl_op_mode;
@@ -112,8 +113,11 @@ struct iwl_cfg;
* @stop: stop the op_mode. Must free all the memory allocated.
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to.
* This callback may sleep, it is called from a threaded IRQ handler.
* HCMD this Rx responds to. Can't sleep.
* @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
* but the higher layers need to know about it (in particular mac80211 to
* to able to call the right NAPI RX functions); this function is needed
* to eventually call netif_napi_add() with higher layer involvement.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
@@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
void (*stop)(struct iwl_op_mode *op_mode);
int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void (*napi_add)(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}

@@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
return op_mode->ops->exit_d0i3(op_mode);
}

static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight)
{
if (!op_mode->ops->napi_add)
return;
op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
}

#endif /* __iwl_op_mode_h__ */
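
Note: iwl_op_mode_napi_add() above only forwards the call when the op_mode actually implements .napi_add, so op_modes that do not care about NAPI need no stub. A standalone sketch (not part of the commit) of that optional-callback pattern; the names here are illustrative, not taken from the driver.

#include <stdio.h>

struct ops {
	void (*optional_hook)(int arg); /* may be left NULL */
};

static void call_optional_hook(const struct ops *ops, int arg)
{
	/* Same guard as the inline wrapper in the hunk above:
	 * silently skip the call when the hook is not provided. */
	if (!ops->optional_hook)
		return;
	ops->optional_hook(arg);
}

static void my_hook(int arg)
{
	printf("hook called with %d\n", arg);
}

int main(void)
{
	struct ops with_hook = { .optional_hook = my_hook };
	struct ops without_hook = { 0 };

	call_optional_hook(&with_hook, 42);    /* prints */
	call_optional_hook(&without_hook, 42); /* safely does nothing */
	return 0;
}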
@@ -348,4 +348,12 @@ enum secure_load_status_reg {

#define LMPM_SECURE_TIME_OUT (100)

/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
#define RXF_SIZE_BYTE_CND_POS (7)
#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)

#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)

#endif /* __iwl_prph_h__ */
@@ -437,8 +437,7 @@ struct iwl_trans;
* this one. The op_mode must not configure the HCMD queue. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @wait_tx_queue_empty: wait until all tx queues are empty
* May sleep
* @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @write8: write a u8 to a register at offset ofs from the BAR
@@ -490,7 +489,7 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue);

int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);

void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -759,12 +758,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
}

static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
u32 txq_bm)
{
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);

return trans->ops->wait_tx_queue_empty(trans);
return trans->ops->wait_tx_queue_empty(trans, txq_bm);
}

static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
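
Note: wait_tx_queue_empty() now takes a u32 queue bitmap instead of implicitly waiting on every queue; the DVM call sites earlier in this commit pass 0xffffffff to keep the old flush-everything behaviour. A standalone sketch (not part of the commit) of how such a bitmap selects queues; the stub below and its queue numbers are made up for illustration.

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Wait-for-empty stub: only reports which queues the bitmap selects. */
static void wait_tx_queues_empty(unsigned int txq_bm)
{
	for (unsigned int q = 0; q < 32; q++)
		if (txq_bm & BIT(q))
			printf("would wait on queue %u\n", q);
}

int main(void)
{
	wait_tx_queues_empty(BIT(0) | BIT(5)); /* only queues 0 and 5 */
	wait_tx_queues_empty(0xffffffff);      /* all queues, as the DVM code does */
	return 0;
}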
@ -104,11 +104,8 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
|
||||
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
|
||||
#define BT_ANTENNA_COUPLING_THRESHOLD (30)
|
||||
|
||||
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
|
||||
static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
|
||||
{
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
|
||||
return 0;
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
|
||||
sizeof(struct iwl_bt_coex_prio_tbl_cmd),
|
||||
&iwl_bt_prio_tbl);
|
||||
@ -573,8 +570,9 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
|
||||
int ret;
|
||||
u32 flags;
|
||||
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
|
||||
return 0;
|
||||
ret = iwl_send_bt_prio_tbl(mvm);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
|
||||
if (!bt_cmd)
|
||||
@ -582,10 +580,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
|
||||
cmd.data[0] = bt_cmd;
|
||||
|
||||
bt_cmd->max_kill = 5;
|
||||
bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
|
||||
bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
|
||||
bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
|
||||
bt_cmd->bt4_tx_rx_max_freq0 = 15,
|
||||
bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
|
||||
bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
|
||||
bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
|
||||
bt_cmd->bt4_tx_rx_max_freq0 = 15;
|
||||
bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
|
||||
bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
|
||||
|
||||
flags = iwlwifi_mod_params.bt_coex_active ?
|
||||
BT_COEX_NW : BT_COEX_DISABLE;
|
||||
@ -1215,6 +1215,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
|
||||
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
|
||||
}
|
||||
|
||||
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
|
||||
enum ieee80211_band band)
|
||||
{
|
||||
u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
|
||||
|
||||
if (band != IEEE80211_BAND_2GHZ)
|
||||
return false;
|
||||
|
||||
return bt_activity >= BT_LOW_TRAFFIC;
|
||||
}
|
||||
|
||||
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
|
||||
struct ieee80211_tx_info *info, u8 ac)
|
||||
{
|
||||
@ -1249,9 +1260,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
|
||||
|
||||
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
|
||||
{
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
|
||||
return;
|
||||
|
||||
iwl_mvm_bt_coex_notif_handle(mvm);
|
||||
}
|
||||
|
||||
|
@ -744,10 +744,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
|
||||
int err;
|
||||
u32 size;
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
|
||||
cmd.data[0] = &query_cmd;
|
||||
cmd.len[0] = sizeof(query_cmd);
|
||||
}
|
||||
cmd.data[0] = &query_cmd;
|
||||
cmd.len[0] = sizeof(query_cmd);
|
||||
|
||||
err = iwl_mvm_send_cmd(mvm, &cmd);
|
||||
if (err)
|
||||
@ -758,10 +756,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
|
||||
err = -EINVAL;
|
||||
} else {
|
||||
err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
|
||||
/* new API returns next, not last-used seqno */
|
||||
if (mvm->fw->ucode_capa.flags &
|
||||
IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
|
||||
err = (u16) (err - 0x10);
|
||||
/* firmware returns next, not last-used seqno */
|
||||
err = (u16) (err - 0x10);
|
||||
}
|
||||
|
||||
iwl_free_resp(&cmd);
|
||||
@ -785,10 +781,6 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
|
||||
mvmvif->seqno_valid = false;
|
||||
|
||||
if (!(mvm->fw->ucode_capa.flags &
|
||||
IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
|
||||
return;
|
||||
|
||||
if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
|
||||
sizeof(query_cmd), &query_cmd))
|
||||
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
|
||||
@ -1082,6 +1074,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
|
||||
|
||||
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
|
||||
if (iwl_mvm_is_d0i3_supported(mvm)) {
|
||||
mutex_lock(&mvm->d0i3_suspend_mutex);
|
||||
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
|
||||
mutex_unlock(&mvm->d0i3_suspend_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return __iwl_mvm_suspend(hw, wowlan, false);
|
||||
}
|
||||
|
||||
@ -1277,7 +1278,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
|
||||
}
|
||||
|
||||
static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
|
||||
struct iwl_wowlan_status_v6 *status)
|
||||
struct iwl_wowlan_status *status)
|
||||
{
|
||||
union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
|
||||
|
||||
@ -1294,7 +1295,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
|
||||
}
|
||||
|
||||
struct iwl_mvm_d3_gtk_iter_data {
|
||||
struct iwl_wowlan_status_v6 *status;
|
||||
struct iwl_wowlan_status *status;
|
||||
void *last_gtk;
|
||||
u32 cipher;
|
||||
bool find_phase, unhandled_cipher;
|
||||
@ -1370,7 +1371,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
|
||||
|
||||
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct iwl_wowlan_status_v6 *status)
|
||||
struct iwl_wowlan_status *status)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
|
||||
@ -1468,7 +1469,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
|
||||
.flags = CMD_SYNC | CMD_WANT_SKB,
|
||||
};
|
||||
struct iwl_wowlan_status_data status;
|
||||
struct iwl_wowlan_status_v6 *status_v6;
|
||||
struct iwl_wowlan_status *fw_status;
|
||||
int ret, len, status_size, i;
|
||||
bool keep;
|
||||
struct ieee80211_sta *ap_sta;
|
||||
@ -1505,10 +1506,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
|
||||
if (!cmd.resp_pkt)
|
||||
goto out_unlock;
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
|
||||
status_size = sizeof(struct iwl_wowlan_status_v6);
|
||||
else
|
||||
status_size = sizeof(struct iwl_wowlan_status_v4);
|
||||
status_size = sizeof(*fw_status);
|
||||
|
||||
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
|
||||
if (len < status_size) {
|
||||
@ -1516,35 +1514,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
|
||||
goto out_free_resp;
|
||||
}
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
|
||||
status_v6 = (void *)cmd.resp_pkt->data;
|
||||
fw_status = (void *)cmd.resp_pkt->data;
|
||||
|
||||
status.pattern_number = le16_to_cpu(status_v6->pattern_number);
|
||||
for (i = 0; i < 8; i++)
|
||||
status.qos_seq_ctr[i] =
|
||||
le16_to_cpu(status_v6->qos_seq_ctr[i]);
|
||||
status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
|
||||
status.wake_packet_length =
|
||||
le32_to_cpu(status_v6->wake_packet_length);
|
||||
status.wake_packet_bufsize =
|
||||
le32_to_cpu(status_v6->wake_packet_bufsize);
|
||||
status.wake_packet = status_v6->wake_packet;
|
||||
} else {
|
||||
struct iwl_wowlan_status_v4 *status_v4;
|
||||
status_v6 = NULL;
|
||||
status_v4 = (void *)cmd.resp_pkt->data;
|
||||
|
||||
status.pattern_number = le16_to_cpu(status_v4->pattern_number);
|
||||
for (i = 0; i < 8; i++)
|
||||
status.qos_seq_ctr[i] =
|
||||
le16_to_cpu(status_v4->qos_seq_ctr[i]);
|
||||
status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
|
||||
status.wake_packet_length =
|
||||
le32_to_cpu(status_v4->wake_packet_length);
|
||||
status.wake_packet_bufsize =
|
||||
le32_to_cpu(status_v4->wake_packet_bufsize);
|
||||
status.wake_packet = status_v4->wake_packet;
|
||||
}
|
||||
status.pattern_number = le16_to_cpu(fw_status->pattern_number);
|
||||
for (i = 0; i < 8; i++)
|
||||
status.qos_seq_ctr[i] =
|
||||
le16_to_cpu(fw_status->qos_seq_ctr[i]);
|
||||
status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
|
||||
status.wake_packet_length =
|
||||
le32_to_cpu(fw_status->wake_packet_length);
|
||||
status.wake_packet_bufsize =
|
||||
le32_to_cpu(fw_status->wake_packet_bufsize);
|
||||
status.wake_packet = fw_status->wake_packet;
|
||||
|
||||
if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
|
||||
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
|
||||
@ -1571,7 +1552,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
|
||||
|
||||
iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
|
||||
|
||||
keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
|
||||
keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
|
||||
|
||||
iwl_free_resp(&cmd);
|
||||
return keep;
|
||||
@ -1674,6 +1655,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
|
||||
if (iwl_mvm_is_d0i3_supported(mvm)) {
|
||||
bool exit_now;
|
||||
|
||||
mutex_lock(&mvm->d0i3_suspend_mutex);
|
||||
__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
|
||||
exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
|
||||
&mvm->d0i3_suspend_flags);
|
||||
mutex_unlock(&mvm->d0i3_suspend_mutex);
|
||||
if (exit_now)
|
||||
_iwl_mvm_exit_d0i3(mvm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return __iwl_mvm_resume(mvm, false);
|
||||
}
|
||||
|
||||
|
@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
|
||||
IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
|
||||
dbgfs_pm->tx_data_timeout = val;
|
||||
break;
|
||||
case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
|
||||
IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
|
||||
dbgfs_pm->disable_power_off = val;
|
||||
break;
|
||||
case MVM_DEBUGFS_PM_LPRX_ENA:
|
||||
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
|
||||
dbgfs_pm->lprx_ena = val;
|
||||
@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
|
||||
if (sscanf(buf + 16, "%d", &val) != 1)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
|
||||
} else if (!strncmp("disable_power_off=", buf, 18) &&
|
||||
!(mvm->fw->ucode_capa.flags &
|
||||
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
|
||||
if (sscanf(buf + 18, "%d", &val) != 1)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
|
||||
} else if (!strncmp("lprx=", buf, 5)) {
|
||||
if (sscanf(buf + 5, "%d", &val) != 1)
|
||||
return -EINVAL;
|
||||
@ -592,8 +582,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
return;
|
||||
}
|
||||
|
||||
if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
|
||||
iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
|
||||
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
|
||||
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
|
||||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
|
||||
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
|
||||
|
@ -136,9 +136,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
|
||||
|
||||
file->private_data = mvm->fw_error_dump;
|
||||
mvm->fw_error_dump = NULL;
|
||||
kfree(mvm->fw_error_sram);
|
||||
mvm->fw_error_sram = NULL;
|
||||
mvm->fw_error_sram_len = 0;
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
@ -1004,6 +1001,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
|
||||
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
|
||||
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
|
||||
PRINT_MVM_REF(IWL_MVM_REF_USER);
|
||||
PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
}
|
||||
@ -1108,9 +1106,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
|
||||
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
|
||||
|
||||
static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
|
||||
.open = iwl_dbgfs_fw_error_dump_open,
|
||||
.read = iwl_dbgfs_fw_error_dump_read,
|
||||
.release = iwl_dbgfs_fw_error_dump_release,
|
||||
.open = iwl_dbgfs_fw_error_dump_open,
|
||||
.read = iwl_dbgfs_fw_error_dump_read,
|
||||
.release = iwl_dbgfs_fw_error_dump_release,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
|
||||
@ -1138,9 +1136,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
||||
MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
|
||||
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
|
||||
S_IRUSR | S_IWUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
|
||||
S_IRUSR | S_IWUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
|
||||
|
@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
|
||||
BT_COEX_TX_DIS_LUT,
|
||||
|
||||
BT_COEX_MAX_LUT,
|
||||
};
|
||||
BT_COEX_INVALID_LUT = 0xff,
|
||||
}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
|
||||
|
||||
#define BT_COEX_LUT_SIZE (12)
|
||||
#define BT_COEX_CORUN_LUT_SIZE (32)
|
||||
@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
|
||||
* @flags:&enum iwl_bt_coex_flags
|
||||
* @max_kill:
|
||||
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
|
||||
* @bt4_antenna_isolation:
|
||||
* @bt4_antenna_isolation_thr:
|
||||
* @bt4_tx_tx_delta_freq_thr:
|
||||
* @bt4_tx_rx_max_freq0:
|
||||
* @bt_prio_boost:
|
||||
* @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
|
||||
* should be set by default
|
||||
* @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
|
||||
* should be set by default
|
||||
* @bt4_antenna_isolation: antenna isolation
|
||||
* @bt4_antenna_isolation_thr: antenna threshold value
|
||||
* @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
|
||||
* @bt4_tx_rx_max_freq0: TxRx max frequency
|
||||
* @bt_prio_boost: BT priority boost registers
|
||||
* @wifi_tx_prio_boost: SW boost of wifi tx priority
|
||||
* @wifi_rx_prio_boost: SW boost of wifi rx priority
|
||||
* @kill_ack_msk:
|
||||
* @kill_cts_msk:
|
||||
* @decision_lut:
|
||||
* @bt4_multiprio_lut:
|
||||
* @bt4_corun_lut20:
|
||||
* @bt4_corun_lut40:
|
||||
* @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
|
||||
* @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
|
||||
* @decision_lut: PTA decision LUT, per Prio-Ch
|
||||
* @bt4_multiprio_lut: multi priority LUT configuration
|
||||
* @bt4_corun_lut20: co-running 20 MHz LUT configuration
|
||||
* @bt4_corun_lut40: co-running 40 MHz LUT configuration
|
||||
* @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
|
||||
*
|
||||
* The structure is used for the BT_COEX command.
|
||||
@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
|
||||
__le32 flags;
|
||||
u8 max_kill;
|
||||
u8 bt_reduced_tx_power;
|
||||
u8 reserved[2];
|
||||
u8 override_primary_lut;
|
||||
u8 override_secondary_lut;
|
||||
|
||||
u8 bt4_antenna_isolation;
|
||||
u8 bt4_antenna_isolation_thr;
|
||||
@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
|
||||
__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
|
||||
|
||||
__le32 valid_bit_msk;
|
||||
} __packed; /* BT_COEX_CMD_API_S_VER_3 */
|
||||
} __packed; /* BT_COEX_CMD_API_S_VER_5 */
|
||||
|
||||
/**
|
||||
* struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
|
||||
@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
|
||||
BT_ON_NO_CONNECTION = 1,
|
||||
BT_LOW_TRAFFIC = 2,
|
||||
BT_HIGH_TRAFFIC = 3,
|
||||
};
|
||||
}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_bt_coex_profile_notif - notification about BT coex
|
||||
@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
|
||||
__le32 primary_ch_lut;
|
||||
__le32 secondary_ch_lut;
|
||||
__le32 bt_activity_grading;
|
||||
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
|
||||
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
|
||||
|
||||
enum iwl_bt_coex_prio_table_event {
|
||||
BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
|
||||
|
@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
|
||||
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
|
||||
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
|
||||
|
||||
struct iwl_wowlan_status_v4 {
|
||||
__le64 replay_ctr;
|
||||
__le16 pattern_number;
|
||||
__le16 non_qos_seq_ctr;
|
||||
__le16 qos_seq_ctr[8];
|
||||
__le32 wakeup_reasons;
|
||||
__le32 rekey_status;
|
||||
__le32 num_of_gtk_rekeys;
|
||||
__le32 transmitted_ndps;
|
||||
__le32 received_beacons;
|
||||
__le32 wake_packet_length;
|
||||
__le32 wake_packet_bufsize;
|
||||
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
|
||||
} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
|
||||
|
||||
struct iwl_wowlan_gtk_status {
|
||||
u8 key_index;
|
||||
u8 reserved[3];
|
||||
@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
|
||||
} __packed;
|
||||
|
||||
struct iwl_wowlan_status_v6 {
|
||||
struct iwl_wowlan_status {
|
||||
struct iwl_wowlan_gtk_status gtk;
|
||||
__le64 replay_ctr;
|
||||
__le16 pattern_number;
|
||||
|
@ -334,7 +334,7 @@ enum {
|
||||
*/
|
||||
struct iwl_lq_cmd {
|
||||
u8 sta_id;
|
||||
u8 reserved1;
|
||||
u8 reduced_tpc;
|
||||
u16 control;
|
||||
/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
|
||||
u8 flags;
|
||||
|
@ -169,8 +169,12 @@ enum iwl_scan_type {
|
||||
SCAN_TYPE_DISCOVERY_FORCED = 6,
|
||||
}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
|
||||
|
||||
/* Maximal number of channels to scan */
|
||||
#define MAX_NUM_SCAN_CHANNELS 0x24
|
||||
/**
|
||||
* Maximal number of channels to scan
|
||||
* it should be equal to:
|
||||
* max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
|
||||
*/
|
||||
#define MAX_NUM_SCAN_CHANNELS 50
|
||||
|
||||
/**
|
||||
* struct iwl_scan_cmd - scan request command
|
||||
@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
|
||||
*
|
||||
* IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
|
||||
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
|
||||
* IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
|
||||
* on A band.
|
||||
* IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
|
||||
* beacon period. Finding channel activity in this mode is not guaranteed.
|
||||
* IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
|
||||
* Assuming beacon period is 100ms finding channel activity is guaranteed.
|
||||
*/
|
||||
enum iwl_scan_offload_flags {
|
||||
IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
|
||||
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
|
||||
IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
|
||||
IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
|
||||
IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
|
||||
};
|
||||
|
||||
/**
|
||||
@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
|
||||
IWL_SCAN_OFFLOAD_ABORTED = 2,
|
||||
};
|
||||
|
||||
enum iwl_scan_ebs_status {
|
||||
IWL_SCAN_EBS_SUCCESS,
|
||||
IWL_SCAN_EBS_FAILED,
|
||||
IWL_SCAN_EBS_CHAN_NOT_FOUND,
|
||||
};
|
||||
|
||||
/**
|
||||
* iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
|
||||
* @last_schedule_line: last schedule line executed (fast or regular)
|
||||
* @last_schedule_iteration: last scan iteration executed before scan abort
|
||||
* @status: enum iwl_scan_offload_compleate_status
|
||||
* @ebs_status: last EBS status, see IWL_SCAN_EBS_*
|
||||
*/
|
||||
struct iwl_scan_offload_complete {
|
||||
u8 last_schedule_line;
|
||||
u8 last_schedule_iteration;
|
||||
u8 status;
|
||||
u8 reserved;
|
||||
u8 ebs_status;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
|
@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
|
||||
* struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
|
||||
* ( REPLY_ADD_STA = 0x18 )
|
||||
* @add_modify: 1: modify existing, 0: add new station
|
||||
* @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
|
||||
* @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
|
||||
* sent
|
||||
* @awake_acs:
|
||||
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
|
||||
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
|
||||
* @mac_id_n_color: the Mac context this station belongs to
|
||||
* @addr[ETH_ALEN]: station's MAC address
|
||||
* @sta_id: index of station in uCode's station table
|
||||
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
|
||||
* alone. 1 - modify, 0 - don't change.
|
||||
* @key: look at %iwl_mvm_keyinfo
|
||||
* @station_flags: look at %iwl_sta_flags
|
||||
* @station_flags_msk: what of %station_flags have changed
|
||||
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
|
||||
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
|
||||
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
|
||||
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
|
||||
* add_immediate_ba_ssn.
|
||||
@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
|
||||
* ADD_STA sets up the table entry for one station, either creating a new
|
||||
* entry, or modifying a pre-existing one.
|
||||
*/
|
||||
struct iwl_mvm_add_sta_cmd_v5 {
|
||||
u8 add_modify;
|
||||
u8 unicast_tx_key_id;
|
||||
u8 multicast_tx_key_id;
|
||||
u8 reserved1;
|
||||
__le32 mac_id_n_color;
|
||||
u8 addr[ETH_ALEN];
|
||||
__le16 reserved2;
|
||||
u8 sta_id;
|
||||
u8 modify_mask;
|
||||
__le16 reserved3;
|
||||
struct iwl_mvm_keyinfo key;
|
||||
__le32 station_flags;
|
||||
__le32 station_flags_msk;
|
||||
__le16 tid_disable_tx;
|
||||
__le16 reserved4;
|
||||
u8 add_immediate_ba_tid;
|
||||
u8 remove_immediate_ba_tid;
|
||||
__le16 add_immediate_ba_ssn;
|
||||
__le16 sleep_tx_count;
|
||||
__le16 sleep_state_flags;
|
||||
__le16 assoc_id;
|
||||
__le16 beamform_flags;
|
||||
__le32 tfd_queue_msk;
|
||||
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
|
||||
* VER_7 of this command is quite similar to VER_5 except
|
||||
* exclusion of all fields related to the security key installation.
|
||||
* It only differs from VER_6 by the "awake_acs" field that is
|
||||
* reserved and ignored in VER_6.
|
||||
*/
|
||||
struct iwl_mvm_add_sta_cmd_v7 {
|
||||
struct iwl_mvm_add_sta_cmd {
|
||||
u8 add_modify;
|
||||
u8 awake_acs;
|
||||
__le16 tid_disable_tx;
|
||||
|
@ -482,7 +482,8 @@ struct iwl_mvm_tx_resp {
|
||||
u8 pa_integ_res_b[3];
|
||||
u8 pa_integ_res_c[3];
|
||||
__le16 measurement_req_id;
|
||||
__le16 reserved;
|
||||
u8 reduced_tpc;
|
||||
u8 reserved;
|
||||
|
||||
__le32 tfd_info;
|
||||
__le16 seq_ctl;
|
||||
|
@ -71,6 +71,7 @@
|
||||
#include "fw-api-power.h"
|
||||
#include "fw-api-d3.h"
|
||||
#include "fw-api-coex.h"
|
||||
#include "fw-api-scan.h"
|
||||
|
||||
/* maximal number of Tx queues in any platform */
|
||||
#define IWL_MVM_MAX_QUEUES 20
|
||||
@ -604,52 +605,7 @@ enum {
|
||||
TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
|
||||
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
|
||||
|
||||
|
||||
/**
|
||||
* struct iwl_time_event_cmd_api_v1 - configuring Time Events
|
||||
* with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
|
||||
* with version 2. determined by IWL_UCODE_TLV_FLAGS)
|
||||
* ( TIME_EVENT_CMD = 0x29 )
|
||||
* @id_and_color: ID and color of the relevant MAC
|
||||
* @action: action to perform, one of FW_CTXT_ACTION_*
|
||||
* @id: this field has two meanings, depending on the action:
|
||||
* If the action is ADD, then it means the type of event to add.
|
||||
* For all other actions it is the unique event ID assigned when the
|
||||
* event was added by the FW.
|
||||
* @apply_time: When to start the Time Event (in GP2)
|
||||
* @max_delay: maximum delay to event's start (apply time), in TU
|
||||
* @depends_on: the unique ID of the event we depend on (if any)
|
||||
* @interval: interval between repetitions, in TU
|
||||
* @interval_reciprocal: 2^32 / interval
|
||||
* @duration: duration of event in TU
|
||||
* @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
|
||||
* @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
|
||||
* and TE_V1_EVENT_SOCIOPATHIC
|
||||
* @is_present: 0 or 1, are we present or absent during the Time Event
|
||||
* @max_frags: maximal number of fragments the Time Event can be divided to
|
||||
* @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
|
||||
*/
|
||||
struct iwl_time_event_cmd_v1 {
|
||||
/* COMMON_INDEX_HDR_API_S_VER_1 */
|
||||
__le32 id_and_color;
|
||||
__le32 action;
|
||||
__le32 id;
|
||||
/* MAC_TIME_EVENT_DATA_API_S_VER_1 */
|
||||
__le32 apply_time;
|
||||
__le32 max_delay;
|
||||
__le32 dep_policy;
|
||||
__le32 depends_on;
|
||||
__le32 is_present;
|
||||
__le32 max_frags;
|
||||
__le32 interval;
|
||||
__le32 interval_reciprocal;
|
||||
__le32 duration;
|
||||
__le32 repeat;
|
||||
__le32 notify;
|
||||
} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
|
||||
|
||||
|
||||
/* Time event - defines for command API v2 */
|
||||
/* Time event - defines for command API */
|
||||
|
||||
/*
|
||||
* @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
|
||||
@ -680,7 +636,7 @@ enum {
|
||||
#define TE_V2_PLACEMENT_POS 12
|
||||
#define TE_V2_ABSENCE_POS 15
|
||||
|
||||
/* Time event policy values (for time event cmd api v2)
|
||||
/* Time event policy values
|
||||
* A notification (both event and fragment) includes a status indicating weather
|
||||
* the FW was able to schedule the event or not. For fragment start/end
|
||||
* notification the status is always success. There is no start/end fragment
|
||||
@ -727,7 +683,7 @@ enum {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_time_event_cmd_api_v2 - configuring Time Events
|
||||
* struct iwl_time_event_cmd_api - configuring Time Events
|
||||
* with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
|
||||
* with version 1. determined by IWL_UCODE_TLV_FLAGS)
|
||||
* ( TIME_EVENT_CMD = 0x29 )
|
||||
@ -750,7 +706,7 @@ enum {
|
||||
* TE_EVENT_SOCIOPATHIC
|
||||
* using TE_ABSENCE and using TE_NOTIF_*
|
||||
*/
|
||||
struct iwl_time_event_cmd_v2 {
|
||||
struct iwl_time_event_cmd {
|
||||
/* COMMON_INDEX_HDR_API_S_VER_1 */
|
||||
__le32 id_and_color;
|
||||
__le32 action;
|
||||
|
@ -71,10 +71,12 @@
|
||||
* enum iwl_fw_error_dump_type - types of data in the dump file
|
||||
* @IWL_FW_ERROR_DUMP_SRAM:
|
||||
* @IWL_FW_ERROR_DUMP_REG:
|
||||
* @IWL_FW_ERROR_DUMP_RXF:
|
||||
*/
|
||||
enum iwl_fw_error_dump_type {
|
||||
IWL_FW_ERROR_DUMP_SRAM = 0,
|
||||
IWL_FW_ERROR_DUMP_REG = 1,
|
||||
IWL_FW_ERROR_DUMP_RXF = 2,
|
||||
|
||||
IWL_FW_ERROR_DUMP_MAX,
|
||||
};
|
||||
@ -89,7 +91,7 @@ struct iwl_fw_error_dump_data {
|
||||
__le32 type;
|
||||
__le32 len;
|
||||
__u8 data[];
|
||||
} __packed __aligned(4);
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_fw_error_dump_file - the layout of the header of the file
|
||||
@ -101,6 +103,6 @@ struct iwl_fw_error_dump_file {
|
||||
__le32 barker;
|
||||
__le32 file_len;
|
||||
u8 data[0];
|
||||
} __packed __aligned(4);
|
||||
} __packed;
|
||||
|
||||
#endif /* __fw_error_dump_h__ */
|
||||
|
@ -288,7 +288,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
||||
goto error;
|
||||
}
|
||||
|
||||
ret = iwl_send_bt_prio_tbl(mvm);
|
||||
ret = iwl_send_bt_init_conf(mvm);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
@ -424,10 +424,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
ret = iwl_send_bt_prio_tbl(mvm);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
ret = iwl_send_bt_init_conf(mvm);
|
||||
if (ret)
|
||||
goto error;
|
||||
@ -468,12 +464,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
|
||||
/* Initialize tx backoffs to the minimal possible */
|
||||
iwl_mvm_tt_tx_backoff(mvm, 0);
|
||||
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
|
||||
ret = iwl_power_legacy_set_cam_mode(mvm);
|
||||
if (ret)
|
||||
goto error;
|
||||
}
|
||||
|
||||
ret = iwl_mvm_power_update_device(mvm);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
@ -667,12 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
|
||||
if (vif->bss_conf.qos)
|
||||
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
|
||||
|
||||
/* Don't use cts to self as the fw doesn't support it currently. */
|
||||
if (vif->bss_conf.use_cts_prot) {
|
||||
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
|
||||
if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
|
||||
cmd->protection_flags |=
|
||||
cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
|
||||
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
|
||||
}
|
||||
IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
|
||||
vif->bss_conf.use_cts_prot,
|
||||
|
@ -276,6 +276,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
IEEE80211_HW_AMPDU_AGGREGATION |
|
||||
IEEE80211_HW_TIMING_BEACON_ONLY |
|
||||
IEEE80211_HW_CONNECTION_MONITOR |
|
||||
IEEE80211_HW_SUPPORTS_UAPSD |
|
||||
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
|
||||
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
|
||||
|
||||
@ -285,6 +286,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
|
||||
hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
|
||||
hw->rate_control_algorithm = "iwl-mvm-rs";
|
||||
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
|
||||
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
|
||||
|
||||
/*
|
||||
* Enable 11w if advertised by firmware and software crypto
|
||||
@ -295,11 +298,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
!iwlwifi_mod_params.sw_crypto)
|
||||
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
|
||||
|
||||
if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
|
||||
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
|
||||
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
|
||||
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
|
||||
}
|
||||
/* Disable uAPSD due to firmware issues */
|
||||
if (true)
|
||||
hw->flags &= ~IEEE80211_HW_SUPPORTS_UAPSD;
|
||||
|
||||
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
|
||||
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
|
||||
@ -309,11 +310,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
BIT(NL80211_IFTYPE_P2P_CLIENT) |
|
||||
BIT(NL80211_IFTYPE_AP) |
|
||||
BIT(NL80211_IFTYPE_P2P_GO) |
|
||||
BIT(NL80211_IFTYPE_P2P_DEVICE);
|
||||
|
||||
/* IBSS has bugs in older versions */
|
||||
if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
|
||||
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
|
||||
BIT(NL80211_IFTYPE_P2P_DEVICE) |
|
||||
BIT(NL80211_IFTYPE_ADHOC);
|
||||
|
||||
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
|
||||
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
|
||||
@ -365,14 +363,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
else
|
||||
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
|
||||
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
|
||||
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
|
||||
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
|
||||
/* we create the 802.11 header and zero length SSID IE. */
|
||||
hw->wiphy->max_sched_scan_ie_len =
|
||||
SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
|
||||
}
|
||||
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
|
||||
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
|
||||
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
|
||||
/* we create the 802.11 header and zero length SSID IE. */
|
||||
hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
|
||||
|
||||
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
|
||||
NL80211_FEATURE_P2P_GO_OPPPS;
|
||||
@ -386,7 +381,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
|
||||
if (iwl_mvm_is_d0i3_supported(mvm) &&
|
||||
device_can_wakeup(mvm->trans->dev)) {
|
||||
mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
|
||||
hw->wiphy->wowlan = &mvm->wowlan;
|
||||
} else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
|
||||
mvm->trans->ops->d3_suspend &&
|
||||
mvm->trans->ops->d3_resume &&
|
||||
device_can_wakeup(mvm->trans->dev)) {
|
||||
@ -827,8 +826,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
|
||||
goto out_remove_mac;
|
||||
|
||||
if (!mvm->bf_allowed_vif &&
|
||||
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
|
||||
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
|
||||
vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
|
||||
mvm->bf_allowed_vif = mvmvif;
|
||||
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
|
||||
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
|
||||
@ -1223,6 +1221,10 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
|
||||
return 0;
|
||||
|
||||
/* bcast filtering isn't supported for P2P client */
|
||||
if (vif->p2p)
|
||||
return 0;
|
||||
|
||||
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
|
||||
return 0;
|
||||
|
||||
@ -1697,6 +1699,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
|
||||
ret = iwl_mvm_add_sta(mvm, vif, sta);
|
||||
} else if (old_state == IEEE80211_STA_NONE &&
|
||||
new_state == IEEE80211_STA_AUTH) {
|
||||
/*
|
||||
* EBS may be disabled due to previous failures reported by FW.
|
||||
* Reset EBS status here assuming environment has been changed.
|
||||
*/
|
||||
mvm->last_ebs_successful = true;
|
||||
ret = 0;
|
||||
} else if (old_state == IEEE80211_STA_AUTH &&
|
||||
new_state == IEEE80211_STA_ASSOC) {
|
||||
|
@ -164,7 +164,6 @@ enum iwl_dbgfs_pm_mask {
|
||||
MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
|
||||
MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
|
||||
MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
|
||||
MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
|
||||
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
|
||||
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
|
||||
MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
|
||||
@ -177,7 +176,6 @@ struct iwl_dbgfs_pm {
|
||||
u32 tx_data_timeout;
|
||||
bool skip_over_dtim;
|
||||
u8 skip_dtim_periods;
|
||||
bool disable_power_off;
|
||||
bool lprx_ena;
|
||||
u32 lprx_rssi_threshold;
|
||||
bool snooze_ena;
|
||||
@ -232,6 +230,7 @@ enum iwl_mvm_ref_type {
|
||||
IWL_MVM_REF_USER,
|
||||
IWL_MVM_REF_TX,
|
||||
IWL_MVM_REF_TX_AGG,
|
||||
IWL_MVM_REF_EXIT_WORK,
|
||||
|
||||
IWL_MVM_REF_COUNT,
|
||||
};
|
||||
@ -265,6 +264,7 @@ struct iwl_mvm_vif_bf_data {
|
||||
* @uploaded: indicates the MAC context has been added to the device
|
||||
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
|
||||
* should get quota etc.
|
||||
* @pm_enabled - Indicate if MAC power management is allowed
|
||||
* @monitor_active: indicates that monitor context is configured, and that the
|
||||
* interface should get quota etc.
|
||||
* @low_latency: indicates that this interface is in low-latency mode
|
||||
@ -283,6 +283,7 @@ struct iwl_mvm_vif {
|
||||
|
||||
bool uploaded;
|
||||
bool ap_ibss_active;
|
||||
bool pm_enabled;
|
||||
bool monitor_active;
|
||||
bool low_latency;
|
||||
struct iwl_mvm_vif_bf_data bf_data;
|
||||
@ -451,6 +452,11 @@ struct iwl_mvm_frame_stats {
|
||||
int last_frame_idx;
|
||||
};
|
||||
|
||||
enum {
|
||||
D0I3_DEFER_WAKEUP,
|
||||
D0I3_PENDING_WAKEUP,
|
||||
};
|
||||
|
||||
struct iwl_mvm {
|
||||
/* for logger access */
|
||||
struct device *dev;
|
||||
@ -535,6 +541,8 @@ struct iwl_mvm {
|
||||
/* Internal station */
|
||||
struct iwl_mvm_int_sta aux_sta;
|
||||
|
||||
bool last_ebs_successful;
|
||||
|
||||
u8 scan_last_antenna_idx; /* to toggle TX between antennas */
|
||||
u8 mgmt_last_antenna_idx;
|
||||
|
||||
@ -578,6 +586,8 @@ struct iwl_mvm {
|
||||
void *fw_error_dump;
|
||||
void *fw_error_sram;
|
||||
u32 fw_error_sram_len;
|
||||
u32 *fw_error_rxf;
|
||||
u32 fw_error_rxf_len;
|
||||
|
||||
struct led_classdev led;
|
||||
|
||||
@ -601,6 +611,9 @@ struct iwl_mvm {
|
||||
bool d0i3_offloading;
|
||||
struct work_struct d0i3_exit_work;
|
||||
struct sk_buff_head d0i3_tx;
|
||||
/* protect d0i3_suspend_flags */
|
||||
struct mutex d0i3_suspend_mutex;
|
||||
unsigned long d0i3_suspend_flags;
|
||||
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
|
||||
spinlock_t d0i3_tx_lock;
|
||||
wait_queue_head_t d0i3_exit_waitq;
|
||||
@ -629,8 +642,6 @@ struct iwl_mvm {
|
||||
|
||||
/* Indicate if device power save is allowed */
|
||||
bool ps_disabled;
|
||||
/* Indicate if device power management is allowed */
|
||||
bool pm_disabled;
|
||||
};
|
||||
|
||||
/* Extract MVM priv from op_mode and _hw */
|
||||
@ -705,6 +716,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
|
||||
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
|
||||
void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
|
||||
#endif
|
||||
u8 first_antenna(u8 mask);
|
||||
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
|
||||
@ -874,8 +886,6 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
|
||||
int rs_pretty_print_rate(char *buf, const u32 rate);
|
||||
|
||||
/* power management */
|
||||
int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
|
||||
|
||||
int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
|
||||
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
@ -922,9 +932,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
|
||||
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
|
||||
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
|
||||
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
|
||||
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
|
||||
|
||||
/* BT Coex */
|
||||
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
|
||||
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
|
||||
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb,
|
||||
@ -936,6 +946,8 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta);
|
||||
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta);
|
||||
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
|
||||
enum ieee80211_band band);
|
||||
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
|
||||
struct ieee80211_tx_info *info, u8 ac);
|
||||
int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
|
||||
|
@ -402,6 +402,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
mvm->sf_state = SF_UNINIT;
|
||||
|
||||
mutex_init(&mvm->mutex);
|
||||
mutex_init(&mvm->d0i3_suspend_mutex);
|
||||
spin_lock_init(&mvm->async_handlers_lock);
|
||||
INIT_LIST_HEAD(&mvm->time_event_list);
|
||||
INIT_LIST_HEAD(&mvm->async_handlers_list);
|
||||
@ -538,6 +539,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
|
||||
kfree(mvm->scan_cmd);
|
||||
vfree(mvm->fw_error_dump);
|
||||
kfree(mvm->fw_error_sram);
|
||||
kfree(mvm->fw_error_rxf);
|
||||
kfree(mvm->mcast_filter_cmd);
|
||||
mvm->mcast_filter_cmd = NULL;
|
||||
|
||||
@ -821,8 +823,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
return;

file_len = mvm->fw_error_sram_len +
mvm->fw_error_rxf_len +
sizeof(*dump_file) +
sizeof(*dump_data);
sizeof(*dump_data) * 2;

dump_file = vmalloc(file_len);
if (!dump_file)
@ -833,7 +836,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_file->file_len = cpu_to_le32(file_len);
dump_data = (void *)dump_file->data;
dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);

dump_data = (void *)((u8 *)dump_data->data + mvm->fw_error_rxf_len);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);

/*
@ -842,6 +850,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
* mvm->fw_error_sram right now.
*/
memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);

kfree(mvm->fw_error_rxf);
mvm->fw_error_rxf = NULL;
mvm->fw_error_rxf_len = 0;

kfree(mvm->fw_error_sram);
mvm->fw_error_sram = NULL;
mvm->fw_error_sram_len = 0;
}
#endif

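The hunk above changes iwl_mvm_fw_error_dump() so the dump buffer carries two type/length-prefixed blocks after the file header, RXF first and SRAM second. A minimal sketch of a consumer walking that layout follows; it only uses the fields exercised in the hunk (barker, file_len, data, type, len) and assumes the usual iwl_fw_error_dump_file / iwl_fw_error_dump_data definitions are in scope, so treat it as illustrative rather than the driver's actual parser.

/*
 * Illustrative walker for the [file header][RXF block][SRAM block] layout
 * produced above. Struct and field names are assumed from the hunk.
 */
static void iwl_fw_error_dump_walk(const struct iwl_fw_error_dump_file *file)
{
	const struct iwl_fw_error_dump_data *data = (const void *)file->data;
	const u8 *end = (const u8 *)file + le32_to_cpu(file->file_len);

	while ((const u8 *)data < end) {
		u32 type = le32_to_cpu(data->type);
		u32 len = le32_to_cpu(data->len);

		pr_info("fw error dump block: type %u, %u bytes\n", type, len);
		data = (const void *)(data->data + len);
	}
}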
@ -853,6 +869,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
iwl_mvm_fw_error_sram_dump(mvm);
|
||||
iwl_mvm_fw_error_rxf_dump(mvm);
|
||||
#endif
|
||||
|
||||
iwl_mvm_nic_restart(mvm);
|
||||
@ -1128,7 +1145,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
};
struct iwl_wowlan_status_v6 *status;
struct iwl_wowlan_status *status;
int ret;
u32 disconnection_reasons, wakeup_reasons;
__le16 *qos_seq = NULL;
@ -1158,18 +1175,27 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
iwl_free_resp(&get_status_cmd);
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
CMD_WAKE_UP_TRANS;
int ret;

IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");

mutex_lock(&mvm->d0i3_suspend_mutex);
if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
return 0;
}
mutex_unlock(&mvm->d0i3_suspend_mutex);

ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
if (ret)
goto out;
@ -1183,6 +1209,25 @@ out:
return ret;
}

static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
return _iwl_mvm_exit_d0i3(mvm);
}

static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
}

static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@ -1196,4 +1241,5 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_config = iwl_mvm_nic_config,
.enter_d0i3 = iwl_mvm_enter_d0i3,
.exit_d0i3 = iwl_mvm_exit_d0i3,
.napi_add = iwl_mvm_napi_add,
};
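The new D0I3_DEFER_WAKEUP / D0I3_PENDING_WAKEUP pair lets a d0i3 exit requested during system suspend be postponed: _iwl_mvm_exit_d0i3() records the pending bit and returns early, and the resume path is then expected to replay the exit once the defer flag is lifted. A hedged sketch of what that resume-side counterpart could look like (it is not part of this hunk):

static void iwl_mvm_replay_deferred_d0i3_exit(struct iwl_mvm *mvm)
{
	bool exit_now;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
					&mvm->d0i3_suspend_flags);
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	if (exit_now)
		_iwl_mvm_exit_d0i3(mvm);
}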
|
||||
|
@ -268,6 +268,30 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
|
||||
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
|
||||
}
|
||||
|
||||
static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
ETH_ALEN))
return false;

if (vif->p2p &&
!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
return false;
/*
* Avoid using uAPSD if P2P client is associated to GO that uses
* opportunistic power save. This is due to current FW limitation.
*/
if (vif->p2p &&
(vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
IEEE80211_P2P_OPPPS_ENABLE_BIT))
return false;

return true;
}

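These three bail-out conditions were previously open-coded in iwl_mvm_power_build_cmd() (they are deleted further down in this patch), so the caller now reduces to a single guarded call, mirroring the later hunk:

	if (iwl_mvm_power_allow_uapsd(mvm, vif))
		iwl_mvm_power_configure_uapsd(mvm, vif, cmd);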
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct iwl_mac_power_cmd *cmd)
|
||||
@ -280,7 +304,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
|
||||
bool radar_detect = false;
|
||||
struct iwl_mvm_vif *mvmvif __maybe_unused =
|
||||
iwl_mvm_vif_from_mac80211(vif);
|
||||
bool allow_uapsd = true;
|
||||
|
||||
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
|
||||
mvmvif->color));
|
||||
@ -303,13 +326,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
|
||||
|
||||
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
|
||||
mvmvif->dbgfs_pm.disable_power_off)
|
||||
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
|
||||
#endif
|
||||
if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
|
||||
mvm->pm_disabled)
|
||||
!mvmvif->pm_enabled)
|
||||
return;
|
||||
|
||||
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
|
||||
@ -351,23 +369,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
|
||||
cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
|
||||
}
|
||||
|
||||
if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
|
||||
ETH_ALEN))
|
||||
allow_uapsd = false;
|
||||
|
||||
if (vif->p2p &&
|
||||
!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
|
||||
allow_uapsd = false;
|
||||
/*
|
||||
* Avoid using uAPSD if P2P client is associated to GO that uses
|
||||
* opportunistic power save. This is due to current FW limitation.
|
||||
*/
|
||||
if (vif->p2p &&
|
||||
vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
|
||||
IEEE80211_P2P_OPPPS_ENABLE_BIT)
|
||||
allow_uapsd = false;
|
||||
|
||||
if (allow_uapsd)
|
||||
if (iwl_mvm_power_allow_uapsd(mvm, vif))
|
||||
iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
@ -421,13 +423,6 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
|
||||
{
|
||||
struct iwl_mac_power_cmd cmd = {};
|
||||
|
||||
if (vif->type != NL80211_IFTYPE_STATION)
|
||||
return 0;
|
||||
|
||||
if (vif->p2p &&
|
||||
!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
|
||||
return 0;
|
||||
|
||||
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
|
||||
iwl_mvm_power_log(mvm, &cmd);
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
@ -444,12 +439,6 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
|
||||
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
|
||||
};
|
||||
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
|
||||
return 0;
|
||||
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
|
||||
return 0;
|
||||
|
||||
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
|
||||
mvm->ps_disabled = true;
|
||||
|
||||
@ -508,86 +497,69 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct iwl_power_constraint {
|
||||
struct iwl_power_vifs {
|
||||
struct ieee80211_vif *bf_vif;
|
||||
struct ieee80211_vif *bss_vif;
|
||||
struct ieee80211_vif *p2p_vif;
|
||||
u16 bss_phyctx_id;
|
||||
u16 p2p_phyctx_id;
|
||||
bool pm_disabled;
|
||||
bool ps_disabled;
|
||||
struct iwl_mvm *mvm;
|
||||
struct ieee80211_vif *ap_vif;
|
||||
struct ieee80211_vif *monitor_vif;
|
||||
bool p2p_active;
|
||||
bool bss_active;
|
||||
bool ap_active;
|
||||
bool monitor_active;
|
||||
};
|
||||
|
||||
static void iwl_mvm_power_iterator(void *_data, u8 *mac,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_power_constraint *power_iterator = _data;
|
||||
struct iwl_mvm *mvm = power_iterator->mvm;
|
||||
struct iwl_power_vifs *power_iterator = _data;
|
||||
|
||||
mvmvif->pm_enabled = false;
|
||||
switch (ieee80211_vif_type_p2p(vif)) {
|
||||
case NL80211_IFTYPE_P2P_DEVICE:
|
||||
break;
|
||||
|
||||
case NL80211_IFTYPE_P2P_GO:
|
||||
case NL80211_IFTYPE_AP:
|
||||
/* no BSS power mgmt if we have an active AP */
|
||||
if (mvmvif->ap_ibss_active)
|
||||
power_iterator->pm_disabled = true;
|
||||
/* only a single MAC of the same type */
|
||||
WARN_ON(power_iterator->ap_vif);
|
||||
power_iterator->ap_vif = vif;
|
||||
if (mvmvif->phy_ctxt)
|
||||
if (mvmvif->phy_ctxt->id < MAX_PHYS)
|
||||
power_iterator->ap_active = true;
|
||||
break;
|
||||
|
||||
case NL80211_IFTYPE_MONITOR:
|
||||
/* no BSS power mgmt and no device power save */
|
||||
power_iterator->pm_disabled = true;
|
||||
power_iterator->ps_disabled = true;
|
||||
/* only a single MAC of the same type */
|
||||
WARN_ON(power_iterator->monitor_vif);
|
||||
power_iterator->monitor_vif = vif;
|
||||
if (mvmvif->phy_ctxt)
|
||||
if (mvmvif->phy_ctxt->id < MAX_PHYS)
|
||||
power_iterator->monitor_active = true;
|
||||
break;
|
||||
|
||||
case NL80211_IFTYPE_P2P_CLIENT:
|
||||
if (mvmvif->phy_ctxt)
|
||||
power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
|
||||
|
||||
/* we should have only one P2P vif */
|
||||
/* only a single MAC of the same type */
|
||||
WARN_ON(power_iterator->p2p_vif);
|
||||
power_iterator->p2p_vif = vif;
|
||||
|
||||
IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
|
||||
power_iterator->p2p_phyctx_id,
|
||||
power_iterator->bss_phyctx_id);
|
||||
if (!(mvm->fw->ucode_capa.flags &
|
||||
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
|
||||
/* no BSS power mgmt if we have a P2P client*/
|
||||
power_iterator->pm_disabled = true;
|
||||
} else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
|
||||
power_iterator->bss_phyctx_id < MAX_PHYS &&
|
||||
power_iterator->p2p_phyctx_id ==
|
||||
power_iterator->bss_phyctx_id) {
|
||||
power_iterator->pm_disabled = true;
|
||||
}
|
||||
if (mvmvif->phy_ctxt)
|
||||
if (mvmvif->phy_ctxt->id < MAX_PHYS)
|
||||
power_iterator->p2p_active = true;
|
||||
break;
|
||||
|
||||
case NL80211_IFTYPE_STATION:
|
||||
if (mvmvif->phy_ctxt)
|
||||
power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
|
||||
|
||||
/* we should have only one BSS vif */
|
||||
/* only a single MAC of the same type */
|
||||
WARN_ON(power_iterator->bss_vif);
|
||||
power_iterator->bss_vif = vif;
|
||||
if (mvmvif->phy_ctxt)
|
||||
if (mvmvif->phy_ctxt->id < MAX_PHYS)
|
||||
power_iterator->bss_active = true;
|
||||
|
||||
if (mvmvif->bf_data.bf_enabled &&
|
||||
!WARN_ON(power_iterator->bf_vif))
|
||||
power_iterator->bf_vif = vif;
|
||||
|
||||
IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
|
||||
power_iterator->p2p_phyctx_id,
|
||||
power_iterator->bss_phyctx_id);
|
||||
if (mvm->fw->ucode_capa.flags &
|
||||
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
|
||||
(power_iterator->p2p_phyctx_id < MAX_PHYS &&
|
||||
power_iterator->bss_phyctx_id < MAX_PHYS &&
|
||||
power_iterator->p2p_phyctx_id ==
|
||||
power_iterator->bss_phyctx_id))
|
||||
power_iterator->pm_disabled = true;
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -596,70 +568,118 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
|
||||
}
|
||||
|
||||
static void
|
||||
iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
|
||||
struct iwl_power_constraint *constraint)
|
||||
iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
|
||||
struct iwl_power_vifs *vifs)
|
||||
{
|
||||
struct iwl_mvm_vif *bss_mvmvif = NULL;
|
||||
struct iwl_mvm_vif *p2p_mvmvif = NULL;
|
||||
struct iwl_mvm_vif *ap_mvmvif = NULL;
|
||||
bool client_same_channel = false;
|
||||
bool ap_same_channel = false;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
|
||||
constraint->pm_disabled = true;
|
||||
constraint->ps_disabled = true;
|
||||
}
|
||||
|
||||
/* get vifs info + set pm_enable to false */
|
||||
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
|
||||
IEEE80211_IFACE_ITER_NORMAL,
|
||||
iwl_mvm_power_iterator, constraint);
|
||||
iwl_mvm_power_iterator, vifs);
|
||||
|
||||
if (vifs->bss_vif)
|
||||
bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
|
||||
|
||||
if (vifs->p2p_vif)
|
||||
p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
|
||||
|
||||
if (vifs->ap_vif)
|
||||
ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
|
||||
|
||||
/* enable PM on bss if bss stand alone */
|
||||
if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
|
||||
bss_mvmvif->pm_enabled = true;
|
||||
return;
|
||||
}
|
||||
|
||||
/* enable PM on p2p if p2p stand alone */
|
||||
if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
|
||||
p2p_mvmvif->pm_enabled = true;
|
||||
return;
|
||||
}
|
||||
|
||||
if (vifs->bss_active && vifs->p2p_active)
|
||||
client_same_channel = (bss_mvmvif->phy_ctxt->id ==
|
||||
p2p_mvmvif->phy_ctxt->id);
|
||||
if (vifs->bss_active && vifs->ap_active)
|
||||
ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
|
||||
ap_mvmvif->phy_ctxt->id);
|
||||
|
||||
/* bss is not stand alone: enable PM if alone on its channel */
|
||||
if (vifs->bss_active && !(client_same_channel || ap_same_channel) &&
|
||||
(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
|
||||
bss_mvmvif->pm_enabled = true;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* There is only one channel in the system and there are only
|
||||
* bss and p2p clients that share it
|
||||
*/
|
||||
if (client_same_channel && !vifs->ap_active &&
|
||||
(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
|
||||
/* share same channel*/
|
||||
bss_mvmvif->pm_enabled = true;
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
|
||||
p2p_mvmvif->pm_enabled = true;
|
||||
}
|
||||
}
|
||||
|
||||
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_power_constraint constraint = {
|
||||
.p2p_phyctx_id = MAX_PHYS,
|
||||
.bss_phyctx_id = MAX_PHYS,
|
||||
.mvm = mvm,
|
||||
};
|
||||
struct iwl_mvm_vif *mvmvif;
|
||||
struct iwl_power_vifs vifs = {};
|
||||
bool ba_enable;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
|
||||
return 0;
|
||||
|
||||
iwl_mvm_power_get_global_constraint(mvm, &constraint);
|
||||
mvm->ps_disabled = constraint.ps_disabled;
|
||||
mvm->pm_disabled = constraint.pm_disabled;
|
||||
iwl_mvm_power_set_pm(mvm, &vifs);
|
||||
|
||||
/* disable PS if CAM */
|
||||
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
|
||||
mvm->ps_disabled = true;
|
||||
} else {
|
||||
/* don't update device power state unless we add / remove monitor */
|
||||
if (vif->type == NL80211_IFTYPE_MONITOR) {
|
||||
ret = iwl_mvm_power_update_device(mvm);
|
||||
if (vifs.monitor_vif) {
|
||||
if (vifs.monitor_active)
|
||||
mvm->ps_disabled = true;
|
||||
ret = iwl_mvm_power_update_device(mvm);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (vifs.bss_vif) {
|
||||
ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (constraint.bss_vif) {
|
||||
ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
|
||||
if (vifs.p2p_vif) {
|
||||
ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (constraint.p2p_vif) {
|
||||
ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!constraint.bf_vif)
|
||||
if (!vifs.bf_vif)
|
||||
return 0;
|
||||
|
||||
vif = constraint.bf_vif;
|
||||
vif = vifs.bf_vif;
|
||||
mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
|
||||
ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
|
||||
ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
|
||||
!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
|
||||
|
||||
return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
|
||||
return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
@ -671,19 +691,10 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
|
||||
struct iwl_mac_power_cmd cmd = {};
|
||||
int pos = 0;
|
||||
|
||||
if (WARN_ON(!(mvm->fw->ucode_capa.flags &
|
||||
IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
|
||||
(cmd.flags &
|
||||
cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
|
||||
0 : 1);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
|
||||
iwlmvm_mod_params.power_scheme);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
|
||||
@ -826,8 +837,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
int ret;
|
||||
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
|
||||
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
|
||||
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
|
||||
return 0;
|
||||
|
||||
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
|
||||
@ -914,13 +924,3 @@ int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
|
||||
|
||||
return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
|
||||
}
|
||||
|
||||
int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
|
||||
{
|
||||
struct iwl_powertable_cmd cmd = {
|
||||
.keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
|
||||
};
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
}
|
||||
|
@ -527,6 +527,9 @@ static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
|
||||
IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
|
||||
for (i = 0; i < IWL_RATE_COUNT; i++)
|
||||
rs_rate_scale_clear_window(&tbl->win[i]);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
|
||||
rs_rate_scale_clear_window(&tbl->tpc_win[i]);
|
||||
}
|
||||
|
||||
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
|
||||
@ -656,17 +659,34 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes)
static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes,
u8 reduced_txp)
{
struct iwl_rate_scale_data *window = NULL;
int ret;

if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;

if (tbl->column != RS_COLUMN_INVALID) {
lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
lq_sta->tx_stats[tbl->column][scale_index].success += successes;
}

/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);

ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
window);
if (ret)
return ret;

if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
return -EINVAL;

window = &tbl->tpc_win[reduced_txp];
return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
window);
}
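With this change a single call now books the same attempt/success counts into three places: the per-rate window, the new per-txpower-reduction window selected by reduced_txp, and the per-column driver statistics used by the new debugfs entry. A small illustrative caller, assuming the driver's rs definitions are in scope (the real call sites follow in rs_tx_status()):

static void rs_collect_example(struct iwl_lq_sta *lq_sta,
			       struct iwl_scale_tbl_info *tbl, int rate_idx)
{
	/*
	 * One aggregated TX status: 3 attempts, 2 successes, sent while the
	 * TX power was reduced by 6. This updates tbl->win[rate_idx],
	 * tbl->tpc_win[6] and lq_sta->tx_stats[tbl->column][rate_idx].
	 */
	rs_collect_tx_data(lq_sta, tbl, rate_idx, 3, 2, 6);
}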
@ -1000,6 +1020,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
|
||||
u32 ucode_rate;
|
||||
struct rs_rate rate;
|
||||
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
|
||||
u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
|
||||
|
||||
/* Treat uninitialized rate scaling data same as non-existing. */
|
||||
if (!lq_sta) {
|
||||
@ -1141,9 +1162,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
|
||||
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
|
||||
ucode_rate = le32_to_cpu(table->rs_table[0]);
|
||||
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
|
||||
rs_collect_tx_data(curr_tbl, rate.index,
|
||||
rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
|
||||
info->status.ampdu_len,
|
||||
info->status.ampdu_ack_len);
|
||||
info->status.ampdu_ack_len,
|
||||
reduced_txp);
|
||||
|
||||
/* Update success/fail counts if not searching for new mode */
|
||||
if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
|
||||
@ -1176,8 +1198,9 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
|
||||
else
|
||||
continue;
|
||||
|
||||
rs_collect_tx_data(tmp_tbl, rate.index, 1,
|
||||
i < retries ? 0 : legacy_success);
|
||||
rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
|
||||
i < retries ? 0 : legacy_success,
|
||||
reduced_txp);
|
||||
}
|
||||
|
||||
/* Update success/fail counts if not searching for new mode */
|
||||
@ -1188,6 +1211,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
|
||||
}
|
||||
/* The last TX rate is cached in lq_sta; it's set in if/else above */
|
||||
lq_sta->last_rate_n_flags = ucode_rate;
|
||||
IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
|
||||
done:
|
||||
/* See if there's a better rate or modulation mode to try. */
|
||||
if (sta && sta->supp_rates[sband->band])
|
||||
@ -1769,6 +1793,198 @@ out:
|
||||
return action;
|
||||
}
|
||||
|
||||
static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
int *weaker, int *stronger)
{
*weaker = index + TPC_TX_POWER_STEP;
if (*weaker > TPC_MAX_REDUCTION)
*weaker = TPC_INVALID;

*stronger = index - TPC_TX_POWER_STEP;
if (*stronger < 0)
*stronger = TPC_INVALID;
}

static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct rs_rate *rate,
enum ieee80211_band band)
{
int index = rate->index;

/*
* allow tpc only if power management is enabled, or bt coex
* activity grade allows it and we are on 2.4Ghz.
*/
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM &&
!iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
return false;

IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
if (is_legacy(rate))
return index == IWL_RATE_54M_INDEX;
if (is_ht(rate))
return index == IWL_RATE_MCS_7_INDEX;
if (is_vht(rate))
return index == IWL_RATE_MCS_7_INDEX ||
index == IWL_RATE_MCS_8_INDEX ||
index == IWL_RATE_MCS_9_INDEX;

WARN_ON_ONCE(1);
return false;
}

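With TPC_TX_POWER_STEP of 3 and TPC_MAX_REDUCTION of 15 (added to rs.h later in this patch), rs_get_adjacent_txp() walks the reduction ladder 0, 3, 6, 9, 12, 15, reporting TPC_INVALID past either end, and rs_tpc_allowed() restricts the whole mechanism to the top rates of each modulation. A small sketch of the ladder behaviour, assuming the driver context:

static void rs_tpc_ladder_example(struct iwl_mvm *mvm)
{
	int weaker, stronger;

	rs_get_adjacent_txp(mvm, 0, &weaker, &stronger);
	/* weaker == 3, stronger == TPC_INVALID: already at full power */

	rs_get_adjacent_txp(mvm, 15, &weaker, &stronger);
	/* weaker == TPC_INVALID: maximum reduction reached, stronger == 12 */
}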
enum tpc_action {
TPC_ACTION_STAY,
TPC_ACTION_DECREASE,
TPC_ACTION_INCREASE,
TPC_ACTION_NO_RESTIRCTION,
};

static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
s32 sr, int weak, int strong,
int current_tpt,
int weak_tpt, int strong_tpt)
{
/* stay until we have valid tpt */
if (current_tpt == IWL_INVALID_VALUE) {
IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
return TPC_ACTION_STAY;
}

/* Too many failures, increase txp */
if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
return TPC_ACTION_NO_RESTIRCTION;
}

/* try decreasing first if applicable */
if (weak != TPC_INVALID) {
if (weak_tpt == IWL_INVALID_VALUE &&
(strong_tpt == IWL_INVALID_VALUE ||
current_tpt >= strong_tpt)) {
IWL_DEBUG_RATE(mvm,
"no weak txp measurement. decrease txp\n");
return TPC_ACTION_DECREASE;
}

if (weak_tpt > current_tpt) {
IWL_DEBUG_RATE(mvm,
"lower txp has better tpt. decrease txp\n");
return TPC_ACTION_DECREASE;
}
}

/* next, increase if needed */
if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
if (weak_tpt == IWL_INVALID_VALUE &&
strong_tpt != IWL_INVALID_VALUE &&
current_tpt < strong_tpt) {
IWL_DEBUG_RATE(mvm,
"higher txp has better tpt. increase txp\n");
return TPC_ACTION_INCREASE;
}

if (weak_tpt < current_tpt &&
(strong_tpt == IWL_INVALID_VALUE ||
strong_tpt > current_tpt)) {
IWL_DEBUG_RATE(mvm,
"lower txp has worse tpt. increase txp\n");
return TPC_ACTION_INCREASE;
}
}

IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
return TPC_ACTION_STAY;
}

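Reading the thresholds from rs.h (the comments there peg 9600 at roughly 75% and 10880 at roughly 85% success ratio), the policy is: below ~75% success, or with zero measured throughput, all reduction is dropped; otherwise the code prefers probing or moving to a weaker TX power when the data supports it, and only steps power back up while the success ratio stays under ~85%. Two illustrative calls with assumed numbers:

static enum tpc_action rs_tpc_action_example(struct iwl_mvm *mvm)
{
	enum tpc_action action;

	/* ~90% SR, weaker level unmeasured: probe downwards. */
	action = rs_get_tpc_action(mvm, 11520, /* weak */ 6, /* strong */ 0,
				   /* current_tpt */ 100,
				   IWL_INVALID_VALUE, IWL_INVALID_VALUE);
	/* action == TPC_ACTION_DECREASE */

	/* ~60% SR: too many failures, remove the reduction entirely. */
	action = rs_get_tpc_action(mvm, 7680, 6, 0, 100,
				   IWL_INVALID_VALUE, IWL_INVALID_VALUE);
	/* action == TPC_ACTION_NO_RESTIRCTION */

	return action;
}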
static bool rs_tpc_perform(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta,
|
||||
struct iwl_lq_sta *lq_sta,
|
||||
struct iwl_scale_tbl_info *tbl)
|
||||
{
|
||||
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
|
||||
struct ieee80211_vif *vif = mvm_sta->vif;
|
||||
struct ieee80211_chanctx_conf *chanctx_conf;
|
||||
enum ieee80211_band band;
|
||||
struct iwl_rate_scale_data *window;
|
||||
struct rs_rate *rate = &tbl->rate;
|
||||
enum tpc_action action;
|
||||
s32 sr;
|
||||
u8 cur = lq_sta->lq.reduced_tpc;
|
||||
int current_tpt;
|
||||
int weak, strong;
|
||||
int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
|
||||
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
|
||||
IWL_DEBUG_RATE(mvm, "fixed tpc: %d",
|
||||
lq_sta->dbg_fixed_txp_reduction);
|
||||
lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
|
||||
return cur != lq_sta->dbg_fixed_txp_reduction;
|
||||
}
|
||||
#endif
|
||||
|
||||
rcu_read_lock();
|
||||
chanctx_conf = rcu_dereference(vif->chanctx_conf);
|
||||
if (WARN_ON(!chanctx_conf))
|
||||
band = IEEE80211_NUM_BANDS;
|
||||
else
|
||||
band = chanctx_conf->def.chan->band;
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!rs_tpc_allowed(mvm, rate, band)) {
|
||||
IWL_DEBUG_RATE(mvm,
|
||||
"tpc is not allowed. remove txp restrictions");
|
||||
lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
|
||||
return cur != TPC_NO_REDUCTION;
|
||||
}
|
||||
|
||||
rs_get_adjacent_txp(mvm, cur, &weak, &strong);
|
||||
|
||||
/* Collect measured throughputs for current and adjacent rates */
|
||||
window = tbl->tpc_win;
|
||||
sr = window[cur].success_ratio;
|
||||
current_tpt = window[cur].average_tpt;
|
||||
if (weak != TPC_INVALID)
|
||||
weak_tpt = window[weak].average_tpt;
|
||||
if (strong != TPC_INVALID)
|
||||
strong_tpt = window[strong].average_tpt;
|
||||
|
||||
IWL_DEBUG_RATE(mvm,
|
||||
"(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
|
||||
cur, current_tpt, sr, weak, strong,
|
||||
weak_tpt, strong_tpt);
|
||||
|
||||
action = rs_get_tpc_action(mvm, sr, weak, strong,
|
||||
current_tpt, weak_tpt, strong_tpt);
|
||||
|
||||
/* override actions if we are on the edge */
|
||||
if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
|
||||
IWL_DEBUG_RATE(mvm, "already in lowest txp, stay");
|
||||
action = TPC_ACTION_STAY;
|
||||
} else if (strong == TPC_INVALID &&
|
||||
(action == TPC_ACTION_INCREASE ||
|
||||
action == TPC_ACTION_NO_RESTIRCTION)) {
|
||||
IWL_DEBUG_RATE(mvm, "already in highest txp, stay");
|
||||
action = TPC_ACTION_STAY;
|
||||
}
|
||||
|
||||
switch (action) {
|
||||
case TPC_ACTION_DECREASE:
|
||||
lq_sta->lq.reduced_tpc = weak;
|
||||
return true;
|
||||
case TPC_ACTION_INCREASE:
|
||||
lq_sta->lq.reduced_tpc = strong;
|
||||
return true;
|
||||
case TPC_ACTION_NO_RESTIRCTION:
|
||||
lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
|
||||
return true;
|
||||
case TPC_ACTION_STAY:
|
||||
/* do nothing */
|
||||
break;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do rate scaling and search for new modulation mode.
|
||||
*/
|
||||
@ -2019,6 +2235,8 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
|
||||
break;
|
||||
case RS_ACTION_STAY:
|
||||
/* No change */
|
||||
update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -2478,6 +2696,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
lq_sta->is_agg = 0;
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
lq_sta->dbg_fixed_rate = 0;
|
||||
lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
|
||||
#endif
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
|
||||
@ -2653,6 +2872,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
|
||||
rs_build_rates_table_from_fixed(mvm, lq_cmd,
|
||||
lq_sta->band,
|
||||
lq_sta->dbg_fixed_rate);
|
||||
lq_cmd->reduced_tpc = 0;
|
||||
ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
|
||||
RATE_MCS_ANT_POS;
|
||||
} else
|
||||
@ -2783,7 +3003,6 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
|
||||
size_t buf_size;
|
||||
u32 parsed_rate;
|
||||
|
||||
|
||||
mvm = lq_sta->drv;
|
||||
memset(buf, 0, sizeof(buf));
|
||||
buf_size = min(count, sizeof(buf) - 1);
|
||||
@ -2856,6 +3075,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
|
||||
lq_sta->lq.agg_disable_start_th,
|
||||
lq_sta->lq.agg_frame_cnt_limit);
|
||||
|
||||
desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
|
||||
desc += sprintf(buff+desc,
|
||||
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
|
||||
lq_sta->lq.initial_rate_index[0],
|
||||
@ -2928,6 +3148,94 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
static const char * const column_name[] = {
|
||||
[RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
|
||||
[RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
|
||||
[RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
|
||||
[RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
|
||||
[RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
|
||||
[RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
|
||||
[RS_COLUMN_MIMO2] = "MIMO2",
|
||||
[RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
|
||||
};
|
||||
|
||||
static const char * const rate_name[] = {
|
||||
[IWL_RATE_1M_INDEX] = "1M",
|
||||
[IWL_RATE_2M_INDEX] = "2M",
|
||||
[IWL_RATE_5M_INDEX] = "5.5M",
|
||||
[IWL_RATE_11M_INDEX] = "11M",
|
||||
[IWL_RATE_6M_INDEX] = "6M|MCS0",
|
||||
[IWL_RATE_9M_INDEX] = "9M",
|
||||
[IWL_RATE_12M_INDEX] = "12M|MCS1",
|
||||
[IWL_RATE_18M_INDEX] = "18M|MCS2",
|
||||
[IWL_RATE_24M_INDEX] = "24M|MCS3",
|
||||
[IWL_RATE_36M_INDEX] = "36M|MCS4",
|
||||
[IWL_RATE_48M_INDEX] = "48M|MCS5",
|
||||
[IWL_RATE_54M_INDEX] = "54M|MCS6",
|
||||
[IWL_RATE_MCS_7_INDEX] = "MCS7",
|
||||
[IWL_RATE_MCS_8_INDEX] = "MCS8",
|
||||
[IWL_RATE_MCS_9_INDEX] = "MCS9",
|
||||
};
|
||||
|
||||
char *buff, *pos, *endpos;
|
||||
int col, rate;
|
||||
ssize_t ret;
|
||||
struct iwl_lq_sta *lq_sta = file->private_data;
|
||||
struct rs_rate_stats *stats;
|
||||
static const size_t bufsz = 1024;
|
||||
|
||||
buff = kmalloc(bufsz, GFP_KERNEL);
|
||||
if (!buff)
|
||||
return -ENOMEM;
|
||||
|
||||
pos = buff;
|
||||
endpos = pos + bufsz;
|
||||
|
||||
pos += scnprintf(pos, endpos - pos, "COLUMN,");
|
||||
for (rate = 0; rate < IWL_RATE_COUNT; rate++)
|
||||
pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
|
||||
pos += scnprintf(pos, endpos - pos, "\n");
|
||||
|
||||
for (col = 0; col < RS_COLUMN_COUNT; col++) {
|
||||
pos += scnprintf(pos, endpos - pos,
|
||||
"%s,", column_name[col]);
|
||||
|
||||
for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
|
||||
stats = &(lq_sta->tx_stats[col][rate]);
|
||||
pos += scnprintf(pos, endpos - pos,
|
||||
"%llu/%llu,",
|
||||
stats->success,
|
||||
stats->total);
|
||||
}
|
||||
pos += scnprintf(pos, endpos - pos, "\n");
|
||||
}
|
||||
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
|
||||
kfree(buff);
|
||||
return ret;
|
||||
}
|
||||
|
||||
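When read back, the new drv_tx_stats entry emits a CSV header of rate names followed by one success/total pair per rate for every rate-scaling column; writing anything to the file clears the counters (see the write handler below). An illustrative excerpt, assuming the rate indices enumerate in the order the names are listed above and using made-up counts:

COLUMN,1M,2M,5.5M,11M,6M|MCS0,9M,12M|MCS1,18M|MCS2,24M|MCS3,36M|MCS4,48M|MCS5,54M|MCS6,MCS7,MCS8,MCS9,
LEGACY_ANT_A,0/0,0/0,0/0,0/0,3/4,0/0,10/12,25/30,0/0,0/0,0/0,0/0,0/0,0/0,0/0,
MIMO2,0/0,0/0,0/0,0/0,0/0,0/0,0/0,0/0,0/0,0/0,0/0,0/0,12/15,40/45,88/90,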
static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_lq_sta *lq_sta = file->private_data;
|
||||
memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
|
||||
.read = rs_sta_dbgfs_drv_tx_stats_read,
|
||||
.write = rs_sta_dbgfs_drv_tx_stats_write,
|
||||
.open = simple_open,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
|
||||
{
|
||||
struct iwl_lq_sta *lq_sta = mvm_sta;
|
||||
@ -2937,9 +3245,15 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
|
||||
lq_sta->rs_sta_dbgfs_stats_table_file =
|
||||
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
|
||||
lq_sta, &rs_sta_dbgfs_stats_table_ops);
|
||||
lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
|
||||
debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
|
||||
lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
|
||||
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
|
||||
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
|
||||
&lq_sta->tx_agg_tid_en);
|
||||
lq_sta->rs_sta_dbgfs_reduced_txp_file =
|
||||
debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
|
||||
&lq_sta->dbg_fixed_txp_reduction);
|
||||
}
|
||||
|
||||
static void rs_remove_debugfs(void *mvm, void *mvm_sta)
|
||||
@ -2947,7 +3261,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
|
||||
struct iwl_lq_sta *lq_sta = mvm_sta;
|
||||
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
|
||||
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
|
||||
debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
|
||||
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
|
||||
debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -158,6 +158,13 @@ enum {
#define RS_SR_FORCE_DECREASE 1920 /* 15% */
#define RS_SR_NO_DECREASE 10880 /* 85% */

#define TPC_SR_FORCE_INCREASE 9600 /* 75% */
#define TPC_SR_NO_INCREASE 10880 /* 85% */
#define TPC_TX_POWER_STEP 3
#define TPC_MAX_REDUCTION 15
#define TPC_NO_REDUCTION 0
#define TPC_INVALID 0xff

#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@ -266,9 +273,16 @@ enum rs_column {
RS_COLUMN_MIMO2_SGI,

RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
RS_COLUMN_INVALID,
};

/* Packet stats per rate */
struct rs_rate_stats {
u64 success;
u64 total;
};

/**
* struct iwl_scale_tbl_info -- tx params and success history for all rates
*
@ -280,6 +294,8 @@ struct iwl_scale_tbl_info {
enum rs_column column;
const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
/* per txpower-reduction history */
struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
};
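Note that tpc_win[] is indexed directly by the reduction value (window = &tbl->tpc_win[reduced_txp] in rs_collect_tx_data()), so with TPC_TX_POWER_STEP of 3 only slots 0, 3, 6, 9, 12 and 15 ever accumulate data; the array is simply sized to cover the largest valid index. A compile-time sketch of that relationship, assuming the definitions above are in scope:

static inline void tpc_win_layout_check(struct iwl_scale_tbl_info *tbl)
{
	/* the step size must divide the maximum reduction evenly */
	BUILD_BUG_ON(TPC_MAX_REDUCTION % TPC_TX_POWER_STEP != 0);
	/* one history window per possible reduced_txp value */
	BUILD_BUG_ON(ARRAY_SIZE(tbl->tpc_win) != TPC_MAX_REDUCTION + 1);
}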
|
||||
enum {
|
||||
@ -315,6 +331,8 @@ struct iwl_lq_sta {
|
||||
bool is_vht;
|
||||
enum ieee80211_band band;
|
||||
|
||||
struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
|
||||
|
||||
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
|
||||
unsigned long active_legacy_rate;
|
||||
unsigned long active_siso_rate;
|
||||
@ -334,8 +352,11 @@ struct iwl_lq_sta {
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
struct dentry *rs_sta_dbgfs_scale_table_file;
|
||||
struct dentry *rs_sta_dbgfs_stats_table_file;
|
||||
struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
|
||||
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
|
||||
struct dentry *rs_sta_dbgfs_reduced_txp_file;
|
||||
u32 dbg_fixed_rate;
|
||||
u8 dbg_fixed_txp_reduction;
|
||||
#endif
|
||||
struct iwl_mvm *drv;
|
||||
|
||||
@ -345,6 +366,9 @@ struct iwl_lq_sta {
|
||||
u32 last_rate_n_flags;
|
||||
/* packets destined for this STA are aggregated */
|
||||
u8 is_agg;
|
||||
|
||||
/* tx power reduce for this sta */
|
||||
int tpc_reduce;
|
||||
};
|
||||
|
||||
/* Initialize station's rate scaling information after adding station */
|
||||
|
@ -60,7 +60,6 @@
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
#include "iwl-trans.h"
|
||||
|
||||
#include "mvm.h"
|
||||
#include "fw-api.h"
|
||||
|
||||
@ -130,42 +129,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
|
||||
|
||||
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
|
||||
|
||||
ieee80211_rx_ni(mvm->hw, skb);
|
||||
}
|
||||
|
||||
static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_phy_info *phy_info,
|
||||
struct ieee80211_rx_status *rx_status)
|
||||
{
|
||||
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
|
||||
u32 agc_a, agc_b;
|
||||
u32 val;
|
||||
|
||||
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
|
||||
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
|
||||
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
|
||||
|
||||
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
|
||||
rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
|
||||
rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
|
||||
|
||||
/*
|
||||
* dBm = rssi dB - agc dB - constant.
|
||||
* Higher AGC (higher radio gain) means lower signal.
|
||||
*/
|
||||
rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
|
||||
rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
|
||||
max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
|
||||
|
||||
IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
|
||||
rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
|
||||
|
||||
rx_status->signal = max_rssi_dbm;
|
||||
rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
|
||||
RX_RES_PHY_FLAGS_ANTENNA)
|
||||
>> RX_RES_PHY_FLAGS_ANTENNA_POS;
|
||||
rx_status->chain_signal[0] = rssi_a_dbm;
|
||||
rx_status->chain_signal[1] = rssi_b_dbm;
|
||||
ieee80211_rx(mvm->hw, skb);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -337,10 +301,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
*/
|
||||
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
|
||||
iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
|
||||
else
|
||||
iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
|
||||
iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
|
||||
|
||||
IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
|
||||
(unsigned long long)rx_status.mactime);
|
||||
@ -394,6 +355,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
|
||||
rx_status.flag |= RX_FLAG_VHT;
|
||||
rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
|
||||
if (rate_n_flags & RATE_MCS_BF_MSK)
|
||||
rx_status.vht_flag |= RX_VHT_FLAG_BF;
|
||||
} else {
|
||||
rx_status.rate_idx =
|
||||
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
|
||||
|
@ -348,7 +348,10 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_scan_params params = {};
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
BUG_ON(mvm->scan_cmd == NULL);
|
||||
|
||||
/* we should have failed registration if scan_cmd was NULL */
|
||||
if (WARN_ON(mvm->scan_cmd == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
|
||||
mvm->scan_status = IWL_MVM_SCAN_OS;
|
||||
@ -567,9 +570,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
|
||||
/* scan status must be locked for proper checking */
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
|
||||
IWL_DEBUG_SCAN(mvm,
|
||||
"Scheduled scan completed, status %s EBS status %s:%d\n",
|
||||
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
|
||||
"completed" : "aborted");
|
||||
"completed" : "aborted", scan_notif->ebs_status ==
|
||||
IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
|
||||
scan_notif->ebs_status);
|
||||
|
||||
|
||||
/* only call mac80211 completion if the stop was initiated by FW */
|
||||
if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
|
||||
@ -577,6 +584,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
|
||||
ieee80211_sched_scan_stopped(mvm->hw);
|
||||
}
|
||||
|
||||
mvm->last_ebs_successful = !scan_notif->ebs_status;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -913,6 +922,11 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
|
||||
scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
|
||||
}
|
||||
|
||||
if (mvm->last_ebs_successful &&
|
||||
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
|
||||
scan_req.flags |=
|
||||
cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
|
||||
sizeof(scan_req), &scan_req);
|
||||
}
|
||||
|
@ -237,9 +237,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
|
||||
.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
|
||||
};
|
||||
|
||||
if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Ignore the call if we are in HW Restart flow, or if the handled
|
||||
* vif is a p2p device.
|
||||
|
@ -66,115 +66,6 @@
|
||||
#include "sta.h"
|
||||
#include "rs.h"
|
||||
|
||||
static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
|
||||
struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
|
||||
{
|
||||
memset(cmd_v5, 0, sizeof(*cmd_v5));
|
||||
|
||||
cmd_v5->add_modify = cmd_v7->add_modify;
|
||||
cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
|
||||
cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
|
||||
memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
|
||||
cmd_v5->sta_id = cmd_v7->sta_id;
|
||||
cmd_v5->modify_mask = cmd_v7->modify_mask;
|
||||
cmd_v5->station_flags = cmd_v7->station_flags;
|
||||
cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
|
||||
cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
|
||||
cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
|
||||
cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
|
||||
cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
|
||||
cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
|
||||
cmd_v5->assoc_id = cmd_v7->assoc_id;
|
||||
cmd_v5->beamform_flags = cmd_v7->beamform_flags;
|
||||
cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
|
||||
}
|
||||
|
||||
static void
|
||||
iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
|
||||
struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
|
||||
u32 mac_id_n_color)
|
||||
{
|
||||
memset(sta_cmd, 0, sizeof(*sta_cmd));
|
||||
|
||||
sta_cmd->sta_id = key_cmd->sta_id;
|
||||
sta_cmd->add_modify = STA_MODE_MODIFY;
|
||||
sta_cmd->modify_mask = STA_MODIFY_KEY;
|
||||
sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
|
||||
|
||||
sta_cmd->key.key_offset = key_cmd->key_offset;
|
||||
sta_cmd->key.key_flags = key_cmd->key_flags;
|
||||
memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
|
||||
sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
|
||||
memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
|
||||
sizeof(sta_cmd->key.tkip_rx_ttak));
|
||||
}
|
||||
|
||||
static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_add_sta_cmd_v7 *cmd,
|
||||
int *status)
|
||||
{
|
||||
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
|
||||
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
|
||||
cmd, status);
|
||||
|
||||
iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
|
||||
|
||||
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
|
||||
&cmd_v5, status);
|
||||
}
|
||||
|
||||
static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
|
||||
struct iwl_mvm_add_sta_cmd_v7 *cmd)
|
||||
{
|
||||
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
|
||||
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
|
||||
sizeof(*cmd), cmd);
|
||||
|
||||
iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
|
||||
&cmd_v5);
|
||||
}
|
||||
|
||||
static int
|
||||
iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_add_sta_key_cmd *cmd,
|
||||
u32 mac_id_n_color,
|
||||
int *status)
|
||||
{
|
||||
struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
|
||||
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
|
||||
sizeof(*cmd), cmd, status);
|
||||
|
||||
iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
|
||||
|
||||
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
|
||||
&sta_cmd, status);
|
||||
}
|
||||
|
||||
static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
|
||||
u32 flags,
|
||||
struct iwl_mvm_add_sta_key_cmd *cmd,
|
||||
u32 mac_id_n_color)
|
||||
{
|
||||
struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
|
||||
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
|
||||
sizeof(*cmd), cmd);
|
||||
|
||||
iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
|
||||
&sta_cmd);
|
||||
}
|
||||
|
||||
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
|
||||
enum nl80211_iftype iftype)
|
||||
{
|
||||
@ -207,7 +98,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
bool update)
|
||||
{
|
||||
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
|
||||
struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
|
||||
struct iwl_mvm_add_sta_cmd add_sta_cmd;
|
||||
int ret;
|
||||
u32 status;
|
||||
u32 agg_size = 0, mpdu_dens = 0;
|
||||
@ -295,7 +186,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
|
||||
|
||||
status = ADD_STA_SUCCESS;
|
||||
ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
|
||||
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
|
||||
&add_sta_cmd, &status);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -380,7 +272,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
|
||||
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
|
||||
bool drain)
|
||||
{
|
||||
struct iwl_mvm_add_sta_cmd_v7 cmd = {};
|
||||
struct iwl_mvm_add_sta_cmd cmd = {};
|
||||
int ret;
|
||||
u32 status;
|
||||
|
||||
@ -393,7 +285,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
|
||||
cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
|
||||
|
||||
status = ADD_STA_SUCCESS;
|
||||
ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
|
||||
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
|
||||
&cmd, &status);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -498,7 +391,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
|
||||
sta_id);
|
||||
continue;
|
||||
}
|
||||
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
|
||||
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
|
||||
clear_bit(sta_id, mvm->sta_drained);
|
||||
}
|
||||
|
||||
@ -520,14 +413,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
|
||||
/* flush its queues here since we are freeing mvm_sta */
|
||||
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
|
||||
|
||||
/*
|
||||
* Put a non-NULL since the fw station isn't removed.
|
||||
* It will be removed after the MAC will be set as
|
||||
* unassoc.
|
||||
*/
|
||||
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
|
||||
ERR_PTR(-EINVAL));
|
||||
|
||||
/* if we are associated - we can't remove the AP STA now */
|
||||
if (vif->bss_conf.assoc)
|
||||
return ret;
|
||||
@ -557,7 +442,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
|
||||
} else {
|
||||
spin_unlock_bh(&mvm_sta->lock);
|
||||
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
|
||||
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
|
||||
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -571,7 +456,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
|
||||
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -593,7 +478,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
|
||||
|
||||
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
|
||||
{
|
||||
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
|
||||
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
|
||||
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
|
||||
sta->sta_id = IWL_MVM_STATION_COUNT;
|
||||
}
|
||||
@ -603,13 +488,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
|
||||
const u8 *addr,
|
||||
u16 mac_id, u16 color)
|
||||
{
|
||||
struct iwl_mvm_add_sta_cmd_v7 cmd;
|
||||
struct iwl_mvm_add_sta_cmd cmd;
|
||||
int ret;
|
||||
u32 status;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.sta_id = sta->sta_id;
|
||||
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
|
||||
color));
|
||||
@ -619,7 +504,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
|
||||
if (addr)
|
||||
memcpy(cmd.addr, addr, ETH_ALEN);
|
||||
|
||||
ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
|
||||
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
|
||||
&cmd, &status);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -753,7 +639,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
int tid, u16 ssn, bool start)
|
||||
{
|
||||
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
|
||||
struct iwl_mvm_add_sta_cmd_v7 cmd = {};
|
||||
struct iwl_mvm_add_sta_cmd cmd = {};
|
||||
int ret;
|
||||
u32 status;
|
||||
|
||||
@ -777,7 +663,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
STA_MODIFY_REMOVE_BA_TID;
|
||||
|
||||
status = ADD_STA_SUCCESS;
|
||||
ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
|
||||
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
|
||||
&cmd, &status);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -812,7 +699,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
int tid, u8 queue, bool start)
|
||||
{
|
||||
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
|
||||
struct iwl_mvm_add_sta_cmd_v7 cmd = {};
|
||||
struct iwl_mvm_add_sta_cmd cmd = {};
|
||||
int ret;
|
||||
u32 status;
|
||||
|
||||
@ -834,7 +721,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
|
||||
|
||||
status = ADD_STA_SUCCESS;
|
||||
ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
|
||||
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
|
||||
&cmd, &status);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1129,12 +1017,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
|
||||
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
|
||||
u32 cmd_flags)
|
||||
{
|
||||
__le16 key_flags;
|
||||
struct iwl_mvm_add_sta_key_cmd cmd = {};
|
||||
__le16 key_flags;
|
||||
int ret, status;
|
||||
u16 keyidx;
|
||||
int i;
|
||||
u32 mac_id_n_color = mvm_sta->mac_id_n_color;
|
||||
|
||||
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
|
||||
STA_KEY_FLG_KEYID_MSK;
|
||||
@ -1167,12 +1054,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
|
||||
|
||||
status = ADD_STA_SUCCESS;
|
||||
if (cmd_flags == CMD_SYNC)
|
||||
ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
|
||||
mac_id_n_color,
|
||||
&status);
|
||||
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
|
||||
&cmd, &status);
|
||||
else
|
||||
ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
|
||||
mac_id_n_color);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
|
||||
switch (status) {
|
||||
case ADD_STA_SUCCESS:
|
||||
@ -1399,9 +1285,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
|
||||
cmd.sta_id = sta_id;
|
||||
|
||||
status = ADD_STA_SUCCESS;
|
||||
ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
|
||||
mvm_sta->mac_id_n_color,
|
||||
&status);
|
||||
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
|
||||
&cmd, &status);
|
||||
|
||||
switch (status) {
|
||||
case ADD_STA_SUCCESS:
|
||||
@ -1448,7 +1333,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_add_sta_cmd_v7 cmd = {
|
||||
struct iwl_mvm_add_sta_cmd cmd = {
|
||||
.add_modify = STA_MODE_MODIFY,
|
||||
.sta_id = mvmsta->sta_id,
|
||||
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
|
||||
@ -1456,7 +1341,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
|
||||
}
|
||||
@ -1468,7 +1353,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
|
||||
bool agg)
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_add_sta_cmd_v7 cmd = {
|
||||
struct iwl_mvm_add_sta_cmd cmd = {
|
||||
.add_modify = STA_MODE_MODIFY,
|
||||
.sta_id = mvmsta->sta_id,
|
||||
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
|
||||
@ -1538,7 +1423,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
|
||||
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
|
||||
}
|
||||
|
||||
ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
|
||||
}
|
||||
|
@ -253,6 +253,8 @@ enum iwl_mvm_agg_state {
* This is basically (last acked packet++).
* @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
* @reduced_tpc: Reduced tx power. Holds the data between the
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
* @state: state of the BA agreement establishment / tear down.
* @txq_id: Tx queue used by the BA session
* @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@ -265,6 +267,7 @@ struct iwl_mvm_tid_data {
u16 next_reclaimed;
/* The rest is Tx AGG related */
u32 rate_n_flags;
u8 reduced_tpc;
enum iwl_mvm_agg_state state;
u16 txq_id;
u16 ssn;
@ -273,67 +273,10 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
return true;
}

/* used to convert from time event API v2 to v1 */
#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
TE_V2_EVENT_SOCIOPATHIC)
static inline u16 te_v2_get_notify(__le16 policy)
{
return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
}

static inline u16 te_v2_get_dep_policy(__le16 policy)
{
return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
TE_V2_PLACEMENT_POS;
}

static inline u16 te_v2_get_absence(__le16 policy)
{
return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
}

static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
struct iwl_time_event_cmd_v1 *cmd_v1)
{
cmd_v1->id_and_color = cmd_v2->id_and_color;
cmd_v1->action = cmd_v2->action;
cmd_v1->id = cmd_v2->id;
cmd_v1->apply_time = cmd_v2->apply_time;
cmd_v1->max_delay = cmd_v2->max_delay;
cmd_v1->depends_on = cmd_v2->depends_on;
cmd_v1->interval = cmd_v2->interval;
cmd_v1->duration = cmd_v2->duration;
if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
else
cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
cmd_v1->interval_reciprocal = 0; /* unused */

cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
}

static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
const struct iwl_time_event_cmd_v2 *cmd)
{
struct iwl_time_event_cmd_v1 cmd_v1;

if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
sizeof(*cmd), cmd);

iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
sizeof(cmd_v1), &cmd_v1);
}


static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_time_event_data *te_data,
struct iwl_time_event_cmd_v2 *te_cmd)
struct iwl_time_event_cmd *te_cmd)
{
static const u8 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
@ -369,7 +312,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_time_event_response, te_data);

ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
sizeof(*te_cmd), te_cmd);
if (ret) {
IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@ -397,7 +341,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
struct iwl_time_event_cmd_v2 time_cmd = {};
struct iwl_time_event_cmd time_cmd = {};

lockdep_assert_held(&mvm->mutex);

@ -453,7 +397,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{
struct iwl_time_event_cmd_v2 time_cmd = {};
struct iwl_time_event_cmd time_cmd = {};
u32 id, uid;
int ret;

@ -490,7 +434,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
sizeof(time_cmd), &time_cmd);
if (WARN_ON(ret))
return;
}
@ -510,7 +455,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
struct iwl_time_event_cmd_v2 time_cmd = {};
struct iwl_time_event_cmd time_cmd = {};

lockdep_assert_held(&mvm->mutex);
if (te_data->running) {
@ -468,13 +468,14 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}

if (params->support_tx_backoff) {
tx_backoff = 0;
tx_backoff = tt->min_backoff;
for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
if (temperature < params->tx_backoff[i].temperature)
break;
tx_backoff = params->tx_backoff[i].backoff;
tx_backoff = max(tt->min_backoff,
params->tx_backoff[i].backoff);
}
if (tx_backoff != 0)
if (tx_backoff != tt->min_backoff)
throttle_enable = true;
if (tt->tx_backoff != tx_backoff)
iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
@ -484,7 +485,8 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
IWL_WARN(mvm,
"Due to high temperature thermal throttling initiated\n");
tt->throttle = true;
} else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
} else if (tt->throttle && !tt->dynamic_smps &&
tt->tx_backoff == tt->min_backoff &&
temperature <= params->tx_protection_exit) {
IWL_WARN(mvm,
"Temperature is back to normal thermal throttling stopped\n");
@ -636,7 +636,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}

ieee80211_tx_status_ni(mvm->hw, skb);
BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
info->status.status_driver_data[0] =
(void *)(uintptr_t)tx_resp->reduced_tpc;

ieee80211_tx_status(mvm->hw, skb);
}

if (txq_id >= mvm->first_agg_queue) {
@ -815,6 +819,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
}

rcu_read_unlock();
@ -928,6 +933,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc;
}
}

@ -937,7 +944,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,

while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
ieee80211_tx_status_ni(mvm->hw, skb);
ieee80211_tx_status(mvm->hw, skb);
}

return 0;
@ -64,6 +64,7 @@

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"

#include "mvm.h"
#include "fw-api-rs.h"
@ -469,6 +470,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}

/* Do not change this output - scripts rely on it */

IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@ -522,7 +525,7 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
u32 ofs, sram_len;
void *sram;

if (!mvm->ucode_loaded || mvm->fw_error_sram)
if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
return;

img = &mvm->fw->img[mvm->cur_ucode];
@ -538,6 +541,47 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
mvm->fw_error_sram_len = sram_len;
}

void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
{
int i, reg_val;
unsigned long flags;

if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
return;

/* reading buffer size */
reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
mvm->fw_error_rxf_len =
(reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;

/* the register holds the value divided by 128 */
mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;

if (!mvm->fw_error_rxf_len)
return;

mvm->fw_error_rxf = kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
if (!mvm->fw_error_rxf) {
mvm->fw_error_rxf_len = 0;
return;
}

if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
kfree(mvm->fw_error_rxf);
mvm->fw_error_rxf = NULL;
mvm->fw_error_rxf_len = 0;
return;
}

for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
i * sizeof(u32));
mvm->fw_error_rxf[i] =
iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
}
iwl_trans_release_nic_access(mvm->trans, &flags);
}

/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
@ -102,7 +102,7 @@ struct iwl_rxq {
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
int need_update;
bool need_update;
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@ -231,7 +231,7 @@ struct iwl_txq {
spinlock_t lock;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
u8 need_update;
bool need_update;
u8 active;
bool ampdu;
};
@ -270,6 +270,9 @@ struct iwl_trans_pcie {
struct iwl_trans *trans;
struct iwl_drv *drv;

struct net_device napi_dev;
struct napi_struct napi;

/* INT ICT Table */
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
@ -362,7 +365,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
@ -145,15 +145,13 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_rxq *rxq)
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 reg;

spin_lock(&rxq->lock);

if (rxq->need_update == 0)
goto exit_unlock;
lockdep_assert_held(&rxq->lock);

/*
* explicitly wake up the NIC if:
@ -169,13 +167,27 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
goto exit_unlock;
rxq->need_update = true;
return;
}
}

rxq->write_actual = round_down(rxq->write, 8);
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
rxq->need_update = 0;
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;

spin_lock(&rxq->lock);

if (!rxq->need_update)
goto exit_unlock;

iwl_pcie_rxq_inc_wr_ptr(trans);
rxq->need_update = false;

exit_unlock:
spin_unlock(&rxq->lock);
@ -236,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
spin_lock(&rxq->lock);
rxq->need_update = 1;
iwl_pcie_rxq_inc_wr_ptr(trans);
spin_unlock(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}

@ -362,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called as a scheduled work item (except for during initialization)
*/
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

spin_lock(&trans_pcie->irq_lock);
iwl_pcie_rxq_restock(trans);
spin_unlock(&trans_pcie->irq_lock);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
iwl_pcie_rxq_alloc_rbs(trans, gfp);

iwl_pcie_rxq_restock(trans);
}
@ -385,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);

iwl_pcie_rx_replenish(trans_pcie->trans);
iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@ -521,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);

iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_replenish(trans, GFP_KERNEL);

iwl_pcie_rx_hw_init(trans, rxq);

spin_lock(&trans_pcie->irq_lock);
rxq->need_update = 1;
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
spin_unlock(&trans_pcie->irq_lock);
spin_lock(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans);
spin_unlock(&rxq->lock);

return 0;
}
@ -673,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
spin_lock(&rxq->lock);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@ -694,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
spin_unlock(&rxq->lock);
}

/*
@ -709,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
u32 count = 8;
int total_empty;

restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@ -743,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
iwl_pcie_rx_replenish_now(trans);
spin_unlock(&rxq->lock);
iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
count = 0;
goto restart;
}
}
}

/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);

if (fill_rx)
iwl_pcie_rx_replenish_now(trans);
iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
else
iwl_pcie_rxq_restock(trans);

if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
}

/*
@ -876,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
u32 i;

lock_map_acquire(&trans->sync_cmd_lockdep_map);

@ -1028,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
iwl_pcie_rxq_check_wrptr(trans);
iwl_pcie_txq_check_wrptrs(trans);

isr_stats->wakeup++;

@ -1068,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);

iwl_pcie_rx_handle(trans);

/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
@ -1082,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT_PERIODIC_ENA);

isr_stats->rx++;

local_bh_disable();
iwl_pcie_rx_handle(trans);
local_bh_enable();
}

/* This "Tx" DMA channel is used only for loading uCode */
@ -103,7 +103,6 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@ -1053,6 +1052,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
WARN_ON(1);
return 0;
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
@ -1079,6 +1084,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,

trans_pcie->command_names = trans_cfg->command_names;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;

/* Initialize NAPI here - it should be before registering to mac80211
* in the opmode but after the HW struct is allocated.
* As this function may be called again in some corner cases don't
* do anything if NAPI was already initialized.
*/
if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
init_dummy_netdev(&trans_pcie->napi_dev);
iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
&trans_pcie->napi_dev,
iwl_pcie_dummy_napi_poll, 64);
}
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
@ -1099,6 +1116,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
pci_disable_device(trans_pcie->pci_dev);
kmem_cache_destroy(trans->dev_cmd_pool);

if (trans_pcie->napi.poll)
netif_napi_del(&trans_pcie->napi);

kfree(trans);
}

@ -1237,7 +1257,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,

#define IWL_FLUSH_WAIT_MS 2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
@ -1250,13 +1270,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)

/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
u8 wr_ptr;

if (cnt == trans_pcie->cmd_queue)
continue;
if (!test_bit(cnt, trans_pcie->queue_used))
continue;
if (!(BIT(cnt) & txq_bm))
continue;

IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
q = &txq->q;
while (q->read_ptr != q->write_ptr && !time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
wr_ptr = ACCESS_ONCE(q->write_ptr);

while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
u8 write_ptr = ACCESS_ONCE(q->write_ptr);

if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
wr_ptr, write_ptr))
return -ETIMEDOUT;
msleep(1);
}

if (q->read_ptr != q->write_ptr) {
IWL_ERR(trans,
@ -1264,6 +1302,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
ret = -ETIMEDOUT;
break;
}
IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}

if (!ret)
@ -287,14 +287,14 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
/*
* iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->q.id;

if (txq->need_update == 0)
return;
lockdep_assert_held(&txq->lock);

/*
* explicitly wake up the NIC if:
@ -317,6 +317,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
txq->need_update = true;
return;
}
}
@ -327,8 +328,23 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
*/
IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
}

txq->need_update = 0;
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;

for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
struct iwl_txq *txq = &trans_pcie->txq[i];

spin_lock(&txq->lock);
if (trans_pcie->txq[i].need_update) {
iwl_pcie_txq_inc_wr_ptr(trans, txq);
trans_pcie->txq[i].need_update = false;
}
spin_unlock(&txq->lock);
}
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@ -542,7 +558,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
{
int ret;

txq->need_update = 0;
txq->need_update = false;

/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
@ -680,7 +696,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
if (trans->cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
trans_pcie->cmd_fifo);
@ -1028,7 +1045,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
}
}

if (q->read_ptr == q->write_ptr) {
if (trans->cfg->base_params->apmg_wake_up_wa &&
q->read_ptr == q->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
WARN_ON(!trans_pcie->cmd_in_flight);
trans_pcie->cmd_in_flight = false;
@ -1392,8 +1410,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
kfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;

txq->need_update = 1;

trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

/* start timer if queue currently empty */
@ -1405,9 +1421,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
* returned.
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set.
*/
if (!trans_pcie->cmd_in_flight) {
if (trans->cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_in_flight) {
trans_pcie->cmd_in_flight = true;
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@ -1661,7 +1679,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
u16 len, tb1_len, tb2_len;
u8 wait_write_ptr = 0;
bool wait_write_ptr;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
u16 wifi_seq;
@ -1762,12 +1780,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx_data(trans->dev, skb,
skb->data + hdr_len, tb2_len);

if (!ieee80211_has_morefrags(fc)) {
txq->need_update = 1;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
wait_write_ptr = ieee80211_has_morefrags(fc);

/* start timer if queue currently empty */
if (txq->need_update && q->read_ptr == q->write_ptr &&
@ -1776,21 +1789,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,

/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);

/*
* At this point the frame is "transmitted" successfully
* and we will get a TX status notification eventually,
* regardless of the value of ret. "ret" only indicates
* whether or not we should update the write pointer.
* and we will get a TX status notification eventually.
*/
if (iwl_queue_space(q) < q->high_mark) {
if (wait_write_ptr) {
txq->need_update = 1;
if (wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
} else {
else
iwl_stop_queue(trans, txq);
}
}
spin_unlock(&txq->lock);
return 0;