Merge tag 'wireless-drivers-next-for-davem-2015-05-21' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
ath10k:

* enable channel 144 on 5 GHz
* enable Adaptive Noise Immunity (ANI) by default
* add Wake on Wireless LAN (WOW) patterns support
* add basic Tunneled Direct Link Setup (TDLS) support
* add multi-channel support for QCA6174
* enable IBSS RSN support
* enable Bluetooth Coexistence whenever the firmware supports it
* add a more versatile way to set bitrates used by the firmware

ath9k:

* spectral scan: add support for multiple FFT frames per report

iwlwifi:

* major rework of the scan code (Luca)
* some work on the thermal code (Chaya Rachel)
* some work on the firmware debugging infrastructure

brcmfmac:

* SDIO suspend and resume fixes
* wiphy band info and changes in regulatory settings
* add support for BCM4324 SDIO and BCM4358 PCIe
* enable support of PCIe devices on router platforms (Hante)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d98c3edcbb
@@ -226,6 +226,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
		chip->of_node	= cc->core->dev.of_node;
 #endif
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4707:
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM53572:
		chip->ngpio	= 32;
@@ -235,16 +236,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
	}

	/*
	 * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
	 * pin numbers. We don't have Device Tree there and we can't really use
	 * relative (per chip) numbers.
	 * So let's use predictable base for BCM47XX and "random" for all other.
	 * Register SoC GPIO devices with absolute GPIO pin base.
	 * On MIPS, we don't have Device Tree and we can't use relative (per chip)
	 * GPIO numbers.
	 * On some ARM devices, user space may want to access some system GPIO
	 * pins directly, which is easier to do with a predictable GPIO base.
	 */
#if IS_BUILTIN(CONFIG_BCM47XX)
	chip->base		= bus->num * BCMA_GPIO_MAX_PINS;
#else
	chip->base		= -1;
#endif
	if (IS_BUILTIN(CONFIG_BCM47XX) ||
	    cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
		chip->base	= bus->num * BCMA_GPIO_MAX_PINS;
	else
		chip->base	= -1;

	err = bcma_gpio_irq_domain_init(cc);
	if (err)
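The effect of the hunk above is a predictable, non-overlapping GPIO number range per SoC-hosted bcma bus. Below is a minimal standalone sketch of the base arithmetic only, assuming BCMA_GPIO_MAX_PINS is 32; it is illustrative and not driver code.

/*
 * Illustrative sketch of the GPIO base selection above, assuming
 * BCMA_GPIO_MAX_PINS == 32: bus 0 then owns GPIOs 0-31, bus 1 owns
 * 32-63, and so on, so LED/button tables can use absolute GPIO
 * numbers regardless of probe order. -1 asks gpiolib for a dynamic base.
 */
#include <stdio.h>

#define BCMA_GPIO_MAX_PINS 32

static int gpio_base_for_bus(int bus_num, int predictable)
{
	/* predictable base for BCM47XX/SoC hosts, dynamic (-1) otherwise */
	return predictable ? bus_num * BCMA_GPIO_MAX_PINS : -1;
}

int main(void)
{
	printf("bus 0 (SoC)   -> base %d\n", gpio_base_for_bus(0, 1)); /* 0   */
	printf("bus 1 (SoC)   -> base %d\n", gpio_base_for_bus(1, 1)); /* 32  */
	printf("PCIe-hosted   -> base %d\n", gpio_base_for_bus(0, 0)); /* -1  */
	return 0;
}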
@@ -251,6 +251,7 @@ void ath_printk(const char *level, const struct ath_common *common,
 * @ATH_DBG_DFS: radar datection
 * @ATH_DBG_WOW: Wake on Wireless
 * @ATH_DBG_DYNACK: dynack handling
 * @ATH_DBG_SPECTRAL_SCAN: FFT spectral scan
 * @ATH_DBG_ANY: enable all debugging
 *
 * The debug level is used to control the amount and type of debugging output
@@ -280,6 +281,7 @@ enum ATH_DEBUG {
	ATH_DBG_WOW		= 0x00020000,
	ATH_DBG_CHAN_CTX	= 0x00040000,
	ATH_DBG_DYNACK		= 0x00080000,
	ATH_DBG_SPECTRAL_SCAN	= 0x00100000,
	ATH_DBG_ANY		= 0xffffffff
};

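The ATH_DEBUG values are single-bit flags, so a debug mask is built by OR-ing them together; the new ATH_DBG_CHAN_CTX bit slots in between the WOW and DYNACK bits. A small standalone sketch of combining and testing such a mask (the DBG_* names here mirror the enum above but are not the kernel header):

/* Standalone illustration of a bitmask debug level, not the kernel header. */
#include <stdio.h>

#define DBG_WOW           0x00020000u
#define DBG_CHAN_CTX      0x00040000u
#define DBG_DYNACK        0x00080000u
#define DBG_SPECTRAL_SCAN 0x00100000u

int main(void)
{
	unsigned int mask = DBG_WOW | DBG_CHAN_CTX;	/* enable two areas */

	if (mask & DBG_CHAN_CTX)
		printf("channel-context debugging enabled\n");
	if (!(mask & DBG_SPECTRAL_SCAN))
		printf("spectral-scan debugging disabled\n");
	return 0;
}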
@@ -10,13 +10,15 @@ ath10k_core-y += mac.o \
		 wmi.o \
		 wmi-tlv.o \
		 bmi.o \
		 hw.o
		 hw.o \
		 p2p.o

ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o

obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
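Because wow.o is only built when CONFIG_PM is set, callers elsewhere in the driver still need the WoW entry points to exist. The usual kernel pattern for such an optional object is a real prototype under the config option and static inline no-op stubs otherwise. The sketch below shows that pattern with hypothetical names (mydrv_*), not the actual ath10k wow.h API:

/*
 * Header-style sketch of the optional-object pattern: real prototype
 * when CONFIG_PM is on, inline no-op stub when it is off, so callers
 * never need #ifdefs. Names are hypothetical.
 */
struct mydrv;	/* stands in for the driver's private struct */

#ifdef CONFIG_PM
int mydrv_wow_init(struct mydrv *dev);
#else
static inline int mydrv_wow_init(struct mydrv *dev)
{
	return 0;	/* WoW support compiled out with CONFIG_PM=n */
}
#endif /* CONFIG_PM */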
@ -482,6 +482,71 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
|
||||
{
|
||||
char filename[100];
|
||||
|
||||
scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
|
||||
ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
|
||||
|
||||
ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
|
||||
if (IS_ERR(ar->board))
|
||||
return PTR_ERR(ar->board);
|
||||
|
||||
ar->board_data = ar->board->data;
|
||||
ar->board_len = ar->board->size;
|
||||
ar->spec_board_loaded = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
|
||||
{
|
||||
if (!ar->hw_params.fw.board) {
|
||||
ath10k_err(ar, "failed to find board file fw entry\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ar->board = ath10k_fetch_fw_file(ar,
|
||||
ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.board);
|
||||
if (IS_ERR(ar->board))
|
||||
return PTR_ERR(ar->board);
|
||||
|
||||
ar->board_data = ar->board->data;
|
||||
ar->board_len = ar->board->size;
|
||||
ar->spec_board_loaded = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_core_fetch_board_file(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (strlen(ar->spec_board_id) > 0) {
|
||||
ret = ath10k_core_fetch_spec_board_file(ar);
|
||||
if (ret) {
|
||||
ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
|
||||
ret);
|
||||
goto generic;
|
||||
}
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
|
||||
ar->spec_board_id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
generic:
|
||||
ret = ath10k_core_fetch_generic_board_file(ar);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
|
||||
{
|
||||
int ret = 0;
|
||||
@ -491,23 +556,6 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ar->hw_params.fw.board == NULL) {
|
||||
ath10k_err(ar, "board data file not defined");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ar->board = ath10k_fetch_fw_file(ar,
|
||||
ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.board);
|
||||
if (IS_ERR(ar->board)) {
|
||||
ret = PTR_ERR(ar->board);
|
||||
ath10k_err(ar, "could not fetch board data (%d)\n", ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
ar->board_data = ar->board->data;
|
||||
ar->board_len = ar->board->size;
|
||||
|
||||
ar->firmware = ath10k_fetch_fw_file(ar,
|
||||
ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.fw);
|
||||
@ -675,6 +723,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
|
||||
ar->wmi.op_version);
|
||||
break;
|
||||
case ATH10K_FW_IE_HTT_OP_VERSION:
|
||||
if (ie_len != sizeof(u32))
|
||||
break;
|
||||
|
||||
version = (__le32 *)data;
|
||||
|
||||
ar->htt.op_version = le32_to_cpup(version);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
|
||||
ar->htt.op_version);
|
||||
break;
|
||||
default:
|
||||
ath10k_warn(ar, "Unknown FW IE: %u\n",
|
||||
le32_to_cpu(hdr->id));
|
||||
@ -695,27 +754,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* now fetch the board file */
|
||||
if (ar->hw_params.fw.board == NULL) {
|
||||
ath10k_err(ar, "board data file not defined");
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
ar->board = ath10k_fetch_fw_file(ar,
|
||||
ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.board);
|
||||
if (IS_ERR(ar->board)) {
|
||||
ret = PTR_ERR(ar->board);
|
||||
ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
|
||||
ar->hw_params.fw.dir, ar->hw_params.fw.board,
|
||||
ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
ar->board_data = ar->board->data;
|
||||
ar->board_len = ar->board->size;
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
@ -730,6 +768,19 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
|
||||
/* calibration file is optional, don't check for any errors */
|
||||
ath10k_fetch_cal_file(ar);
|
||||
|
||||
ret = ath10k_core_fetch_board_file(ar);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to fetch board file: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ar->fw_api = 5;
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
|
||||
|
||||
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
|
||||
if (ret == 0)
|
||||
goto success;
|
||||
|
||||
ar->fw_api = 4;
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
|
||||
|
||||
@ -958,6 +1009,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
|
||||
ar->max_num_stations = TARGET_NUM_STATIONS;
|
||||
ar->max_num_vdevs = TARGET_NUM_VDEVS;
|
||||
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
|
||||
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
|
||||
WMI_STAT_PEER;
|
||||
break;
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_1:
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_2:
|
||||
@ -966,12 +1019,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
|
||||
ar->max_num_stations = TARGET_10X_NUM_STATIONS;
|
||||
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
|
||||
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
|
||||
ar->fw_stats_req_mask = WMI_STAT_PEER;
|
||||
break;
|
||||
case ATH10K_FW_WMI_OP_VERSION_TLV:
|
||||
ar->max_num_peers = TARGET_TLV_NUM_PEERS;
|
||||
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
|
||||
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
|
||||
ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
|
||||
ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
|
||||
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
|
||||
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
|
||||
WMI_STAT_PEER;
|
||||
break;
|
||||
case ATH10K_FW_WMI_OP_VERSION_UNSET:
|
||||
case ATH10K_FW_WMI_OP_VERSION_MAX:
|
||||
@ -979,6 +1037,29 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Backwards compatibility for firmwares without
|
||||
* ATH10K_FW_IE_HTT_OP_VERSION.
|
||||
*/
|
||||
if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
|
||||
switch (ar->wmi.op_version) {
|
||||
case ATH10K_FW_WMI_OP_VERSION_MAIN:
|
||||
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
|
||||
break;
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_1:
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_2:
|
||||
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
|
||||
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
|
||||
break;
|
||||
case ATH10K_FW_WMI_OP_VERSION_TLV:
|
||||
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
|
||||
break;
|
||||
case ATH10K_FW_WMI_OP_VERSION_UNSET:
|
||||
case ATH10K_FW_WMI_OP_VERSION_MAX:
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1080,9 +1161,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
|
||||
|
||||
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
|
||||
status = ath10k_wmi_wait_for_service_ready(ar);
|
||||
if (status <= 0) {
|
||||
if (status) {
|
||||
ath10k_warn(ar, "wmi service ready event not received");
|
||||
status = -ETIMEDOUT;
|
||||
goto err_hif_stop;
|
||||
}
|
||||
}
|
||||
@ -1098,9 +1178,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
|
||||
}
|
||||
|
||||
status = ath10k_wmi_wait_for_unified_ready(ar);
|
||||
if (status <= 0) {
|
||||
if (status) {
|
||||
ath10k_err(ar, "wmi unified ready event not received\n");
|
||||
status = -ETIMEDOUT;
|
||||
goto err_hif_stop;
|
||||
}
|
||||
|
||||
@ -1151,6 +1230,7 @@ EXPORT_SYMBOL(ath10k_core_start);
|
||||
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
|
||||
{
|
||||
int ret;
|
||||
unsigned long time_left;
|
||||
|
||||
reinit_completion(&ar->target_suspend);
|
||||
|
||||
@ -1160,9 +1240,9 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
|
||||
time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
|
||||
|
||||
if (ret == 0) {
|
||||
if (!time_left) {
|
||||
ath10k_warn(ar, "suspend timed out - target pause event never came\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@ -1386,6 +1466,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
|
||||
init_completion(&ar->scan.completed);
|
||||
init_completion(&ar->scan.on_channel);
|
||||
init_completion(&ar->target_suspend);
|
||||
init_completion(&ar->wow.wakeup_completed);
|
||||
|
||||
init_completion(&ar->install_key_done);
|
||||
init_completion(&ar->vdev_setup_done);
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include "../dfs_pattern_detector.h"
|
||||
#include "spectral.h"
|
||||
#include "thermal.h"
|
||||
#include "wow.h"
|
||||
|
||||
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
|
||||
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
|
||||
@ -43,15 +44,16 @@
|
||||
#define ATH10K_SCAN_ID 0
|
||||
#define WMI_READY_TIMEOUT (5 * HZ)
|
||||
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
|
||||
#define ATH10K_NUM_CHANS 38
|
||||
#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
|
||||
#define ATH10K_NUM_CHANS 39
|
||||
|
||||
/* Antenna noise floor */
|
||||
#define ATH10K_DEFAULT_NOISE_FLOOR -95
|
||||
|
||||
#define ATH10K_MAX_NUM_MGMT_PENDING 128
|
||||
|
||||
/* number of failed packets */
|
||||
#define ATH10K_KICKOUT_THRESHOLD 50
|
||||
/* number of failed packets (20 packets with 16 sw reties each) */
|
||||
#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
|
||||
|
||||
/*
|
||||
* Use insanely high numbers to make sure that the firmware implementation
|
||||
@ -82,6 +84,8 @@ struct ath10k_skb_cb {
|
||||
dma_addr_t paddr;
|
||||
u8 eid;
|
||||
u8 vdev_id;
|
||||
enum ath10k_hw_txrx_mode txmode;
|
||||
bool is_protected;
|
||||
|
||||
struct {
|
||||
u8 tid;
|
||||
@ -280,6 +284,15 @@ struct ath10k_sta {
|
||||
#endif
|
||||
};
|
||||
|
||||
struct ath10k_chanctx {
|
||||
/* Used to story copy of chanctx_conf to avoid inconsistencies. Ideally
|
||||
* mac80211 should allow some sort of explicit locking to guarantee
|
||||
* that the publicly available chanctx_conf can be accessed safely at
|
||||
* all times.
|
||||
*/
|
||||
struct ieee80211_chanctx_conf conf;
|
||||
};
|
||||
|
||||
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
|
||||
|
||||
enum ath10k_beacon_state {
|
||||
@ -301,6 +314,7 @@ struct ath10k_vif {
|
||||
enum ath10k_beacon_state beacon_state;
|
||||
void *beacon_buf;
|
||||
dma_addr_t beacon_paddr;
|
||||
unsigned long tx_paused; /* arbitrary values defined by target */
|
||||
|
||||
struct ath10k *ar;
|
||||
struct ieee80211_vif *vif;
|
||||
@ -334,13 +348,13 @@ struct ath10k_vif {
|
||||
} ap;
|
||||
} u;
|
||||
|
||||
u8 fixed_rate;
|
||||
u8 fixed_nss;
|
||||
u8 force_sgi;
|
||||
bool use_cts_prot;
|
||||
int num_legacy_stations;
|
||||
int txpower;
|
||||
struct wmi_wmm_params_all_arg wmm_params;
|
||||
struct work_struct ap_csa_work;
|
||||
struct delayed_work connection_loss_work;
|
||||
struct cfg80211_bitrate_mask bitrate_mask;
|
||||
};
|
||||
|
||||
struct ath10k_vif_iter {
|
||||
@ -440,6 +454,12 @@ enum ath10k_fw_features {
|
||||
*/
|
||||
ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
|
||||
|
||||
/* Some firmware revisions have an incomplete WoWLAN implementation
|
||||
* despite WMI service bit being advertised. This feature flag is used
|
||||
* to distinguish whether WoWLAN is really supported or not.
|
||||
*/
|
||||
ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
|
||||
|
||||
/* keep last */
|
||||
ATH10K_FW_FEATURE_COUNT,
|
||||
};
|
||||
@ -498,6 +518,11 @@ static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
|
||||
return "unknown";
|
||||
}
|
||||
|
||||
enum ath10k_tx_pause_reason {
|
||||
ATH10K_TX_PAUSE_Q_FULL,
|
||||
ATH10K_TX_PAUSE_MAX,
|
||||
};
|
||||
|
||||
struct ath10k {
|
||||
struct ath_common ath_common;
|
||||
struct ieee80211_hw *hw;
|
||||
@ -511,12 +536,15 @@ struct ath10k {
|
||||
u32 fw_version_minor;
|
||||
u16 fw_version_release;
|
||||
u16 fw_version_build;
|
||||
u32 fw_stats_req_mask;
|
||||
u32 phy_capability;
|
||||
u32 hw_min_tx_power;
|
||||
u32 hw_max_tx_power;
|
||||
u32 ht_cap_info;
|
||||
u32 vht_cap_info;
|
||||
u32 num_rf_chains;
|
||||
/* protected by conf_mutex */
|
||||
bool ani_enabled;
|
||||
|
||||
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
|
||||
|
||||
@ -565,6 +593,9 @@ struct ath10k {
|
||||
|
||||
const struct firmware *cal_file;
|
||||
|
||||
char spec_board_id[100];
|
||||
bool spec_board_loaded;
|
||||
|
||||
int fw_api;
|
||||
enum ath10k_cal_mode cal_mode;
|
||||
|
||||
@ -593,6 +624,7 @@ struct ath10k {
|
||||
struct cfg80211_chan_def chandef;
|
||||
|
||||
unsigned long long free_vdev_map;
|
||||
struct ath10k_vif *monitor_arvif;
|
||||
bool monitor;
|
||||
int monitor_vdev_id;
|
||||
bool monitor_started;
|
||||
@ -633,6 +665,7 @@ struct ath10k {
|
||||
int max_num_peers;
|
||||
int max_num_stations;
|
||||
int max_num_vdevs;
|
||||
int max_num_tdls_vdevs;
|
||||
|
||||
struct work_struct offchan_tx_work;
|
||||
struct sk_buff_head offchan_tx_queue;
|
||||
@ -655,6 +688,8 @@ struct ath10k {
|
||||
|
||||
struct dfs_pattern_detector *dfs_detector;
|
||||
|
||||
unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
|
||||
|
||||
#ifdef CONFIG_ATH10K_DEBUGFS
|
||||
struct ath10k_debug debug;
|
||||
#endif
|
||||
@ -686,6 +721,7 @@ struct ath10k {
|
||||
} stats;
|
||||
|
||||
struct ath10k_thermal thermal;
|
||||
struct ath10k_wow wow;
|
||||
|
||||
/* must be last */
|
||||
u8 drv_priv[0] __aligned(sizeof(void *));
|
||||
|
@ -124,10 +124,14 @@ EXPORT_SYMBOL(ath10k_info);
|
||||
|
||||
void ath10k_print_driver_info(struct ath10k *ar)
|
||||
{
|
||||
ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
|
||||
ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
|
||||
ar->hw_params.name,
|
||||
ar->target_version,
|
||||
ar->chip_id,
|
||||
(strlen(ar->spec_board_id) > 0 ? ", " : ""),
|
||||
ar->spec_board_id,
|
||||
(strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
|
||||
? " fallback" : ""),
|
||||
ar->hw->wiphy->fw_version,
|
||||
ar->fw_api,
|
||||
ar->htt.target_version_major,
|
||||
@ -380,12 +384,12 @@ unlock:
|
||||
|
||||
static int ath10k_debug_fw_stats_request(struct ath10k *ar)
|
||||
{
|
||||
unsigned long timeout;
|
||||
unsigned long timeout, time_left;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(1*HZ);
|
||||
timeout = jiffies + msecs_to_jiffies(1 * HZ);
|
||||
|
||||
ath10k_debug_fw_stats_reset(ar);
|
||||
|
||||
@ -395,18 +399,16 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
|
||||
|
||||
reinit_completion(&ar->debug.fw_stats_complete);
|
||||
|
||||
ret = ath10k_wmi_request_stats(ar,
|
||||
WMI_STAT_PDEV |
|
||||
WMI_STAT_VDEV |
|
||||
WMI_STAT_PEER);
|
||||
ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "could not request stats (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
|
||||
1*HZ);
|
||||
if (ret == 0)
|
||||
time_left =
|
||||
wait_for_completion_timeout(&ar->debug.fw_stats_complete,
|
||||
1 * HZ);
|
||||
if (!time_left)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
@ -1708,6 +1710,61 @@ static int ath10k_debug_cal_data_release(struct inode *inode,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t ath10k_write_ani_enable(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath10k *ar = file->private_data;
|
||||
int ret;
|
||||
u8 enable;
|
||||
|
||||
if (kstrtou8_from_user(user_buf, count, 0, &enable))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
if (ar->ani_enabled == enable) {
|
||||
ret = count;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
|
||||
enable);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
|
||||
goto exit;
|
||||
}
|
||||
ar->ani_enabled = enable;
|
||||
|
||||
ret = count;
|
||||
|
||||
exit:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath10k *ar = file->private_data;
|
||||
int len = 0;
|
||||
char buf[32];
|
||||
|
||||
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
|
||||
ar->ani_enabled);
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
}
|
||||
|
||||
static const struct file_operations fops_ani_enable = {
|
||||
.read = ath10k_read_ani_enable,
|
||||
.write = ath10k_write_ani_enable,
|
||||
.open = simple_open,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static const struct file_operations fops_cal_data = {
|
||||
.open = ath10k_debug_cal_data_open,
|
||||
.read = ath10k_debug_cal_data_read,
|
||||
@ -1991,6 +2048,50 @@ static const struct file_operations fops_pktlog_filter = {
|
||||
.open = simple_open
|
||||
};
|
||||
|
||||
static ssize_t ath10k_write_quiet_period(struct file *file,
|
||||
const char __user *ubuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath10k *ar = file->private_data;
|
||||
u32 period;
|
||||
|
||||
if (kstrtouint_from_user(ubuf, count, 0, &period))
|
||||
return -EINVAL;
|
||||
|
||||
if (period < ATH10K_QUIET_PERIOD_MIN) {
|
||||
ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n",
|
||||
period);
|
||||
return -EINVAL;
|
||||
}
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
ar->thermal.quiet_period = period;
|
||||
ath10k_thermal_set_throttling(ar);
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
char buf[32];
|
||||
struct ath10k *ar = file->private_data;
|
||||
int len = 0;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
|
||||
ar->thermal.quiet_period);
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
|
||||
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
|
||||
}
|
||||
|
||||
static const struct file_operations fops_quiet_period = {
|
||||
.read = ath10k_read_quiet_period,
|
||||
.write = ath10k_write_quiet_period,
|
||||
.open = simple_open
|
||||
};
|
||||
|
||||
int ath10k_debug_create(struct ath10k *ar)
|
||||
{
|
||||
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
|
||||
@ -2068,6 +2169,9 @@ int ath10k_debug_register(struct ath10k *ar)
|
||||
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
|
||||
ar, &fops_cal_data);
|
||||
|
||||
debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
|
||||
ar->debug.debugfs_phy, ar, &fops_ani_enable);
|
||||
|
||||
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
|
||||
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
|
||||
|
||||
@ -2088,6 +2192,9 @@ int ath10k_debug_register(struct ath10k *ar)
|
||||
debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
|
||||
ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
|
||||
|
||||
debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
|
||||
ar->debug.debugfs_phy, ar, &fops_quiet_period);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -86,21 +86,6 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
|
||||
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
|
||||
}
|
||||
|
||||
/* assumes tx_lock is held */
|
||||
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
|
||||
{
|
||||
struct ath10k *ar = ep->htc->ar;
|
||||
|
||||
if (!ep->tx_credit_flow_enabled)
|
||||
return false;
|
||||
if (ep->tx_credits >= ep->tx_credits_per_max_message)
|
||||
return false;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
|
||||
ep->eid);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
@ -111,13 +96,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
|
||||
hdr->eid = ep->eid;
|
||||
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
|
||||
hdr->flags = 0;
|
||||
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
|
||||
|
||||
spin_lock_bh(&ep->htc->tx_lock);
|
||||
hdr->seq_no = ep->seq_no++;
|
||||
|
||||
if (ath10k_htc_ep_need_credit_update(ep))
|
||||
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
|
||||
|
||||
spin_unlock_bh(&ep->htc->tx_lock);
|
||||
}
|
||||
|
||||
@ -414,7 +396,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
|
||||
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
|
||||
|
||||
switch (__le16_to_cpu(msg->hdr.message_id)) {
|
||||
default:
|
||||
case ATH10K_HTC_MSG_READY_ID:
|
||||
case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
|
||||
/* handle HTC control message */
|
||||
if (completion_done(&htc->ctl_resp)) {
|
||||
/*
|
||||
@ -438,6 +421,10 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
|
||||
break;
|
||||
case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
|
||||
htc->htc_ops.target_send_suspend_complete(ar);
|
||||
break;
|
||||
default:
|
||||
ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
|
||||
break;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
@ -548,6 +535,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
|
||||
{
|
||||
struct ath10k *ar = htc->ar;
|
||||
int i, status = 0;
|
||||
unsigned long time_left;
|
||||
struct ath10k_htc_svc_conn_req conn_req;
|
||||
struct ath10k_htc_svc_conn_resp conn_resp;
|
||||
struct ath10k_htc_msg *msg;
|
||||
@ -555,9 +543,9 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
|
||||
u16 credit_count;
|
||||
u16 credit_size;
|
||||
|
||||
status = wait_for_completion_timeout(&htc->ctl_resp,
|
||||
ATH10K_HTC_WAIT_TIMEOUT_HZ);
|
||||
if (status == 0) {
|
||||
time_left = wait_for_completion_timeout(&htc->ctl_resp,
|
||||
ATH10K_HTC_WAIT_TIMEOUT_HZ);
|
||||
if (!time_left) {
|
||||
/* Workaround: In some cases the PCI HIF doesn't
|
||||
* receive interrupt for the control response message
|
||||
* even if the buffer was completed. It is suspected
|
||||
@ -569,10 +557,11 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
|
||||
for (i = 0; i < CE_COUNT; i++)
|
||||
ath10k_hif_send_complete_check(htc->ar, i, 1);
|
||||
|
||||
status = wait_for_completion_timeout(&htc->ctl_resp,
|
||||
ATH10K_HTC_WAIT_TIMEOUT_HZ);
|
||||
time_left =
|
||||
wait_for_completion_timeout(&htc->ctl_resp,
|
||||
ATH10K_HTC_WAIT_TIMEOUT_HZ);
|
||||
|
||||
if (status == 0)
|
||||
if (!time_left)
|
||||
status = -ETIMEDOUT;
|
||||
}
|
||||
|
||||
@ -646,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
|
||||
struct sk_buff *skb;
|
||||
unsigned int max_msg_size = 0;
|
||||
int length, status;
|
||||
unsigned long time_left;
|
||||
bool disable_credit_flow_ctrl = false;
|
||||
u16 message_id, service_id, flags = 0;
|
||||
u8 tx_alloc = 0;
|
||||
@ -701,10 +691,10 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
|
||||
}
|
||||
|
||||
/* wait for response */
|
||||
status = wait_for_completion_timeout(&htc->ctl_resp,
|
||||
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
|
||||
if (status == 0) {
|
||||
ath10k_err(ar, "Service connect timeout: %d\n", status);
|
||||
time_left = wait_for_completion_timeout(&htc->ctl_resp,
|
||||
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
|
||||
if (!time_left) {
|
||||
ath10k_err(ar, "Service connect timeout\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
|
@ -22,6 +22,86 @@
|
||||
#include "core.h"
|
||||
#include "debug.h"
|
||||
|
||||
static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
|
||||
[HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
|
||||
HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
|
||||
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
|
||||
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
|
||||
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
|
||||
[HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
|
||||
};
|
||||
|
||||
static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
|
||||
[HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
|
||||
[HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
|
||||
[HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
|
||||
[HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
|
||||
[HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
|
||||
[HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
|
||||
[HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
|
||||
[HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
|
||||
[HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
|
||||
[HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
|
||||
[HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
|
||||
[HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
|
||||
[HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
|
||||
[HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
|
||||
[HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
|
||||
[HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
|
||||
[HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
|
||||
[HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
|
||||
[HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
|
||||
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
|
||||
};
|
||||
|
||||
static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
|
||||
[HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
|
||||
[HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
|
||||
[HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
|
||||
[HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
|
||||
[HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
|
||||
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
|
||||
[HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
|
||||
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
|
||||
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
|
||||
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
|
||||
[HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
|
||||
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
|
||||
[HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
|
||||
[HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
|
||||
HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
|
||||
[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
|
||||
};
|
||||
|
||||
int ath10k_htt_connect(struct ath10k_htt *htt)
|
||||
{
|
||||
struct ath10k_htc_svc_conn_req conn_req;
|
||||
@ -66,6 +146,24 @@ int ath10k_htt_init(struct ath10k *ar)
|
||||
8 + /* llc snap */
|
||||
2; /* ip4 dscp or ip6 priority */
|
||||
|
||||
switch (ar->htt.op_version) {
|
||||
case ATH10K_FW_HTT_OP_VERSION_10_1:
|
||||
ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
|
||||
ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
|
||||
break;
|
||||
case ATH10K_FW_HTT_OP_VERSION_TLV:
|
||||
ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
|
||||
ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
|
||||
break;
|
||||
case ATH10K_FW_HTT_OP_VERSION_MAIN:
|
||||
ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
|
||||
ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
|
||||
break;
|
||||
case ATH10K_FW_HTT_OP_VERSION_MAX:
|
||||
case ATH10K_FW_HTT_OP_VERSION_UNSET:
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,9 @@
|
||||
#include <net/mac80211.h>
|
||||
|
||||
#include "htc.h"
|
||||
#include "hw.h"
|
||||
#include "rx_desc.h"
|
||||
#include "hw.h"
|
||||
|
||||
enum htt_dbg_stats_type {
|
||||
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
|
||||
@ -271,35 +273,108 @@ enum htt_mgmt_tx_status {
|
||||
|
||||
/*=== target -> host messages ===============================================*/
|
||||
|
||||
enum htt_t2h_msg_type {
|
||||
HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
|
||||
HTT_T2H_MSG_TYPE_RX_IND = 0x1,
|
||||
HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
|
||||
HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
|
||||
HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
|
||||
HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
|
||||
HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
|
||||
HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
|
||||
HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
|
||||
HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
|
||||
HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
|
||||
HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
|
||||
HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
|
||||
HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
|
||||
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
|
||||
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
|
||||
HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
|
||||
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
|
||||
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
|
||||
enum htt_main_t2h_msg_type {
|
||||
HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
|
||||
HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
|
||||
HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
|
||||
HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
|
||||
HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
|
||||
HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
|
||||
HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
|
||||
HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
|
||||
HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
|
||||
HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
|
||||
HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
|
||||
HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
|
||||
HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
|
||||
HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
|
||||
HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
|
||||
HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
|
||||
HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
|
||||
HTT_MAIN_T2H_MSG_TYPE_TEST,
|
||||
/* keep this last */
|
||||
HTT_MAIN_T2H_NUM_MSGS
|
||||
};
|
||||
|
||||
enum htt_10x_t2h_msg_type {
|
||||
HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
|
||||
HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
|
||||
HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
|
||||
HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
|
||||
HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
|
||||
HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
|
||||
HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
|
||||
HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
|
||||
HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
|
||||
HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
|
||||
HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
|
||||
HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
|
||||
HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
|
||||
HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
|
||||
HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
|
||||
HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
|
||||
HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
|
||||
HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
|
||||
HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
|
||||
/* keep this last */
|
||||
HTT_10X_T2H_NUM_MSGS
|
||||
};
|
||||
|
||||
enum htt_tlv_t2h_msg_type {
|
||||
HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
|
||||
HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
|
||||
HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
|
||||
HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
|
||||
HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
|
||||
HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
|
||||
HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
|
||||
HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
|
||||
HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
|
||||
HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
|
||||
HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
|
||||
/* 0x13 reservd */
|
||||
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
|
||||
HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
|
||||
HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
|
||||
HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
|
||||
HTT_TLV_T2H_MSG_TYPE_TEST,
|
||||
/* keep this last */
|
||||
HTT_TLV_T2H_NUM_MSGS
|
||||
};
|
||||
|
||||
/* FIXME: Do not depend on this event id. Numbering of this event id is
|
||||
* broken across different firmware revisions and HTT version fails to
|
||||
* indicate this.
|
||||
*/
|
||||
enum htt_t2h_msg_type {
|
||||
HTT_T2H_MSG_TYPE_VERSION_CONF,
|
||||
HTT_T2H_MSG_TYPE_RX_IND,
|
||||
HTT_T2H_MSG_TYPE_RX_FLUSH,
|
||||
HTT_T2H_MSG_TYPE_PEER_MAP,
|
||||
HTT_T2H_MSG_TYPE_PEER_UNMAP,
|
||||
HTT_T2H_MSG_TYPE_RX_ADDBA,
|
||||
HTT_T2H_MSG_TYPE_RX_DELBA,
|
||||
HTT_T2H_MSG_TYPE_TX_COMPL_IND,
|
||||
HTT_T2H_MSG_TYPE_PKTLOG,
|
||||
HTT_T2H_MSG_TYPE_STATS_CONF,
|
||||
HTT_T2H_MSG_TYPE_RX_FRAG_IND,
|
||||
HTT_T2H_MSG_TYPE_SEC_IND,
|
||||
HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
|
||||
HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
|
||||
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
|
||||
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
|
||||
HTT_T2H_MSG_TYPE_RX_PN_IND,
|
||||
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
|
||||
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
|
||||
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
|
||||
HTT_T2H_MSG_TYPE_CHAN_CHANGE,
|
||||
HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
|
||||
HTT_T2H_MSG_TYPE_AGGR_CONF,
|
||||
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
|
||||
HTT_T2H_MSG_TYPE_TEST,
|
||||
|
||||
/* keep this last */
|
||||
HTT_T2H_NUM_MSGS
|
||||
};
|
||||
@ -1222,6 +1297,7 @@ struct htt_tx_done {
|
||||
u32 msdu_id;
|
||||
bool discard;
|
||||
bool no_ack;
|
||||
bool success;
|
||||
};
|
||||
|
||||
struct htt_peer_map_event {
|
||||
@ -1248,6 +1324,10 @@ struct ath10k_htt {
|
||||
u8 target_version_major;
|
||||
u8 target_version_minor;
|
||||
struct completion target_version_received;
|
||||
enum ath10k_fw_htt_op_version op_version;
|
||||
|
||||
const enum htt_t2h_msg_type *t2h_msg_types;
|
||||
u32 t2h_msg_types_max;
|
||||
|
||||
struct {
|
||||
/*
|
||||
|
@ -637,58 +637,21 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct rfc1042_hdr {
|
||||
u8 llc_dsap;
|
||||
u8 llc_ssap;
|
||||
u8 llc_ctrl;
|
||||
u8 snap_oui[3];
|
||||
__be16 snap_type;
|
||||
} __packed;
|
||||
|
||||
struct amsdu_subframe_hdr {
|
||||
u8 dst[ETH_ALEN];
|
||||
u8 src[ETH_ALEN];
|
||||
__be16 len;
|
||||
} __packed;
|
||||
|
||||
static const u8 rx_legacy_rate_idx[] = {
|
||||
3, /* 0x00 - 11Mbps */
|
||||
2, /* 0x01 - 5.5Mbps */
|
||||
1, /* 0x02 - 2Mbps */
|
||||
0, /* 0x03 - 1Mbps */
|
||||
3, /* 0x04 - 11Mbps */
|
||||
2, /* 0x05 - 5.5Mbps */
|
||||
1, /* 0x06 - 2Mbps */
|
||||
0, /* 0x07 - 1Mbps */
|
||||
10, /* 0x08 - 48Mbps */
|
||||
8, /* 0x09 - 24Mbps */
|
||||
6, /* 0x0A - 12Mbps */
|
||||
4, /* 0x0B - 6Mbps */
|
||||
11, /* 0x0C - 54Mbps */
|
||||
9, /* 0x0D - 36Mbps */
|
||||
7, /* 0x0E - 18Mbps */
|
||||
5, /* 0x0F - 9Mbps */
|
||||
};
|
||||
|
||||
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
|
||||
struct ieee80211_rx_status *status,
|
||||
struct htt_rx_desc *rxd)
|
||||
{
|
||||
enum ieee80211_band band;
|
||||
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
|
||||
struct ieee80211_supported_band *sband;
|
||||
u8 cck, rate, bw, sgi, mcs, nss;
|
||||
u8 preamble = 0;
|
||||
u32 info1, info2, info3;
|
||||
|
||||
/* Band value can't be set as undefined but freq can be 0 - use that to
|
||||
* determine whether band is provided.
|
||||
*
|
||||
* FIXME: Perhaps this can go away if CCK rate reporting is a little
|
||||
* reworked?
|
||||
*/
|
||||
if (!status->freq)
|
||||
return;
|
||||
|
||||
band = status->band;
|
||||
info1 = __le32_to_cpu(rxd->ppdu_start.info1);
|
||||
info2 = __le32_to_cpu(rxd->ppdu_start.info2);
|
||||
info3 = __le32_to_cpu(rxd->ppdu_start.info3);
|
||||
@ -697,31 +660,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
|
||||
|
||||
switch (preamble) {
|
||||
case HTT_RX_LEGACY:
|
||||
/* To get legacy rate index band is required. Since band can't
|
||||
* be undefined check if freq is non-zero.
|
||||
*/
|
||||
if (!status->freq)
|
||||
return;
|
||||
|
||||
cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
|
||||
rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
|
||||
rate_idx = 0;
|
||||
rate &= ~RX_PPDU_START_RATE_FLAG;
|
||||
|
||||
if (rate < 0x08 || rate > 0x0F)
|
||||
break;
|
||||
|
||||
switch (band) {
|
||||
case IEEE80211_BAND_2GHZ:
|
||||
if (cck)
|
||||
rate &= ~BIT(3);
|
||||
rate_idx = rx_legacy_rate_idx[rate];
|
||||
break;
|
||||
case IEEE80211_BAND_5GHZ:
|
||||
rate_idx = rx_legacy_rate_idx[rate];
|
||||
/* We are using same rate table registering
|
||||
HW - ath10k_rates[]. In case of 5GHz skip
|
||||
CCK rates, so -4 here */
|
||||
rate_idx -= 4;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
status->rate_idx = rate_idx;
|
||||
sband = &ar->mac.sbands[status->band];
|
||||
status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
|
||||
break;
|
||||
case HTT_RX_HT:
|
||||
case HTT_RX_HT_WITH_TXBF:
|
||||
@ -773,8 +723,87 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
|
||||
}
|
||||
}
|
||||
|
||||
static struct ieee80211_channel *
|
||||
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
|
||||
{
|
||||
struct ath10k_peer *peer;
|
||||
struct ath10k_vif *arvif;
|
||||
struct cfg80211_chan_def def;
|
||||
u16 peer_id;
|
||||
|
||||
lockdep_assert_held(&ar->data_lock);
|
||||
|
||||
if (!rxd)
|
||||
return NULL;
|
||||
|
||||
if (rxd->attention.flags &
|
||||
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
|
||||
return NULL;
|
||||
|
||||
if (!(rxd->msdu_end.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
|
||||
return NULL;
|
||||
|
||||
peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
|
||||
RX_MPDU_START_INFO0_PEER_IDX);
|
||||
|
||||
peer = ath10k_peer_find_by_id(ar, peer_id);
|
||||
if (!peer)
|
||||
return NULL;
|
||||
|
||||
arvif = ath10k_get_arvif(ar, peer->vdev_id);
|
||||
if (WARN_ON_ONCE(!arvif))
|
||||
return NULL;
|
||||
|
||||
if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
|
||||
return NULL;
|
||||
|
||||
return def.chan;
|
||||
}
|
||||
|
||||
static struct ieee80211_channel *
|
||||
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
|
||||
{
|
||||
struct ath10k_vif *arvif;
|
||||
struct cfg80211_chan_def def;
|
||||
|
||||
lockdep_assert_held(&ar->data_lock);
|
||||
|
||||
list_for_each_entry(arvif, &ar->arvifs, list) {
|
||||
if (arvif->vdev_id == vdev_id &&
|
||||
ath10k_mac_vif_chan(arvif->vif, &def) == 0)
|
||||
return def.chan;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
|
||||
struct ieee80211_chanctx_conf *conf,
|
||||
void *data)
|
||||
{
|
||||
struct cfg80211_chan_def *def = data;
|
||||
|
||||
*def = conf->def;
|
||||
}
|
||||
|
||||
static struct ieee80211_channel *
|
||||
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
|
||||
{
|
||||
struct cfg80211_chan_def def = {};
|
||||
|
||||
ieee80211_iter_chan_contexts_atomic(ar->hw,
|
||||
ath10k_htt_rx_h_any_chan_iter,
|
||||
&def);
|
||||
|
||||
return def.chan;
|
||||
}
|
||||
|
||||
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
|
||||
struct ieee80211_rx_status *status)
|
||||
struct ieee80211_rx_status *status,
|
||||
struct htt_rx_desc *rxd,
|
||||
u32 vdev_id)
|
||||
{
|
||||
struct ieee80211_channel *ch;
|
||||
|
||||
@ -782,6 +811,12 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
|
||||
ch = ar->scan_channel;
|
||||
if (!ch)
|
||||
ch = ar->rx_channel;
|
||||
if (!ch)
|
||||
ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
|
||||
if (!ch)
|
||||
ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
|
||||
if (!ch)
|
||||
ch = ath10k_htt_rx_h_any_channel(ar);
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
|
||||
if (!ch)
|
||||
@ -819,7 +854,8 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
|
||||
|
||||
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
|
||||
struct sk_buff_head *amsdu,
|
||||
struct ieee80211_rx_status *status)
|
||||
struct ieee80211_rx_status *status,
|
||||
u32 vdev_id)
|
||||
{
|
||||
struct sk_buff *first;
|
||||
struct htt_rx_desc *rxd;
|
||||
@ -851,7 +887,7 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
|
||||
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
|
||||
|
||||
ath10k_htt_rx_h_signal(ar, status, rxd);
|
||||
ath10k_htt_rx_h_channel(ar, status);
|
||||
ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
|
||||
ath10k_htt_rx_h_rates(ar, status, rxd);
|
||||
}
|
||||
|
||||
@ -1522,7 +1558,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
|
||||
break;
|
||||
}
|
||||
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
|
||||
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
|
||||
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
|
||||
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
|
||||
@ -1569,7 +1605,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
|
||||
return;
|
||||
}
|
||||
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
|
||||
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
|
||||
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
|
||||
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
|
||||
@ -1598,6 +1634,7 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
|
||||
tx_done.no_ack = true;
|
||||
break;
|
||||
case HTT_DATA_TX_STATUS_OK:
|
||||
tx_done.success = true;
|
||||
break;
|
||||
case HTT_DATA_TX_STATUS_DISCARD:
|
||||
case HTT_DATA_TX_STATUS_POSTPONE:
|
||||
@ -1796,7 +1833,7 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
|
||||
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
|
||||
|
||||
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
|
||||
ath10k_htt_rx_h_channel(ar, status);
|
||||
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
|
||||
ath10k_process_rx(ar, status, msdu);
|
||||
}
|
||||
}
|
||||
@ -1869,7 +1906,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
* better to report something than nothing though. This
|
||||
* should still give an idea about rx rate to the user.
|
||||
*/
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
|
||||
ath10k_htt_rx_h_filter(ar, &amsdu, status);
|
||||
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
|
||||
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
|
||||
@ -1892,6 +1929,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
struct htt_resp *resp = (struct htt_resp *)skb->data;
|
||||
enum htt_t2h_msg_type type;
|
||||
|
||||
/* confirm alignment */
|
||||
if (!IS_ALIGNED((unsigned long)skb->data, 4))
|
||||
@ -1899,7 +1937,16 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
|
||||
resp->hdr.msg_type);
|
||||
switch (resp->hdr.msg_type) {
|
||||
|
||||
if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
|
||||
resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
|
||||
dev_kfree_skb_any(skb);
|
||||
return;
|
||||
}
|
||||
type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
|
||||
|
||||
switch (type) {
|
||||
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
|
||||
htt->target_version_major = resp->ver_resp.major;
|
||||
htt->target_version_minor = resp->ver_resp.minor;
|
||||
@ -1937,6 +1984,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
|
||||
switch (status) {
|
||||
case HTT_MGMT_TX_STATUS_OK:
|
||||
tx_done.success = true;
|
||||
break;
|
||||
case HTT_MGMT_TX_STATUS_RETRY:
|
||||
tx_done.no_ack = true;
|
||||
@ -1976,7 +2024,6 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
break;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_TEST:
|
||||
/* FIX THIS */
|
||||
break;
|
||||
case HTT_T2H_MSG_TYPE_STATS_CONF:
|
||||
trace_ath10k_htt_stats(ar, skb->data, skb->len);
|
||||
@ -2018,11 +2065,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
return;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
|
||||
/* FIXME: This WMI-TLV event is overlapping with 10.2
|
||||
* CHAN_CHANGE - both being 0xF. Neither is being used in
|
||||
* practice so no immediate action is necessary. Nevertheless
|
||||
* HTT may need an abstraction layer like WMI has one day.
|
||||
*/
|
||||
break;
|
||||
case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
|
||||
break;
|
||||
default:
|
||||
ath10k_warn(ar, "htt event (%d) not handled\n",
|
||||
|
@ -26,7 +26,7 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
|
||||
{
|
||||
htt->num_pending_tx--;
|
||||
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
|
||||
ieee80211_wake_queues(htt->ar->hw);
|
||||
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
|
||||
}
|
||||
|
||||
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
|
||||
@ -49,7 +49,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
|
||||
|
||||
htt->num_pending_tx++;
|
||||
if (htt->num_pending_tx == htt->max_num_pending_tx)
|
||||
ieee80211_stop_queues(htt->ar->hw);
|
||||
ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
|
||||
|
||||
exit:
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
@ -420,9 +420,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
int res;
|
||||
u8 flags0 = 0;
|
||||
u16 msdu_id, flags1 = 0;
|
||||
dma_addr_t paddr;
|
||||
u32 frags_paddr;
|
||||
bool use_frags;
|
||||
dma_addr_t paddr = 0;
|
||||
u32 frags_paddr = 0;
|
||||
|
||||
res = ath10k_htt_tx_inc_pending(htt);
|
||||
if (res)
|
||||
@ -440,12 +439,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
prefetch_len = min(htt->prefetch_len, msdu->len);
|
||||
prefetch_len = roundup(prefetch_len, 4);
|
||||
|
||||
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
|
||||
* of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
|
||||
* fragment list host driver specifies directly frame pointer. */
|
||||
use_frags = htt->target_version_major < 3 ||
|
||||
!ieee80211_is_mgmt(hdr->frame_control);
|
||||
|
||||
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
|
||||
&paddr);
|
||||
if (!skb_cb->htt.txbuf) {
|
||||
@ -466,7 +459,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
if (res)
|
||||
goto err_free_txbuf;
|
||||
|
||||
if (likely(use_frags)) {
|
||||
switch (skb_cb->txmode) {
|
||||
case ATH10K_HW_TXRX_RAW:
|
||||
case ATH10K_HW_TXRX_NATIVE_WIFI:
|
||||
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
|
||||
/* pass through */
|
||||
case ATH10K_HW_TXRX_ETHERNET:
|
||||
frags = skb_cb->htt.txbuf->frags;
|
||||
|
||||
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
|
||||
@ -474,15 +472,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
frags[1].paddr = 0;
|
||||
frags[1].len = 0;
|
||||
|
||||
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
|
||||
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
|
||||
flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
|
||||
|
||||
frags_paddr = skb_cb->htt.txbuf_paddr;
|
||||
} else {
|
||||
break;
|
||||
case ATH10K_HW_TXRX_MGMT:
|
||||
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
|
||||
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
|
||||
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
|
||||
|
||||
frags_paddr = skb_cb->paddr;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Normally all commands go through HTC which manages tx credits for
|
||||
@ -508,11 +508,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
prefetch_len);
|
||||
skb_cb->htt.txbuf->htc_hdr.flags = 0;
|
||||
|
||||
if (!ieee80211_has_protected(hdr->frame_control))
|
||||
if (!skb_cb->is_protected)
|
||||
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
|
||||
|
||||
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
|
||||
|
||||
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
|
||||
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
|
||||
if (msdu->ip_summed == CHECKSUM_PARTIAL) {
|
||||
|
@@ -78,6 +78,9 @@ enum qca6174_chip_id_rev {
/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
#define ATH10K_FW_API4_FILE "firmware-4.bin"

/* HTT id conflict fix for management frames over HTT */
#define ATH10K_FW_API5_FILE "firmware-5.bin"

#define ATH10K_FW_UTF_FILE "utf.bin"

/* includes also the null byte */
@@ -104,6 +107,11 @@ enum ath10k_fw_ie_type {
	 * FW API 4 and above.
	 */
	ATH10K_FW_IE_WMI_OP_VERSION = 5,

	/* HTT "operations" interface version, 32 bit value. Supported from
	 * FW API 5 and above.
	 */
	ATH10K_FW_IE_HTT_OP_VERSION = 6,
};

enum ath10k_fw_wmi_op_version {
@@ -119,6 +127,20 @@ enum ath10k_fw_wmi_op_version {
	ATH10K_FW_WMI_OP_VERSION_MAX,
};

enum ath10k_fw_htt_op_version {
	ATH10K_FW_HTT_OP_VERSION_UNSET = 0,

	ATH10K_FW_HTT_OP_VERSION_MAIN = 1,

	/* also used in 10.2 and 10.2.4 branches */
	ATH10K_FW_HTT_OP_VERSION_10_1 = 2,

	ATH10K_FW_HTT_OP_VERSION_TLV = 3,

	/* keep last */
	ATH10K_FW_HTT_OP_VERSION_MAX,
};

enum ath10k_hw_rev {
	ATH10K_HW_QCA988X,
	ATH10K_HW_QCA6174,
@@ -180,6 +202,27 @@ struct ath10k_pktlog_hdr {
	u8 payload[0];
} __packed;

enum ath10k_hw_rate_ofdm {
	ATH10K_HW_RATE_OFDM_48M = 0,
	ATH10K_HW_RATE_OFDM_24M,
	ATH10K_HW_RATE_OFDM_12M,
	ATH10K_HW_RATE_OFDM_6M,
	ATH10K_HW_RATE_OFDM_54M,
	ATH10K_HW_RATE_OFDM_36M,
	ATH10K_HW_RATE_OFDM_18M,
	ATH10K_HW_RATE_OFDM_9M,
};

enum ath10k_hw_rate_cck {
	ATH10K_HW_RATE_CCK_LP_11M = 0,
	ATH10K_HW_RATE_CCK_LP_5_5M,
	ATH10K_HW_RATE_CCK_LP_2M,
	ATH10K_HW_RATE_CCK_LP_1M,
	ATH10K_HW_RATE_CCK_SP_11M,
	ATH10K_HW_RATE_CCK_SP_5_5M,
	ATH10K_HW_RATE_CCK_SP_2M,
};

/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -223,7 +266,7 @@ struct ath10k_pktlog_hdr {
#define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
#define TARGET_10X_AST_SKID_LIMIT 128
#define TARGET_10X_NUM_STATIONS 128
#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
			      (TARGET_10X_NUM_VDEVS))
@@ -256,13 +299,13 @@ struct ath10k_pktlog_hdr {
#define TARGET_10_2_DMA_BURST_SIZE 1

/* Target specific defines for WMI-TLV firmware */
#define TARGET_TLV_NUM_VDEVS 3
#define TARGET_TLV_NUM_VDEVS 4
#define TARGET_TLV_NUM_STATIONS 32
#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
			      (TARGET_TLV_NUM_VDEVS) + \
			      2)
#define TARGET_TLV_NUM_PEERS 35
#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22

/* Number of Copy Engines supported */
#define CE_COUNT 8
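The ATH10K_FW_API*_FILE names above version the firmware interface files. A loader can probe from the newest interface file it understands down to the oldest; the sketch below illustrates that idea with a hypothetical try_load() callback and is not the driver's actual firmware fetch path.

#include <stdbool.h>
#include <stdio.h>

#define FW_API_MAX 5
#define FW_API_MIN 2

static bool try_load(const char *name)
{
	/* stand-in for a request_firmware()-style loader */
	printf("probing %s\n", name);
	return false; /* pretend nothing is installed */
}

static int load_newest_fw_api(void)
{
	char name[32];
	int api;

	for (api = FW_API_MAX; api >= FW_API_MIN; api--) {
		snprintf(name, sizeof(name), "firmware-%d.bin", api);
		if (try_load(name))
			return api;
	}
	return -1; /* no firmware interface file found */
}

int main(void)
{
	return load_newest_fw_api() < 0;
}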
File diff suppressed because it is too large
@@ -23,11 +23,22 @@

#define WEP_KEYID_SHIFT 6

enum wmi_tlv_tx_pause_id;
enum wmi_tlv_tx_pause_action;

struct ath10k_generic_iter {
	struct ath10k *ar;
	int ret;
};

struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
@@ -45,6 +56,24 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
void ath10k_drain_tx(struct ath10k *ar);
bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
				    u8 keyidx);
int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
			struct cfg80211_chan_def *def);

void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
				enum wmi_tlv_tx_pause_id pause_id,
				enum wmi_tlv_tx_pause_action action);

u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
			     u8 hw_rate);
u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
			     u32 bitrate);

void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);

static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
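mac.h above declares struct rfc1042_hdr, the LLC/SNAP header used for ethernet-style decapsulation. For illustration, here is a sketch that fills such a header with the standard RFC 1042 values for a given EtherType; the struct below mirrors the declaration but uses plain stdint types and is not the driver's code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rfc1042_hdr {
	uint8_t llc_dsap;
	uint8_t llc_ssap;
	uint8_t llc_ctrl;
	uint8_t snap_oui[3];
	uint16_t snap_type;	/* big-endian on the wire */
} __attribute__((packed));

static uint16_t to_be16(uint16_t v)
{
	union { uint16_t u; uint8_t b[2]; } out;

	out.b[0] = (uint8_t)(v >> 8);
	out.b[1] = (uint8_t)(v & 0xff);
	return out.u;
}

static void rfc1042_fill(struct rfc1042_hdr *hdr, uint16_t ethertype)
{
	hdr->llc_dsap = 0xaa;			/* SNAP */
	hdr->llc_ssap = 0xaa;			/* SNAP */
	hdr->llc_ctrl = 0x03;			/* unnumbered information */
	memset(hdr->snap_oui, 0, sizeof(hdr->snap_oui));
	hdr->snap_type = to_be16(ethertype);
}

int main(void)
{
	struct rfc1042_hdr hdr;
	unsigned char raw[sizeof(hdr)];

	rfc1042_fill(&hdr, 0x0800);		/* IPv4 */
	memcpy(raw, &hdr, sizeof(hdr));
	printf("snap_type bytes on the wire: %02x %02x\n", raw[6], raw[7]);
	return 0;
}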
drivers/net/wireless/ath/ath10k/p2p.c (new file, 156 lines)
@ -0,0 +1,156 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "core.h"
|
||||
#include "wmi.h"
|
||||
#include "mac.h"
|
||||
#include "p2p.h"
|
||||
|
||||
static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
|
||||
const struct wmi_p2p_noa_info *noa)
|
||||
{
|
||||
struct ieee80211_p2p_noa_attr *noa_attr;
|
||||
u8 ctwindow_oppps = noa->ctwindow_oppps;
|
||||
u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
|
||||
bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
|
||||
__le16 *noa_attr_len;
|
||||
u16 attr_len;
|
||||
u8 noa_descriptors = noa->num_descriptors;
|
||||
int i;
|
||||
|
||||
/* P2P IE */
|
||||
data[0] = WLAN_EID_VENDOR_SPECIFIC;
|
||||
data[1] = len - 2;
|
||||
data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
|
||||
data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
|
||||
data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
|
||||
data[5] = WLAN_OUI_TYPE_WFA_P2P;
|
||||
|
||||
/* NOA ATTR */
|
||||
data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
|
||||
noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
|
||||
noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
|
||||
|
||||
noa_attr->index = noa->index;
|
||||
noa_attr->oppps_ctwindow = ctwindow;
|
||||
if (oppps)
|
||||
noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
|
||||
|
||||
for (i = 0; i < noa_descriptors; i++) {
|
||||
noa_attr->desc[i].count =
|
||||
__le32_to_cpu(noa->descriptors[i].type_count);
|
||||
noa_attr->desc[i].duration = noa->descriptors[i].duration;
|
||||
noa_attr->desc[i].interval = noa->descriptors[i].interval;
|
||||
noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
|
||||
}
|
||||
|
||||
attr_len = 2; /* index + oppps_ctwindow */
|
||||
attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
|
||||
*noa_attr_len = __cpu_to_le16(attr_len);
|
||||
}
|
||||
|
||||
static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
|
||||
{
|
||||
size_t len = 0;
|
||||
|
||||
if (!noa->num_descriptors &&
|
||||
!(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
|
||||
return 0;
|
||||
|
||||
len += 1 + 1 + 4; /* EID + len + OUI */
|
||||
len += 1 + 2; /* noa attr + attr len */
|
||||
len += 1 + 1; /* index + oppps_ctwindow */
|
||||
len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
|
||||
|
||||
return len;
|
||||
}
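For a quick cross-check of the length computed above (EID, length, OUI, attribute header, index/CT window, then the descriptors), here is the same arithmetic as a standalone sketch; the 13-byte descriptor size is an assumption spelled out in the code, while the driver itself relies on sizeof().

#include <stddef.h>
#include <stdio.h>

#define NOA_DESC_SIZE 13 /* assumed: 1 + 4 + 4 + 4 bytes, packed */

static size_t p2p_noa_ie_len(unsigned int num_descriptors)
{
	size_t len = 0;

	len += 1 + 1 + 4;                       /* EID + len + OUI(3) + OUI type */
	len += 1 + 2;                           /* NoA attr id + attr len */
	len += 1 + 1;                           /* index + oppps_ctwindow */
	len += num_descriptors * NOA_DESC_SIZE; /* NoA descriptors */

	return len;
}

int main(void)
{
	printf("1 descriptor -> %zu bytes\n", p2p_noa_ie_len(1)); /* 24 */
	return 0;
}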
|
||||
|
||||
static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
|
||||
size_t len)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
|
||||
lockdep_assert_held(&ar->data_lock);
|
||||
|
||||
kfree(arvif->u.ap.noa_data);
|
||||
|
||||
arvif->u.ap.noa_data = ie;
|
||||
arvif->u.ap.noa_len = len;
|
||||
}
|
||||
|
||||
static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
|
||||
const struct wmi_p2p_noa_info *noa)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
void *ie;
|
||||
size_t len;
|
||||
|
||||
lockdep_assert_held(&ar->data_lock);
|
||||
|
||||
ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
|
||||
|
||||
len = ath10k_p2p_noa_ie_len_compute(noa);
|
||||
if (!len)
|
||||
return;
|
||||
|
||||
ie = kmalloc(len, GFP_ATOMIC);
|
||||
if (!ie)
|
||||
return;
|
||||
|
||||
ath10k_p2p_noa_ie_fill(ie, len, noa);
|
||||
ath10k_p2p_noa_ie_assign(arvif, ie, len);
|
||||
}
|
||||
|
||||
void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
|
||||
const struct wmi_p2p_noa_info *noa)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
__ath10k_p2p_noa_update(arvif, noa);
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
}
|
||||
|
||||
struct ath10k_p2p_noa_arg {
|
||||
u32 vdev_id;
|
||||
const struct wmi_p2p_noa_info *noa;
|
||||
};
|
||||
|
||||
static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
||||
struct ath10k_p2p_noa_arg *arg = data;
|
||||
|
||||
if (arvif->vdev_id != arg->vdev_id)
|
||||
return;
|
||||
|
||||
ath10k_p2p_noa_update(arvif, arg->noa);
|
||||
}
|
||||
|
||||
void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
|
||||
const struct wmi_p2p_noa_info *noa)
|
||||
{
|
||||
struct ath10k_p2p_noa_arg arg = {
|
||||
.vdev_id = vdev_id,
|
||||
.noa = noa,
|
||||
};
|
||||
|
||||
ieee80211_iterate_active_interfaces_atomic(ar->hw,
|
||||
IEEE80211_IFACE_ITER_NORMAL,
|
||||
ath10k_p2p_noa_update_vdev_iter,
|
||||
&arg);
|
||||
}
|
drivers/net/wireless/ath/ath10k/p2p.h (new file, 28 lines)
@@ -0,0 +1,28 @@
/*
 * Copyright (c) 2015 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _P2P_H
#define _P2P_H

struct ath10k_vif;
struct wmi_p2p_noa_info;

void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
			   const struct wmi_p2p_noa_info *noa);
void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
				      const struct wmi_p2p_noa_info *noa);

#endif
@ -113,7 +113,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
|
||||
.flags = CE_ATTR_FLAGS,
|
||||
.src_nentries = 0,
|
||||
.src_sz_max = 2048,
|
||||
.dest_nentries = 32,
|
||||
.dest_nentries = 128,
|
||||
},
|
||||
|
||||
/* CE3: host->target WMI */
|
||||
@ -183,7 +183,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
|
||||
{
|
||||
.pipenum = __cpu_to_le32(2),
|
||||
.pipedir = __cpu_to_le32(PIPEDIR_IN),
|
||||
.nentries = __cpu_to_le32(32),
|
||||
.nentries = __cpu_to_le32(64),
|
||||
.nbytes_max = __cpu_to_le32(2048),
|
||||
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
|
||||
.reserved = __cpu_to_le32(0),
|
||||
@ -819,6 +819,21 @@ static int ath10k_pci_wake_wait(struct ath10k *ar)
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/* The rule is host is forbidden from accessing device registers while it's
|
||||
* asleep. Currently ath10k_pci_wake() and ath10k_pci_sleep() calls aren't
|
||||
* balanced and the device is kept awake all the time. This is intended for a
|
||||
* simpler solution for the following problems:
|
||||
*
|
||||
* * device can enter sleep during s2ram without the host knowing,
|
||||
*
|
||||
* * irq handlers access registers which is a problem if other device asserts
|
||||
* a shared irq line when ath10k is between hif_power_down() and
|
||||
* hif_power_up().
|
||||
*
|
||||
* FIXME: If power consumption is a concern (and there are *real* gains) then a
|
||||
* refcounted wake/sleep needs to be implemented.
|
||||
*/
|
||||
|
||||
static int ath10k_pci_wake(struct ath10k *ar)
|
||||
{
|
||||
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
|
||||
@ -1524,12 +1539,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
|
||||
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
|
||||
case QCA6174_HW_1_0_CHIP_ID_REV:
|
||||
case QCA6174_HW_1_1_CHIP_ID_REV:
|
||||
case QCA6174_HW_2_1_CHIP_ID_REV:
|
||||
case QCA6174_HW_2_2_CHIP_ID_REV:
|
||||
return 3;
|
||||
case QCA6174_HW_1_3_CHIP_ID_REV:
|
||||
return 2;
|
||||
case QCA6174_HW_2_1_CHIP_ID_REV:
|
||||
case QCA6174_HW_2_2_CHIP_ID_REV:
|
||||
return 6;
|
||||
case QCA6174_HW_3_0_CHIP_ID_REV:
|
||||
case QCA6174_HW_3_1_CHIP_ID_REV:
|
||||
case QCA6174_HW_3_2_CHIP_ID_REV:
|
||||
@ -2034,28 +2048,13 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
|
||||
/* Currently hif_power_up performs effectively a reset and hif_stop
|
||||
* resets the chip as well so there's no point in resetting here.
|
||||
*/
|
||||
|
||||
ath10k_pci_sleep(ar);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
#define ATH10K_PCI_PM_CONTROL 0x44
|
||||
|
||||
static int ath10k_pci_hif_suspend(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
struct pci_dev *pdev = ar_pci->pdev;
|
||||
u32 val;
|
||||
|
||||
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
|
||||
|
||||
if ((val & 0x000000ff) != 0x3) {
|
||||
pci_save_state(pdev);
|
||||
pci_disable_device(pdev);
|
||||
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
|
||||
(val & 0xffffff00) | 0x03);
|
||||
}
|
||||
ath10k_pci_sleep(ar);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2065,25 +2064,24 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
struct pci_dev *pdev = ar_pci->pdev;
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
|
||||
|
||||
if ((val & 0x000000ff) != 0) {
|
||||
pci_restore_state(pdev);
|
||||
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
|
||||
val & 0xffffff00);
|
||||
/*
|
||||
* Suspend/Resume resets the PCI configuration space,
|
||||
* so we have to re-disable the RETRY_TIMEOUT register (0x41)
|
||||
* to keep PCI Tx retries from interfering with C3 CPU state
|
||||
*/
|
||||
pci_read_config_dword(pdev, 0x40, &val);
|
||||
|
||||
if ((val & 0x0000ff00) != 0)
|
||||
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
|
||||
ret = ath10k_pci_wake(ar);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to wake device up on resume: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
/* Suspend/Resume resets the PCI configuration space, so we have to
|
||||
* re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
|
||||
* from interfering with C3 CPU state. pci_restore_state won't help
|
||||
* here since it only restores the first 64 bytes pci config header.
|
||||
*/
|
||||
pci_read_config_dword(pdev, 0x40, &val);
|
||||
if ((val & 0x0000ff00) != 0)
|
||||
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -2177,6 +2175,13 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
|
||||
{
|
||||
struct ath10k *ar = arg;
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
int ret;
|
||||
|
||||
ret = ath10k_pci_wake(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
if (ar_pci->num_msi_intrs == 0) {
|
||||
if (!ath10k_pci_irq_pending(ar))
|
||||
@ -2621,6 +2626,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
||||
ar_pci->dev = &pdev->dev;
|
||||
ar_pci->ar = ar;
|
||||
|
||||
if (pdev->subsystem_vendor || pdev->subsystem_device)
|
||||
scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
|
||||
"%04x:%04x:%04x:%04x",
|
||||
pdev->vendor, pdev->device,
|
||||
pdev->subsystem_vendor, pdev->subsystem_device);
|
||||
|
||||
spin_lock_init(&ar_pci->ce_lock);
|
||||
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
|
||||
(unsigned long)ar);
|
||||
@ -2678,11 +2689,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
||||
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
|
||||
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
|
||||
pdev->device, chip_id);
|
||||
goto err_sleep;
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
ath10k_pci_sleep(ar);
|
||||
|
||||
ret = ath10k_core_register(ar, chip_id);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to register driver core: %d\n", ret);
|
||||
@ -2770,7 +2779,19 @@ module_exit(ath10k_pci_exit);
|
||||
MODULE_AUTHOR("Qualcomm Atheros");
|
||||
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
||||
/* QCA988x 2.0 firmware files */
|
||||
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
|
||||
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
|
||||
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
|
||||
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
|
||||
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
|
||||
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
|
||||
|
||||
/* QCA6174 2.1 firmware files */
|
||||
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
|
||||
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
|
||||
|
||||
/* QCA6174 3.1 firmware files */
|
||||
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
|
||||
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
|
||||
|
@ -661,6 +661,28 @@ struct rx_msdu_end {
|
||||
#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
|
||||
#define RX_PPDU_START_INFO5_SERVICE_LSB 0
|
||||
|
||||
/* No idea what this flag means. It seems to be always set in rate. */
|
||||
#define RX_PPDU_START_RATE_FLAG BIT(3)
|
||||
|
||||
enum rx_ppdu_start_rate {
|
||||
RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
|
||||
RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
|
||||
RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
|
||||
RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
|
||||
RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
|
||||
RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
|
||||
RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
|
||||
RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
|
||||
|
||||
RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
|
||||
RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
|
||||
RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
|
||||
RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
|
||||
RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
|
||||
RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
|
||||
RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
|
||||
};
|
||||
|
||||
struct rx_ppdu_start {
|
||||
struct {
|
||||
u8 pri20_mhz;
|
||||
|
@ -23,102 +23,50 @@
|
||||
#include "debug.h"
|
||||
#include "wmi-ops.h"
|
||||
|
||||
static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
|
||||
enum wmi_vdev_type type)
|
||||
static int
|
||||
ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
|
||||
unsigned long *state)
|
||||
{
|
||||
struct ath10k_vif *arvif;
|
||||
int count = 0;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
list_for_each_entry(arvif, &ar->arvifs, list) {
|
||||
if (!arvif->is_started)
|
||||
continue;
|
||||
|
||||
if (!arvif->is_up)
|
||||
continue;
|
||||
|
||||
if (arvif->vdev_type != type)
|
||||
continue;
|
||||
|
||||
count++;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
|
||||
unsigned long *state)
|
||||
{
|
||||
*state = ATH10K_QUIET_DUTY_CYCLE_MAX;
|
||||
*state = ATH10K_THERMAL_THROTTLE_MAX;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
|
||||
unsigned long *state)
|
||||
static int
|
||||
ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
|
||||
unsigned long *state)
|
||||
{
|
||||
struct ath10k *ar = cdev->devdata;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
*state = ar->thermal.duty_cycle;
|
||||
*state = ar->thermal.throttle_state;
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
|
||||
unsigned long duty_cycle)
|
||||
static int
|
||||
ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
|
||||
unsigned long throttle_state)
|
||||
{
|
||||
struct ath10k *ar = cdev->devdata;
|
||||
u32 period, duration, enabled;
|
||||
int num_bss, ret = 0;
|
||||
|
||||
if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
|
||||
ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
|
||||
throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
if (ar->state != ATH10K_STATE_ON) {
|
||||
ret = -ENETDOWN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
|
||||
ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
|
||||
duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/* TODO: Right now, thermal mitigation is handled only for single/multi
|
||||
* vif AP mode. Since quiet param is not validated in STA mode, it needs
|
||||
* to be investigated further to handle multi STA and multi-vif (AP+STA)
|
||||
* mode properly.
|
||||
*/
|
||||
num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
|
||||
if (!num_bss) {
|
||||
ath10k_warn(ar, "no active AP interfaces\n");
|
||||
ret = -ENETDOWN;
|
||||
goto out;
|
||||
}
|
||||
period = max(ATH10K_QUIET_PERIOD_MIN,
|
||||
(ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
|
||||
duration = (period * duty_cycle) / 100;
|
||||
enabled = duration ? 1 : 0;
|
||||
|
||||
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
|
||||
ATH10K_QUIET_START_OFFSET,
|
||||
enabled);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
|
||||
period, duration, enabled, ret);
|
||||
goto out;
|
||||
}
|
||||
ar->thermal.duty_cycle = duty_cycle;
|
||||
out:
|
||||
ar->thermal.throttle_state = throttle_state;
|
||||
ath10k_thermal_set_throttling(ar);
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct thermal_cooling_device_ops ath10k_thermal_ops = {
|
||||
.get_max_state = ath10k_thermal_get_max_dutycycle,
|
||||
.get_cur_state = ath10k_thermal_get_cur_dutycycle,
|
||||
.set_cur_state = ath10k_thermal_set_cur_dutycycle,
|
||||
.get_max_state = ath10k_thermal_get_max_throttle_state,
|
||||
.get_cur_state = ath10k_thermal_get_cur_throttle_state,
|
||||
.set_cur_state = ath10k_thermal_set_cur_throttle_state,
|
||||
};
|
||||
|
||||
static ssize_t ath10k_thermal_show_temp(struct device *dev,
|
||||
@ -127,6 +75,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
|
||||
{
|
||||
struct ath10k *ar = dev_get_drvdata(dev);
|
||||
int ret, temperature;
|
||||
unsigned long time_left;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
@ -148,9 +97,9 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
|
||||
ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
|
||||
if (ret == 0) {
|
||||
time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
|
||||
ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
|
||||
if (!time_left) {
|
||||
ath10k_warn(ar, "failed to synchronize thermal read\n");
|
||||
ret = -ETIMEDOUT;
|
||||
goto out;
|
||||
@ -184,6 +133,32 @@ static struct attribute *ath10k_hwmon_attrs[] = {
|
||||
};
|
||||
ATTRIBUTE_GROUPS(ath10k_hwmon);
|
||||
|
||||
void ath10k_thermal_set_throttling(struct ath10k *ar)
|
||||
{
|
||||
u32 period, duration, enabled;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
|
||||
return;
|
||||
|
||||
if (ar->state != ATH10K_STATE_ON)
|
||||
return;
|
||||
|
||||
period = ar->thermal.quiet_period;
|
||||
duration = (period * ar->thermal.throttle_state) / 100;
|
||||
enabled = duration ? 1 : 0;
|
||||
|
||||
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
|
||||
ATH10K_QUIET_START_OFFSET,
|
||||
enabled);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
|
||||
period, duration, enabled, ret);
|
||||
}
|
||||
}
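The throttle state set through the cooling device is applied as a quiet-time duty cycle: the quiet duration is throttle_state percent of the quiet period, and quiet mode is only enabled when that duration is non-zero. A standalone sketch of that arithmetic, not driver code:

#include <stdio.h>

struct quiet_cfg {
	unsigned int period;	/* ms */
	unsigned int duration;	/* ms */
	unsigned int enabled;
};

static struct quiet_cfg throttle_to_quiet(unsigned int period,
					  unsigned int throttle_state)
{
	struct quiet_cfg cfg;

	cfg.period = period;
	cfg.duration = (period * throttle_state) / 100;
	cfg.enabled = cfg.duration ? 1 : 0;
	return cfg;
}

int main(void)
{
	/* e.g. default 100 ms quiet period at 30% throttling -> 30 ms quiet */
	struct quiet_cfg cfg = throttle_to_quiet(100, 30);

	printf("period %u ms, duration %u ms, enabled %u\n",
	       cfg.period, cfg.duration, cfg.enabled);
	return 0;
}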
|
||||
|
||||
int ath10k_thermal_register(struct ath10k *ar)
|
||||
{
|
||||
struct thermal_cooling_device *cdev;
|
||||
@ -202,11 +177,12 @@ int ath10k_thermal_register(struct ath10k *ar)
|
||||
ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
|
||||
"cooling_device");
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to create thermal symlink\n");
|
||||
ath10k_err(ar, "failed to create cooling device symlink\n");
|
||||
goto err_cooling_destroy;
|
||||
}
|
||||
|
||||
ar->thermal.cdev = cdev;
|
||||
ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
|
||||
|
||||
/* Do not register hwmon device when temperature reading is not
|
||||
* supported by firmware
|
||||
@ -231,7 +207,7 @@ int ath10k_thermal_register(struct ath10k *ar)
|
||||
return 0;
|
||||
|
||||
err_remove_link:
|
||||
sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
|
||||
sysfs_remove_link(&ar->dev->kobj, "cooling_device");
|
||||
err_cooling_destroy:
|
||||
thermal_cooling_device_unregister(cdev);
|
||||
return ret;
|
||||
|
@@ -19,16 +19,17 @@
#define ATH10K_QUIET_PERIOD_DEFAULT 100
#define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10
#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
#define ATH10K_HWMON_NAME_LEN 15
#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
#define ATH10K_THERMAL_THROTTLE_MAX 100

struct ath10k_thermal {
	struct thermal_cooling_device *cdev;
	struct completion wmi_sync;

	/* protected by conf_mutex */
	u32 duty_cycle;
	u32 throttle_state;
	u32 quiet_period;
	/* temperature value in Celsius degrees
	 * protected by data_lock
	 */
@@ -39,6 +40,7 @@ struct ath10k_thermal {
int ath10k_thermal_register(struct ath10k *ar);
void ath10k_thermal_unregister(struct ath10k *ar);
void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
void ath10k_thermal_set_throttling(struct ath10k *ar);
#else
static inline int ath10k_thermal_register(struct ath10k *ar)
{
@@ -54,5 +56,9 @@ static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
{
}

static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
{
}

#endif
#endif /* _THERMAL_ */
|
||||
|
@ -21,11 +21,16 @@
|
||||
#include "core.h"
|
||||
|
||||
#if !defined(_TRACE_H_)
|
||||
static inline u32 ath10k_frm_hdr_len(const void *buf)
|
||||
static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
|
||||
{
|
||||
const struct ieee80211_hdr *hdr = buf;
|
||||
|
||||
return ieee80211_hdrlen(hdr->frame_control);
|
||||
/* In some rare cases (e.g. fcs error) device reports frame buffer
|
||||
* shorter than what frame header implies (e.g. len = 0). The buffer
|
||||
* can still be accessed so do a simple min() to guarantee caller
|
||||
* doesn't get value greater than len.
|
||||
*/
|
||||
return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -46,7 +51,7 @@ static inline void trace_ ## name(proto) {}
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM ath10k
|
||||
|
||||
#define ATH10K_MSG_MAX 200
|
||||
#define ATH10K_MSG_MAX 400
|
||||
|
||||
DECLARE_EVENT_CLASS(ath10k_log_event,
|
||||
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
|
||||
@ -360,13 +365,13 @@ DECLARE_EVENT_CLASS(ath10k_hdr_event,
|
||||
__string(device, dev_name(ar->dev))
|
||||
__string(driver, dev_driver_string(ar->dev))
|
||||
__field(size_t, len)
|
||||
__dynamic_array(u8, data, ath10k_frm_hdr_len(data))
|
||||
__dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(device, dev_name(ar->dev));
|
||||
__assign_str(driver, dev_driver_string(ar->dev));
|
||||
__entry->len = ath10k_frm_hdr_len(data);
|
||||
__entry->len = ath10k_frm_hdr_len(data, len);
|
||||
memcpy(__get_dynamic_array(data), data, __entry->len);
|
||||
),
|
||||
|
||||
@ -387,15 +392,16 @@ DECLARE_EVENT_CLASS(ath10k_payload_event,
|
||||
__string(device, dev_name(ar->dev))
|
||||
__string(driver, dev_driver_string(ar->dev))
|
||||
__field(size_t, len)
|
||||
__dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
|
||||
__dynamic_array(u8, payload, (len -
|
||||
ath10k_frm_hdr_len(data, len)))
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(device, dev_name(ar->dev));
|
||||
__assign_str(driver, dev_driver_string(ar->dev));
|
||||
__entry->len = len - ath10k_frm_hdr_len(data);
|
||||
__entry->len = len - ath10k_frm_hdr_len(data, len);
|
||||
memcpy(__get_dynamic_array(payload),
|
||||
data + ath10k_frm_hdr_len(data), __entry->len);
|
||||
data + ath10k_frm_hdr_len(data, len), __entry->len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
|
@@ -55,8 +55,10 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
		   tx_done->msdu_id, !!tx_done->discard,
		   !!tx_done->no_ack, !!tx_done->success);

	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
@@ -97,6 +99,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
	if (tx_done->no_ack)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */
|
||||
|
||||
|
@ -45,6 +45,10 @@ struct wmi_ops {
|
||||
struct wmi_rdy_ev_arg *arg);
|
||||
int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
|
||||
struct ath10k_fw_stats *stats);
|
||||
int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
|
||||
struct wmi_roam_ev_arg *arg);
|
||||
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
|
||||
struct wmi_wow_ev_arg *arg);
|
||||
|
||||
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
|
||||
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
|
||||
@ -81,7 +85,8 @@ struct wmi_ops {
|
||||
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
|
||||
const struct wmi_wmm_params_all_arg *arg);
|
||||
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN]);
|
||||
const u8 peer_addr[ETH_ALEN],
|
||||
enum wmi_peer_type peer_type);
|
||||
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN]);
|
||||
struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
|
||||
@ -148,6 +153,27 @@ struct wmi_ops {
|
||||
u32 num_ac);
|
||||
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
|
||||
const struct wmi_sta_keepalive_arg *arg);
|
||||
struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
|
||||
struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
|
||||
enum wmi_wow_wakeup_event event,
|
||||
u32 enable);
|
||||
struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
|
||||
struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
|
||||
u32 pattern_id,
|
||||
const u8 *pattern,
|
||||
const u8 *mask,
|
||||
int pattern_len,
|
||||
int pattern_offset);
|
||||
struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
|
||||
u32 pattern_id);
|
||||
struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
|
||||
u32 vdev_id,
|
||||
enum wmi_tdls_state state);
|
||||
struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
|
||||
const struct wmi_tdls_peer_update_cmd_arg *arg,
|
||||
const struct wmi_tdls_peer_capab_arg *cap,
|
||||
const struct wmi_channel_arg *chan);
|
||||
struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
|
||||
};
|
||||
|
||||
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
|
||||
@ -273,6 +299,26 @@ ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
|
||||
return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
|
||||
struct wmi_roam_ev_arg *arg)
|
||||
{
|
||||
if (!ar->wmi.ops->pull_roam_ev)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
|
||||
struct wmi_wow_ev_arg *arg)
|
||||
{
|
||||
if (!ar->wmi.ops->pull_wow_event)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
|
||||
{
|
||||
@ -624,14 +670,15 @@ ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN])
|
||||
const u8 peer_addr[ETH_ALEN],
|
||||
enum wmi_peer_type peer_type)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_peer_create)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
|
||||
skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
@ -1060,4 +1107,145 @@ ath10k_wmi_sta_keepalive(struct ath10k *ar,
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_wow_enable(struct ath10k *ar)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 cmd_id;
|
||||
|
||||
if (!ar->wmi.ops->gen_wow_enable)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_wow_enable(ar);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
cmd_id = ar->wmi.cmd->wow_enable_cmdid;
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
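The inline wrappers in this header all follow the same shape: check that the current firmware flavour implements the op, let it build the command buffer, then send it with the matching command id (returning -EOPNOTSUPP otherwise). A generic sketch of that dispatch pattern with stand-in types, not the driver's API:

#include <errno.h>
#include <stddef.h>

struct cmd_buf { int dummy; };

struct fw_ops {
	struct cmd_buf *(*gen_example)(void *ctx);	/* hypothetical op */
};

struct fw_ctx {
	const struct fw_ops *ops;
	unsigned int example_cmdid;
};

static int send_cmd(struct fw_ctx *fw, struct cmd_buf *buf, unsigned int cmdid)
{
	(void)fw; (void)buf; (void)cmdid;
	return 0;	/* stand-in for the real command send path */
}

static int fw_example_cmd(struct fw_ctx *fw, void *ctx)
{
	struct cmd_buf *buf;

	if (!fw->ops->gen_example)
		return -EOPNOTSUPP;	/* this firmware flavour lacks the command */

	buf = fw->ops->gen_example(ctx);
	if (!buf)
		return -ENOMEM;

	return send_cmd(fw, buf, fw->example_cmdid);
}

int main(void)
{
	const struct fw_ops ops = { .gen_example = NULL };
	struct fw_ctx fw = { .ops = &ops, .example_cmdid = 0x9001 };

	return fw_example_cmd(&fw, NULL) == -EOPNOTSUPP ? 0 : 1;
}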
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
|
||||
enum wmi_wow_wakeup_event event,
|
||||
u32 enable)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 cmd_id;
|
||||
|
||||
if (!ar->wmi.ops->gen_wow_add_wakeup_event)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 cmd_id;
|
||||
|
||||
if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
|
||||
const u8 *pattern, const u8 *mask,
|
||||
int pattern_len, int pattern_offset)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 cmd_id;
|
||||
|
||||
if (!ar->wmi.ops->gen_wow_add_pattern)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
|
||||
pattern, mask, pattern_len,
|
||||
pattern_offset);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 cmd_id;
|
||||
|
||||
if (!ar->wmi.ops->gen_wow_del_pattern)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
|
||||
enum wmi_tdls_state state)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_update_fw_tdls_state)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
|
||||
const struct wmi_tdls_peer_update_cmd_arg *arg,
|
||||
const struct wmi_tdls_peer_capab_arg *cap,
|
||||
const struct wmi_channel_arg *chan)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_tdls_peer_update)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb,
|
||||
ar->wmi.cmd->tdls_peer_update_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_adaptive_qcs)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -16,10 +16,13 @@
|
||||
*/
|
||||
#include "core.h"
|
||||
#include "debug.h"
|
||||
#include "mac.h"
|
||||
#include "hw.h"
|
||||
#include "mac.h"
|
||||
#include "wmi.h"
|
||||
#include "wmi-ops.h"
|
||||
#include "wmi-tlv.h"
|
||||
#include "p2p.h"
|
||||
|
||||
/***************/
|
||||
/* TLV helpers */
|
||||
@ -31,9 +34,9 @@ struct wmi_tlv_policy {
|
||||
|
||||
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
|
||||
[WMI_TLV_TAG_ARRAY_BYTE]
|
||||
= { .min_len = sizeof(u8) },
|
||||
= { .min_len = 0 },
|
||||
[WMI_TLV_TAG_ARRAY_UINT32]
|
||||
= { .min_len = sizeof(u32) },
|
||||
= { .min_len = 0 },
|
||||
[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
|
||||
= { .min_len = sizeof(struct wmi_scan_event) },
|
||||
[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
|
||||
@ -62,6 +65,14 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
|
||||
= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
|
||||
[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
|
||||
= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
|
||||
[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
|
||||
= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
|
||||
[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
|
||||
= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
|
||||
[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
|
||||
= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
|
||||
[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
|
||||
= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
|
||||
};
|
||||
|
||||
static int
|
||||
@ -168,6 +179,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
|
||||
{
|
||||
const void **tb;
|
||||
const struct wmi_tlv_bcn_tx_status_ev *ev;
|
||||
struct ath10k_vif *arvif;
|
||||
u32 vdev_id, tx_status;
|
||||
int ret;
|
||||
|
||||
@ -201,6 +213,10 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
|
||||
break;
|
||||
}
|
||||
|
||||
arvif = ath10k_get_arvif(ar, vdev_id);
|
||||
if (arvif && arvif->is_up && arvif->vif->csa_active)
|
||||
ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
|
||||
|
||||
kfree(tb);
|
||||
return 0;
|
||||
}
|
||||
@ -296,6 +312,83 @@ static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
const void **tb;
|
||||
const struct wmi_tlv_p2p_noa_ev *ev;
|
||||
const struct wmi_p2p_noa_info *noa;
|
||||
int ret, vdev_id;
|
||||
|
||||
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
|
||||
if (IS_ERR(tb)) {
|
||||
ret = PTR_ERR(tb);
|
||||
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
|
||||
noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
|
||||
|
||||
if (!ev || !noa) {
|
||||
kfree(tb);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
vdev_id = __le32_to_cpu(ev->vdev_id);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
|
||||
vdev_id, noa->num_descriptors);
|
||||
|
||||
ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
|
||||
kfree(tb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
const void **tb;
|
||||
const struct wmi_tlv_tx_pause_ev *ev;
|
||||
int ret, vdev_id;
|
||||
u32 pause_id, action, vdev_map, peer_id, tid_map;
|
||||
|
||||
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
|
||||
if (IS_ERR(tb)) {
|
||||
ret = PTR_ERR(tb);
|
||||
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
|
||||
if (!ev) {
|
||||
kfree(tb);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
pause_id = __le32_to_cpu(ev->pause_id);
|
||||
action = __le32_to_cpu(ev->action);
|
||||
vdev_map = __le32_to_cpu(ev->vdev_map);
|
||||
peer_id = __le32_to_cpu(ev->peer_id);
|
||||
tid_map = __le32_to_cpu(ev->tid_map);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
|
||||
pause_id, action, vdev_map, peer_id, tid_map);
|
||||
|
||||
for (vdev_id = 0; vdev_map; vdev_id++) {
|
||||
if (!(vdev_map & BIT(vdev_id)))
|
||||
continue;
|
||||
|
||||
vdev_map &= ~BIT(vdev_id);
|
||||
ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
|
||||
}
|
||||
|
||||
kfree(tb);
|
||||
return 0;
|
||||
}
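The tx pause handler above walks the vdev bitmap one set bit at a time, clearing each bit as it is handled. The same loop as a standalone sketch, for illustration only:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static void handle_vdev(unsigned int vdev_id)
{
	printf("pause/unpause vdev %u\n", vdev_id);
}

static void for_each_set_vdev(uint32_t vdev_map)
{
	unsigned int vdev_id;

	for (vdev_id = 0; vdev_map; vdev_id++) {
		if (!(vdev_map & BIT(vdev_id)))
			continue;

		vdev_map &= ~BIT(vdev_id);
		handle_vdev(vdev_id);
	}
}

int main(void)
{
	for_each_set_vdev(0x15);	/* vdevs 0, 2 and 4 */
	return 0;
}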
|
||||
|
||||
/***********/
|
||||
/* TLV ops */
|
||||
/***********/
|
||||
@ -417,6 +510,12 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
|
||||
case WMI_TLV_DIAG_EVENTID:
|
||||
ath10k_wmi_tlv_event_diag(ar, skb);
|
||||
break;
|
||||
case WMI_TLV_P2P_NOA_EVENTID:
|
||||
ath10k_wmi_tlv_event_p2p_noa(ar, skb);
|
||||
break;
|
||||
case WMI_TLV_TX_PAUSE_EVENTID:
|
||||
ath10k_wmi_tlv_event_tx_pause(ar, skb);
|
||||
break;
|
||||
default:
|
||||
ath10k_warn(ar, "Unknown eventid: %d\n", id);
|
||||
break;
|
||||
@ -1012,6 +1111,65 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
|
||||
struct sk_buff *skb,
|
||||
struct wmi_roam_ev_arg *arg)
|
||||
{
|
||||
const void **tb;
|
||||
const struct wmi_tlv_roam_ev *ev;
|
||||
int ret;
|
||||
|
||||
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
|
||||
if (IS_ERR(tb)) {
|
||||
ret = PTR_ERR(tb);
|
||||
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
|
||||
if (!ev) {
|
||||
kfree(tb);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
arg->vdev_id = ev->vdev_id;
|
||||
arg->reason = ev->reason;
|
||||
arg->rssi = ev->rssi;
|
||||
|
||||
kfree(tb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
|
||||
struct wmi_wow_ev_arg *arg)
|
||||
{
|
||||
const void **tb;
|
||||
const struct wmi_tlv_wow_event_info *ev;
|
||||
int ret;
|
||||
|
||||
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
|
||||
if (IS_ERR(tb)) {
|
||||
ret = PTR_ERR(tb);
|
||||
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
|
||||
if (!ev) {
|
||||
kfree(tb);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
arg->vdev_id = __le32_to_cpu(ev->vdev_id);
|
||||
arg->flag = __le32_to_cpu(ev->flag);
|
||||
arg->wake_reason = __le32_to_cpu(ev->wake_reason);
|
||||
arg->data_len = __le32_to_cpu(ev->data_len);
|
||||
|
||||
kfree(tb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
|
||||
{
|
||||
@ -1160,8 +1318,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
|
||||
cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
|
||||
|
||||
if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
|
||||
cfg->num_offload_peers = __cpu_to_le32(3);
|
||||
cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
|
||||
cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
|
||||
cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
|
||||
} else {
|
||||
cfg->num_offload_peers = __cpu_to_le32(0);
|
||||
cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
|
||||
@ -1178,8 +1336,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
|
||||
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
|
||||
cfg->rx_decap_mode = __cpu_to_le32(1);
|
||||
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
|
||||
cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
|
||||
cfg->roam_offload_max_vdev = __cpu_to_le32(3);
|
||||
cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
|
||||
cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
|
||||
cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
|
||||
cfg->num_mcast_groups = __cpu_to_le32(0);
|
||||
cfg->num_mcast_table_elems = __cpu_to_le32(0);
|
||||
@ -1193,11 +1351,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
|
||||
cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
|
||||
cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
|
||||
cfg->max_frag_entries = __cpu_to_le32(2);
|
||||
cfg->num_tdls_vdevs = __cpu_to_le32(1);
|
||||
cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
|
||||
cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
|
||||
cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
|
||||
cfg->num_multicast_filter_entries = __cpu_to_le32(5);
|
||||
cfg->num_wow_filters = __cpu_to_le32(0x16);
|
||||
cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
|
||||
cfg->num_keep_alive_pattern = __cpu_to_le32(6);
|
||||
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
|
||||
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
|
||||
@ -1248,7 +1406,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
|
||||
cmd = (void *)tlv->value;
|
||||
|
||||
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
|
||||
cmd->burst_duration_ms = __cpu_to_le32(0);
|
||||
cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
|
||||
cmd->num_channels = __cpu_to_le32(arg->n_channels);
|
||||
cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
|
||||
cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
|
||||
@ -1408,8 +1566,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
|
||||
void *ptr;
|
||||
u32 flags = 0;
|
||||
|
||||
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
|
||||
@ -1782,7 +1938,8 @@ ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN])
|
||||
const u8 peer_addr[ETH_ALEN],
|
||||
enum wmi_peer_type peer_type)
|
||||
{
|
||||
struct wmi_tlv_peer_create_cmd *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
@ -1797,7 +1954,7 @@ ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
|
||||
cmd->peer_type = __cpu_to_le32(peer_type);
|
||||
ether_addr_copy(cmd->peer_addr.addr, peer_addr);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
|
||||
@ -2027,7 +2184,7 @@ ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
||||
if (!mac)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
||||
skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@ -2485,6 +2642,387 @@ ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
|
||||
enum wmi_tdls_state state)
|
||||
{
|
||||
struct wmi_tdls_set_state_cmd *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
void *ptr;
|
||||
size_t len;
|
||||
/* Set to options from wmi_tlv_tdls_options,
|
||||
* for now none of them are enabled.
|
||||
*/
|
||||
u32 options = 0;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->state = __cpu_to_le32(state);
|
||||
cmd->notification_interval_ms = __cpu_to_le32(5000);
|
||||
cmd->tx_discovery_threshold = __cpu_to_le32(100);
|
||||
cmd->tx_teardown_threshold = __cpu_to_le32(5);
|
||||
cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
|
||||
cmd->rssi_delta = __cpu_to_le32(-20);
|
||||
cmd->tdls_options = __cpu_to_le32(options);
|
||||
cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
|
||||
cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
|
||||
cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
|
||||
cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
|
||||
cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
|
||||
state, vdev_id);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
|
||||
{
|
||||
u32 peer_qos = 0;
|
||||
|
||||
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
|
||||
peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
|
||||
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
|
||||
peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
|
||||
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
|
||||
peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
|
||||
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
|
||||
peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
|
||||
|
||||
peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
|
||||
|
||||
return peer_qos;
|
||||
}
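The helper above translates the U-APSD AC bits into the firmware's peer_qos flags and packs the maximum service period into a sub-field. The same translation written as a lookup table, with stand-in flag values rather than the WMI_TLV_TDLS_PEER_QOS_* definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in input bits (mirroring the WMM IE STA QoS-info AC bits) */
#define AC_VO (1u << 0)
#define AC_VI (1u << 1)
#define AC_BK (1u << 2)
#define AC_BE (1u << 3)

/* stand-in output flags and service-period field */
#define PEER_QOS_AC_VO (1u << 0)
#define PEER_QOS_AC_VI (1u << 1)
#define PEER_QOS_AC_BK (1u << 2)
#define PEER_QOS_AC_BE (1u << 3)
#define PEER_QOS_SP_LSB 4
#define PEER_QOS_SP_MASK (0x3u << PEER_QOS_SP_LSB)

static uint32_t prepare_peer_qos(uint8_t uapsd_queues, uint8_t sp)
{
	static const struct { uint8_t in; uint32_t out; } map[] = {
		{ AC_VO, PEER_QOS_AC_VO },
		{ AC_VI, PEER_QOS_AC_VI },
		{ AC_BK, PEER_QOS_AC_BK },
		{ AC_BE, PEER_QOS_AC_BE },
	};
	uint32_t peer_qos = 0;
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (uapsd_queues & map[i].in)
			peer_qos |= map[i].out;

	/* pack the max service period into its sub-field, like SM(sp, ...) */
	peer_qos |= ((uint32_t)sp << PEER_QOS_SP_LSB) & PEER_QOS_SP_MASK;
	return peer_qos;
}

int main(void)
{
	printf("peer_qos = 0x%x\n", prepare_peer_qos(AC_VO | AC_BE, 2));
	return 0;
}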
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
|
||||
const struct wmi_tdls_peer_update_cmd_arg *arg,
|
||||
const struct wmi_tdls_peer_capab_arg *cap,
|
||||
const struct wmi_channel_arg *chan_arg)
|
||||
{
|
||||
struct wmi_tdls_peer_update_cmd *cmd;
|
||||
struct wmi_tdls_peer_capab *peer_cap;
|
||||
struct wmi_channel *chan;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
u32 peer_qos;
|
||||
void *ptr;
|
||||
int len;
|
||||
int i;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd) +
|
||||
sizeof(*tlv) + sizeof(*peer_cap) +
|
||||
sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
|
||||
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
|
||||
ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
|
||||
cmd->peer_state = __cpu_to_le32(arg->peer_state);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
|
||||
tlv->len = __cpu_to_le16(sizeof(*peer_cap));
|
||||
peer_cap = (void *)tlv->value;
|
||||
peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
|
||||
cap->peer_max_sp);
|
||||
peer_cap->peer_qos = __cpu_to_le32(peer_qos);
|
||||
peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
|
||||
peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
|
||||
peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
|
||||
peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
|
||||
peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
|
||||
peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
|
||||
|
||||
for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
|
||||
peer_cap->peer_operclass[i] = cap->peer_operclass[i];
|
||||
|
||||
peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
|
||||
peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
|
||||
peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*peer_cap);
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
|
||||
tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
|
||||
for (i = 0; i < cap->peer_chan_len; i++) {
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
|
||||
tlv->len = __cpu_to_le16(sizeof(*chan));
|
||||
chan = (void *)tlv->value;
|
||||
ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*chan);
|
||||
}
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
|
||||
arg->vdev_id, arg->peer_state, cap->peer_chan_len);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
|
||||
{
|
||||
struct wmi_tlv_wow_enable_cmd *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
tlv = (struct wmi_tlv *)skb->data;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
|
||||
cmd->enable = __cpu_to_le32(1);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
|
||||
u32 vdev_id,
|
||||
enum wmi_wow_wakeup_event event,
|
||||
u32 enable)
|
||||
{
|
||||
struct wmi_tlv_wow_add_del_event_cmd *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
tlv = (struct wmi_tlv *)skb->data;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->is_add = __cpu_to_le32(enable);
|
||||
cmd->event_bitmap = __cpu_to_le32(1 << event);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
|
||||
wow_wakeup_event(event), enable, vdev_id);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
|
||||
{
|
||||
struct wmi_tlv_wow_host_wakeup_ind *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
tlv = (struct wmi_tlv *)skb->data;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
|
||||
u32 pattern_id, const u8 *pattern,
|
||||
const u8 *bitmask, int pattern_len,
|
||||
int pattern_offset)
|
||||
{
|
||||
struct wmi_tlv_wow_add_pattern_cmd *cmd;
|
||||
struct wmi_tlv_wow_bitmap_pattern *bitmap;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
void *ptr;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd) +
|
||||
sizeof(*tlv) + /* array struct */
|
||||
sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */
|
||||
sizeof(*tlv) + /* empty ipv4 sync */
|
||||
sizeof(*tlv) + /* empty ipv6 sync */
|
||||
sizeof(*tlv) + /* empty magic */
|
||||
sizeof(*tlv) + /* empty info timeout */
|
||||
sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
|
||||
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* cmd */
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->pattern_id = __cpu_to_le32(pattern_id);
|
||||
cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);
|
||||
|
||||
/* bitmap */
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
|
||||
tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
|
||||
tlv->len = __cpu_to_le16(sizeof(*bitmap));
|
||||
bitmap = (void *)tlv->value;
|
||||
|
||||
memcpy(bitmap->patternbuf, pattern, pattern_len);
|
||||
memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
|
||||
bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
|
||||
bitmap->pattern_len = __cpu_to_le32(pattern_len);
|
||||
bitmap->bitmask_len = __cpu_to_le32(pattern_len);
|
||||
bitmap->pattern_id = __cpu_to_le32(pattern_id);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*bitmap);
|
||||
|
||||
/* ipv4 sync */
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
|
||||
tlv->len = __cpu_to_le16(0);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
|
||||
/* ipv6 sync */
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
|
||||
tlv->len = __cpu_to_le16(0);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
|
||||
/* magic */
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
|
||||
tlv->len = __cpu_to_le16(0);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
|
||||
/* pattern info timeout */
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
|
||||
tlv->len = __cpu_to_le16(0);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
|
||||
/* ratelimit interval */
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
|
||||
tlv->len = __cpu_to_le16(sizeof(u32));
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
|
||||
vdev_id, pattern_id, pattern_offset);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
|
||||
u32 pattern_id)
|
||||
{
|
||||
struct wmi_tlv_wow_del_pattern_cmd *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
tlv = (struct wmi_tlv *)skb->data;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->pattern_id = __cpu_to_le32(pattern_id);
|
||||
cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
|
||||
vdev_id, pattern_id);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
|
||||
{
|
||||
struct wmi_tlv_adaptive_qcs *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
void *ptr;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->enable = __cpu_to_le32(enable ? 1 : 0);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
|
||||
return skb;
|
||||
}
|
||||
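The adaptive OCS/QCS command above toggles the firmware's adaptive off-channel scheduler used for multi-channel operation (e.g. TDLS off-channel). A usage sketch through the wmi-ops wrapper that the .gen_adaptive_qcs op is assumed to back (illustrative, not quoted from the driver):

	ret = ath10k_wmi_adaptive_qcs(ar, true);
	if (ret)
		ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", ret);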
|
||||
/****************/
|
||||
/* TLV mappings */
|
||||
/****************/
|
||||
@ -2609,6 +3147,9 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
|
||||
.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
|
||||
.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
|
||||
.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
|
||||
.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
|
||||
.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
|
||||
.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
|
||||
};
|
||||
|
||||
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
|
||||
@ -2736,6 +3277,8 @@ static const struct wmi_ops wmi_tlv_ops = {
|
||||
.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
|
||||
.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
|
||||
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
|
||||
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
|
||||
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
|
||||
|
||||
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
|
||||
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
|
||||
@ -2781,6 +3324,14 @@ static const struct wmi_ops wmi_tlv_ops = {
|
||||
.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
|
||||
.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
|
||||
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
|
||||
.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
|
||||
.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
|
||||
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
|
||||
.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
|
||||
.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
|
||||
.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
|
||||
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
|
||||
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
|
||||
};
|
||||
|
||||
/************/
|
||||
|
@ -1454,6 +1454,174 @@ struct wmi_tlv_stats_ev {
|
||||
__le32 num_chan_stats;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_p2p_noa_ev {
|
||||
__le32 vdev_id;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_roam_ev {
|
||||
__le32 vdev_id;
|
||||
__le32 reason;
|
||||
__le32 rssi;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_wow_add_del_event_cmd {
|
||||
__le32 vdev_id;
|
||||
__le32 is_add;
|
||||
__le32 event_bitmap;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_wow_enable_cmd {
|
||||
__le32 enable;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_wow_host_wakeup_ind {
|
||||
__le32 reserved;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_wow_event_info {
|
||||
__le32 vdev_id;
|
||||
__le32 flag;
|
||||
__le32 wake_reason;
|
||||
__le32 data_len;
|
||||
} __packed;
|
||||
|
||||
enum wmi_tlv_pattern_type {
|
||||
WOW_PATTERN_MIN = 0,
|
||||
WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
|
||||
WOW_IPV4_SYNC_PATTERN,
|
||||
WOW_IPV6_SYNC_PATTERN,
|
||||
WOW_WILD_CARD_PATTERN,
|
||||
WOW_TIMER_PATTERN,
|
||||
WOW_MAGIC_PATTERN,
|
||||
WOW_IPV6_RA_PATTERN,
|
||||
WOW_IOAC_PKT_PATTERN,
|
||||
WOW_IOAC_TMR_PATTERN,
|
||||
WOW_PATTERN_MAX
|
||||
};
|
||||
|
||||
#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
|
||||
#define WOW_DEFAULT_BITMASK_SIZE 148
|
||||
|
||||
struct wmi_tlv_wow_bitmap_pattern {
|
||||
u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
|
||||
u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
|
||||
__le32 pattern_offset;
|
||||
__le32 pattern_len;
|
||||
__le32 bitmask_len;
|
||||
__le32 pattern_id;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_wow_add_pattern_cmd {
|
||||
__le32 vdev_id;
|
||||
__le32 pattern_id;
|
||||
__le32 pattern_type;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_wow_del_pattern_cmd {
|
||||
__le32 vdev_id;
|
||||
__le32 pattern_id;
|
||||
__le32 pattern_type;
|
||||
} __packed;
|
||||
|
||||
/* TDLS Options */
|
||||
enum wmi_tlv_tdls_options {
|
||||
WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
|
||||
WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
|
||||
WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
|
||||
};
|
||||
|
||||
struct wmi_tdls_set_state_cmd {
|
||||
__le32 vdev_id;
|
||||
__le32 state;
|
||||
__le32 notification_interval_ms;
|
||||
__le32 tx_discovery_threshold;
|
||||
__le32 tx_teardown_threshold;
|
||||
__le32 rssi_teardown_threshold;
|
||||
__le32 rssi_delta;
|
||||
__le32 tdls_options;
|
||||
__le32 tdls_peer_traffic_ind_window;
|
||||
__le32 tdls_peer_traffic_response_timeout_ms;
|
||||
__le32 tdls_puapsd_mask;
|
||||
__le32 tdls_puapsd_inactivity_time_ms;
|
||||
__le32 tdls_puapsd_rx_frame_threshold;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tdls_peer_update_cmd {
|
||||
__le32 vdev_id;
|
||||
struct wmi_mac_addr peer_macaddr;
|
||||
__le32 peer_state;
|
||||
} __packed;
|
||||
|
||||
enum {
|
||||
WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
|
||||
WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
|
||||
WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
|
||||
WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
|
||||
};
|
||||
|
||||
#define WMI_TLV_TDLS_PEER_SP_MASK 0x60
|
||||
#define WMI_TLV_TDLS_PEER_SP_LSB 5
|
||||
|
||||
struct wmi_tdls_peer_capab {
|
||||
__le32 peer_qos;
|
||||
__le32 buff_sta_support;
|
||||
__le32 off_chan_support;
|
||||
__le32 peer_curr_operclass;
|
||||
__le32 self_curr_operclass;
|
||||
__le32 peer_chan_len;
|
||||
__le32 peer_operclass_len;
|
||||
u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
|
||||
__le32 is_peer_responder;
|
||||
__le32 pref_offchan_num;
|
||||
__le32 pref_offchan_bw;
|
||||
} __packed;
|
||||
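The peer_qos word consumed by ath10k_wmi_tlv_op_gen_tdls_peer_update() above (via ath10k_wmi_tlv_prepare_peer_qos()) packs the peer's U-APSD AC bitmap into the WMI_TLV_TDLS_PEER_QOS_AC_* bits and the max service period into the SP mask defined above. A sketch of that packing, under the assumption that the uapsd_queues bits follow the usual VO/VI/BK/BE = bit 0..3 ordering:

/* Sketch only; the driver's ath10k_wmi_tlv_prepare_peer_qos() is the
 * authoritative implementation.
 */
static u32 tdls_pack_peer_qos(u8 uapsd_queues, u8 max_sp)
{
	u32 peer_qos = 0;

	if (uapsd_queues & BIT(0))
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
	if (uapsd_queues & BIT(1))
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
	if (uapsd_queues & BIT(2))
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
	if (uapsd_queues & BIT(3))
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;

	peer_qos |= ((u32)max_sp << WMI_TLV_TDLS_PEER_SP_LSB) &
		    WMI_TLV_TDLS_PEER_SP_MASK;

	return peer_qos;
}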
|
||||
struct wmi_tlv_adaptive_qcs {
|
||||
__le32 enable;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* wmi_tlv_tx_pause_id - firmware tx queue pause reason types
|
||||
*
|
||||
* @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
|
||||
* Only vdev_map is valid.
|
||||
* @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
|
||||
* Only peer_id is valid.
|
||||
* @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
|
||||
* @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
|
||||
* @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
|
||||
* @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
|
||||
* @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
|
||||
* vdev_map is valid.
|
||||
* @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
|
||||
* vdev_map is valid.
|
||||
* @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
|
||||
*/
|
||||
enum wmi_tlv_tx_pause_id {
|
||||
WMI_TLV_TX_PAUSE_ID_MCC = 1,
|
||||
WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
|
||||
WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
|
||||
WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
|
||||
WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
|
||||
WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
|
||||
WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
|
||||
WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
|
||||
WMI_TLV_TX_PAUSE_ID_HOST = 21,
|
||||
};
|
||||
|
||||
enum wmi_tlv_tx_pause_action {
|
||||
WMI_TLV_TX_PAUSE_ACTION_STOP,
|
||||
WMI_TLV_TX_PAUSE_ACTION_WAKE,
|
||||
};
|
||||
|
||||
struct wmi_tlv_tx_pause_ev {
|
||||
__le32 pause_id;
|
||||
__le32 action;
|
||||
__le32 vdev_map;
|
||||
__le32 peer_id;
|
||||
__le32 tid_map;
|
||||
} __packed;
|
||||
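For the pause ids whose comment says "Only vdev_map is valid", the host is expected to translate the bitmap into per-vdev queue handling. A hedged sketch of that fan-out (names illustrative, policy omitted):

/* Sketch: iterate the paused vdevs of a tx-pause event. How each vdev's
 * queues are actually stopped or woken is driver policy and not shown.
 */
static void wmi_tlv_tx_pause_iter(const struct wmi_tlv_tx_pause_ev *ev)
{
	u32 vdev_map = __le32_to_cpu(ev->vdev_map);
	int vdev_id;

	for (vdev_id = 0; vdev_map; vdev_id++, vdev_map >>= 1) {
		if (!(vdev_map & 1))
			continue;

		/* WMI_TLV_TX_PAUSE_ACTION_STOP -> pause vdev_id's queues,
		 * WMI_TLV_TX_PAUSE_ACTION_WAKE -> wake them again.
		 */
	}
}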
|
||||
void ath10k_wmi_tlv_attach(struct ath10k *ar);
|
||||
|
||||
#endif
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "mac.h"
|
||||
#include "testmode.h"
|
||||
#include "wmi-ops.h"
|
||||
#include "p2p.h"
|
||||
|
||||
/* MAIN WMI cmd track */
|
||||
static struct wmi_cmd_map wmi_cmd_map = {
|
||||
@ -884,20 +885,24 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
|
||||
|
||||
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
unsigned long time_left;
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->wmi.service_ready,
|
||||
WMI_SERVICE_READY_TIMEOUT_HZ);
|
||||
return ret;
|
||||
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
|
||||
WMI_SERVICE_READY_TIMEOUT_HZ);
|
||||
if (!time_left)
|
||||
return -ETIMEDOUT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
unsigned long time_left;
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
|
||||
WMI_UNIFIED_READY_TIMEOUT_HZ);
|
||||
return ret;
|
||||
time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
|
||||
WMI_UNIFIED_READY_TIMEOUT_HZ);
|
||||
if (!time_left)
|
||||
return -ETIMEDOUT;
|
||||
return 0;
|
||||
}
|
||||
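With this change both wait helpers return 0 on success and -ETIMEDOUT on timeout instead of the raw wait_for_completion_timeout() result, so callers no longer have to interpret a "time left" value. An illustrative caller (not quoted from the driver) now reads:

	ret = ath10k_wmi_wait_for_service_ready(ar);
	if (ret) {
		ath10k_warn(ar, "wmi service ready event not received: %d\n", ret);
		return ret;
	}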
|
||||
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
|
||||
@ -1351,63 +1356,6 @@ static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
|
||||
return band;
|
||||
}
|
||||
|
||||
static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
|
||||
{
|
||||
u8 rate_idx = 0;
|
||||
|
||||
/* rate in Kbps */
|
||||
switch (rate) {
|
||||
case 1000:
|
||||
rate_idx = 0;
|
||||
break;
|
||||
case 2000:
|
||||
rate_idx = 1;
|
||||
break;
|
||||
case 5500:
|
||||
rate_idx = 2;
|
||||
break;
|
||||
case 11000:
|
||||
rate_idx = 3;
|
||||
break;
|
||||
case 6000:
|
||||
rate_idx = 4;
|
||||
break;
|
||||
case 9000:
|
||||
rate_idx = 5;
|
||||
break;
|
||||
case 12000:
|
||||
rate_idx = 6;
|
||||
break;
|
||||
case 18000:
|
||||
rate_idx = 7;
|
||||
break;
|
||||
case 24000:
|
||||
rate_idx = 8;
|
||||
break;
|
||||
case 36000:
|
||||
rate_idx = 9;
|
||||
break;
|
||||
case 48000:
|
||||
rate_idx = 10;
|
||||
break;
|
||||
case 54000:
|
||||
rate_idx = 11;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (band == IEEE80211_BAND_5GHZ) {
|
||||
if (rate_idx > 3)
|
||||
/* Omit CCK rates */
|
||||
rate_idx -= 4;
|
||||
else
|
||||
rate_idx = 0;
|
||||
}
|
||||
|
||||
return rate_idx;
|
||||
}
|
||||
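The hardcoded switch above is removed because the mgmt-rx path below now derives the rate index from the band's bitrate table instead (status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100)). The WMI rate is reported in kbit/s while mac80211 bitrate entries are in 100 kbit/s units, hence the division by 100. A sketch of such a table lookup, assuming the helper simply scans sband->bitrates:

/* Sketch of a bitrate -> index lookup over the registered band; the real
 * ath10k_mac_bitrate_to_idx() lives in mac.c and may differ in details.
 */
static int bitrate_to_idx(const struct ieee80211_supported_band *sband, int bitrate)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].bitrate == bitrate)
			return i;

	return 0;
}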
|
||||
/* If keys are configured, HW decrypts all frames
|
||||
* with protected bit set. Mark such frames as decrypted.
|
||||
*/
|
||||
@ -1489,6 +1437,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
|
||||
struct wmi_mgmt_rx_ev_arg arg = {};
|
||||
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct ieee80211_supported_band *sband;
|
||||
u32 rx_status;
|
||||
u32 channel;
|
||||
u32 phy_mode;
|
||||
@ -1559,9 +1508,11 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
|
||||
if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
|
||||
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
|
||||
|
||||
sband = &ar->mac.sbands[status->band];
|
||||
|
||||
status->freq = ieee80211_channel_to_frequency(channel, status->band);
|
||||
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
|
||||
status->rate_idx = get_rate_idx(rate, status->band);
|
||||
status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
|
||||
|
||||
hdr = (struct ieee80211_hdr *)skb->data;
|
||||
fc = le16_to_cpu(hdr->frame_control);
|
||||
@ -1585,6 +1536,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
|
||||
}
|
||||
}
|
||||
|
||||
if (ieee80211_is_beacon(hdr->frame_control))
|
||||
ath10k_mac_handle_beacon(ar, skb);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_MGMT,
|
||||
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
|
||||
skb, skb->len,
|
||||
@ -2276,109 +2230,25 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
|
||||
tim->bitmap_ctrl, pvm_len);
|
||||
}
|
||||
|
||||
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
|
||||
const struct wmi_p2p_noa_info *noa)
|
||||
{
|
||||
struct ieee80211_p2p_noa_attr *noa_attr;
|
||||
u8 ctwindow_oppps = noa->ctwindow_oppps;
|
||||
u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
|
||||
bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
|
||||
__le16 *noa_attr_len;
|
||||
u16 attr_len;
|
||||
u8 noa_descriptors = noa->num_descriptors;
|
||||
int i;
|
||||
|
||||
/* P2P IE */
|
||||
data[0] = WLAN_EID_VENDOR_SPECIFIC;
|
||||
data[1] = len - 2;
|
||||
data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
|
||||
data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
|
||||
data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
|
||||
data[5] = WLAN_OUI_TYPE_WFA_P2P;
|
||||
|
||||
/* NOA ATTR */
|
||||
data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
|
||||
noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
|
||||
noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
|
||||
|
||||
noa_attr->index = noa->index;
|
||||
noa_attr->oppps_ctwindow = ctwindow;
|
||||
if (oppps)
|
||||
noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
|
||||
|
||||
for (i = 0; i < noa_descriptors; i++) {
|
||||
noa_attr->desc[i].count =
|
||||
__le32_to_cpu(noa->descriptors[i].type_count);
|
||||
noa_attr->desc[i].duration = noa->descriptors[i].duration;
|
||||
noa_attr->desc[i].interval = noa->descriptors[i].interval;
|
||||
noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
|
||||
}
|
||||
|
||||
attr_len = 2; /* index + oppps_ctwindow */
|
||||
attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
|
||||
*noa_attr_len = __cpu_to_le16(attr_len);
|
||||
}
|
||||
|
||||
static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
|
||||
{
|
||||
u32 len = 0;
|
||||
u8 noa_descriptors = noa->num_descriptors;
|
||||
u8 opp_ps_info = noa->ctwindow_oppps;
|
||||
bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
|
||||
|
||||
if (!noa_descriptors && !opps_enabled)
|
||||
return len;
|
||||
|
||||
len += 1 + 1 + 4; /* EID + len + OUI */
|
||||
len += 1 + 2; /* noa attr + attr len */
|
||||
len += 1 + 1; /* index + oppps_ctwindow */
|
||||
len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
|
||||
struct sk_buff *bcn,
|
||||
const struct wmi_p2p_noa_info *noa)
|
||||
{
|
||||
u8 *new_data, *old_data = arvif->u.ap.noa_data;
|
||||
u32 new_len;
|
||||
|
||||
if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
|
||||
return;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
|
||||
if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
|
||||
new_len = ath10k_p2p_calc_noa_ie_len(noa);
|
||||
if (!new_len)
|
||||
goto cleanup;
|
||||
|
||||
new_data = kmalloc(new_len, GFP_ATOMIC);
|
||||
if (!new_data)
|
||||
goto cleanup;
|
||||
|
||||
ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
arvif->u.ap.noa_data = new_data;
|
||||
arvif->u.ap.noa_len = new_len;
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
kfree(old_data);
|
||||
}
|
||||
if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
|
||||
ath10k_p2p_noa_update(arvif, noa);
|
||||
|
||||
if (arvif->u.ap.noa_data)
|
||||
if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
|
||||
memcpy(skb_put(bcn, arvif->u.ap.noa_len),
|
||||
arvif->u.ap.noa_data,
|
||||
arvif->u.ap.noa_len);
|
||||
return;
|
||||
|
||||
cleanup:
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
arvif->u.ap.noa_data = NULL;
|
||||
arvif->u.ap.noa_len = 0;
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
kfree(old_data);
|
||||
return;
|
||||
}
|
||||
|
||||
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
|
||||
@ -2555,6 +2425,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
|
||||
u64 tsf)
|
||||
{
|
||||
u32 reg0, reg1, tsf32l;
|
||||
struct ieee80211_channel *ch;
|
||||
struct pulse_event pe;
|
||||
u64 tsf64;
|
||||
u8 rssi, width;
|
||||
@ -2583,6 +2454,15 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
|
||||
if (!ar->dfs_detector)
|
||||
return;
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
ch = ar->rx_channel;
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
|
||||
if (!ch) {
|
||||
ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
|
||||
goto radar_detected;
|
||||
}
|
||||
|
||||
/* report event to DFS pattern detector */
|
||||
tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
|
||||
tsf64 = tsf & (~0xFFFFFFFFULL);
|
||||
@ -2598,10 +2478,10 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
|
||||
rssi = 0;
|
||||
|
||||
pe.ts = tsf64;
|
||||
pe.freq = ar->hw->conf.chandef.chan->center_freq;
|
||||
pe.freq = ch->center_freq;
|
||||
pe.width = width;
|
||||
pe.rssi = rssi;
|
||||
|
||||
pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
|
||||
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
|
||||
"dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
|
||||
pe.freq, pe.width, pe.rssi, pe.ts);
|
||||
@ -2614,6 +2494,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
|
||||
return;
|
||||
}
|
||||
|
||||
radar_detected:
|
||||
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
|
||||
ATH10K_DFS_STAT_INC(ar, radar_detected);
|
||||
|
||||
@ -2872,7 +2753,43 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
|
||||
|
||||
void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
|
||||
struct wmi_roam_ev_arg arg = {};
|
||||
int ret;
|
||||
u32 vdev_id;
|
||||
u32 reason;
|
||||
s32 rssi;
|
||||
|
||||
ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
vdev_id = __le32_to_cpu(arg.vdev_id);
|
||||
reason = __le32_to_cpu(arg.reason);
|
||||
rssi = __le32_to_cpu(arg.rssi);
|
||||
rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"wmi roam event vdev %u reason 0x%08x rssi %d\n",
|
||||
vdev_id, reason, rssi);
|
||||
|
||||
if (reason >= WMI_ROAM_REASON_MAX)
|
||||
ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
|
||||
reason, vdev_id);
|
||||
|
||||
switch (reason) {
|
||||
case WMI_ROAM_REASON_BEACON_MISS:
|
||||
ath10k_mac_handle_beacon_miss(ar, vdev_id);
|
||||
break;
|
||||
case WMI_ROAM_REASON_BETTER_AP:
|
||||
case WMI_ROAM_REASON_LOW_RSSI:
|
||||
case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
|
||||
case WMI_ROAM_REASON_HO_FAILED:
|
||||
ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
|
||||
reason, vdev_id);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
|
||||
@ -2942,7 +2859,19 @@ void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
|
||||
|
||||
void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
|
||||
struct wmi_wow_ev_arg ev = {};
|
||||
int ret;
|
||||
|
||||
complete(&ar->wow.wakeup_completed);
|
||||
|
||||
ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
|
||||
wow_reason(ev.wake_reason));
|
||||
}
|
||||
|
||||
void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
|
||||
@ -3231,6 +3160,21 @@ static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
|
||||
struct wmi_roam_ev_arg *arg)
|
||||
{
|
||||
struct wmi_roam_ev *ev = (void *)skb->data;
|
||||
|
||||
if (skb->len < sizeof(*ev))
|
||||
return -EPROTO;
|
||||
|
||||
skb_pull(skb, sizeof(*ev));
|
||||
arg->vdev_id = ev->vdev_id;
|
||||
arg->reason = ev->reason;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
struct wmi_rdy_ev_arg arg = {};
|
||||
@ -3989,6 +3933,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
|
||||
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
|
||||
|
||||
features = WMI_10_2_RX_BATCH_MODE;
|
||||
if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
|
||||
features |= WMI_10_2_COEX_GPIO;
|
||||
cmd->resource_config.feature_mask = __cpu_to_le32(features);
|
||||
|
||||
memcpy(&cmd->resource_config.common, &config, sizeof(config));
|
||||
@ -4315,8 +4261,6 @@ ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
|
||||
const char *cmdname;
|
||||
u32 flags = 0;
|
||||
|
||||
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
|
||||
@ -4539,7 +4483,8 @@ ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN])
|
||||
const u8 peer_addr[ETH_ALEN],
|
||||
enum wmi_peer_type peer_type)
|
||||
{
|
||||
struct wmi_peer_create_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
@ -5223,6 +5168,7 @@ static const struct wmi_ops wmi_ops = {
|
||||
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
|
||||
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
|
||||
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
|
||||
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
|
||||
|
||||
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
|
||||
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
|
||||
@ -5268,6 +5214,7 @@ static const struct wmi_ops wmi_ops = {
|
||||
/* .gen_bcn_tmpl not implemented */
|
||||
/* .gen_prb_tmpl not implemented */
|
||||
/* .gen_p2p_go_bcn_ie not implemented */
|
||||
/* .gen_adaptive_qcs not implemented */
|
||||
};
|
||||
|
||||
static const struct wmi_ops wmi_10_1_ops = {
|
||||
@ -5290,6 +5237,7 @@ static const struct wmi_ops wmi_10_1_ops = {
|
||||
.pull_swba = ath10k_wmi_op_pull_swba_ev,
|
||||
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
|
||||
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
|
||||
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
|
||||
|
||||
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
|
||||
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
|
||||
@ -5330,6 +5278,7 @@ static const struct wmi_ops wmi_10_1_ops = {
|
||||
/* .gen_bcn_tmpl not implemented */
|
||||
/* .gen_prb_tmpl not implemented */
|
||||
/* .gen_p2p_go_bcn_ie not implemented */
|
||||
/* .gen_adaptive_qcs not implemented */
|
||||
};
|
||||
|
||||
static const struct wmi_ops wmi_10_2_ops = {
|
||||
@ -5353,6 +5302,7 @@ static const struct wmi_ops wmi_10_2_ops = {
|
||||
.pull_swba = ath10k_wmi_op_pull_swba_ev,
|
||||
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
|
||||
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
|
||||
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
|
||||
|
||||
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
|
||||
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
|
||||
@ -5413,6 +5363,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
|
||||
.pull_swba = ath10k_wmi_op_pull_swba_ev,
|
||||
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
|
||||
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
|
||||
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
|
||||
|
||||
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
|
||||
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
|
||||
@ -5452,6 +5403,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
|
||||
/* .gen_bcn_tmpl not implemented */
|
||||
/* .gen_prb_tmpl not implemented */
|
||||
/* .gen_p2p_go_bcn_ie not implemented */
|
||||
/* .gen_adaptive_qcs not implemented */
|
||||
};
|
||||
|
||||
int ath10k_wmi_attach(struct ath10k *ar)
|
||||
|
@ -148,6 +148,8 @@ enum wmi_service {
|
||||
WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
|
||||
WMI_SERVICE_MDNS_OFFLOAD,
|
||||
WMI_SERVICE_SAP_AUTH_OFFLOAD,
|
||||
WMI_SERVICE_ATF,
|
||||
WMI_SERVICE_COEX_GPIO,
|
||||
|
||||
/* keep last */
|
||||
WMI_SERVICE_MAX,
|
||||
@ -177,6 +179,8 @@ enum wmi_10x_service {
|
||||
WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
|
||||
WMI_10X_SERVICE_FORCE_FW_HANG,
|
||||
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
|
||||
WMI_10X_SERVICE_ATF,
|
||||
WMI_10X_SERVICE_COEX_GPIO,
|
||||
};
|
||||
|
||||
enum wmi_main_service {
|
||||
@ -293,6 +297,8 @@ static inline char *wmi_service_name(int service_id)
|
||||
SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
|
||||
SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
|
||||
SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
|
||||
SVCSTR(WMI_SERVICE_ATF);
|
||||
SVCSTR(WMI_SERVICE_COEX_GPIO);
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
@ -356,6 +362,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
|
||||
WMI_SERVICE_FORCE_FW_HANG, len);
|
||||
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
|
||||
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
|
||||
SVCMAP(WMI_10X_SERVICE_ATF,
|
||||
WMI_SERVICE_ATF, len);
|
||||
SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
|
||||
WMI_SERVICE_COEX_GPIO, len);
|
||||
}
|
||||
|
||||
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
|
||||
@ -552,6 +562,9 @@ struct wmi_cmd_map {
|
||||
u32 gpio_output_cmdid;
|
||||
u32 pdev_get_temperature_cmdid;
|
||||
u32 vdev_set_wmm_params_cmdid;
|
||||
u32 tdls_set_state_cmdid;
|
||||
u32 tdls_peer_update_cmdid;
|
||||
u32 adaptive_qcs_cmdid;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -1952,6 +1965,7 @@ struct wmi_resource_config_10x {
|
||||
enum wmi_10_2_feature_mask {
|
||||
WMI_10_2_RX_BATCH_MODE = BIT(0),
|
||||
WMI_10_2_ATF_CONFIG = BIT(1),
|
||||
WMI_10_2_COEX_GPIO = BIT(3),
|
||||
};
|
||||
|
||||
struct wmi_resource_config_10_2 {
|
||||
@ -2166,6 +2180,7 @@ struct wmi_start_scan_arg {
|
||||
u32 max_scan_time;
|
||||
u32 probe_delay;
|
||||
u32 scan_ctrl_flags;
|
||||
u32 burst_duration_ms;
|
||||
|
||||
u32 ie_len;
|
||||
u32 n_channels;
|
||||
@ -4333,6 +4348,12 @@ struct wmi_peer_create_cmd {
|
||||
struct wmi_mac_addr peer_macaddr;
|
||||
} __packed;
|
||||
|
||||
enum wmi_peer_type {
|
||||
WMI_PEER_TYPE_DEFAULT = 0,
|
||||
WMI_PEER_TYPE_BSS = 1,
|
||||
WMI_PEER_TYPE_TDLS = 2,
|
||||
};
|
||||
|
||||
struct wmi_peer_delete_cmd {
|
||||
__le32 vdev_id;
|
||||
struct wmi_mac_addr peer_macaddr;
|
||||
@ -4644,9 +4665,7 @@ struct wmi_peer_sta_kickout_event {
|
||||
} __packed;
|
||||
|
||||
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
|
||||
|
||||
/* FIXME: empirically extrapolated */
|
||||
#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
|
||||
#define WMI_CHAN_INFO_MSEC(x) ((x) / 88000)
|
||||
|
||||
/* Beacon filter wmi command info */
|
||||
#define BCN_FLT_MAX_SUPPORTED_IES 256
|
||||
@ -4769,6 +4788,22 @@ struct wmi_dbglog_cfg_cmd {
|
||||
__le32 config_valid;
|
||||
} __packed;
|
||||
|
||||
enum wmi_roam_reason {
	WMI_ROAM_REASON_BETTER_AP = 1,
	WMI_ROAM_REASON_BEACON_MISS = 2,
	WMI_ROAM_REASON_LOW_RSSI = 3,
	WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
	WMI_ROAM_REASON_HO_FAILED = 5,

	/* keep last */
	WMI_ROAM_REASON_MAX,
};

struct wmi_roam_ev {
	__le32 vdev_id;
	__le32 reason;
} __packed;
|
||||
|
||||
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
|
||||
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
|
||||
|
||||
@ -4857,11 +4892,200 @@ struct wmi_rdy_ev_arg {
|
||||
const u8 *mac_addr;
|
||||
};
|
||||
|
||||
struct wmi_roam_ev_arg {
	__le32 vdev_id;
	__le32 reason;
	__le32 rssi;
};

struct wmi_pdev_temperature_event {
	/* temperature value in degrees Celsius */
	__le32 temperature;
} __packed;
|
||||
|
||||
/* WOW structures */
|
||||
enum wmi_wow_wakeup_event {
|
||||
WOW_BMISS_EVENT = 0,
|
||||
WOW_BETTER_AP_EVENT,
|
||||
WOW_DEAUTH_RECVD_EVENT,
|
||||
WOW_MAGIC_PKT_RECVD_EVENT,
|
||||
WOW_GTK_ERR_EVENT,
|
||||
WOW_FOURWAY_HSHAKE_EVENT,
|
||||
WOW_EAPOL_RECVD_EVENT,
|
||||
WOW_NLO_DETECTED_EVENT,
|
||||
WOW_DISASSOC_RECVD_EVENT,
|
||||
WOW_PATTERN_MATCH_EVENT,
|
||||
WOW_CSA_IE_EVENT,
|
||||
WOW_PROBE_REQ_WPS_IE_EVENT,
|
||||
WOW_AUTH_REQ_EVENT,
|
||||
WOW_ASSOC_REQ_EVENT,
|
||||
WOW_HTT_EVENT,
|
||||
WOW_RA_MATCH_EVENT,
|
||||
WOW_HOST_AUTO_SHUTDOWN_EVENT,
|
||||
WOW_IOAC_MAGIC_EVENT,
|
||||
WOW_IOAC_SHORT_EVENT,
|
||||
WOW_IOAC_EXTEND_EVENT,
|
||||
WOW_IOAC_TIMER_EVENT,
|
||||
WOW_DFS_PHYERR_RADAR_EVENT,
|
||||
WOW_BEACON_EVENT,
|
||||
WOW_CLIENT_KICKOUT_EVENT,
|
||||
WOW_EVENT_MAX,
|
||||
};
|
||||
|
||||
#define C2S(x) case x: return #x
|
||||
|
||||
static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
|
||||
{
|
||||
switch (ev) {
|
||||
C2S(WOW_BMISS_EVENT);
|
||||
C2S(WOW_BETTER_AP_EVENT);
|
||||
C2S(WOW_DEAUTH_RECVD_EVENT);
|
||||
C2S(WOW_MAGIC_PKT_RECVD_EVENT);
|
||||
C2S(WOW_GTK_ERR_EVENT);
|
||||
C2S(WOW_FOURWAY_HSHAKE_EVENT);
|
||||
C2S(WOW_EAPOL_RECVD_EVENT);
|
||||
C2S(WOW_NLO_DETECTED_EVENT);
|
||||
C2S(WOW_DISASSOC_RECVD_EVENT);
|
||||
C2S(WOW_PATTERN_MATCH_EVENT);
|
||||
C2S(WOW_CSA_IE_EVENT);
|
||||
C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
|
||||
C2S(WOW_AUTH_REQ_EVENT);
|
||||
C2S(WOW_ASSOC_REQ_EVENT);
|
||||
C2S(WOW_HTT_EVENT);
|
||||
C2S(WOW_RA_MATCH_EVENT);
|
||||
C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
|
||||
C2S(WOW_IOAC_MAGIC_EVENT);
|
||||
C2S(WOW_IOAC_SHORT_EVENT);
|
||||
C2S(WOW_IOAC_EXTEND_EVENT);
|
||||
C2S(WOW_IOAC_TIMER_EVENT);
|
||||
C2S(WOW_DFS_PHYERR_RADAR_EVENT);
|
||||
C2S(WOW_BEACON_EVENT);
|
||||
C2S(WOW_CLIENT_KICKOUT_EVENT);
|
||||
C2S(WOW_EVENT_MAX);
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
enum wmi_wow_wake_reason {
|
||||
WOW_REASON_UNSPECIFIED = -1,
|
||||
WOW_REASON_NLOD = 0,
|
||||
WOW_REASON_AP_ASSOC_LOST,
|
||||
WOW_REASON_LOW_RSSI,
|
||||
WOW_REASON_DEAUTH_RECVD,
|
||||
WOW_REASON_DISASSOC_RECVD,
|
||||
WOW_REASON_GTK_HS_ERR,
|
||||
WOW_REASON_EAP_REQ,
|
||||
WOW_REASON_FOURWAY_HS_RECV,
|
||||
WOW_REASON_TIMER_INTR_RECV,
|
||||
WOW_REASON_PATTERN_MATCH_FOUND,
|
||||
WOW_REASON_RECV_MAGIC_PATTERN,
|
||||
WOW_REASON_P2P_DISC,
|
||||
WOW_REASON_WLAN_HB,
|
||||
WOW_REASON_CSA_EVENT,
|
||||
WOW_REASON_PROBE_REQ_WPS_IE_RECV,
|
||||
WOW_REASON_AUTH_REQ_RECV,
|
||||
WOW_REASON_ASSOC_REQ_RECV,
|
||||
WOW_REASON_HTT_EVENT,
|
||||
WOW_REASON_RA_MATCH,
|
||||
WOW_REASON_HOST_AUTO_SHUTDOWN,
|
||||
WOW_REASON_IOAC_MAGIC_EVENT,
|
||||
WOW_REASON_IOAC_SHORT_EVENT,
|
||||
WOW_REASON_IOAC_EXTEND_EVENT,
|
||||
WOW_REASON_IOAC_TIMER_EVENT,
|
||||
WOW_REASON_ROAM_HO,
|
||||
WOW_REASON_DFS_PHYERR_RADADR_EVENT,
|
||||
WOW_REASON_BEACON_RECV,
|
||||
WOW_REASON_CLIENT_KICKOUT_EVENT,
|
||||
WOW_REASON_DEBUG_TEST = 0xFF,
|
||||
};
|
||||
|
||||
static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
|
||||
{
|
||||
switch (reason) {
|
||||
C2S(WOW_REASON_UNSPECIFIED);
|
||||
C2S(WOW_REASON_NLOD);
|
||||
C2S(WOW_REASON_AP_ASSOC_LOST);
|
||||
C2S(WOW_REASON_LOW_RSSI);
|
||||
C2S(WOW_REASON_DEAUTH_RECVD);
|
||||
C2S(WOW_REASON_DISASSOC_RECVD);
|
||||
C2S(WOW_REASON_GTK_HS_ERR);
|
||||
C2S(WOW_REASON_EAP_REQ);
|
||||
C2S(WOW_REASON_FOURWAY_HS_RECV);
|
||||
C2S(WOW_REASON_TIMER_INTR_RECV);
|
||||
C2S(WOW_REASON_PATTERN_MATCH_FOUND);
|
||||
C2S(WOW_REASON_RECV_MAGIC_PATTERN);
|
||||
C2S(WOW_REASON_P2P_DISC);
|
||||
C2S(WOW_REASON_WLAN_HB);
|
||||
C2S(WOW_REASON_CSA_EVENT);
|
||||
C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
|
||||
C2S(WOW_REASON_AUTH_REQ_RECV);
|
||||
C2S(WOW_REASON_ASSOC_REQ_RECV);
|
||||
C2S(WOW_REASON_HTT_EVENT);
|
||||
C2S(WOW_REASON_RA_MATCH);
|
||||
C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
|
||||
C2S(WOW_REASON_IOAC_MAGIC_EVENT);
|
||||
C2S(WOW_REASON_IOAC_SHORT_EVENT);
|
||||
C2S(WOW_REASON_IOAC_EXTEND_EVENT);
|
||||
C2S(WOW_REASON_IOAC_TIMER_EVENT);
|
||||
C2S(WOW_REASON_ROAM_HO);
|
||||
C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
|
||||
C2S(WOW_REASON_BEACON_RECV);
|
||||
C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
|
||||
C2S(WOW_REASON_DEBUG_TEST);
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
#undef C2S
|
||||
|
||||
struct wmi_wow_ev_arg {
|
||||
u32 vdev_id;
|
||||
u32 flag;
|
||||
enum wmi_wow_wake_reason wake_reason;
|
||||
u32 data_len;
|
||||
};
|
||||
|
||||
#define WOW_MIN_PATTERN_SIZE 1
|
||||
#define WOW_MAX_PATTERN_SIZE 148
|
||||
#define WOW_MAX_PKT_OFFSET 128
|
||||
|
||||
enum wmi_tdls_state {
|
||||
WMI_TDLS_DISABLE,
|
||||
WMI_TDLS_ENABLE_PASSIVE,
|
||||
WMI_TDLS_ENABLE_ACTIVE,
|
||||
};
|
||||
|
||||
enum wmi_tdls_peer_state {
|
||||
WMI_TDLS_PEER_STATE_PEERING,
|
||||
WMI_TDLS_PEER_STATE_CONNECTED,
|
||||
WMI_TDLS_PEER_STATE_TEARDOWN,
|
||||
};
|
||||
|
||||
struct wmi_tdls_peer_update_cmd_arg {
|
||||
u32 vdev_id;
|
||||
enum wmi_tdls_peer_state peer_state;
|
||||
u8 addr[ETH_ALEN];
|
||||
};
|
||||
|
||||
#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
|
||||
|
||||
struct wmi_tdls_peer_capab_arg {
|
||||
u8 peer_uapsd_queues;
|
||||
u8 peer_max_sp;
|
||||
u32 buff_sta_support;
|
||||
u32 off_chan_support;
|
||||
u32 peer_curr_operclass;
|
||||
u32 self_curr_operclass;
|
||||
u32 peer_chan_len;
|
||||
u32 peer_operclass_len;
|
||||
u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
|
||||
u32 is_peer_responder;
|
||||
u32 pref_offchan_num;
|
||||
u32 pref_offchan_bw;
|
||||
};
|
||||
|
||||
struct ath10k;
|
||||
struct ath10k_vif;
|
||||
struct ath10k_fw_stats_pdev;
|
||||
|
321
drivers/net/wireless/ath/ath10k/wow.c
Normal file
@ -0,0 +1,321 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "mac.h"
|
||||
|
||||
#include <net/mac80211.h>
|
||||
#include "hif.h"
|
||||
#include "core.h"
|
||||
#include "debug.h"
|
||||
#include "wmi.h"
|
||||
#include "wmi-ops.h"
|
||||
|
||||
static const struct wiphy_wowlan_support ath10k_wowlan_support = {
|
||||
.flags = WIPHY_WOWLAN_DISCONNECT |
|
||||
WIPHY_WOWLAN_MAGIC_PKT,
|
||||
.pattern_min_len = WOW_MIN_PATTERN_SIZE,
|
||||
.pattern_max_len = WOW_MAX_PATTERN_SIZE,
|
||||
.max_pkt_offset = WOW_MAX_PKT_OFFSET,
|
||||
};
|
||||
|
||||
static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < WOW_EVENT_MAX; i++) {
|
||||
ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
|
||||
wow_wakeup_event(i), arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ar->wow.max_num_patterns; i++) {
|
||||
ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
|
||||
i, arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wow_cleanup(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_vif *arvif;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
list_for_each_entry(arvif, &ar->arvifs, list) {
|
||||
ret = ath10k_wow_vif_cleanup(arvif);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
|
||||
struct cfg80211_wowlan *wowlan)
|
||||
{
|
||||
int ret, i;
|
||||
unsigned long wow_mask = 0;
|
||||
struct ath10k *ar = arvif->ar;
|
||||
const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
|
||||
int pattern_id = 0;
|
||||
|
||||
/* Setup requested WOW features */
|
||||
switch (arvif->vdev_type) {
|
||||
case WMI_VDEV_TYPE_IBSS:
|
||||
__set_bit(WOW_BEACON_EVENT, &wow_mask);
|
||||
/* fall through */
|
||||
case WMI_VDEV_TYPE_AP:
|
||||
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
|
||||
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
|
||||
__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
|
||||
__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
|
||||
__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
|
||||
__set_bit(WOW_HTT_EVENT, &wow_mask);
|
||||
__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
|
||||
break;
|
||||
case WMI_VDEV_TYPE_STA:
|
||||
if (wowlan->disconnect) {
|
||||
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
|
||||
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
|
||||
__set_bit(WOW_BMISS_EVENT, &wow_mask);
|
||||
__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
|
||||
}
|
||||
|
||||
if (wowlan->magic_pkt)
|
||||
__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < wowlan->n_patterns; i++) {
|
||||
u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
|
||||
int j;
|
||||
|
||||
if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
|
||||
continue;
|
||||
|
||||
/* convert bytemask to bitmask */
|
||||
for (j = 0; j < patterns[i].pattern_len; j++)
|
||||
if (patterns[i].mask[j / 8] & BIT(j % 8))
|
||||
bitmask[j] = 0xff;
|
||||
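/* Worked example (not from the driver): for a hypothetical 3-byte pattern
 * with cfg80211 mask byte 0x05 (bits 0 and 2 set), the expansion above
 * yields bitmask[0] = 0xff, bitmask[1] = 0x00 (wildcard byte),
 * bitmask[2] = 0xff.
 */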
|
||||
ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
|
||||
pattern_id,
|
||||
patterns[i].pattern,
|
||||
bitmask,
|
||||
patterns[i].pattern_len,
|
||||
patterns[i].pkt_offset);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
|
||||
pattern_id,
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pattern_id++;
|
||||
__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
|
||||
}
|
||||
|
||||
for (i = 0; i < WOW_EVENT_MAX; i++) {
|
||||
if (!test_bit(i, &wow_mask))
|
||||
continue;
|
||||
ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
|
||||
wow_wakeup_event(i), arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wow_set_wakeups(struct ath10k *ar,
|
||||
struct cfg80211_wowlan *wowlan)
|
||||
{
|
||||
struct ath10k_vif *arvif;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
list_for_each_entry(arvif, &ar->arvifs, list) {
|
||||
ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wow_enable(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
reinit_completion(&ar->target_suspend);
|
||||
|
||||
ret = ath10k_wmi_wow_enable(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
|
||||
if (ret == 0) {
|
||||
ath10k_warn(ar, "timed out while waiting for suspend completion\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wow_wakeup(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
reinit_completion(&ar->wow.wakeup_completed);
|
||||
|
||||
ret = ath10k_wmi_wow_host_wakeup_ind(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
|
||||
if (ret == 0) {
|
||||
ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
|
||||
struct cfg80211_wowlan *wowlan)
|
||||
{
|
||||
struct ath10k *ar = hw->priv;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
|
||||
ar->fw_features))) {
|
||||
ret = 1;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = ath10k_wow_cleanup(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
|
||||
ret);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = ath10k_wow_set_wakeups(ar, wowlan);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
|
||||
ret);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
ret = ath10k_wow_enable(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to start wow: %d\n", ret);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
ret = ath10k_hif_suspend(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
|
||||
goto wakeup;
|
||||
}
|
||||
|
||||
goto exit;
|
||||
|
||||
wakeup:
|
||||
ath10k_wow_wakeup(ar);
|
||||
|
||||
cleanup:
|
||||
ath10k_wow_cleanup(ar);
|
||||
|
||||
exit:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret ? 1 : 0;
|
||||
}
|
||||
|
||||
int ath10k_wow_op_resume(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath10k *ar = hw->priv;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
|
||||
ar->fw_features))) {
|
||||
ret = 1;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = ath10k_hif_resume(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to resume hif: %d\n", ret);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = ath10k_wow_wakeup(ar);
|
||||
if (ret)
|
||||
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
|
||||
|
||||
exit:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret ? 1 : 0;
|
||||
}
|
||||
|
||||
int ath10k_wow_init(struct ath10k *ar)
|
||||
{
|
||||
if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
|
||||
return 0;
|
||||
|
||||
if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
|
||||
return -EINVAL;
|
||||
|
||||
ar->wow.wowlan_support = ath10k_wowlan_support;
|
||||
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
|
||||
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
|
||||
|
||||
return 0;
|
||||
}
|
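ath10k_wow_init() only registers the WoWLAN capabilities; it is expected to be wired up during MAC registration, before ieee80211_register_hw(), so the wiphy advertises wowlan_support. An illustrative call site (assumption, not a quote from mac.c):

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_unregister;	/* hypothetical unwind label */
	}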
40
drivers/net/wireless/ath/ath10k/wow.h
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
#ifndef _WOW_H_
|
||||
#define _WOW_H_
|
||||
|
||||
struct ath10k_wow {
|
||||
u32 max_num_patterns;
|
||||
struct completion wakeup_completed;
|
||||
struct wiphy_wowlan_support wowlan_support;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
int ath10k_wow_init(struct ath10k *ar);
|
||||
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
|
||||
struct cfg80211_wowlan *wowlan);
|
||||
int ath10k_wow_op_resume(struct ieee80211_hw *hw);
|
||||
|
||||
#else
|
||||
|
||||
static inline int ath10k_wow_init(struct ath10k *ar)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PM */
|
||||
#endif /* _WOW_H_ */
|
@ -15,6 +15,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/relay.h>
|
||||
#include <linux/random.h>
|
||||
#include "ath9k.h"
|
||||
|
||||
static s8 fix_rssi_inv_only(u8 rssi_val)
|
||||
@ -36,21 +37,480 @@ static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv,
|
||||
relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
|
||||
}
|
||||
|
||||
typedef int (ath_cmn_fft_idx_validator) (u8 *sample_end, int bytes_read);
|
||||
|
||||
static int
|
||||
ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
|
||||
{
|
||||
struct ath_ht20_mag_info *mag_info;
|
||||
u8 *sample;
|
||||
u16 max_magnitude;
|
||||
u8 max_index;
|
||||
u8 max_exp;
|
||||
|
||||
/* Sanity check so that we don't read outside the read
|
||||
* buffer
|
||||
*/
|
||||
if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN - 1)
|
||||
return -1;
|
||||
|
||||
mag_info = (struct ath_ht20_mag_info *) (sample_end -
|
||||
sizeof(struct ath_ht20_mag_info) + 1);
|
||||
|
||||
sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
|
||||
|
||||
max_index = spectral_max_index(mag_info->all_bins,
|
||||
SPECTRAL_HT20_NUM_BINS);
|
||||
max_magnitude = spectral_max_magnitude(mag_info->all_bins);
|
||||
|
||||
max_exp = mag_info->max_exp & 0xf;
|
||||
|
||||
/* Don't try to read something outside the read buffer
|
||||
* in case of a missing byte (so bins[0] will be outside
|
||||
* the read buffer)
|
||||
*/
|
||||
if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
|
||||
return -1;
|
||||
|
||||
if (sample[max_index] != (max_magnitude >> max_exp))
|
||||
return -1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
|
||||
{
|
||||
struct ath_ht20_40_mag_info *mag_info;
|
||||
u8 *sample;
|
||||
u16 lower_mag, upper_mag;
|
||||
u8 lower_max_index, upper_max_index;
|
||||
u8 max_exp;
|
||||
int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
|
||||
|
||||
/* Sanity check so that we don't read outside the read
|
||||
* buffer
|
||||
*/
|
||||
if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN - 1)
|
||||
return -1;
|
||||
|
||||
mag_info = (struct ath_ht20_40_mag_info *) (sample_end -
|
||||
sizeof(struct ath_ht20_40_mag_info) + 1);
|
||||
|
||||
sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
|
||||
|
||||
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
|
||||
lower_max_index = spectral_max_index(mag_info->lower_bins,
|
||||
SPECTRAL_HT20_40_NUM_BINS);
|
||||
|
||||
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
|
||||
upper_max_index = spectral_max_index(mag_info->upper_bins,
|
||||
SPECTRAL_HT20_40_NUM_BINS);
|
||||
|
||||
max_exp = mag_info->max_exp & 0xf;
|
||||
|
||||
/* Don't try to read something outside the read buffer
|
||||
* in case of a missing byte (so bins[0] will be outside
|
||||
* the read buffer)
|
||||
*/
|
||||
if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN &&
|
||||
((upper_max_index < 1) || (lower_max_index < 1)))
|
||||
return -1;
|
||||
|
||||
/* Sometimes the hardware messes up the index and adds
|
||||
* the index of the middle point (dc_pos). Try to fix it.
|
||||
*/
|
||||
if ((upper_max_index - dc_pos > 0) &&
|
||||
(sample[upper_max_index] == (upper_mag >> max_exp)))
|
||||
upper_max_index -= dc_pos;
|
||||
|
||||
if ((lower_max_index - dc_pos > 0) &&
|
||||
(sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
|
||||
lower_max_index -= dc_pos;
|
||||
|
||||
if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
|
||||
(sample[lower_max_index] != (lower_mag >> max_exp)))
|
||||
return -1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
typedef int (ath_cmn_fft_sample_handler) (struct ath_rx_status *rs,
|
||||
struct ath_spec_scan_priv *spec_priv,
|
||||
u8 *sample_buf, u64 tsf, u16 freq, int chan_type);
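The two typedefs above describe, per bandwidth, how to validate a guessed FFT frame boundary and how to turn a complete frame into a relayfs sample; this is what allows one report to carry multiple FFT frames. Purely as an illustration of how they pair up (the driver selects them by channel type rather than through such a table):

/* illustrative grouping only */
struct cmn_fft_hooks {
	int sample_len;				/* SPECTRAL_HT20(_40)_SAMPLE_LEN */
	ath_cmn_fft_idx_validator *validate;	/* e.g. ath_cmn_max_idx_verify_ht20_fft */
	ath_cmn_fft_sample_handler *handle;	/* e.g. ath_cmn_process_ht20_fft */
};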
|
||||
|
||||
static int
|
||||
ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
|
||||
struct ath_spec_scan_priv *spec_priv,
|
||||
u8 *sample_buf,
|
||||
u64 tsf, u16 freq, int chan_type)
|
||||
{
|
||||
struct fft_sample_ht20 fft_sample_20;
|
||||
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
|
||||
struct ath_hw *ah = spec_priv->ah;
|
||||
struct ath_ht20_mag_info *mag_info;
|
||||
struct fft_sample_tlv *tlv;
|
||||
int i = 0;
|
||||
int ret = 0;
|
||||
int dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
|
||||
u16 magnitude, tmp_mag, length;
|
||||
u8 max_index, bitmap_w, max_exp;
|
||||
|
||||
length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
|
||||
fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
|
||||
fft_sample_20.tlv.length = __cpu_to_be16(length);
|
||||
fft_sample_20.freq = __cpu_to_be16(freq);
|
||||
fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
|
||||
fft_sample_20.noise = ah->noise;
|
||||
|
||||
mag_info = (struct ath_ht20_mag_info *) (sample_buf +
|
||||
SPECTRAL_HT20_NUM_BINS);
|
||||
|
||||
magnitude = spectral_max_magnitude(mag_info->all_bins);
|
||||
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
|
||||
|
||||
max_index = spectral_max_index(mag_info->all_bins,
|
||||
SPECTRAL_HT20_NUM_BINS);
|
||||
fft_sample_20.max_index = max_index;
|
||||
|
||||
bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
|
||||
fft_sample_20.bitmap_weight = bitmap_w;
|
||||
|
||||
max_exp = mag_info->max_exp & 0xf;
|
||||
fft_sample_20.max_exp = max_exp;
|
||||
|
||||
fft_sample_20.tsf = __cpu_to_be64(tsf);
|
||||
|
||||
memcpy(fft_sample_20.data, sample_buf, SPECTRAL_HT20_NUM_BINS);
|
||||
|
||||
ath_dbg(common, SPECTRAL_SCAN, "FFT HT20 frame: max mag 0x%X, "
|
||||
"max_mag_idx %i\n",
|
||||
magnitude >> max_exp,
|
||||
max_index);
|
||||
|
||||
if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
|
||||
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
|
||||
ret = -1;
|
||||
}
|
||||
|
||||
/* DC value (value in the middle) is the blind spot of the spectral
|
||||
* sample and invalid, interpolate it.
|
||||
*/
|
||||
fft_sample_20.data[dc_pos] = (fft_sample_20.data[dc_pos + 1] +
|
||||
fft_sample_20.data[dc_pos - 1]) / 2;
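	/* Worked example (illustrative values): with neighbouring bins of
	 * 0x10 and 0x14, the blind DC bin becomes (0x10 + 0x14) / 2 = 0x12.
	 */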
|
||||
|
||||
/* Check if the maximum magnitude is indeed maximum,
|
||||
* also if the maximum value was at dc_pos, calculate
|
||||
* a new one (since value at dc_pos is invalid).
|
||||
*/
|
||||
if (max_index == dc_pos) {
|
||||
tmp_mag = 0;
|
||||
for (i = 0; i < dc_pos; i++) {
|
||||
if (fft_sample_20.data[i] > tmp_mag) {
|
||||
tmp_mag = fft_sample_20.data[i];
|
||||
fft_sample_20.max_index = i;
|
||||
}
|
||||
}
|
||||
|
||||
magnitude = tmp_mag << max_exp;
|
||||
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
|
||||
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Calculated new lower max 0x%X at %i\n",
|
||||
tmp_mag, fft_sample_20.max_index);
|
||||
} else
|
||||
for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++) {
|
||||
if (fft_sample_20.data[i] == (magnitude >> max_exp))
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Got max: 0x%X at index %i\n",
|
||||
fft_sample_20.data[i], i);
|
||||
|
||||
if (fft_sample_20.data[i] > (magnitude >> max_exp)) {
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Got bin %i greater than max: 0x%X\n",
|
||||
i, fft_sample_20.data[i]);
|
||||
ret = -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
tlv = (struct fft_sample_tlv *)&fft_sample_20;
|
||||
|
||||
ath_debug_send_fft_sample(spec_priv, tlv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
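The handler above patches the DC bin (the blind spot in the middle of the report) and then re-derives the maximum if it happened to sit on that bin. A self-contained sketch of just that post-processing step, lifted onto a plain array (illustrative only; the 56-bin size is SPECTRAL_HT20_NUM_BINS from the driver):

#define NUM_BINS 56	/* SPECTRAL_HT20_NUM_BINS */

/* Interpolate the invalid DC bin and return a corrected max index. */
static int fix_dc_and_recheck_max(unsigned char data[NUM_BINS], int max_index)
{
	int dc_pos = NUM_BINS / 2;
	int i, tmp_mag = 0;

	/* DC bin is invalid, replace it with the mean of its neighbours */
	data[dc_pos] = (data[dc_pos + 1] + data[dc_pos - 1]) / 2;

	/* If the reported maximum was the DC bin, rescan the lower half,
	 * just like ath_cmn_process_ht20_fft() above.
	 */
	if (max_index == dc_pos) {
		for (i = 0; i < dc_pos; i++) {
			if (data[i] > tmp_mag) {
				tmp_mag = data[i];
				max_index = i;
			}
		}
	}

	return max_index;
}
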
static int
|
||||
ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
|
||||
struct ath_spec_scan_priv *spec_priv,
|
||||
u8 *sample_buf,
|
||||
u64 tsf, u16 freq, int chan_type)
|
||||
{
|
||||
struct fft_sample_ht20_40 fft_sample_40;
|
||||
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
|
||||
struct ath_hw *ah = spec_priv->ah;
|
||||
struct ath9k_hw_cal_data *caldata = ah->caldata;
|
||||
struct ath_ht20_40_mag_info *mag_info;
|
||||
struct fft_sample_tlv *tlv;
|
||||
int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
|
||||
int i = 0;
|
||||
int ret = 0;
|
||||
s16 ext_nf;
|
||||
u16 lower_mag, upper_mag, tmp_mag, length;
|
||||
s8 lower_rssi, upper_rssi;
|
||||
u8 lower_max_index, upper_max_index;
|
||||
u8 lower_bitmap_w, upper_bitmap_w, max_exp;
|
||||
|
||||
if (caldata)
|
||||
ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
|
||||
caldata->nfCalHist[3].privNF);
|
||||
else
|
||||
ext_nf = ATH_DEFAULT_NOISE_FLOOR;
|
||||
|
||||
length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
|
||||
fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
|
||||
fft_sample_40.tlv.length = __cpu_to_be16(length);
|
||||
fft_sample_40.freq = __cpu_to_be16(freq);
|
||||
fft_sample_40.channel_type = chan_type;
|
||||
|
||||
if (chan_type == NL80211_CHAN_HT40PLUS) {
|
||||
lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
|
||||
upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
|
||||
|
||||
fft_sample_40.lower_noise = ah->noise;
|
||||
fft_sample_40.upper_noise = ext_nf;
|
||||
} else {
|
||||
lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
|
||||
upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
|
||||
|
||||
fft_sample_40.lower_noise = ext_nf;
|
||||
fft_sample_40.upper_noise = ah->noise;
|
||||
}
|
||||
|
||||
fft_sample_40.lower_rssi = lower_rssi;
|
||||
fft_sample_40.upper_rssi = upper_rssi;
|
||||
|
||||
mag_info = (struct ath_ht20_40_mag_info *) (sample_buf +
|
||||
SPECTRAL_HT20_40_NUM_BINS);
|
||||
|
||||
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
|
||||
fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
|
||||
|
||||
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
|
||||
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
|
||||
|
||||
lower_max_index = spectral_max_index(mag_info->lower_bins,
|
||||
SPECTRAL_HT20_40_NUM_BINS);
|
||||
fft_sample_40.lower_max_index = lower_max_index;
|
||||
|
||||
upper_max_index = spectral_max_index(mag_info->upper_bins,
|
||||
SPECTRAL_HT20_40_NUM_BINS);
|
||||
fft_sample_40.upper_max_index = upper_max_index;
|
||||
|
||||
lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
|
||||
fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
|
||||
|
||||
upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
|
||||
fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
|
||||
|
||||
max_exp = mag_info->max_exp & 0xf;
|
||||
fft_sample_40.max_exp = max_exp;
|
||||
|
||||
fft_sample_40.tsf = __cpu_to_be64(tsf);
|
||||
|
||||
memcpy(fft_sample_40.data, sample_buf, SPECTRAL_HT20_40_NUM_BINS);
|
||||
|
||||
ath_dbg(common, SPECTRAL_SCAN, "FFT HT20/40 frame: lower mag 0x%X,"
|
||||
"lower_mag_idx %i, upper mag 0x%X,"
|
||||
"upper_mag_idx %i\n",
|
||||
lower_mag >> max_exp,
|
||||
lower_max_index,
|
||||
upper_mag >> max_exp,
|
||||
upper_max_index);
|
||||
|
||||
	/* Sometimes the hardware messes up the index and adds
	 * the index of the middle point (dc_pos). Try to fix it.
	 */
|
||||
if ((upper_max_index - dc_pos > 0) &&
|
||||
(fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
|
||||
upper_max_index -= dc_pos;
|
||||
fft_sample_40.upper_max_index = upper_max_index;
|
||||
}
|
||||
|
||||
if ((lower_max_index - dc_pos > 0) &&
|
||||
(fft_sample_40.data[lower_max_index - dc_pos] ==
|
||||
(lower_mag >> max_exp))) {
|
||||
lower_max_index -= dc_pos;
|
||||
fft_sample_40.lower_max_index = lower_max_index;
|
||||
}
|
||||
|
||||
/* Check if we got the expected magnitude values at
|
||||
* the expected bins
|
||||
*/
|
||||
if ((fft_sample_40.data[upper_max_index + dc_pos]
|
||||
!= (upper_mag >> max_exp)) ||
|
||||
(fft_sample_40.data[lower_max_index]
|
||||
!= (lower_mag >> max_exp))) {
|
||||
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
|
||||
ret = -1;
|
||||
}
|
||||
|
||||
/* DC value (value in the middle) is the blind spot of the spectral
|
||||
* sample and invalid, interpolate it.
|
||||
*/
|
||||
fft_sample_40.data[dc_pos] = (fft_sample_40.data[dc_pos + 1] +
|
||||
fft_sample_40.data[dc_pos - 1]) / 2;
|
||||
|
||||
/* Check if the maximum magnitudes are indeed maximum,
|
||||
* also if the maximum value was at dc_pos, calculate
|
||||
* a new one (since value at dc_pos is invalid).
|
||||
*/
|
||||
if (lower_max_index == dc_pos) {
|
||||
tmp_mag = 0;
|
||||
for (i = 0; i < dc_pos; i++) {
|
||||
if (fft_sample_40.data[i] > tmp_mag) {
|
||||
tmp_mag = fft_sample_40.data[i];
|
||||
fft_sample_40.lower_max_index = i;
|
||||
}
|
||||
}
|
||||
|
||||
lower_mag = tmp_mag << max_exp;
|
||||
fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
|
||||
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Calculated new lower max 0x%X at %i\n",
|
||||
tmp_mag, fft_sample_40.lower_max_index);
|
||||
} else
|
||||
for (i = 0; i < dc_pos; i++) {
|
||||
if (fft_sample_40.data[i] == (lower_mag >> max_exp))
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Got lower mag: 0x%X at index %i\n",
|
||||
fft_sample_40.data[i], i);
|
||||
|
||||
if (fft_sample_40.data[i] > (lower_mag >> max_exp)) {
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Got lower bin %i higher than max: 0x%X\n",
|
||||
i, fft_sample_40.data[i]);
|
||||
ret = -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (upper_max_index == dc_pos) {
|
||||
tmp_mag = 0;
|
||||
for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
|
||||
if (fft_sample_40.data[i] > tmp_mag) {
|
||||
tmp_mag = fft_sample_40.data[i];
|
||||
fft_sample_40.upper_max_index = i;
|
||||
}
|
||||
}
|
||||
upper_mag = tmp_mag << max_exp;
|
||||
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
|
||||
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Calculated new upper max 0x%X at %i\n",
|
||||
tmp_mag, i);
|
||||
} else
|
||||
for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
|
||||
if (fft_sample_40.data[i] == (upper_mag >> max_exp))
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Got upper mag: 0x%X at index %i\n",
|
||||
fft_sample_40.data[i], i);
|
||||
|
||||
if (fft_sample_40.data[i] > (upper_mag >> max_exp)) {
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Got upper bin %i higher than max: 0x%X\n",
|
||||
i, fft_sample_40.data[i]);
|
||||
|
||||
ret = -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
tlv = (struct fft_sample_tlv *)&fft_sample_40;
|
||||
|
||||
ath_debug_send_fft_sample(spec_priv, tlv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
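A note on the indexing used by the HT20/40 handler above, with invented numbers: the 128 HT20/40 bins are split around dc_pos = 64, so data[0..63] belong to the lower frequency half and data[64..127] to the upper one, which is why the code compares data[upper_max_index + dc_pos] against upper_mag >> max_exp. With max_exp = 2 and upper_mag = 420, the matching upper bin must hold 420 >> 2 = 105, and an upper_max_index of 3 refers to data[3 + 64] = data[67].
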
static inline void
ath_cmn_copy_fft_frame(u8 *in, u8 *out, int sample_len, int sample_bytes)
{
	switch (sample_bytes - sample_len) {
	case -1:
		/* First byte missing */
		memcpy(&out[1], in,
		       sample_len - 1);
		break;
	case 0:
		/* Length correct, nothing to do. */
		memcpy(out, in, sample_len);
		break;
	case 1:
		/* MAC added 2 extra bytes AND first byte
		 * is missing.
		 */
		memcpy(&out[1], in, 30);
		out[31] = in[31];
		memcpy(&out[32], &in[33],
		       sample_len - 32);
		break;
	case 2:
		/* MAC added 2 extra bytes at bin 30 and 32,
		 * remove them.
		 */
		memcpy(out, in, 30);
		out[30] = in[31];
		memcpy(&out[31], &in[33],
		       sample_len - 31);
		break;
	default:
		break;
	}
}

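ath_cmn_copy_fft_frame() above realigns a report whose received length deviates from the expected sample length by -1 to +2 bytes. A hedged usage sketch (buffer contents and the choice of HT20 sizes are illustrative only):

	u8 in[SPECTRAL_HT20_SAMPLE_LEN + 1];	/* bytes as received from the MAC */
	u8 out[SPECTRAL_HT20_SAMPLE_LEN] = {0};	/* realigned copy */

	/* sample_bytes - sample_len == 1 here, i.e. case 1 above: the MAC
	 * added two extra bytes and the first byte went missing.
	 */
	ath_cmn_copy_fft_frame(in, out, SPECTRAL_HT20_SAMPLE_LEN,
			       SPECTRAL_HT20_SAMPLE_LEN + 1);
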
static int
ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
{
	int i = 0;
	int ret = 0;
	struct rchan *rc = spec_priv->rfs_chan_spec_scan;

	for_each_online_cpu(i)
		ret += relay_buf_full(rc->buf[i]);

	i = num_online_cpus();

	if (ret == i)
		return 1;
	else
		return 0;
}

/* returns 1 if this was a spectral frame, even if not handled. */
|
||||
int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
|
||||
struct ath_rx_status *rs, u64 tsf)
|
||||
{
|
||||
u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
|
||||
struct ath_hw *ah = spec_priv->ah;
|
||||
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
|
||||
u8 num_bins, *bins, *vdata = (u8 *)hdr;
|
||||
struct fft_sample_ht20 fft_sample_20;
|
||||
struct fft_sample_ht20_40 fft_sample_40;
|
||||
struct fft_sample_tlv *tlv;
|
||||
u8 num_bins, *vdata = (u8 *)hdr;
|
||||
struct ath_radar_info *radar_info;
|
||||
int len = rs->rs_datalen;
|
||||
int dc_pos;
|
||||
u16 fft_len, length, freq = ah->curchan->chan->center_freq;
|
||||
int i;
|
||||
int got_slen = 0;
|
||||
u8 *sample_start;
|
||||
int sample_bytes = 0;
|
||||
int ret = 0;
|
||||
u16 fft_len, sample_len, freq = ah->curchan->chan->center_freq;
|
||||
enum nl80211_channel_type chan_type;
|
||||
ath_cmn_fft_idx_validator *fft_idx_validator;
|
||||
ath_cmn_fft_sample_handler *fft_handler;
|
||||
|
||||
/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
|
||||
* via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
|
||||
@ -68,140 +528,170 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
|
||||
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
|
||||
return 0;
|
||||
|
||||
/* Output buffers are full, no need to process anything
|
||||
* since there is no space to put the result anyway
|
||||
*/
|
||||
ret = ath_cmn_is_fft_buf_full(spec_priv);
|
||||
if (ret == 1) {
|
||||
ath_dbg(common, SPECTRAL_SCAN, "FFT report ignored, no space "
|
||||
"left on output buffers\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
|
||||
if ((chan_type == NL80211_CHAN_HT40MINUS) ||
|
||||
(chan_type == NL80211_CHAN_HT40PLUS)) {
|
||||
fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
|
||||
sample_len = SPECTRAL_HT20_40_SAMPLE_LEN;
|
||||
num_bins = SPECTRAL_HT20_40_NUM_BINS;
|
||||
bins = (u8 *)fft_sample_40.data;
|
||||
fft_idx_validator = &ath_cmn_max_idx_verify_ht20_40_fft;
|
||||
fft_handler = &ath_cmn_process_ht20_40_fft;
|
||||
} else {
|
||||
fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
|
||||
sample_len = SPECTRAL_HT20_SAMPLE_LEN;
|
||||
num_bins = SPECTRAL_HT20_NUM_BINS;
|
||||
bins = (u8 *)fft_sample_20.data;
|
||||
fft_idx_validator = ath_cmn_max_idx_verify_ht20_fft;
|
||||
fft_handler = &ath_cmn_process_ht20_fft;
|
||||
}
|
||||
|
||||
/* Variation in the data length is possible and will be fixed later */
|
||||
if ((len > fft_len + 2) || (len < fft_len - 1))
|
||||
return 1;
|
||||
ath_dbg(common, SPECTRAL_SCAN, "Got radar dump bw_info: 0x%X,"
|
||||
"len: %i fft_len: %i\n",
|
||||
radar_info->pulse_bw_info,
|
||||
len,
|
||||
fft_len);
|
||||
sample_start = vdata;
|
||||
for (i = 0; i < len - 2; i++) {
|
||||
sample_bytes++;
|
||||
|
||||
switch (len - fft_len) {
|
||||
case 0:
|
||||
/* length correct, nothing to do. */
|
||||
memcpy(bins, vdata, num_bins);
|
||||
break;
|
||||
case -1:
|
||||
/* first byte missing, duplicate it. */
|
||||
memcpy(&bins[1], vdata, num_bins - 1);
|
||||
bins[0] = vdata[0];
|
||||
break;
|
||||
case 2:
|
||||
/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
|
||||
memcpy(bins, vdata, 30);
|
||||
bins[30] = vdata[31];
|
||||
memcpy(&bins[31], &vdata[33], num_bins - 31);
|
||||
break;
|
||||
case 1:
|
||||
/* MAC added 2 extra bytes AND first byte is missing. */
|
||||
bins[0] = vdata[0];
|
||||
memcpy(&bins[1], vdata, 30);
|
||||
bins[31] = vdata[31];
|
||||
memcpy(&bins[32], &vdata[33], num_bins - 32);
|
||||
break;
|
||||
default:
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* DC value (value in the middle) is the blind spot of the spectral
|
||||
* sample and invalid, interpolate it.
|
||||
*/
|
||||
dc_pos = num_bins / 2;
|
||||
bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
|
||||
|
||||
if ((chan_type == NL80211_CHAN_HT40MINUS) ||
|
||||
(chan_type == NL80211_CHAN_HT40PLUS)) {
|
||||
s8 lower_rssi, upper_rssi;
|
||||
s16 ext_nf;
|
||||
u8 lower_max_index, upper_max_index;
|
||||
u8 lower_bitmap_w, upper_bitmap_w;
|
||||
u16 lower_mag, upper_mag;
|
||||
struct ath9k_hw_cal_data *caldata = ah->caldata;
|
||||
struct ath_ht20_40_mag_info *mag_info;
|
||||
|
||||
if (caldata)
|
||||
ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
|
||||
caldata->nfCalHist[3].privNF);
|
||||
else
|
||||
ext_nf = ATH_DEFAULT_NOISE_FLOOR;
|
||||
|
||||
length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
|
||||
fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
|
||||
fft_sample_40.tlv.length = __cpu_to_be16(length);
|
||||
fft_sample_40.freq = __cpu_to_be16(freq);
|
||||
fft_sample_40.channel_type = chan_type;
|
||||
|
||||
if (chan_type == NL80211_CHAN_HT40PLUS) {
|
||||
lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
|
||||
upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
|
||||
|
||||
fft_sample_40.lower_noise = ah->noise;
|
||||
fft_sample_40.upper_noise = ext_nf;
|
||||
} else {
|
||||
lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
|
||||
upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
|
||||
|
||||
fft_sample_40.lower_noise = ext_nf;
|
||||
fft_sample_40.upper_noise = ah->noise;
|
||||
/* Only a single sample received, no need to look
|
||||
* for the sample's end, do the correction based
|
||||
* on the packet's length instead. Note that hw
|
||||
* will always put the radar_info structure on
|
||||
* the end.
|
||||
*/
|
||||
if (len <= fft_len + 2) {
|
||||
sample_bytes = len - sizeof(struct ath_radar_info);
|
||||
got_slen = 1;
|
||||
}
|
||||
fft_sample_40.lower_rssi = lower_rssi;
|
||||
fft_sample_40.upper_rssi = upper_rssi;
|
||||
|
||||
mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
|
||||
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
|
||||
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
|
||||
fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
|
||||
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
|
||||
lower_max_index = spectral_max_index(mag_info->lower_bins);
|
||||
upper_max_index = spectral_max_index(mag_info->upper_bins);
|
||||
fft_sample_40.lower_max_index = lower_max_index;
|
||||
fft_sample_40.upper_max_index = upper_max_index;
|
||||
lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
|
||||
upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
|
||||
fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
|
||||
fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
|
||||
fft_sample_40.max_exp = mag_info->max_exp & 0xf;
|
||||
/* Search for the end of the FFT frame between
|
||||
* sample_len - 1 and sample_len + 2. exp_max is 3
|
||||
* bits long and it's the only value on the last
|
||||
* byte of the frame so since it'll be smaller than
|
||||
* the next byte (the first bin of the next sample)
|
||||
* 90% of the time, we can use it as a separator.
|
||||
*/
|
||||
if (vdata[i] <= 0x7 && sample_bytes >= sample_len - 1) {
|
||||
|
||||
fft_sample_40.tsf = __cpu_to_be64(tsf);
|
||||
/* Got a frame length within boundaries, there are
|
||||
* four scenarios here:
|
||||
*
|
||||
* a) sample_len -> We got the correct length
|
||||
* b) sample_len + 2 -> 2 bytes added around bin[31]
|
||||
* c) sample_len - 1 -> The first byte is missing
|
||||
* d) sample_len + 1 -> b + c at the same time
|
||||
*
|
||||
* When MAC adds 2 extra bytes, bin[31] and bin[32]
|
||||
* have the same value, so we can use that for further
|
||||
* verification in cases b and d.
|
||||
*/
|
||||
|
||||
tlv = (struct fft_sample_tlv *)&fft_sample_40;
|
||||
} else {
|
||||
u8 max_index, bitmap_w;
|
||||
u16 magnitude;
|
||||
struct ath_ht20_mag_info *mag_info;
|
||||
/* Did we go too far ? If so we couldn't determine
|
||||
* this sample's boundaries, discard any further
|
||||
* data
|
||||
*/
|
||||
if ((sample_bytes > sample_len + 2) ||
|
||||
((sample_bytes > sample_len) &&
|
||||
(sample_start[31] != sample_start[32])))
|
||||
break;
|
||||
|
||||
length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
|
||||
fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
|
||||
fft_sample_20.tlv.length = __cpu_to_be16(length);
|
||||
fft_sample_20.freq = __cpu_to_be16(freq);
|
||||
/* See if we got a valid frame by checking the
|
||||
* consistency of mag_info fields. This is to
|
||||
* prevent from "fixing" a correct frame.
|
||||
* Failure is non-fatal, later frames may
|
||||
* be valid.
|
||||
*/
|
||||
if (!fft_idx_validator(&vdata[i], i)) {
|
||||
ath_dbg(common, SPECTRAL_SCAN,
|
||||
"Found valid fft frame at %i\n", i);
|
||||
got_slen = 1;
|
||||
}
|
||||
|
||||
fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
|
||||
fft_sample_20.noise = ah->noise;
|
||||
/* We expect 1 - 2 more bytes */
|
||||
else if ((sample_start[31] == sample_start[32]) &&
|
||||
(sample_bytes >= sample_len) &&
|
||||
(sample_bytes < sample_len + 2) &&
|
||||
(vdata[i + 1] <= 0x7))
|
||||
continue;
|
||||
|
||||
mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
|
||||
magnitude = spectral_max_magnitude(mag_info->all_bins);
|
||||
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
|
||||
max_index = spectral_max_index(mag_info->all_bins);
|
||||
fft_sample_20.max_index = max_index;
|
||||
bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
|
||||
fft_sample_20.bitmap_weight = bitmap_w;
|
||||
fft_sample_20.max_exp = mag_info->max_exp & 0xf;
|
||||
/* Try to distinguish cases a and c */
|
||||
else if ((sample_bytes == sample_len - 1) &&
|
||||
(vdata[i + 1] <= 0x7))
|
||||
continue;
|
||||
|
||||
fft_sample_20.tsf = __cpu_to_be64(tsf);
|
||||
got_slen = 1;
|
||||
}
|
||||
|
||||
tlv = (struct fft_sample_tlv *)&fft_sample_20;
|
||||
if (got_slen) {
|
||||
ath_dbg(common, SPECTRAL_SCAN, "FFT frame len: %i\n",
|
||||
sample_bytes);
|
||||
|
||||
/* Only try to fix a frame if it's the only one
|
||||
* on the report, else just skip it.
|
||||
*/
|
||||
if (sample_bytes != sample_len && len <= fft_len + 2) {
|
||||
ath_cmn_copy_fft_frame(sample_start,
|
||||
sample_buf, sample_len,
|
||||
sample_bytes);
|
||||
|
||||
fft_handler(rs, spec_priv, sample_buf,
|
||||
tsf, freq, chan_type);
|
||||
|
||||
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
|
||||
|
||||
/* Mix the received bins to the /dev/random
|
||||
* pool
|
||||
*/
|
||||
add_device_randomness(sample_buf, num_bins);
|
||||
}
|
||||
|
||||
/* Process a normal frame */
|
||||
if (sample_bytes == sample_len) {
|
||||
ret = fft_handler(rs, spec_priv, sample_start,
|
||||
tsf, freq, chan_type);
|
||||
|
||||
/* Mix the received bins to the /dev/random
|
||||
* pool
|
||||
*/
|
||||
add_device_randomness(sample_start, num_bins);
|
||||
}
|
||||
|
||||
/* Short report processed, break out of the
|
||||
* loop.
|
||||
*/
|
||||
if (len <= fft_len + 2)
|
||||
break;
|
||||
|
||||
sample_start = &vdata[i + 1];
|
||||
|
||||
/* -1 to grab sample_len -1, -2 since
|
||||
* they 'll get increased by one. In case
|
||||
* of failure try to recover by going byte
|
||||
* by byte instead.
|
||||
*/
|
||||
if (ret == 0) {
|
||||
i += num_bins - 2;
|
||||
sample_bytes = num_bins - 2;
|
||||
}
|
||||
got_slen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
ath_debug_send_fft_sample(spec_priv, tlv);
|
||||
|
||||
i -= num_bins - 2;
|
||||
if (len - i != sizeof(struct ath_radar_info))
|
||||
ath_dbg(common, SPECTRAL_SCAN, "FFT report truncated"
|
||||
"(bytes left: %i)\n",
|
||||
len - i);
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(ath_cmn_process_fft);
|
||||
|
@ -66,6 +66,8 @@ struct ath_ht20_fft_packet {
} __packed;

#define SPECTRAL_HT20_TOTAL_DATA_LEN	(sizeof(struct ath_ht20_fft_packet))
#define SPECTRAL_HT20_SAMPLE_LEN	(sizeof(struct ath_ht20_mag_info) +\
					SPECTRAL_HT20_NUM_BINS)

/* Dynamic 20/40 mode:
 *
@ -101,6 +103,10 @@ struct ath_spec_scan_priv {
};

#define SPECTRAL_HT20_40_TOTAL_DATA_LEN	(sizeof(struct ath_ht20_40_fft_packet))
#define SPECTRAL_HT20_40_SAMPLE_LEN	(sizeof(struct ath_ht20_40_mag_info) +\
					SPECTRAL_HT20_40_NUM_BINS)

#define SPECTRAL_SAMPLE_MAX_LEN		SPECTRAL_HT20_40_SAMPLE_LEN

/* grabs the max magnitude from the all/upper/lower bins */
static inline u16 spectral_max_magnitude(u8 *bins)
@ -111,17 +117,32 @@ static inline u16 spectral_max_magnitude(u8 *bins)
}

/* return the max magnitude from the all/upper/lower bins */
static inline u8 spectral_max_index(u8 *bins)
static inline u8 spectral_max_index(u8 *bins, int num_bins)
{
	s8 m = (bins[2] & 0xfc) >> 2;
	u8 zero_idx = num_bins / 2;

	/* TODO: this still doesn't always report the right values ... */
	if (m > 32)
	/* It's a 5 bit signed int, remove its sign and use one's
	 * complement interpretation to add the sign back to the 8
	 * bit int
	 */
	if (m & 0x20) {
		m &= ~0x20;
		m |= 0xe0;
	else
		m &= ~0xe0;
	}

	return m + 29;
	/* Bring the zero point to the beginning
	 * instead of the middle so that we can use
	 * it for array lookup and that we don't deal
	 * with negative values later
	 */
	m += zero_idx;

	/* Sanity check to make sure index is within bounds */
	if (m < 0 || m > num_bins - 1)
		m = 0;

	return m;
}

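The reworked spectral_max_index() treats the raw field as a signed offset around the DC bin and shifts it to a zero-based array index. A small self-contained sketch of the same decode, with a couple of example values (assumes num_bins = 56 as for HT20; this is a standalone illustration, not part of the patch):

#include <stdio.h>

/* Decode the raw max-index field into a 0..num_bins-1 array index,
 * mirroring the spectral_max_index() logic shown above.
 */
static unsigned char decode_max_index(unsigned char raw, int num_bins)
{
	signed char m = raw & 0x3f;	/* (bins[2] & 0xfc) >> 2 in the driver */
	unsigned char zero_idx = num_bins / 2;

	if (m & 0x20) {			/* negative offset: extend sign to 8 bits */
		m &= ~0x20;
		m |= 0xe0;
	}

	m += zero_idx;			/* move the zero point to the array start */
	if (m < 0 || m > num_bins - 1)	/* clamp out-of-range values */
		m = 0;

	return m;
}

int main(void)
{
	/* raw 0x3f is offset -1 -> bin 27, raw 0x01 is offset +1 -> bin 29 */
	printf("%u %u\n", decode_max_index(0x3f, 56), decode_max_index(0x01, 56));
	return 0;
}
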
/* return the bitmap weight from the all/upper/lower bins */
|
||||
|
@ -594,7 +594,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
|
||||
|
||||
priv->spec_priv.ah = priv->ah;
|
||||
priv->spec_priv.spec_config.enabled = 0;
|
||||
priv->spec_priv.spec_config.short_repeat = false;
|
||||
priv->spec_priv.spec_config.short_repeat = true;
|
||||
priv->spec_priv.spec_config.count = 8;
|
||||
priv->spec_priv.spec_config.endless = false;
|
||||
priv->spec_priv.spec_config.period = 0x12;
|
||||
|
@ -41,30 +41,31 @@ struct radar_types {
|
||||
|
||||
/* percentage on ppb threshold to trigger detection */
|
||||
#define MIN_PPB_THRESH 50
|
||||
#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
|
||||
#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
|
||||
#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
|
||||
#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
|
||||
/* percentage of pulse width tolerance */
|
||||
#define WIDTH_TOLERANCE 5
|
||||
#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
|
||||
#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
|
||||
|
||||
#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
|
||||
#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
|
||||
{ \
|
||||
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
|
||||
(PRF2PRI(PMAX) - PRI_TOLERANCE), \
|
||||
(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
|
||||
PPB_THRESH(PPB), PRI_TOLERANCE, \
|
||||
PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
|
||||
}
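As a quick sanity check of the macros above (plain arithmetic, not part of the patch): PRF2PRI(700) = (1000000 + 350) / 700 = 1429 us, WIDTH_LOWER(20) = (20 * 95 + 50) / 100 = 19, and the reworked PPB_THRESH(18) expands to PPB_THRESH_RATE(18, 50) = (18 * 50 + 100 - 50) / 100 = 9, the same value the old formula produced. The JP_PATTERN variant further down passes its own RATE, so with RATE = 29 the same PPB of 18 only requires (18 * 29 + 100 - 29) / 100 = 5 pulses to trigger detection.
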
|
||||
|
||||
/* radar types as defined by ETSI EN-301-893 v1.5.1 */
|
||||
static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
|
||||
ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
|
||||
ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
|
||||
ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
|
||||
ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
|
||||
ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
|
||||
ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
|
||||
ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
|
||||
ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18, false),
|
||||
ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10, false),
|
||||
ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15, false),
|
||||
ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25, false),
|
||||
ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
|
||||
ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10, false),
|
||||
ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15, false),
|
||||
};
|
||||
|
||||
static const struct radar_types etsi_radar_types_v15 = {
|
||||
@ -73,21 +74,30 @@ static const struct radar_types etsi_radar_types_v15 = {
|
||||
.radar_types = etsi_radar_ref_types_v15,
|
||||
};
|
||||
|
||||
#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
|
||||
#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
|
||||
{ \
|
||||
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
|
||||
PMIN - PRI_TOLERANCE, \
|
||||
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
|
||||
PPB_THRESH(PPB), PRI_TOLERANCE, \
|
||||
PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
|
||||
}
|
||||
|
||||
/* radar types released on August 14, 2014
|
||||
* type 1 PRI values randomly selected within the range of 518 and 3066.
|
||||
* divide it to 3 groups is good enough for both of radar detection and
|
||||
* avoiding false detection based on practical test results
|
||||
* collected for more than a year.
|
||||
*/
|
||||
static const struct radar_detector_specs fcc_radar_ref_types[] = {
|
||||
FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
|
||||
FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
|
||||
FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
|
||||
FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
|
||||
FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
|
||||
FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
|
||||
FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
|
||||
FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
|
||||
FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
|
||||
FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
|
||||
FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
|
||||
FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
|
||||
FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
|
||||
FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
|
||||
FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
|
||||
};
|
||||
|
||||
static const struct radar_types fcc_radar_types = {
|
||||
@ -96,17 +106,23 @@ static const struct radar_types fcc_radar_types = {
|
||||
.radar_types = fcc_radar_ref_types,
|
||||
};
|
||||
|
||||
#define JP_PATTERN FCC_PATTERN
|
||||
#define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP) \
|
||||
{ \
|
||||
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
|
||||
PMIN - PRI_TOLERANCE, \
|
||||
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
|
||||
PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP \
|
||||
}
|
||||
static const struct radar_detector_specs jp_radar_ref_types[] = {
|
||||
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
|
||||
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
|
||||
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
|
||||
JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
|
||||
JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
|
||||
JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
|
||||
JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
|
||||
JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
|
||||
JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
|
||||
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
|
||||
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
|
||||
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
|
||||
JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
|
||||
JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
|
||||
JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
|
||||
JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
|
||||
JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
|
||||
JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
|
||||
};
|
||||
|
||||
static const struct radar_types jp_radar_types = {
|
||||
|
@ -40,12 +40,14 @@ struct ath_dfs_pool_stats {
|
||||
* @freq: channel frequency in MHz
|
||||
* @width: pulse duration in us
|
||||
* @rssi: rssi of radar event
|
||||
* @chirp: chirp detected in pulse
|
||||
*/
|
||||
struct pulse_event {
|
||||
u64 ts;
|
||||
u16 freq;
|
||||
u8 width;
|
||||
u8 rssi;
|
||||
bool chirp;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -59,6 +61,7 @@ struct pulse_event {
|
||||
* @ppb: pulses per bursts for this type
|
||||
* @ppb_thresh: number of pulses required to trigger detection
|
||||
* @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
|
||||
* @chirp: chirp required for the radar pattern
|
||||
*/
|
||||
struct radar_detector_specs {
|
||||
u8 type_id;
|
||||
@ -70,6 +73,7 @@ struct radar_detector_specs {
|
||||
u8 ppb;
|
||||
u8 ppb_thresh;
|
||||
u8 max_pri_tolerance;
|
||||
bool chirp;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -390,6 +390,10 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
|
||||
if ((ts - de->last_ts) < rs->max_pri_tolerance)
|
||||
/* if delta to last pulse is too short, don't use this pulse */
|
||||
return NULL;
|
||||
/* radar detector spec needs chirp, but not detected */
|
||||
if (rs->chirp && rs->chirp != event->chirp)
|
||||
return NULL;
|
||||
|
||||
de->last_ts = ts;
|
||||
|
||||
max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
|
||||
|
@ -1011,6 +1011,14 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void brcmf_sdiod_host_fixup(struct mmc_host *host)
|
||||
{
|
||||
/* runtime-pm powers off the device */
|
||||
pm_runtime_forbid(host->parent);
|
||||
/* avoid removal detection upon resume */
|
||||
host->caps |= MMC_CAP_NONREMOVABLE;
|
||||
}
|
||||
|
||||
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
|
||||
{
|
||||
struct sdio_func *func;
|
||||
@ -1076,7 +1084,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
pm_runtime_forbid(host->parent);
|
||||
brcmf_sdiod_host_fixup(host);
|
||||
out:
|
||||
if (ret)
|
||||
brcmf_sdiod_remove(sdiodev);
|
||||
@ -1246,15 +1254,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
|
||||
brcmf_sdiod_freezer_on(sdiodev);
|
||||
brcmf_sdio_wd_timer(sdiodev->bus, 0);
|
||||
|
||||
sdio_flags = MMC_PM_KEEP_POWER;
|
||||
if (sdiodev->wowl_enabled) {
|
||||
sdio_flags = MMC_PM_KEEP_POWER;
|
||||
if (sdiodev->pdata->oob_irq_supported)
|
||||
enable_irq_wake(sdiodev->pdata->oob_irq_nr);
|
||||
else
|
||||
sdio_flags = MMC_PM_WAKE_SDIO_IRQ;
|
||||
if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
|
||||
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
|
||||
sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
|
||||
}
|
||||
if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
|
||||
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -129,13 +129,47 @@ static struct ieee80211_rate __wl_rates[] = {
|
||||
RATETAB_ENT(BRCM_RATE_54M, 0),
|
||||
};
|
||||
|
||||
#define wl_a_rates (__wl_rates + 4)
|
||||
#define wl_a_rates_size 8
|
||||
#define wl_g_rates (__wl_rates + 0)
|
||||
#define wl_g_rates_size 12
|
||||
#define wl_g_rates_size ARRAY_SIZE(__wl_rates)
|
||||
#define wl_a_rates (__wl_rates + 4)
|
||||
#define wl_a_rates_size (wl_g_rates_size - 4)
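The new definitions above keep the legacy values: ARRAY_SIZE(__wl_rates) is 12 (four CCK/DSSS entries plus eight OFDM entries ending at 54M), so wl_g_rates_size still evaluates to 12 and wl_a_rates_size to 12 - 4 = 8, matching the hard-coded sizes they replace.
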
|
||||
|
||||
#define CHAN2G(_channel, _freq) { \
|
||||
.band = IEEE80211_BAND_2GHZ, \
|
||||
.center_freq = (_freq), \
|
||||
.hw_value = (_channel), \
|
||||
.flags = IEEE80211_CHAN_DISABLED, \
|
||||
.max_antenna_gain = 0, \
|
||||
.max_power = 30, \
|
||||
}
|
||||
|
||||
#define CHAN5G(_channel) { \
|
||||
.band = IEEE80211_BAND_5GHZ, \
|
||||
.center_freq = 5000 + (5 * (_channel)), \
|
||||
.hw_value = (_channel), \
|
||||
.flags = IEEE80211_CHAN_DISABLED, \
|
||||
.max_antenna_gain = 0, \
|
||||
.max_power = 30, \
|
||||
}
|
||||
|
||||
static struct ieee80211_channel __wl_2ghz_channels[] = {
|
||||
CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
|
||||
CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
|
||||
CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
|
||||
CHAN2G(13, 2472), CHAN2G(14, 2484)
|
||||
};
|
||||
|
||||
static struct ieee80211_channel __wl_5ghz_channels[] = {
|
||||
CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
|
||||
CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
|
||||
CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
|
||||
CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
|
||||
CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
|
||||
CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
|
||||
};
|
||||
|
||||
/* Band templates duplicated per wiphy. The channel info
|
||||
* is filled in after querying the device.
|
||||
* above is added to the band during setup.
|
||||
*/
|
||||
static const struct ieee80211_supported_band __wl_band_2ghz = {
|
||||
.band = IEEE80211_BAND_2GHZ,
|
||||
@ -143,7 +177,7 @@ static const struct ieee80211_supported_band __wl_band_2ghz = {
|
||||
.n_bitrates = wl_g_rates_size,
|
||||
};
|
||||
|
||||
static const struct ieee80211_supported_band __wl_band_5ghz_a = {
|
||||
static const struct ieee80211_supported_band __wl_band_5ghz = {
|
||||
.band = IEEE80211_BAND_5GHZ,
|
||||
.bitrates = wl_a_rates,
|
||||
.n_bitrates = wl_a_rates_size,
|
||||
@ -5253,40 +5287,6 @@ dongle_scantime_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Filter the list of channels received from firmware counting only
|
||||
* the 20MHz channels. The wiphy band data only needs those which get
|
||||
* flagged to indicate if they can take part in higher bandwidth.
|
||||
*/
|
||||
static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
|
||||
struct brcmf_chanspec_list *chlist,
|
||||
u32 chcnt[])
|
||||
{
|
||||
u32 total = le32_to_cpu(chlist->count);
|
||||
struct brcmu_chan ch;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < total; i++) {
|
||||
ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
|
||||
cfg->d11inf.decchspec(&ch);
|
||||
|
||||
/* Firmware gives a ordered list. We skip non-20MHz
|
||||
* channels is 2G. For 5G we can abort upon reaching
|
||||
* a non-20MHz channel in the list.
|
||||
*/
|
||||
if (ch.bw != BRCMU_CHAN_BW_20) {
|
||||
if (ch.band == BRCMU_CHAN_BAND_5G)
|
||||
break;
|
||||
else
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch.band == BRCMU_CHAN_BAND_2G)
|
||||
chcnt[0] += 1;
|
||||
else if (ch.band == BRCMU_CHAN_BAND_5G)
|
||||
chcnt[1] += 1;
|
||||
}
|
||||
}
|
||||
|
||||
static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
|
||||
struct brcmu_chan *ch)
|
||||
{
|
||||
@ -5322,7 +5322,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
|
||||
u32 i, j;
|
||||
u32 total;
|
||||
u32 chaninfo;
|
||||
u32 chcnt[2] = { 0, 0 };
|
||||
u32 index;
|
||||
|
||||
pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
|
||||
@ -5339,42 +5338,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
|
||||
goto fail_pbuf;
|
||||
}
|
||||
|
||||
brcmf_count_20mhz_channels(cfg, list, chcnt);
|
||||
wiphy = cfg_to_wiphy(cfg);
|
||||
if (chcnt[0]) {
|
||||
band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
|
||||
GFP_KERNEL);
|
||||
if (band == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto fail_pbuf;
|
||||
}
|
||||
band->channels = kcalloc(chcnt[0], sizeof(*channel),
|
||||
GFP_KERNEL);
|
||||
if (band->channels == NULL) {
|
||||
kfree(band);
|
||||
err = -ENOMEM;
|
||||
goto fail_pbuf;
|
||||
}
|
||||
band->n_channels = 0;
|
||||
wiphy->bands[IEEE80211_BAND_2GHZ] = band;
|
||||
}
|
||||
if (chcnt[1]) {
|
||||
band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
|
||||
GFP_KERNEL);
|
||||
if (band == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto fail_band2g;
|
||||
}
|
||||
band->channels = kcalloc(chcnt[1], sizeof(*channel),
|
||||
GFP_KERNEL);
|
||||
if (band->channels == NULL) {
|
||||
kfree(band);
|
||||
err = -ENOMEM;
|
||||
goto fail_band2g;
|
||||
}
|
||||
band->n_channels = 0;
|
||||
wiphy->bands[IEEE80211_BAND_5GHZ] = band;
|
||||
}
|
||||
band = wiphy->bands[IEEE80211_BAND_2GHZ];
|
||||
if (band)
|
||||
for (i = 0; i < band->n_channels; i++)
|
||||
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
|
||||
band = wiphy->bands[IEEE80211_BAND_5GHZ];
|
||||
if (band)
|
||||
for (i = 0; i < band->n_channels; i++)
|
||||
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
|
||||
|
||||
total = le32_to_cpu(list->count);
|
||||
for (i = 0; i < total; i++) {
|
||||
@ -5389,6 +5361,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
|
||||
brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
|
||||
continue;
|
||||
}
|
||||
if (!band)
|
||||
continue;
|
||||
if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
|
||||
ch.bw == BRCMU_CHAN_BW_40)
|
||||
continue;
|
||||
@ -5416,9 +5390,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
|
||||
} else if (ch.bw == BRCMU_CHAN_BW_40) {
|
||||
brcmf_update_bw40_channel_flag(&channel[index], &ch);
|
||||
} else {
|
||||
/* disable other bandwidths for now as mentioned
|
||||
* order assure they are enabled for subsequent
|
||||
* chanspecs.
|
||||
/* enable the channel and disable other bandwidths
|
||||
* for now as mentioned order assure they are enabled
|
||||
* for subsequent chanspecs.
|
||||
*/
|
||||
channel[index].flags = IEEE80211_CHAN_NO_HT40 |
|
||||
IEEE80211_CHAN_NO_80MHZ;
|
||||
@ -5437,16 +5411,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
|
||||
IEEE80211_CHAN_NO_IR;
|
||||
}
|
||||
}
|
||||
if (index == band->n_channels)
|
||||
band->n_channels++;
|
||||
}
|
||||
kfree(pbuf);
|
||||
return 0;
|
||||
|
||||
fail_band2g:
|
||||
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
|
||||
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
|
||||
wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
|
||||
fail_pbuf:
|
||||
kfree(pbuf);
|
||||
return err;
|
||||
@ -5779,7 +5745,12 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
|
||||
|
||||
static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
|
||||
{
|
||||
struct ieee80211_supported_band *band;
|
||||
struct ieee80211_iface_combination ifc_combo;
|
||||
__le32 bandlist[3];
|
||||
u32 n_bands;
|
||||
int err, i;
|
||||
|
||||
wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
|
||||
wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
|
||||
wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
|
||||
@ -5812,7 +5783,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
|
||||
wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
|
||||
wiphy->mgmt_stypes = brcmf_txrx_stypes;
|
||||
wiphy->max_remain_on_channel_duration = 5000;
|
||||
brcmf_wiphy_pno_params(wiphy);
|
||||
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
|
||||
brcmf_wiphy_pno_params(wiphy);
|
||||
|
||||
/* vendor commands/events support */
|
||||
wiphy->vendor_commands = brcmf_vendor_cmds;
|
||||
@ -5821,7 +5793,52 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
|
||||
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
|
||||
brcmf_wiphy_wowl_params(wiphy);
|
||||
|
||||
return brcmf_setup_wiphybands(wiphy);
|
||||
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
|
||||
sizeof(bandlist));
|
||||
if (err) {
|
||||
brcmf_err("could not obtain band info: err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
/* first entry in bandlist is number of bands */
|
||||
n_bands = le32_to_cpu(bandlist[0]);
|
||||
for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
|
||||
if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
|
||||
band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
|
||||
GFP_KERNEL);
|
||||
if (!band)
|
||||
return -ENOMEM;
|
||||
|
||||
band->channels = kmemdup(&__wl_2ghz_channels,
|
||||
sizeof(__wl_2ghz_channels),
|
||||
GFP_KERNEL);
|
||||
if (!band->channels) {
|
||||
kfree(band);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
|
||||
wiphy->bands[IEEE80211_BAND_2GHZ] = band;
|
||||
}
|
||||
if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
|
||||
band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
|
||||
GFP_KERNEL);
|
||||
if (!band)
|
||||
return -ENOMEM;
|
||||
|
||||
band->channels = kmemdup(&__wl_5ghz_channels,
|
||||
sizeof(__wl_5ghz_channels),
|
||||
GFP_KERNEL);
|
||||
if (!band->channels) {
|
||||
kfree(band);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
|
||||
wiphy->bands[IEEE80211_BAND_5GHZ] = band;
|
||||
}
|
||||
}
|
||||
err = brcmf_setup_wiphybands(wiphy);
|
||||
return err;
|
||||
}
|
||||
|
||||
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
|
||||
@ -6007,11 +6024,18 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
	memset(&ccreq, 0, sizeof(ccreq));
	ccreq.rev = cpu_to_le32(-1);
	memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
	brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
	if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
		brcmf_err("firmware rejected country setting\n");
		return;
	}
	brcmf_setup_wiphybands(wiphy);
}
|
||||
|
||||
static void brcmf_free_wiphy(struct wiphy *wiphy)
|
||||
{
|
||||
if (!wiphy)
|
||||
return;
|
||||
|
||||
kfree(wiphy->iface_combinations);
|
||||
if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
|
||||
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
|
||||
|
@ -649,6 +649,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
|
||||
case BRCM_CC_43567_CHIP_ID:
|
||||
case BRCM_CC_43569_CHIP_ID:
|
||||
case BRCM_CC_43570_CHIP_ID:
|
||||
case BRCM_CC_4358_CHIP_ID:
|
||||
case BRCM_CC_43602_CHIP_ID:
|
||||
return 0x180000;
|
||||
default:
|
||||
|
@ -124,6 +124,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
|
||||
struct brcmf_if *ifp = drvr->iflist[0];
|
||||
|
||||
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
|
||||
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
|
||||
if (drvr->bus_if->wowl_supported)
|
||||
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
|
||||
if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
|
||||
|
@ -19,11 +19,15 @@
|
||||
/*
|
||||
* Features:
|
||||
*
|
||||
* MBSS: multiple BSSID support (eg. guest network in AP mode).
|
||||
* MCHAN: multi-channel for concurrent P2P.
|
||||
* PNO: preferred network offload.
|
||||
* WOWL: Wake-On-WLAN.
|
||||
*/
|
||||
#define BRCMF_FEAT_LIST \
|
||||
BRCMF_FEAT_DEF(MBSS) \
|
||||
BRCMF_FEAT_DEF(MCHAN) \
|
||||
BRCMF_FEAT_DEF(PNO) \
|
||||
BRCMF_FEAT_DEF(WOWL)
|
||||
/*
|
||||
* Quirks:
|
||||
|
@ -23,6 +23,10 @@
|
||||
#include "debug.h"
|
||||
#include "firmware.h"
|
||||
|
||||
#define BRCMF_FW_MAX_NVRAM_SIZE 64000
|
||||
#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
|
||||
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 9 /* pcie/1/4/ */
|
||||
|
||||
char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
|
||||
module_param_string(firmware_path, brcmf_firmware_path,
|
||||
BRCMF_FW_PATH_LEN, 0440);
|
||||
@ -46,6 +50,8 @@ enum nvram_parser_state {
|
||||
* @column: current column in line.
|
||||
* @pos: byte offset in input buffer.
|
||||
* @entry: start position of key,value entry.
|
||||
* @multi_dev_v1: detect pcie multi device v1 (compressed).
|
||||
* @multi_dev_v2: detect pcie multi device v2.
|
||||
*/
|
||||
struct nvram_parser {
|
||||
enum nvram_parser_state state;
|
||||
@ -56,6 +62,8 @@ struct nvram_parser {
|
||||
u32 column;
|
||||
u32 pos;
|
||||
u32 entry;
|
||||
bool multi_dev_v1;
|
||||
bool multi_dev_v2;
|
||||
};
|
||||
|
||||
static bool is_nvram_char(char c)
|
||||
@ -108,6 +116,10 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
|
||||
st = COMMENT;
|
||||
else
|
||||
st = VALUE;
|
||||
if (strncmp(&nvp->fwnv->data[nvp->entry], "devpath", 7) == 0)
|
||||
nvp->multi_dev_v1 = true;
|
||||
if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0)
|
||||
nvp->multi_dev_v2 = true;
|
||||
} else if (!is_nvram_char(c)) {
|
||||
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
|
||||
nvp->line, nvp->column);
|
||||
@ -133,6 +145,8 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
|
||||
ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
|
||||
skv = (u8 *)&nvp->fwnv->data[nvp->entry];
|
||||
cplen = ekv - skv;
|
||||
if (nvp->nvram_len + cplen + 1 >= BRCMF_FW_MAX_NVRAM_SIZE)
|
||||
return END;
|
||||
/* copy to output buffer */
|
||||
memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
|
||||
nvp->nvram_len += cplen;
|
||||
@ -180,10 +194,18 @@ static enum nvram_parser_state
|
||||
static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
|
||||
const struct firmware *nv)
|
||||
{
|
||||
size_t size;
|
||||
|
||||
memset(nvp, 0, sizeof(*nvp));
|
||||
nvp->fwnv = nv;
|
||||
/* Limit size to MAX_NVRAM_SIZE, some files contain lot of comment */
|
||||
if (nv->size > BRCMF_FW_MAX_NVRAM_SIZE)
|
||||
size = BRCMF_FW_MAX_NVRAM_SIZE;
|
||||
else
|
||||
size = nv->size;
|
||||
/* Alloc for extra 0 byte + roundup by 4 + length field */
|
||||
nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
|
||||
size += 1 + 3 + sizeof(u32);
|
||||
nvp->nvram = kzalloc(size, GFP_KERNEL);
|
||||
if (!nvp->nvram)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -192,12 +214,136 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* brcmf_fw_strip_multi_v1 :Some nvram files contain settings for multiple
|
||||
* devices. Strip it down for one device, use domain_nr/bus_nr to determine
|
||||
* which data is to be returned. v1 is the version where nvram is stored
|
||||
* compressed and "devpath" maps to index for valid entries.
|
||||
*/
|
||||
static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
|
||||
u16 bus_nr)
|
||||
{
|
||||
u32 i, j;
|
||||
bool found;
|
||||
u8 *nvram;
|
||||
u8 id;
|
||||
|
||||
nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
|
||||
if (!nvram)
|
||||
goto fail;
|
||||
|
||||
/* min length: devpath0=pcie/1/4/ + 0:x=y */
|
||||
if (nvp->nvram_len < BRCMF_FW_NVRAM_DEVPATH_LEN + 6)
|
||||
goto fail;
|
||||
|
||||
/* First search for the devpathX and see if it is the configuration
|
||||
* for domain_nr/bus_nr. Search complete nvp
|
||||
*/
|
||||
found = false;
|
||||
i = 0;
|
||||
while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
|
||||
/* Format: devpathX=pcie/Y/Z/
|
||||
* Y = domain_nr, Z = bus_nr, X = virtual ID
|
||||
*/
|
||||
if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) &&
|
||||
(strncmp(&nvp->nvram[i + 8], "=pcie/", 6) == 0)) {
|
||||
if (((nvp->nvram[i + 14] - '0') == domain_nr) &&
|
||||
((nvp->nvram[i + 16] - '0') == bus_nr)) {
|
||||
id = nvp->nvram[i + 7] - '0';
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
while (nvp->nvram[i] != 0)
|
||||
i++;
|
||||
i++;
|
||||
}
|
||||
if (!found)
|
||||
goto fail;
|
||||
|
||||
/* Now copy all valid entries, release old nvram and assign new one */
|
||||
i = 0;
|
||||
j = 0;
|
||||
while (i < nvp->nvram_len) {
|
||||
if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
|
||||
i += 2;
|
||||
while (nvp->nvram[i] != 0) {
|
||||
nvram[j] = nvp->nvram[i];
|
||||
i++;
|
||||
j++;
|
||||
}
|
||||
nvram[j] = 0;
|
||||
j++;
|
||||
}
|
||||
while (nvp->nvram[i] != 0)
|
||||
i++;
|
||||
i++;
|
||||
}
|
||||
kfree(nvp->nvram);
|
||||
nvp->nvram = nvram;
|
||||
nvp->nvram_len = j;
|
||||
return;
|
||||
|
||||
fail:
|
||||
kfree(nvram);
|
||||
nvp->nvram_len = 0;
|
||||
}
|
||||
|
||||
/* brcmf_fw_strip_multi_v2 :Some nvram files contain settings for multiple
|
||||
* devices. Strip it down for one device, use domain_nr/bus_nr to determine
|
||||
* which data is to be returned. v2 is the version where nvram is stored
|
||||
* uncompressed, all relevant valid entries are identified by
|
||||
* pcie/domain_nr/bus_nr:
|
||||
*/
|
||||
static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
|
||||
u16 bus_nr)
|
||||
{
|
||||
u32 i, j;
|
||||
u8 *nvram;
|
||||
|
||||
nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
|
||||
if (!nvram)
|
||||
goto fail;
|
||||
|
||||
/* Copy all valid entries, release old nvram and assign new one.
|
||||
* Valid entries are of type pcie/X/Y/ where X = domain_nr and
|
||||
* Y = bus_nr.
|
||||
*/
|
||||
i = 0;
|
||||
j = 0;
|
||||
while (i < nvp->nvram_len - BRCMF_FW_NVRAM_PCIEDEV_LEN) {
|
||||
if ((strncmp(&nvp->nvram[i], "pcie/", 5) == 0) &&
|
||||
(nvp->nvram[i + 6] == '/') && (nvp->nvram[i + 8] == '/') &&
|
||||
((nvp->nvram[i + 5] - '0') == domain_nr) &&
|
||||
((nvp->nvram[i + 7] - '0') == bus_nr)) {
|
||||
i += BRCMF_FW_NVRAM_PCIEDEV_LEN;
|
||||
while (nvp->nvram[i] != 0) {
|
||||
nvram[j] = nvp->nvram[i];
|
||||
i++;
|
||||
j++;
|
||||
}
|
||||
nvram[j] = 0;
|
||||
j++;
|
||||
}
|
||||
while (nvp->nvram[i] != 0)
|
||||
i++;
|
||||
i++;
|
||||
}
|
||||
kfree(nvp->nvram);
|
||||
nvp->nvram = nvram;
|
||||
nvp->nvram_len = j;
|
||||
return;
|
||||
fail:
|
||||
kfree(nvram);
|
||||
nvp->nvram_len = 0;
|
||||
}
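The two strip helpers above reduce a multi-device NVRAM image to the entries of one PCIe device; domain_nr is the PCI domain number plus one, as set later in brcmf_pcie_probe(). A hedged before/after illustration for domain_nr = 1, bus_nr = 4 (the key names are made up):

	/* v2 input (uncompressed, full prefix per entry):
	 *   pcie/1/4/macaddr=00:11:22:33:44:55
	 *   pcie/1/4/boardtype=0x062b
	 *   pcie/2/1/macaddr=66:77:88:99:aa:bb   <- other device, dropped
	 *
	 * stripped output handed to the firmware:
	 *   macaddr=00:11:22:33:44:55
	 *   boardtype=0x062b
	 */
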
|
||||
|
||||
/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a fil
|
||||
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
|
||||
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
|
||||
* End of buffer is completed with token identifying length of buffer.
|
||||
*/
|
||||
static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
|
||||
static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length,
|
||||
u16 domain_nr, u16 bus_nr)
|
||||
{
|
||||
struct nvram_parser nvp;
|
||||
u32 pad;
|
||||
@ -212,6 +358,16 @@ static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
|
||||
if (nvp.state == END)
|
||||
break;
|
||||
}
|
||||
if (nvp.multi_dev_v1)
|
||||
brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
|
||||
else if (nvp.multi_dev_v2)
|
||||
brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
|
||||
|
||||
if (nvp.nvram_len == 0) {
|
||||
kfree(nvp.nvram);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pad = nvp.nvram_len;
|
||||
*new_length = roundup(nvp.nvram_len + 1, 4);
|
||||
while (pad != *new_length) {
|
||||
@ -239,6 +395,8 @@ struct brcmf_fw {
|
||||
u16 flags;
|
||||
const struct firmware *code;
|
||||
const char *nvram_name;
|
||||
u16 domain_nr;
|
||||
u16 bus_nr;
|
||||
void (*done)(struct device *dev, const struct firmware *fw,
|
||||
void *nvram_image, u32 nvram_len);
|
||||
};
|
||||
@ -254,7 +412,8 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
|
||||
goto fail;
|
||||
|
||||
if (fw) {
|
||||
nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
|
||||
nvram = brcmf_fw_nvram_strip(fw, &nvram_length,
|
||||
fwctx->domain_nr, fwctx->bus_nr);
|
||||
release_firmware(fw);
|
||||
if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
|
||||
goto fail;
|
||||
@ -309,11 +468,12 @@ fail:
|
||||
kfree(fwctx);
|
||||
}
|
||||
|
||||
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
|
||||
const char *code, const char *nvram,
|
||||
void (*fw_cb)(struct device *dev,
|
||||
const struct firmware *fw,
|
||||
void *nvram_image, u32 nvram_len))
|
||||
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
|
||||
const char *code, const char *nvram,
|
||||
void (*fw_cb)(struct device *dev,
|
||||
const struct firmware *fw,
|
||||
void *nvram_image, u32 nvram_len),
|
||||
u16 domain_nr, u16 bus_nr)
|
||||
{
|
||||
struct brcmf_fw *fwctx;
|
||||
|
||||
@ -333,8 +493,21 @@ int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
|
||||
fwctx->done = fw_cb;
|
||||
if (flags & BRCMF_FW_REQUEST_NVRAM)
|
||||
fwctx->nvram_name = nvram;
|
||||
fwctx->domain_nr = domain_nr;
|
||||
fwctx->bus_nr = bus_nr;
|
||||
|
||||
return request_firmware_nowait(THIS_MODULE, true, code, dev,
|
||||
GFP_KERNEL, fwctx,
|
||||
brcmf_fw_request_code_done);
|
||||
}
|
||||
|
||||
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
|
||||
const char *code, const char *nvram,
|
||||
void (*fw_cb)(struct device *dev,
|
||||
const struct firmware *fw,
|
||||
void *nvram_image, u32 nvram_len))
|
||||
{
|
||||
return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0,
|
||||
0);
|
||||
}
|
||||
|
||||
|
@ -32,6 +32,12 @@ void brcmf_fw_nvram_free(void *nvram);
|
||||
* fails it will not use the callback, but call device_release_driver()
|
||||
* instead which will call the driver .remove() callback.
|
||||
*/
|
||||
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
|
||||
const char *code, const char *nvram,
|
||||
void (*fw_cb)(struct device *dev,
|
||||
const struct firmware *fw,
|
||||
void *nvram_image, u32 nvram_len),
|
||||
u16 domain_nr, u16 bus_nr);
|
||||
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
|
||||
const char *code, const char *nvram,
|
||||
void (*fw_cb)(struct device *dev,
|
||||
|
@ -51,6 +51,8 @@ enum brcmf_pcie_state {
|
||||
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
|
||||
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
|
||||
#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
|
||||
#define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin"
|
||||
#define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt"
|
||||
|
||||
#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
|
||||
|
||||
@ -189,6 +191,8 @@ MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
|
||||
|
||||
|
||||
struct brcmf_pcie_console {
|
||||
@ -1333,6 +1337,10 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
|
||||
fw_name = BRCMF_PCIE_43570_FW_NAME;
|
||||
nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
|
||||
break;
|
||||
case BRCM_CC_4358_CHIP_ID:
|
||||
fw_name = BRCMF_PCIE_4358_FW_NAME;
|
||||
nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
|
||||
break;
|
||||
default:
|
||||
brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
|
||||
return -ENODEV;
|
||||
@ -1609,7 +1617,7 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
|
||||
bus->msgbuf->commonrings[i] =
|
||||
&devinfo->shared.commonrings[i]->commonring;
|
||||
|
||||
flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
|
||||
flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
|
||||
GFP_KERNEL);
|
||||
if (!flowrings)
|
||||
goto fail;
|
||||
@ -1641,8 +1649,13 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
struct brcmf_pciedev_info *devinfo;
|
||||
struct brcmf_pciedev *pcie_bus_dev;
|
||||
struct brcmf_bus *bus;
|
||||
u16 domain_nr;
|
||||
u16 bus_nr;
|
||||
|
||||
brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
|
||||
domain_nr = pci_domain_nr(pdev->bus) + 1;
|
||||
bus_nr = pdev->bus->number;
|
||||
brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
|
||||
domain_nr, bus_nr);
|
||||
|
||||
ret = -ENOMEM;
|
||||
devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
|
||||
@ -1691,10 +1704,10 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (ret)
|
||||
goto fail_bus;
|
||||
|
||||
ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
|
||||
BRCMF_FW_REQ_NV_OPTIONAL,
|
||||
devinfo->fw_name, devinfo->nvram_name,
|
||||
brcmf_pcie_setup);
|
||||
ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
|
||||
BRCMF_FW_REQ_NV_OPTIONAL,
|
||||
devinfo->fw_name, devinfo->nvram_name,
|
||||
brcmf_pcie_setup, domain_nr, bus_nr);
|
||||
if (ret == 0)
|
||||
return 0;
|
||||
fail_bus:
|
||||
@ -1850,9 +1863,11 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
|
||||
{ /* end: all zeroes */ }
|
||||
};
|
||||
|
||||
|
@ -601,6 +601,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
#define BCM43241B5_FIRMWARE_NAME "brcm/brcmfmac43241b5-sdio.bin"
#define BCM43241B5_NVRAM_NAME "brcm/brcmfmac43241b5-sdio.txt"
#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
@ -628,6 +630,8 @@ MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME);
MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
@ -667,7 +671,8 @@ enum brcmf_firmware_type {
static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
{ BRCM_CC_43241_CHIP_ID, 0x00000020, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) },
{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
@ -3550,10 +3555,6 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
return;
}

if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
brcmf_err("bus is down. we have nothing to do\n");
return;
}
/* Count the interrupt call */
bus->sdcnt.intrcount++;
if (in_interrupt())
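The brcmf_fwname_data[] rows above pair a chip ID with a 32-bit revision mask, so a single chip can map to different firmware images per silicon revision (0x0000001F covers revisions 0-4, 0x00000020 revision 5, 0xFFFFFFC0 revision 6 and up). A self-contained sketch of that lookup pattern follows; the IDs, table entries and helper names are illustrative stand-ins, not the driver's actual definitions:

#include <stdio.h>

/* Illustrative table row: chip id, bitmask of matching chip revisions,
 * and the firmware/NVRAM names to request for those revisions.
 */
struct fw_name_entry {
	unsigned int chipid;
	unsigned int revmask;	/* bit N set => applies to revision N */
	const char *fw;
	const char *nvram;
};

static const struct fw_name_entry fw_table[] = {
	{ 0x4324, 0x0000001F, "brcmfmac43241b0-sdio.bin", "brcmfmac43241b0-sdio.txt" },
	{ 0x4324, 0x00000020, "brcmfmac43241b4-sdio.bin", "brcmfmac43241b4-sdio.txt" },
	{ 0x4324, 0xFFFFFFC0, "brcmfmac43241b5-sdio.bin", "brcmfmac43241b5-sdio.txt" },
};

static const struct fw_name_entry *lookup_fw(unsigned int chipid,
					     unsigned int chiprev)
{
	size_t i;

	for (i = 0; i < sizeof(fw_table) / sizeof(fw_table[0]); i++) {
		if (fw_table[i].chipid == chipid &&
		    (fw_table[i].revmask & (1u << chiprev)))
			return &fw_table[i];
	}
	return NULL;
}

int main(void)
{
	const struct fw_name_entry *e = lookup_fw(0x4324, 6);

	if (e)
		printf("rev 6 -> %s / %s\n", e->fw, e->nvram);
	return 0;
}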
@ -1270,8 +1270,13 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->chiprev = bus_pub->chiprev;

/* request firmware here */
brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
brcmf_usb_probe_phase2);
ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo),
NULL, brcmf_usb_probe_phase2);
if (ret) {
brcmf_err("firmware request failed: %d\n", ret);
goto fail;
}

return 0;

fail:
@ -45,6 +45,7 @@
#define BRCM_CC_43567_CHIP_ID 43567
#define BRCM_CC_43569_CHIP_ID 43569
#define BRCM_CC_43570_CHIP_ID 43570
#define BRCM_CC_4358_CHIP_ID 0x4358
#define BRCM_CC_43602_CHIP_ID 43602

/* USB Device IDs */
@ -59,9 +60,11 @@
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
#define BRCM_PCIE_4358_DEVICE_ID 0x43e9
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
#define BRCM_PCIE_43602_RAW_DEVICE_ID 43602

/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
@ -21,6 +21,7 @@ config IWLWIFI
Intel 7260 Wi-Fi Adapter
Intel 3160 Wi-Fi Adapter
Intel 7265 Wi-Fi Adapter
Intel 8260 Wi-Fi Adapter

This driver uses the kernel's mac80211 subsystem.
@ -53,16 +54,17 @@ config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
default IWLWIFI
help
This is the driver that supports the DVM firmware which is
used by most existing devices (with the exception of 7260
and 3160).
This is the driver that supports the DVM firmware. The list
of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware

config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
select WANT_DEV_COREDUMP
help
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.
This is the driver that supports the MVM firmware. The list
of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware

# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
@ -128,6 +128,28 @@ static const struct iwl_base_params iwl7000_base_params = {
.apmg_wake_up_wa = true,
};

static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.ct_kill_entry = 118,
.ct_kill_exit = 96,
.ct_kill_duration = 5,
.dynamic_smps_entry = 114,
.dynamic_smps_exit = 110,
.tx_protection_entry = 114,
.tx_protection_exit = 108,
.tx_backoff = {
{.temperature = 112, .backoff = 300},
{.temperature = 113, .backoff = 800},
{.temperature = 114, .backoff = 1500},
{.temperature = 115, .backoff = 3000},
{.temperature = 116, .backoff = 5000},
{.temperature = 117, .backoff = 10000},
},
.support_ct_kill = true,
.support_dynamic_smps = true,
.support_tx_protection = true,
.support_tx_backoff = true,
};

static const struct iwl_ht_params iwl7000_ht_params = {
.stbc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
@ -170,6 +192,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
.thermal_params = &iwl7000_high_temp_tt_params,
};

const struct iwl_cfg iwl7260_2n_cfg = {
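The iwl7000_high_temp_tt_params table above pairs ascending temperature thresholds with increasingly aggressive Tx backoff values. A standalone sketch of how such a table can be scanned follows; it copies the values only for illustration and is not the driver's thermal-throttling handler:

#include <stdio.h>

struct tt_tx_backoff {
	int temperature;	/* threshold in Celsius */
	unsigned int backoff;	/* Tx backoff in usec */
};

#define TT_TX_BACKOFF_SIZE 6

static const struct tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE] = {
	{ 112, 300 }, { 113, 800 }, { 114, 1500 },
	{ 115, 3000 }, { 116, 5000 }, { 117, 10000 },
};

/* Pick the backoff for the highest threshold the current temperature
 * has reached; 0 means no throttling is needed.
 */
static unsigned int pick_backoff(int temperature)
{
	unsigned int backoff = 0;
	int i;

	for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
		if (temperature >= tx_backoff[i].temperature)
			backoff = tx_backoff[i].backoff;
		else
			break;	/* the table is sorted in ascending order */
	}
	return backoff;
}

int main(void)
{
	printf("105C -> %u usec, 114C -> %u usec, 120C -> %u usec\n",
	       pick_backoff(105), pick_backoff(114), pick_backoff(120));
	return 0;
}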
|
@ -194,6 +194,49 @@ struct iwl_ht_params {
|
||||
u8 ht40_bands;
|
||||
};
|
||||
|
||||
/*
|
||||
* Tx-backoff threshold
|
||||
* @temperature: The threshold in Celsius
|
||||
* @backoff: The tx-backoff in uSec
|
||||
*/
|
||||
struct iwl_tt_tx_backoff {
|
||||
s32 temperature;
|
||||
u32 backoff;
|
||||
};
|
||||
|
||||
#define TT_TX_BACKOFF_SIZE 6
|
||||
|
||||
/**
|
||||
* struct iwl_tt_params - thermal throttling parameters
|
||||
* @ct_kill_entry: CT Kill entry threshold
|
||||
* @ct_kill_exit: CT Kill exit threshold
|
||||
* @ct_kill_duration: The time intervals (in uSec) in which the driver needs
|
||||
* to checks whether to exit CT Kill.
|
||||
* @dynamic_smps_entry: Dynamic SMPS entry threshold
|
||||
* @dynamic_smps_exit: Dynamic SMPS exit threshold
|
||||
* @tx_protection_entry: TX protection entry threshold
|
||||
* @tx_protection_exit: TX protection exit threshold
|
||||
* @tx_backoff: Array of thresholds for tx-backoff , in ascending order.
|
||||
* @support_ct_kill: Support CT Kill?
|
||||
* @support_dynamic_smps: Support dynamic SMPS?
|
||||
* @support_tx_protection: Support tx protection?
|
||||
* @support_tx_backoff: Support tx-backoff?
|
||||
*/
|
||||
struct iwl_tt_params {
|
||||
s32 ct_kill_entry;
|
||||
s32 ct_kill_exit;
|
||||
u32 ct_kill_duration;
|
||||
s32 dynamic_smps_entry;
|
||||
s32 dynamic_smps_exit;
|
||||
s32 tx_protection_entry;
|
||||
s32 tx_protection_exit;
|
||||
struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
|
||||
bool support_ct_kill;
|
||||
bool support_dynamic_smps;
|
||||
bool support_tx_protection;
|
||||
bool support_tx_backoff;
|
||||
};
|
||||
|
||||
/*
|
||||
* information on how to parse the EEPROM
|
||||
*/
|
||||
@ -316,6 +359,7 @@ struct iwl_cfg {
|
||||
const u32 dccm2_len;
|
||||
const u32 smem_offset;
|
||||
const u32 smem_len;
|
||||
const struct iwl_tt_params *thermal_params;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -32,7 +32,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -436,6 +436,7 @@ enum iwl_fw_dbg_monitor_mode {
|
||||
*
|
||||
* @version: version of the TLV - currently 0
|
||||
* @monitor_mode: %enum iwl_fw_dbg_monitor_mode
|
||||
* @size_power: buffer size will be 2^(size_power + 11)
|
||||
* @base_reg: addr of the base addr register (PRPH)
|
||||
* @end_reg: addr of the end addr register (PRPH)
|
||||
* @write_ptr_reg: the addr of the reg of the write pointer
|
||||
@ -449,7 +450,8 @@ enum iwl_fw_dbg_monitor_mode {
|
||||
struct iwl_fw_dbg_dest_tlv {
|
||||
u8 version;
|
||||
u8 monitor_mode;
|
||||
u8 reserved[2];
|
||||
u8 size_power;
|
||||
u8 reserved;
|
||||
__le32 base_reg;
|
||||
__le32 end_reg;
|
||||
__le32 write_ptr_reg;
|
||||
|
@ -348,6 +348,9 @@ enum secure_load_status_reg {
|
||||
#define MON_BUFF_WRPTR (0xa03c44)
|
||||
#define MON_BUFF_CYCLE_CNT (0xa03c48)
|
||||
|
||||
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
|
||||
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
|
||||
|
||||
#define DBGC_IN_SAMPLE (0xa03c00)
|
||||
|
||||
/* enable the ID buf for read */
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -32,7 +32,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -981,7 +981,8 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
|
||||
ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
|
||||
IWL_MVM_SCAN_NETDETECT);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -32,7 +32,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -274,50 +274,18 @@ struct iwl_scan_offload_profile_cfg {
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* iwl_scan_offload_schedule - schedule of scan offload
|
||||
* iwl_scan_schedule_lmac - schedule of scan offload
|
||||
* @delay: delay between iterations, in seconds.
|
||||
* @iterations: num of scan iterations
|
||||
* @full_scan_mul: number of partial scans before each full scan
|
||||
*/
|
||||
struct iwl_scan_offload_schedule {
|
||||
struct iwl_scan_schedule_lmac {
|
||||
__le16 delay;
|
||||
u8 iterations;
|
||||
u8 full_scan_mul;
|
||||
} __packed;
|
||||
} __packed; /* SCAN_SCHEDULE_API_S */
|
||||
|
||||
/*
|
||||
* iwl_scan_offload_flags
|
||||
*
|
||||
* IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
|
||||
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
|
||||
* IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
|
||||
* beacon period. Finding channel activity in this mode is not guaranteed.
|
||||
* IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
|
||||
* Assuming beacon period is 100ms finding channel activity is guaranteed.
|
||||
*/
|
||||
enum iwl_scan_offload_flags {
|
||||
IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
|
||||
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
|
||||
IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
|
||||
IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
|
||||
};
|
||||
|
||||
/**
|
||||
* iwl_scan_offload_req - scan offload request command
|
||||
* @flags: bitmap - enum iwl_scan_offload_flags.
|
||||
* @watchdog: maximum scan duration in TU.
|
||||
* @delay: delay in seconds before first iteration.
|
||||
* @schedule_line: scan offload schedule, for fast and regular scan.
|
||||
*/
|
||||
struct iwl_scan_offload_req {
|
||||
__le16 flags;
|
||||
__le16 watchdog;
|
||||
__le16 delay;
|
||||
__le16 reserved;
|
||||
struct iwl_scan_offload_schedule schedule_line[2];
|
||||
} __packed;
|
||||
|
||||
enum iwl_scan_offload_compleate_status {
|
||||
enum iwl_scan_offload_complete_status {
|
||||
IWL_SCAN_OFFLOAD_COMPLETED = 1,
|
||||
IWL_SCAN_OFFLOAD_ABORTED = 2,
|
||||
};
|
||||
@ -464,7 +432,7 @@ enum iwl_scan_priority {
|
||||
};
|
||||
|
||||
/**
|
||||
* iwl_scan_req_unified_lmac - SCAN_REQUEST_CMD_API_S_VER_1
|
||||
* iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
|
||||
* @reserved1: for alignment and future use
|
||||
* @channel_num: num of channels to scan
|
||||
* @active-dwell: dwell time for active channels
|
||||
@ -487,7 +455,7 @@ enum iwl_scan_priority {
|
||||
* @channel_opt: channel optimization options, for full and partial scan
|
||||
* @data: channel configuration and probe request packet.
|
||||
*/
|
||||
struct iwl_scan_req_unified_lmac {
|
||||
struct iwl_scan_req_lmac {
|
||||
/* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
|
||||
__le32 reserved1;
|
||||
u8 n_channels;
|
||||
@ -508,7 +476,7 @@ struct iwl_scan_req_unified_lmac {
|
||||
/* SCAN_REQ_PERIODIC_PARAMS_API_S */
|
||||
__le32 iter_num;
|
||||
__le32 delay;
|
||||
struct iwl_scan_offload_schedule schedule[2];
|
||||
struct iwl_scan_schedule_lmac schedule[2];
|
||||
struct iwl_scan_channel_opt channel_opt[2];
|
||||
u8 data[];
|
||||
} __packed;
|
||||
@ -582,7 +550,11 @@ struct iwl_mvm_umac_cmd_hdr {
|
||||
u8 ver;
|
||||
} __packed;
|
||||
|
||||
#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8
|
||||
/* The maximum of either of these cannot exceed 8, because we use an
|
||||
* 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
|
||||
*/
|
||||
#define IWL_MVM_MAX_UMAC_SCANS 8
|
||||
#define IWL_MVM_MAX_LMAC_SCANS 1
|
||||
|
||||
enum scan_config_flags {
|
||||
SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
|
||||
|
@ -147,13 +147,6 @@ enum {
|
||||
|
||||
LQ_CMD = 0x4e,
|
||||
|
||||
/* Calibration */
|
||||
TEMPERATURE_NOTIFICATION = 0x62,
|
||||
CALIBRATION_CFG_CMD = 0x65,
|
||||
CALIBRATION_RES_NOTIFICATION = 0x66,
|
||||
CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
|
||||
RADIO_VERSION_NOTIFICATION = 0x68,
|
||||
|
||||
/* Scan offload */
|
||||
SCAN_OFFLOAD_REQUEST_CMD = 0x51,
|
||||
SCAN_OFFLOAD_ABORT_CMD = 0x52,
|
||||
|
@ -832,21 +832,6 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
struct iwl_radio_version_notif *radio_version = (void *)pkt->data;
|
||||
|
||||
/* TODO: what to do with that? */
|
||||
IWL_DEBUG_INFO(mvm,
|
||||
"Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n",
|
||||
le32_to_cpu(radio_version->radio_flavor),
|
||||
le32_to_cpu(radio_version->radio_step),
|
||||
le32_to_cpu(radio_version->radio_dash));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd)
|
||||
|
@ -80,7 +80,6 @@
|
||||
#include "sta.h"
|
||||
#include "time-event.h"
|
||||
#include "iwl-eeprom-parse.h"
|
||||
#include "fw-api-scan.h"
|
||||
#include "iwl-phy-db.h"
|
||||
#include "testmode.h"
|
||||
#include "iwl-fw-error-dump.h"
|
||||
@ -506,10 +505,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
|
||||
iwl_mvm_reset_phy_ctxts(mvm);
|
||||
|
||||
hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm, false);
|
||||
hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
|
||||
|
||||
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
|
||||
|
||||
BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
|
||||
IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
|
||||
|
||||
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
|
||||
mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
|
||||
else
|
||||
mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
|
||||
|
||||
if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
|
||||
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
|
||||
&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
|
||||
@ -532,14 +539,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
else
|
||||
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
|
||||
|
||||
if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
|
||||
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
|
||||
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
|
||||
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
|
||||
/* we create the 802.11 header and zero length SSID IE. */
|
||||
hw->wiphy->max_sched_scan_ie_len =
|
||||
SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
|
||||
}
|
||||
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
|
||||
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
|
||||
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
|
||||
/* we create the 802.11 header and zero length SSID IE. */
|
||||
hw->wiphy->max_sched_scan_ie_len =
|
||||
SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
|
||||
|
||||
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
|
||||
NL80211_FEATURE_LOW_PRIORITY_SCAN |
|
||||
@ -1227,22 +1232,23 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
|
||||
|
||||
iwl_trans_stop_device(mvm->trans);
|
||||
|
||||
mvm->scan_status = IWL_MVM_SCAN_NONE;
|
||||
mvm->scan_status = 0;
|
||||
mvm->ps_disabled = false;
|
||||
mvm->calibrating = false;
|
||||
|
||||
/* just in case one was running */
|
||||
ieee80211_remain_on_channel_expired(mvm->hw);
|
||||
|
||||
ieee80211_iterate_active_interfaces_atomic(
|
||||
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
iwl_mvm_cleanup_iterator, mvm);
|
||||
/*
|
||||
* cleanup all interfaces, even inactive ones, as some might have
|
||||
* gone down during the HW restart
|
||||
*/
|
||||
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
|
||||
|
||||
mvm->p2p_device_vif = NULL;
|
||||
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
|
||||
|
||||
iwl_mvm_reset_phy_ctxts(mvm);
|
||||
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
|
||||
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
|
||||
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
|
||||
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
|
||||
@ -1426,7 +1432,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
|
||||
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
|
||||
for (i = 0; i < mvm->max_scans; i++) {
|
||||
if (WARN_ONCE(mvm->scan_uid[i],
|
||||
"UMAC scan UID %d was not cleaned\n",
|
||||
mvm->scan_uid[i]))
|
||||
@ -2373,89 +2379,21 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
|
||||
iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
|
||||
}
|
||||
|
||||
static int iwl_mvm_cancel_scan_wait_notif(struct iwl_mvm *mvm,
|
||||
enum iwl_scan_status scan_type)
|
||||
{
|
||||
int ret;
|
||||
bool wait_for_handlers = false;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
if (mvm->scan_status != scan_type) {
|
||||
ret = 0;
|
||||
/* make sure there are no pending notifications */
|
||||
wait_for_handlers = true;
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (scan_type) {
|
||||
case IWL_MVM_SCAN_SCHED:
|
||||
ret = iwl_mvm_scan_offload_stop(mvm, true);
|
||||
break;
|
||||
case IWL_MVM_SCAN_OS:
|
||||
ret = iwl_mvm_cancel_scan(mvm);
|
||||
break;
|
||||
case IWL_MVM_SCAN_NONE:
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
wait_for_handlers = true;
|
||||
out:
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
/* make sure we consume the completion notification */
|
||||
if (wait_for_handlers)
|
||||
iwl_mvm_wait_for_async_handlers(mvm);
|
||||
|
||||
return ret;
|
||||
}
|
||||
static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_scan_request *hw_req)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
struct cfg80211_scan_request *req = &hw_req->req;
|
||||
int ret;
|
||||
|
||||
if (req->n_channels == 0 ||
|
||||
req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
|
||||
if (hw_req->req.n_channels == 0 ||
|
||||
hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
|
||||
return -EINVAL;
|
||||
|
||||
if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
|
||||
ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
|
||||
IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
|
||||
|
||||
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
|
||||
ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
|
||||
else
|
||||
ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
|
||||
|
||||
if (ret)
|
||||
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
|
||||
out:
|
||||
ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2476,7 +2414,7 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
|
||||
/* FIXME: for now, we ignore this race for UMAC scans, since
|
||||
* they don't set the scan_status.
|
||||
*/
|
||||
if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
|
||||
if ((mvm->scan_status & IWL_MVM_SCAN_REGULAR) ||
|
||||
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
|
||||
iwl_mvm_cancel_scan(mvm);
|
||||
|
||||
@ -2794,35 +2732,17 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
|
||||
struct ieee80211_scan_ies *ies)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
|
||||
int ret;
|
||||
|
||||
if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
|
||||
ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
|
||||
IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!vif->bss_conf.idle) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
|
||||
if (ret)
|
||||
mvm->scan_status = IWL_MVM_SCAN_NONE;
|
||||
ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
|
||||
|
||||
out:
|
||||
mutex_unlock(&mvm->mutex);
|
||||
@ -2848,7 +2768,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
|
||||
/* FIXME: for now, we ignore this race for UMAC scans, since
|
||||
* they don't set the scan_status.
|
||||
*/
|
||||
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
|
||||
if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
|
||||
!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
|
||||
mutex_unlock(&mvm->mutex);
|
||||
return 0;
|
||||
@ -2922,8 +2842,21 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
|
||||
break;
|
||||
}
|
||||
|
||||
/* During FW restart, in order to restore the state as it was,
|
||||
* don't try to reprogram keys we previously failed for.
|
||||
*/
|
||||
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
|
||||
key->hw_key_idx == STA_KEY_IDX_INVALID) {
|
||||
IWL_DEBUG_MAC80211(mvm,
|
||||
"skip invalid idx key programming during restart\n");
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
|
||||
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
|
||||
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
|
||||
test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
|
||||
&mvm->status));
|
||||
if (ret) {
|
||||
IWL_WARN(mvm, "set key failed\n");
|
||||
/*
|
||||
@ -3001,7 +2934,7 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
|
||||
return true;
|
||||
}
|
||||
|
||||
#define AUX_ROC_MAX_DELAY_ON_CHANNEL 5000
|
||||
#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
|
||||
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
|
||||
struct ieee80211_channel *channel,
|
||||
struct ieee80211_vif *vif,
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -32,7 +32,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -76,6 +76,7 @@
|
||||
#include "iwl-notif-wait.h"
|
||||
#include "iwl-eeprom-parse.h"
|
||||
#include "iwl-fw-file.h"
|
||||
#include "iwl-config.h"
|
||||
#include "sta.h"
|
||||
#include "fw-api.h"
|
||||
#include "constants.h"
|
||||
@ -446,9 +447,23 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
|
||||
extern const u8 tid_to_mac80211_ac[];
|
||||
|
||||
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
IWL_MVM_SCAN_SCHED,
IWL_MVM_SCAN_REGULAR = BIT(0),
IWL_MVM_SCAN_SCHED = BIT(1),
IWL_MVM_SCAN_NETDETECT = BIT(2),

IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),

IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
IWL_MVM_SCAN_STOPPING_REGULAR,
IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED |
IWL_MVM_SCAN_STOPPING_SCHED,
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
IWL_MVM_SCAN_STOPPING_NETDETECT,

IWL_MVM_SCAN_STOPPING_MASK = 0xff00,
IWL_MVM_SCAN_MASK = 0x00ff,
};
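The reworked enum above turns the scan state into a bitmask: the low byte tracks which scan types are running (regular, scheduled, net-detect) and the high byte tracks which are stopping, so several scans can be accounted for at once instead of a single exclusive state. The standalone sketch below mirrors that bit layout to show the idea; it is illustrative only, not the driver's code:

#include <stdio.h>

/* Same layout as the enum above: low byte = running, high byte = stopping. */
#define SCAN_REGULAR		(1u << 0)
#define SCAN_SCHED		(1u << 1)
#define SCAN_NETDETECT		(1u << 2)
#define SCAN_STOPPING_REGULAR	(1u << 8)
#define SCAN_STOPPING_SCHED	(1u << 9)
#define SCAN_STOPPING_MASK	0xff00u
#define SCAN_MASK		0x00ffu

int main(void)
{
	unsigned int scan_status = 0;

	/* A regular scan and a scheduled scan can now run concurrently. */
	scan_status |= SCAN_REGULAR;
	scan_status |= SCAN_SCHED;

	/* Stopping the sched scan moves its bit from "running" to
	 * "stopping" without disturbing the regular scan.
	 */
	scan_status &= ~SCAN_SCHED;
	scan_status |= SCAN_STOPPING_SCHED;

	printf("running: 0x%02x, stopping: 0x%02x\n",
	       scan_status & SCAN_MASK,
	       (scan_status & SCAN_STOPPING_MASK) >> 8);
	return 0;
}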
||||
|
||||
/**
|
||||
@ -463,49 +478,6 @@ struct iwl_nvm_section {
|
||||
const u8 *data;
|
||||
};
|
||||
|
||||
/*
|
||||
* Tx-backoff threshold
|
||||
* @temperature: The threshold in Celsius
|
||||
* @backoff: The tx-backoff in uSec
|
||||
*/
|
||||
struct iwl_tt_tx_backoff {
|
||||
s32 temperature;
|
||||
u32 backoff;
|
||||
};
|
||||
|
||||
#define TT_TX_BACKOFF_SIZE 6
|
||||
|
||||
/**
|
||||
* struct iwl_tt_params - thermal throttling parameters
|
||||
* @ct_kill_entry: CT Kill entry threshold
|
||||
* @ct_kill_exit: CT Kill exit threshold
|
||||
* @ct_kill_duration: The time intervals (in uSec) in which the driver needs
|
||||
* to checks whether to exit CT Kill.
|
||||
* @dynamic_smps_entry: Dynamic SMPS entry threshold
|
||||
* @dynamic_smps_exit: Dynamic SMPS exit threshold
|
||||
* @tx_protection_entry: TX protection entry threshold
|
||||
* @tx_protection_exit: TX protection exit threshold
|
||||
* @tx_backoff: Array of thresholds for tx-backoff , in ascending order.
|
||||
* @support_ct_kill: Support CT Kill?
|
||||
* @support_dynamic_smps: Support dynamic SMPS?
|
||||
* @support_tx_protection: Support tx protection?
|
||||
* @support_tx_backoff: Support tx-backoff?
|
||||
*/
|
||||
struct iwl_tt_params {
|
||||
s32 ct_kill_entry;
|
||||
s32 ct_kill_exit;
|
||||
u32 ct_kill_duration;
|
||||
s32 dynamic_smps_entry;
|
||||
s32 dynamic_smps_exit;
|
||||
s32 tx_protection_entry;
|
||||
s32 tx_protection_exit;
|
||||
struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
|
||||
bool support_ct_kill;
|
||||
bool support_dynamic_smps;
|
||||
bool support_tx_protection;
|
||||
bool support_tx_backoff;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
|
||||
* @ct_kill_exit: worker to exit thermal kill
|
||||
@ -520,7 +492,7 @@ struct iwl_mvm_tt_mgmt {
|
||||
bool dynamic_smps;
|
||||
u32 tx_backoff;
|
||||
u32 min_backoff;
|
||||
const struct iwl_tt_params *params;
|
||||
struct iwl_tt_params params;
|
||||
bool throttle;
|
||||
};
|
||||
|
||||
@ -647,12 +619,15 @@ struct iwl_mvm {
|
||||
u32 rts_threshold;
|
||||
|
||||
/* Scan status, cmd (pre-allocated) and auxiliary station */
|
||||
enum iwl_scan_status scan_status;
|
||||
unsigned int scan_status;
|
||||
void *scan_cmd;
|
||||
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
|
||||
|
||||
/* max number of simultaneous scans the FW supports */
|
||||
unsigned int max_scans;
|
||||
|
||||
/* UMAC scan tracking */
|
||||
u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS];
|
||||
u32 scan_uid[IWL_MVM_MAX_UMAC_SCANS];
|
||||
u8 scan_seq_num, sched_scan_seq_num;
|
||||
|
||||
/* rx chain antennas set through debugfs for the scan command */
|
||||
@ -1083,8 +1058,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
@ -1093,8 +1066,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
|
||||
@ -1146,9 +1117,12 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
|
||||
struct ieee80211_vif *disabled_vif);
|
||||
|
||||
/* Scanning */
|
||||
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct cfg80211_scan_request *req,
|
||||
struct ieee80211_scan_ies *ies);
|
||||
int iwl_mvm_scan_size(struct iwl_mvm *mvm);
|
||||
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
|
||||
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
|
||||
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
|
||||
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
|
||||
|
||||
/* Scheduled scan */
|
||||
@ -1160,31 +1134,18 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
|
||||
struct iwl_device_cmd *cmd);
|
||||
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
|
||||
struct cfg80211_sched_scan_request *req);
|
||||
int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct cfg80211_sched_scan_request *req,
|
||||
struct ieee80211_scan_ies *ies);
|
||||
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct cfg80211_sched_scan_request *req,
|
||||
struct ieee80211_scan_ies *ies,
|
||||
int type);
|
||||
int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
|
||||
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
|
||||
/* Unified scan */
|
||||
int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_scan_request *req);
|
||||
int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct cfg80211_sched_scan_request *req,
|
||||
struct ieee80211_scan_ies *ies);
|
||||
|
||||
/* UMAC scan */
|
||||
int iwl_mvm_config_scan(struct iwl_mvm *mvm);
|
||||
int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct ieee80211_scan_request *req);
|
||||
int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct cfg80211_sched_scan_request *req,
|
||||
struct ieee80211_scan_ies *ies);
|
||||
int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
|
@ -246,7 +246,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
|
||||
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
|
||||
true),
|
||||
|
||||
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
|
||||
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
|
||||
|
||||
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
|
||||
@ -280,7 +279,6 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
|
||||
CMD(BINDING_CONTEXT_CMD),
|
||||
CMD(TIME_QUOTA_CMD),
|
||||
CMD(NON_QOS_TX_COUNTER_CMD),
|
||||
CMD(RADIO_VERSION_NOTIFICATION),
|
||||
CMD(SCAN_REQUEST_CMD),
|
||||
CMD(SCAN_ABORT_CMD),
|
||||
CMD(SCAN_START_NOTIFICATION),
|
||||
@ -290,7 +288,6 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
|
||||
CMD(PHY_CONFIGURATION_CMD),
|
||||
CMD(CALIB_RES_NOTIF_PHY_DB),
|
||||
CMD(SET_CALIB_DEFAULT_CMD),
|
||||
CMD(CALIBRATION_COMPLETE_NOTIFICATION),
|
||||
CMD(ADD_STA_KEY),
|
||||
CMD(ADD_STA),
|
||||
CMD(REMOVE_STA),
|
||||
@ -1263,11 +1260,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
|
||||
ieee80211_iterate_active_interfaces(
|
||||
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
|
||||
iwl_mvm_d0i3_disconnect_iter, mvm);
|
||||
|
||||
iwl_free_resp(&get_status_cmd);
|
||||
out:
|
||||
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
|
||||
|
||||
/* qos_seq might point inside resp_pkt, so free it only now */
|
||||
if (get_status_cmd.resp_pkt)
|
||||
iwl_free_resp(&get_status_cmd);
|
||||
|
||||
/* the FW might have updated the regdomain */
|
||||
iwl_mvm_update_changed_regdom(mvm);
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
@ -2133,7 +2133,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
|
||||
}
|
||||
|
||||
/* current tx rate */
|
||||
index = lq_sta->last_txrate_idx;
|
||||
index = rate->index;
|
||||
|
||||
/* rates available for this association, and for modulation mode */
|
||||
rate_mask = rs_get_supported_rates(lq_sta, rate);
|
||||
@ -2181,14 +2181,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
|
||||
* or search for a new one? */
|
||||
rs_stay_in_table(lq_sta, false);
|
||||
|
||||
goto out;
|
||||
}
|
||||
/* Else we have enough samples; calculate estimate of
|
||||
* actual average throughput */
|
||||
if (window->average_tpt != ((window->success_ratio *
|
||||
tbl->expected_tpt[index] + 64) / 128)) {
|
||||
window->average_tpt = ((window->success_ratio *
|
||||
tbl->expected_tpt[index] + 64) / 128);
|
||||
return;
|
||||
}
|
||||
|
||||
/* If we are searching for better modulation mode, check success. */
|
||||
@ -2400,9 +2393,6 @@ lq_update:
|
||||
rs_set_stay_in_table(mvm, 0, lq_sta);
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
lq_sta->last_txrate_idx = index;
|
||||
}
|
||||
|
||||
struct rs_init_rate_info {
|
||||
@ -2545,7 +2535,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
|
||||
rate = &tbl->rate;
|
||||
|
||||
rs_get_initial_rate(mvm, lq_sta, band, rate);
|
||||
lq_sta->last_txrate_idx = rate->index;
|
||||
|
||||
WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
|
||||
if (rate->ant == ANT_A)
|
||||
@ -3223,9 +3212,6 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
|
||||
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
|
||||
rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
|
||||
|
||||
if (num_of_ant(initial_rate->ant) == 1)
|
||||
lq_cmd->single_stream_ant_msk = initial_rate->ant;
|
||||
|
||||
mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
|
||||
|
||||
|
@ -322,8 +322,6 @@ struct iwl_lq_sta {
|
||||
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
|
||||
u8 tx_agg_tid_en;
|
||||
|
||||
/* used to be in sta_info */
|
||||
int last_txrate_idx;
|
||||
/* last tx rate_n_flags */
|
||||
u32 last_rate_n_flags;
|
||||
/* packets destined for this STA are aggregated */
|
||||
|
File diff suppressed because it is too large
@ -70,7 +70,7 @@
|
||||
static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
|
||||
{
|
||||
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
|
||||
u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
|
||||
u32 duration = tt->params.ct_kill_duration;
|
||||
|
||||
if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
|
||||
return;
|
||||
@ -223,7 +223,7 @@ static void check_exit_ctkill(struct work_struct *work)
|
||||
tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
|
||||
mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
|
||||
|
||||
duration = tt->params->ct_kill_duration;
|
||||
duration = tt->params.ct_kill_duration;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
@ -247,7 +247,7 @@ static void check_exit_ctkill(struct work_struct *work)
|
||||
|
||||
IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
|
||||
|
||||
if (temp <= tt->params->ct_kill_exit) {
|
||||
if (temp <= tt->params.ct_kill_exit) {
|
||||
mutex_unlock(&mvm->mutex);
|
||||
iwl_mvm_exit_ctkill(mvm);
|
||||
return;
|
||||
@ -325,7 +325,7 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
|
||||
|
||||
void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
|
||||
{
|
||||
const struct iwl_tt_params *params = mvm->thermal_throttle.params;
|
||||
struct iwl_tt_params *params = &mvm->thermal_throttle.params;
|
||||
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
|
||||
s32 temperature = mvm->temperature;
|
||||
bool throttle_enable = false;
|
||||
@ -340,7 +340,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
|
||||
}
|
||||
|
||||
if (params->support_ct_kill &&
|
||||
temperature <= tt->params->ct_kill_exit) {
|
||||
temperature <= params->ct_kill_exit) {
|
||||
iwl_mvm_exit_ctkill(mvm);
|
||||
return;
|
||||
}
|
||||
@ -400,7 +400,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
|
||||
}
|
||||
}
|
||||
|
||||
static const struct iwl_tt_params iwl7000_tt_params = {
|
||||
static const struct iwl_tt_params iwl_mvm_default_tt_params = {
|
||||
.ct_kill_entry = 118,
|
||||
.ct_kill_exit = 96,
|
||||
.ct_kill_duration = 5,
|
||||
@ -422,38 +422,16 @@ static const struct iwl_tt_params iwl7000_tt_params = {
|
||||
.support_tx_backoff = true,
|
||||
};
|
||||
|
||||
static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
|
||||
.ct_kill_entry = 118,
|
||||
.ct_kill_exit = 96,
|
||||
.ct_kill_duration = 5,
|
||||
.dynamic_smps_entry = 114,
|
||||
.dynamic_smps_exit = 110,
|
||||
.tx_protection_entry = 114,
|
||||
.tx_protection_exit = 108,
|
||||
.tx_backoff = {
|
||||
{.temperature = 112, .backoff = 300},
|
||||
{.temperature = 113, .backoff = 800},
|
||||
{.temperature = 114, .backoff = 1500},
|
||||
{.temperature = 115, .backoff = 3000},
|
||||
{.temperature = 116, .backoff = 5000},
|
||||
{.temperature = 117, .backoff = 10000},
|
||||
},
|
||||
.support_ct_kill = true,
|
||||
.support_dynamic_smps = true,
|
||||
.support_tx_protection = true,
|
||||
.support_tx_backoff = true,
|
||||
};
|
||||
|
||||
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
|
||||
{
|
||||
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
|
||||
|
||||
IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
|
||||
|
||||
if (mvm->cfg->high_temp)
|
||||
tt->params = &iwl7000_high_temp_tt_params;
|
||||
if (mvm->cfg->thermal_params)
|
||||
tt->params = *mvm->cfg->thermal_params;
|
||||
else
|
||||
tt->params = &iwl7000_tt_params;
|
||||
tt->params = iwl_mvm_default_tt_params;
|
||||
|
||||
tt->throttle = false;
|
||||
tt->dynamic_smps = false;
|
||||
|
@ -101,14 +101,26 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
|
||||
trans_pcie->fw_mon_size = 0;
|
||||
}
|
||||
|
||||
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
|
||||
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct page *page = NULL;
|
||||
dma_addr_t phys;
|
||||
u32 size;
|
||||
u32 size = 0;
|
||||
u8 power;
|
||||
|
||||
if (!max_power) {
|
||||
/* default max_power is maximum */
|
||||
max_power = 26;
|
||||
} else {
|
||||
max_power += 11;
|
||||
}
|
||||
|
||||
if (WARN(max_power > 26,
|
||||
"External buffer size for monitor is too big %d, check the FW TLV\n",
|
||||
max_power))
|
||||
return;
|
||||
|
||||
if (trans_pcie->fw_mon_page) {
|
||||
dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
|
||||
trans_pcie->fw_mon_size,
|
||||
@ -117,7 +129,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
|
||||
}
|
||||
|
||||
phys = 0;
|
||||
for (power = 26; power >= 11; power--) {
|
||||
for (power = max_power; power >= 11; power--) {
|
||||
int order;
|
||||
|
||||
size = BIT(power);
|
||||
@ -143,6 +155,12 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
|
||||
if (WARN_ON_ONCE(!page))
|
||||
return;
|
||||
|
||||
if (power != max_power)
|
||||
IWL_ERR(trans,
|
||||
"Sorry - debug buffer is only %luK while you requested %luK\n",
|
||||
(unsigned long)BIT(power - 10),
|
||||
(unsigned long)BIT(max_power - 10));
|
||||
|
||||
trans_pcie->fw_mon_page = page;
|
||||
trans_pcie->fw_mon_phys = phys;
|
||||
trans_pcie->fw_mon_size = size;
|
||||
@ -834,7 +852,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
|
||||
get_fw_dbg_mode_string(dest->monitor_mode));
|
||||
|
||||
if (dest->monitor_mode == EXTERNAL_MODE)
|
||||
iwl_pcie_alloc_fw_monitor(trans);
|
||||
iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
|
||||
else
|
||||
IWL_WARN(trans, "PCI should have external buffer debug\n");
|
||||
|
||||
@ -908,7 +926,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
|
||||
/* supported for 7000 only for the moment */
|
||||
if (iwlwifi_mod_params.fw_monitor &&
|
||||
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
|
||||
iwl_pcie_alloc_fw_monitor(trans);
|
||||
iwl_pcie_alloc_fw_monitor(trans, 0);
|
||||
|
||||
if (trans_pcie->fw_mon_size) {
|
||||
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
|
||||
@ -2198,6 +2216,29 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
|
||||
return sizeof(**data) + fh_regs_len;
|
||||
}
|
||||
|
||||
static u32
|
||||
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
|
||||
struct iwl_fw_error_dump_fw_mon *fw_mon_data,
|
||||
u32 monitor_len)
|
||||
{
|
||||
u32 buf_size_in_dwords = (monitor_len >> 2);
|
||||
u32 *buffer = (u32 *)fw_mon_data->data;
|
||||
unsigned long flags;
|
||||
u32 i;
|
||||
|
||||
if (!iwl_trans_grab_nic_access(trans, false, &flags))
|
||||
return 0;
|
||||
|
||||
__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
|
||||
for (i = 0; i < buf_size_in_dwords; i++)
|
||||
buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
|
||||
__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
|
||||
|
||||
iwl_trans_release_nic_access(trans, &flags);
|
||||
|
||||
return monitor_len;
|
||||
}
|
||||
|
||||
static
|
||||
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
|
||||
{
|
||||
@ -2250,7 +2291,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
|
||||
trans->dbg_dest_tlv->end_shift;
|
||||
|
||||
/* Make "end" point to the actual end */
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
|
||||
trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
|
||||
end += (1 << trans->dbg_dest_tlv->end_shift);
|
||||
monitor_len = end - base;
|
||||
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
|
||||
@ -2326,9 +2368,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
|
||||
|
||||
len += sizeof(*data) + sizeof(*fw_mon_data);
|
||||
if (trans_pcie->fw_mon_page) {
|
||||
data->len = cpu_to_le32(trans_pcie->fw_mon_size +
|
||||
sizeof(*fw_mon_data));
|
||||
|
||||
/*
|
||||
* The firmware is now asserted, it won't write anything
|
||||
* to the buffer. CPU can take ownership to fetch the
|
||||
@ -2343,10 +2382,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
|
||||
page_address(trans_pcie->fw_mon_page),
|
||||
trans_pcie->fw_mon_size);
|
||||
|
||||
len += trans_pcie->fw_mon_size;
|
||||
} else {
|
||||
/* If we are here then the buffer is internal */
|
||||
|
||||
monitor_len = trans_pcie->fw_mon_size;
|
||||
} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
|
||||
/*
|
||||
* Update pointers to reflect actual values after
|
||||
* shifting
|
||||
@ -2355,10 +2392,18 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
|
||||
trans->dbg_dest_tlv->base_shift;
|
||||
iwl_trans_read_mem(trans, base, fw_mon_data->data,
|
||||
monitor_len / sizeof(u32));
|
||||
data->len = cpu_to_le32(sizeof(*fw_mon_data) +
|
||||
monitor_len);
|
||||
len += monitor_len;
|
||||
} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
|
||||
monitor_len =
|
||||
iwl_trans_pci_dump_marbh_monitor(trans,
|
||||
fw_mon_data,
|
||||
monitor_len);
|
||||
} else {
|
||||
/* Didn't match anything - output no monitor data */
|
||||
monitor_len = 0;
|
||||
}
|
||||
|
||||
len += monitor_len;
|
||||
data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
|
||||
}
|
||||
|
||||
dump_data->len = len;
|
||||
|
@ -621,18 +621,28 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
|
||||
struct ieee_types_assoc_rsp *assoc_rsp;
|
||||
struct mwifiex_bssdescriptor *bss_desc;
|
||||
bool enable_data = true;
|
||||
u16 cap_info, status_code;
|
||||
u16 cap_info, status_code, aid;
|
||||
|
||||
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
|
||||
|
||||
cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
|
||||
status_code = le16_to_cpu(assoc_rsp->status_code);
|
||||
aid = le16_to_cpu(assoc_rsp->a_id);
|
||||
|
||||
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
|
||||
dev_err(priv->adapter->dev,
|
||||
"invalid AID value 0x%x; bits 15:14 not set\n",
|
||||
aid);
|
||||
|
||||
aid &= ~(BIT(15) | BIT(14));
|
||||
|
||||
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
|
||||
sizeof(priv->assoc_rsp_buf));
|
||||
|
||||
memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
|
||||
|
||||
assoc_rsp->a_id = cpu_to_le16(aid);
|
||||
|
||||
if (status_code) {
|
||||
priv->adapter->dbg.num_cmd_assoc_failure++;
|
||||
dev_err(priv->adapter->dev,
|
||||
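The association-response handling above warns when bits 15:14 of the AID are not set and strips them before storing the value; in 802.11 (re)association responses those two most significant bits of the AID field are expected to be 1. A minimal sketch of the same masking, using a made-up raw value:

#include <stdio.h>

#define AID_HIGH_BITS 0xC000u	/* bits 15:14, set to 1 in assoc responses */

int main(void)
{
	unsigned int raw_aid = 0xC007;	/* example value from a response */

	if ((raw_aid & AID_HIGH_BITS) != AID_HIGH_BITS)
		printf("warning: AID 0x%04x does not have bits 15:14 set\n",
		       raw_aid);

	/* The usable association ID is the low 14 bits. */
	printf("AID = %u\n", raw_aid & ~AID_HIGH_BITS);
	return 0;
}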
|
@ -231,11 +231,10 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
goto exit_main_proc;
} else {
adapter->mwifiex_processing = true;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
}
process_start:
do {
adapter->more_task_flag = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
(adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
break;
@ -275,7 +274,6 @@ process_start:
adapter->pm_wakeup_fw_try = true;
mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
adapter->if_ops.wakeup(adapter);
spin_lock_irqsave(&adapter->main_proc_lock, flags);
continue;
}

@ -335,7 +333,6 @@ process_start:
(adapter->ps_state == PS_STATE_PRE_SLEEP) ||
(adapter->ps_state == PS_STATE_SLEEP_CFM) ||
adapter->tx_lock_flag){
spin_lock_irqsave(&adapter->main_proc_lock, flags);
continue;
}

@ -386,12 +383,14 @@ process_start:
}
break;
}
spin_lock_irqsave(&adapter->main_proc_lock, flags);
} while (true);

spin_lock_irqsave(&adapter->main_proc_lock, flags);
if (adapter->more_task_flag)
if (adapter->more_task_flag) {
adapter->more_task_flag = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
goto process_start;
}
adapter->mwifiex_processing = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);

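The main-process change above re-checks more_task_flag while still holding main_proc_lock before leaving the loop, so work queued while the loop was winding down is not lost. A rough userspace sketch of that "recheck under the lock" pattern, with a pthread mutex standing in for the spinlock (not the driver's code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;
static bool processing;
static bool more_task_flag;
static int pass_count;

/* In the driver this would be called from another context. */
static void queue_more_work(void)
{
	pthread_mutex_lock(&proc_lock);
	more_task_flag = true;
	pthread_mutex_unlock(&proc_lock);
}

static void main_process(void)
{
	pthread_mutex_lock(&proc_lock);
	processing = true;

restart:
	more_task_flag = false;
	pthread_mutex_unlock(&proc_lock);

	/* ... drain pending work with the lock dropped ... */
	pass_count++;
	if (pass_count == 1)
		queue_more_work();	/* simulate work arriving mid-drain */

	pthread_mutex_lock(&proc_lock);
	if (more_task_flag)	/* re-check under the lock: nothing is lost */
		goto restart;

	processing = false;
	pthread_mutex_unlock(&proc_lock);
}

int main(void)
{
	main_process();
	printf("done after %d passes, processing=%d\n", pass_count, processing);
	return 0;
}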
|
@ -64,6 +64,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
*(cmd_queued->condition),
(12 * HZ));
if (status <= 0) {
if (status == 0)
status = -ETIMEDOUT;
dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
mwifiex_cancel_all_pending_cmd(adapter);
return status;
|
@ -255,7 +255,7 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
dev_dbg(adapter->dev,
"TDLS peer doesn't support wider bandwitdh\n");
"TDLS peer doesn't support wider bandwidth\n");
return 0;
}
} else {
|
@ -167,7 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
memcpy(&bss_cfg->ht_cap, ht_ie,
memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
memset(&bss_cfg->ht_cap.mcs, 0,
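The uap_cmd.c fix above copies from ht_ie + 2 because cfg80211_find_ie() returns a pointer to the start of the information element, i.e. its one-byte ID and one-byte length, so the HT capability payload begins two bytes later. A standalone sketch of that TLV layout with a simplified finder and a shortened payload (not the cfg80211 API):

#include <stdio.h>
#include <string.h>

/* A tiny 802.11-style IE buffer: 1-byte ID, 1-byte length, then payload. */
static const unsigned char ies[] = {
	45,			/* HT capability element ID */
	4,			/* payload length (shortened for the example) */
	0xad, 0x01, 0x17, 0xff,	/* first bytes of the capability field */
};

static const unsigned char *find_ie(unsigned char eid,
				    const unsigned char *buf, size_t len)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		if (buf[pos] == eid)
			return &buf[pos];	/* points at the IE header */
		pos += 2 + buf[pos + 1];
	}
	return NULL;
}

int main(void)
{
	const unsigned char *ie = find_ie(45, ies, sizeof(ies));
	unsigned char payload[4];

	if (!ie)
		return 1;

	/* Copying from ie would include the 2-byte header; the payload
	 * starts at ie + 2, which is what the fix above relies on.
	 */
	memcpy(payload, ie + 2, sizeof(payload));
	printf("first HT cap byte: 0x%02x\n", payload[0]);
	return 0;
}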
|
@ -536,13 +536,16 @@ void
mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
int ies_len, struct mwifiex_sta_node *node)
{
struct ieee_types_header *ht_cap_ie;
const struct ieee80211_ht_cap *ht_cap;

if (!ies)
return;

ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
if (ht_cap) {
ht_cap_ie = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies,
ies_len);
if (ht_cap_ie) {
ht_cap = (void *)(ht_cap_ie + 1);
node->is_11n_enabled = 1;
node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
IEEE80211_HT_CAP_MAX_AMSDU ?
|
@ -428,6 +428,15 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->tos_to_tid_inv[i];
}

priv->aggr_prio_tbl[6].amsdu
= priv->aggr_prio_tbl[6].ampdu_ap
= priv->aggr_prio_tbl[6].ampdu_user
= BA_STREAM_NOT_ALLOWED;

priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
= priv->aggr_prio_tbl[7].ampdu_user
= BA_STREAM_NOT_ALLOWED;

mwifiex_set_ba_params(priv);
mwifiex_reset_11n_rx_seq_num(priv);

@ -875,7 +875,7 @@ static void _rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
|
||||
break;
|
||||
default:
|
||||
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
|
||||
"[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
|
||||
"[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -613,7 +613,7 @@ static void _rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
"[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
"[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
break;
}
}
|
@ -24,6 +24,7 @@
|
||||
#include <linux/ip.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
#include "../wlcore/wlcore.h"
|
||||
#include "../wlcore/debug.h"
|
||||
@ -578,7 +579,7 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
|
||||
|
||||
static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
|
||||
[PART_TOP_PRCM_ELP_SOC] = {
|
||||
.mem = { .start = 0x00A02000, .size = 0x00010000 },
|
||||
.mem = { .start = 0x00A00000, .size = 0x00012000 },
|
||||
.reg = { .start = 0x00807000, .size = 0x00005000 },
|
||||
.mem2 = { .start = 0x00800000, .size = 0x0000B000 },
|
||||
.mem3 = { .start = 0x00000000, .size = 0x00000000 },
|
||||
@ -862,6 +863,7 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
|
||||
{
|
||||
u32 tmp;
|
||||
int ret;
|
||||
u16 irq_invert;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
|
||||
WL18XX_PHY_INIT_MEM_SIZE);
|
||||
@ -911,6 +913,28 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
|
||||
/* re-enable FDSP clock */
|
||||
ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
|
||||
MEM_FDSP_CLK_120_ENABLE);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = irq_get_trigger_type(wl->irq);
|
||||
if ((ret == IRQ_TYPE_LEVEL_LOW) || (ret == IRQ_TYPE_EDGE_FALLING)) {
|
||||
wl1271_info("using inverted interrupt logic: %d", ret);
|
||||
ret = wlcore_set_partition(wl,
|
||||
&wl->ptable[PART_TOP_PRCM_ELP_SOC]);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = wl18xx_top_reg_read(wl, TOP_FN0_CCCR_REG_32, &irq_invert);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
irq_invert |= BIT(1);
|
||||
ret = wl18xx_top_reg_write(wl, TOP_FN0_CCCR_REG_32, irq_invert);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
@ -1351,9 +1375,10 @@ out:
}

#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)

static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
struct wl18xx_priv_conf *priv_conf)
{
struct wl18xx_priv *priv = wl->priv;
struct wlcore_conf_file *conf_file;
const struct firmware *fw;
int ret;
@ -1362,14 +1387,14 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
if (ret < 0) {
wl1271_error("could not get configuration binary %s: %d",
WL18XX_CONF_FILE_NAME, ret);
goto out_fallback;
return ret;
}

if (fw->size != WL18XX_CONF_SIZE) {
wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
WL18XX_CONF_SIZE, fw->size);
ret = -EINVAL;
goto out;
goto out_release;
}

conf_file = (struct wlcore_conf_file *) fw->data;
@ -1379,7 +1404,7 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
"expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
conf_file->header.magic);
ret = -EINVAL;
goto out;
goto out_release;
}

if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
@ -1387,30 +1412,34 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
"expected 0x%08x got 0x%08x",
WL18XX_CONF_VERSION, conf_file->header.version);
ret = -EINVAL;
goto out;
goto out_release;
}

memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));
memcpy(conf, &conf_file->core, sizeof(*conf));
memcpy(priv_conf, &conf_file->priv, sizeof(*priv_conf));

goto out;

out_fallback:
wl1271_warning("falling back to default config");

/* apply driver default configuration */
memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
/* apply default private configuration */
memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));

/* For now we just fallback */
return 0;

out:
out_release:
release_firmware(fw);
return ret;
}

static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
{
struct wl18xx_priv *priv = wl->priv;

if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf) < 0) {
wl1271_warning("falling back to default config");

/* apply driver default configuration */
memcpy(&wl->conf, &wl18xx_conf, sizeof(wl->conf));
/* apply default private configuration */
memcpy(&priv->conf, &wl18xx_default_priv_conf,
sizeof(priv->conf));
}

return 0;
}

static int wl18xx_plt_init(struct wl1271 *wl)
{
int ret;

@ -109,6 +109,7 @@

#define WL18XX_WELP_ARM_COMMAND (WL18XX_REGISTERS_BASE + 0x7100)
#define WL18XX_ENABLE (WL18XX_REGISTERS_BASE + 0x01543C)
#define TOP_FN0_CCCR_REG_32 (WL18XX_TOP_OCP_BASE + 0x64)

/* PRCM registers */
#define PLATFORM_DETECTION 0xA0E3E0

@ -5965,10 +5965,6 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
{
int ret;

ret = wl12xx_set_power_on(wl);
if (ret < 0)
return ret;

ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
if (ret < 0)
goto out;
@ -5984,7 +5980,6 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
ret = wl->ops->get_mac(wl);

out:
wl1271_power_off(wl);
return ret;
}

@ -6432,10 +6427,22 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
else
wl->irq_flags |= IRQF_ONESHOT;

ret = wl12xx_set_power_on(wl);
if (ret < 0)
goto out_free_nvs;

ret = wl12xx_get_hw_info(wl);
if (ret < 0) {
wl1271_error("couldn't get hw info");
wl1271_power_off(wl);
goto out_free_nvs;
}

ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
wl->irq_flags, pdev->name, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
wl1271_error("interrupt configuration failed");
wl1271_power_off(wl);
goto out_free_nvs;
}

@ -6449,12 +6456,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
}
#endif
disable_irq(wl->irq);

ret = wl12xx_get_hw_info(wl);
if (ret < 0) {
wl1271_error("couldn't get hw info");
goto out_irq;
}
wl1271_power_off(wl);

ret = wl->ops->identify_chip(wl);
if (ret < 0)