Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-25 20:14:25 +08:00)
Merge ath-next from ath.git

Major changes in ath10k:

* add support for qca6174 hardware
* enable RX batching to reduce CPU load

commit d53071143a
@@ -9,12 +9,14 @@ ath10k_core-y += mac.o \
 		 txrx.o \
 		 wmi.o \
 		 wmi-tlv.o \
-		 bmi.o
+		 bmi.o \
+		 hw.o
 
 ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
 
 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 ath10k_pci-y += pci.o \
@@ -803,7 +803,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
 	int ce_id;
 
 	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
-		u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 
 		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
 		ath10k_ce_error_intr_disable(ar, ctrl_addr);
@@ -832,7 +832,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
-	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 
 	nentries = roundup_pow_of_two(attr->src_nentries);
 
@@ -869,7 +869,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
-	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 
 	nentries = roundup_pow_of_two(attr->dest_nentries);
 
@@ -1051,7 +1051,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
 
 static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
 {
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 
 	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
 	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
@@ -1061,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
 
 static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
 {
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 
 	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
 	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
@@ -1098,7 +1098,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
 
 	ce_state->ar = ar;
 	ce_state->id = ce_id;
-	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 	ce_state->attr_flags = attr->flags;
 	ce_state->src_sz_max = attr->src_sz_max;
 
@@ -394,7 +394,7 @@ struct ce_attr {
 #define DST_WATERMARK_HIGH_RESET	0
 #define DST_WATERMARK_ADDRESS		0x0050
 
-static inline u32 ath10k_ce_base_address(unsigned int ce_id)
+static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 {
 	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
 }
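The ce.c/ce.h hunks above only thread the struct ath10k pointer into ath10k_ce_base_address(); the return value is unchanged so far. The point of the extra argument is to let the copy-engine base come from the per-chip register table (ar->regs) introduced later in this diff instead of QCA988X-only constants. A hedged sketch of what a regs-driven variant could look like (hypothetical helper; only the ce0/ce1 base fields are taken from the hw.h struct added below):

/* Illustrative sketch, not part of the diff: derive the CE base from
 * ar->regs so QCA988X and QCA6174 can use different address maps.
 */
static inline u32 ath10k_ce_base_address_sketch(struct ath10k *ar,
						unsigned int ce_id)
{
	u32 ce0 = ar->regs->ce0_base_address;
	u32 stride = ar->regs->ce1_base_address - ar->regs->ce0_base_address;

	return ce0 + stride * ce_id;
}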
@@ -57,6 +57,49 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 			.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
 		},
 	},
+	{
+		.id = QCA6174_HW_2_1_VERSION,
+		.name = "qca6174 hw2.1",
+		.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.fw = {
+			.dir = QCA6174_HW_2_1_FW_DIR,
+			.fw = QCA6174_HW_2_1_FW_FILE,
+			.otp = QCA6174_HW_2_1_OTP_FILE,
+			.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA6174_HW_3_0_VERSION,
+		.name = "qca6174 hw3.0",
+		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.fw = {
+			.dir = QCA6174_HW_3_0_FW_DIR,
+			.fw = QCA6174_HW_3_0_FW_FILE,
+			.otp = QCA6174_HW_3_0_OTP_FILE,
+			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA6174_HW_3_2_VERSION,
+		.name = "qca6174 hw3.2",
+		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.fw = {
+			/* uses same binaries as hw3.0 */
+			.dir = QCA6174_HW_3_0_FW_DIR,
+			.fw = QCA6174_HW_3_0_FW_FILE,
+			.otp = QCA6174_HW_3_0_OTP_FILE,
+			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
 };
 
 static void ath10k_send_suspend_complete(struct ath10k *ar)
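Each new table entry keys the firmware directory, OTP and board files off a QCA6174 BMI version id. The lookup itself is not part of this hunk; a hedged sketch of how such a table is typically scanned (illustrative function name, matching on the .id field shown above):

/* Illustrative only: return the hw_params entry whose .id matches the
 * detected target version, or NULL for unknown hardware.
 */
static const struct ath10k_hw_params *
ath10k_hw_params_lookup_sketch(u32 target_version)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++)
		if (ath10k_hw_params_list[i].id == target_version)
			return &ath10k_hw_params_list[i];

	return NULL;
}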
@@ -927,6 +970,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
 	case ATH10K_FW_WMI_OP_VERSION_TLV:
 		ar->max_num_peers = TARGET_TLV_NUM_PEERS;
 		ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
 		ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+		ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
@@ -1060,6 +1104,18 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
 		goto err_hif_stop;
 	}
 
+	/* If firmware indicates Full Rx Reorder support it must be used in a
+	 * slightly different manner. Let HTT code know.
+	 */
+	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+						ar->wmi.svc_map));
+
+	status = ath10k_htt_rx_ring_refill(ar);
+	if (status) {
+		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+		goto err_hif_stop;
+	}
+
 	/* we don't care about HTT in UTF mode */
 	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
 		status = ath10k_htt_setup(&ar->htt);
@@ -1295,6 +1351,7 @@ EXPORT_SYMBOL(ath10k_core_unregister);
 
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 				  enum ath10k_bus bus,
+				  enum ath10k_hw_rev hw_rev,
 				  const struct ath10k_hif_ops *hif_ops)
 {
 	struct ath10k *ar;
@@ -1307,9 +1364,24 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 	ar->ath_common.priv = ar;
 	ar->ath_common.hw = ar->hw;
 	ar->dev = dev;
+	ar->hw_rev = hw_rev;
 	ar->hif.ops = hif_ops;
 	ar->hif.bus = bus;
 
+	switch (hw_rev) {
+	case ATH10K_HW_QCA988X:
+		ar->regs = &qca988x_regs;
+		break;
+	case ATH10K_HW_QCA6174:
+		ar->regs = &qca6174_regs;
+		break;
+	default:
+		ath10k_err(ar, "unsupported core hardware revision %d\n",
+			   hw_rev);
+		ret = -ENOTSUPP;
+		goto err_free_mac;
+	}
+
 	init_completion(&ar->scan.started);
 	init_completion(&ar->scan.completed);
 	init_completion(&ar->scan.on_channel);
@@ -97,6 +97,11 @@ struct ath10k_skb_cb {
 	} bcn;
 } __packed;
 
+struct ath10k_skb_rxcb {
+	dma_addr_t paddr;
+	struct hlist_node hlist;
+};
+
 static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
 {
 	BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
@@ -104,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
 	return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
 }
 
+static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
+	return (struct ath10k_skb_rxcb *)skb->cb;
+}
+
+#define ATH10K_RXCB_SKB(rxcb) \
+	container_of((void *)rxcb, struct sk_buff, cb)
+
 static inline u32 host_interest_item_address(u32 item_offset)
 {
 	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -239,10 +253,21 @@ struct ath10k_sta {
 	u32 smps;
 
 	struct work_struct update_wk;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* protected by conf_mutex */
+	bool aggr_mode;
+#endif
 };
 
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
 
+enum ath10k_beacon_state {
+	ATH10K_BEACON_SCHEDULED = 0,
+	ATH10K_BEACON_SENDING,
+	ATH10K_BEACON_SENT,
+};
+
 struct ath10k_vif {
 	struct list_head list;
 
@@ -253,7 +278,7 @@ struct ath10k_vif {
 	u32 dtim_period;
 	struct sk_buff *beacon;
 	/* protected by data_lock */
-	bool beacon_sent;
+	enum ath10k_beacon_state beacon_state;
 	void *beacon_buf;
 	dma_addr_t beacon_paddr;
 
@@ -266,10 +291,8 @@ struct ath10k_vif {
 	u32 aid;
 	u8 bssid[ETH_ALEN];
 
-	struct work_struct wep_key_work;
 	struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
-	u8 def_wep_key_idx;
-	u8 def_wep_key_newidx;
+	s8 def_wep_key_idx;
 
 	u16 tx_seq_no;
 
@@ -296,6 +319,7 @@ struct ath10k_vif {
 	bool use_cts_prot;
 	int num_legacy_stations;
 	int txpower;
+	struct wmi_wmm_params_all_arg wmm_params;
 };
 
 struct ath10k_vif_iter {
@@ -326,6 +350,7 @@ struct ath10k_debug {
 
 	/* protected by conf_mutex */
 	u32 fw_dbglog_mask;
+	u32 fw_dbglog_level;
 	u32 pktlog_filter;
 	u32 reg_addr;
 	u32 nf_cal_period;
@@ -452,6 +477,7 @@ struct ath10k {
 	struct device *dev;
 	u8 mac_addr[ETH_ALEN];
 
+	enum ath10k_hw_rev hw_rev;
 	u32 chip_id;
 	u32 target_version;
 	u8 fw_version_major;
@@ -467,9 +493,6 @@ struct ath10k {
 
 	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
 
-	struct targetdef *targetdef;
-	struct hostdef *hostdef;
-
 	bool p2p;
 
 	struct {
@@ -479,6 +502,7 @@ struct ath10k {
 
 	struct completion target_suspend;
 
+	const struct ath10k_hw_regs *regs;
 	struct ath10k_bmi bmi;
 	struct ath10k_wmi wmi;
 	struct ath10k_htc htc;
@@ -559,7 +583,6 @@ struct ath10k {
 	u8 cfg_tx_chainmask;
 	u8 cfg_rx_chainmask;
 
-	struct wmi_pdev_set_wmm_params_arg wmm_params;
 	struct completion install_key_done;
 
 	struct completion vdev_setup_done;
@@ -643,6 +666,7 @@ struct ath10k {
 
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 				  enum ath10k_bus bus,
+				  enum ath10k_hw_rev hw_rev,
 				  const struct ath10k_hif_ops *hif_ops);
 void ath10k_core_destroy(struct ath10k *ar);
 
@@ -371,7 +371,7 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
 
 	ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
 					  1*HZ);
-	if (ret <= 0)
+	if (ret == 0)
 		return -ETIMEDOUT;
 
 	spin_lock_bh(&ar->data_lock);
@@ -1318,10 +1318,10 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
 {
 	struct ath10k *ar = file->private_data;
 	unsigned int len;
-	char buf[32];
+	char buf[64];
 
-	len = scnprintf(buf, sizeof(buf), "0x%08x\n",
-			ar->debug.fw_dbglog_mask);
+	len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+			ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
 
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
@@ -1331,19 +1331,32 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
 					size_t count, loff_t *ppos)
 {
 	struct ath10k *ar = file->private_data;
-	unsigned long mask;
 	int ret;
+	char buf[64];
+	unsigned int log_level, mask;
 
-	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
-	if (ret)
-		return ret;
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = 0;
+
+	ret = sscanf(buf, "%x %u", &mask, &log_level);
+
+	if (!ret)
+		return -EINVAL;
+
+	if (ret == 1)
+		/* default if user did not specify */
+		log_level = ATH10K_DBGLOG_LEVEL_WARN;
 
 	mutex_lock(&ar->conf_mutex);
 
 	ar->debug.fw_dbglog_mask = mask;
+	ar->debug.fw_dbglog_level = log_level;
 
 	if (ar->state == ATH10K_STATE_ON) {
-		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+					    ar->debug.fw_dbglog_level);
 		if (ret) {
 			ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
 				    ret);
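The fw_dbglog debugfs file now takes an optional second field: writing a mask plus a level (for example "0xffffffff 2") configures both, while writing only a mask falls back to ATH10K_DBGLOG_LEVEL_WARN. A standalone sketch of the same parsing rule (not driver code):

/* Sketch of the "%x %u" rule used above: the mask is mandatory, the
 * log level defaults to WARN when omitted.
 */
static int parse_fw_dbglog_sketch(const char *buf, unsigned int *mask,
				  unsigned int *level)
{
	int n = sscanf(buf, "%x %u", mask, level);

	if (n == 0)
		return -EINVAL;				/* no mask given */
	if (n == 1)
		*level = ATH10K_DBGLOG_LEVEL_WARN;	/* default level */
	return 0;
}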
@@ -1685,7 +1698,8 @@ int ath10k_debug_start(struct ath10k *ar)
 			    ret);
 
 	if (ar->debug.fw_dbglog_mask) {
-		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+					    ATH10K_DBGLOG_LEVEL_WARN);
 		if (ret)
 			/* not serious */
 			ath10k_warn(ar, "failed to enable dbglog during start: %d",
@@ -48,6 +48,12 @@ enum ath10k_pktlog_filter {
 	ATH10K_PKTLOG_ANY = 0x00000001f,
 };
 
+enum ath10k_dbg_aggr_mode {
+	ATH10K_DBG_AGGR_MODE_AUTO,
+	ATH10K_DBG_AGGR_MODE_MANUAL,
+	ATH10K_DBG_AGGR_MODE_MAX,
+};
+
 extern unsigned int ath10k_debug_mask;
 
 __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
@@ -77,7 +83,6 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
 void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       struct ethtool_stats *stats, u64 *data);
-
 #else
 static inline int ath10k_debug_start(struct ath10k *ar)
 {
@@ -129,6 +134,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
 #define ath10k_debug_get_et_stats NULL
 
 #endif /* CONFIG_ATH10K_DEBUGFS */
+#ifdef CONFIG_MAC80211_DEBUGFS
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, struct dentry *dir);
+#endif /* CONFIG_MAC80211_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
 __printf(3, 4) void ath10k_dbg(struct ath10k *ar,
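ath10k_sta_add_debugfs(), declared above and implemented in the new debugfs_sta.c below, creates per-station aggr_mode, addba, addba_resp and delba files. All of its write handlers follow the same shape: parse one or more unsigned integers, range-check them, and only issue WMI commands while the device is up and manual aggregation is selected. A simplified sketch of the single-value aggr_mode case (locking and the WMI call are trimmed):

/* Simplified sketch of the aggr_mode write path shown later in this
 * diff: one integer from user space, bounds-checked against the enum
 * added to debug.h above.
 */
static ssize_t aggr_mode_write_sketch(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	u32 aggr_mode;

	if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
		return -EINVAL;

	if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
		return -EINVAL;

	/* real handler: take conf_mutex, clear addba state via WMI, then
	 * store the new mode in arsta->aggr_mode
	 */
	return count;
}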
drivers/net/wireless/ath/ath10k/debugfs_sta.c (new file, 243 lines)
@@ -0,0 +1,243 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "core.h"
|
||||
#include "wmi-ops.h"
|
||||
#include "debug.h"
|
||||
|
||||
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ieee80211_sta *sta = file->private_data;
|
||||
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
|
||||
struct ath10k *ar = arsta->arvif->ar;
|
||||
char buf[32];
|
||||
int len = 0;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
|
||||
(arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
|
||||
"auto" : "manual");
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
}
|
||||
|
||||
static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ieee80211_sta *sta = file->private_data;
|
||||
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
|
||||
struct ath10k *ar = arsta->arvif->ar;
|
||||
u32 aggr_mode;
|
||||
int ret;
|
||||
|
||||
if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
|
||||
return -EINVAL;
|
||||
|
||||
if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
if ((ar->state != ATH10K_STATE_ON) ||
|
||||
(aggr_mode == arsta->aggr_mode)) {
|
||||
ret = count;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
arsta->aggr_mode = aggr_mode;
|
||||
out:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct file_operations fops_aggr_mode = {
|
||||
.read = ath10k_dbg_sta_read_aggr_mode,
|
||||
.write = ath10k_dbg_sta_write_aggr_mode,
|
||||
.open = simple_open,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ieee80211_sta *sta = file->private_data;
|
||||
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
|
||||
struct ath10k *ar = arsta->arvif->ar;
|
||||
u32 tid, buf_size;
|
||||
int ret;
|
||||
char buf[64];
|
||||
|
||||
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
|
||||
|
||||
/* make sure that buf is null terminated */
|
||||
buf[sizeof(buf) - 1] = '\0';
|
||||
|
||||
ret = sscanf(buf, "%u %u", &tid, &buf_size);
|
||||
if (ret != 2)
|
||||
return -EINVAL;
|
||||
|
||||
/* Valid TID values are 0 through 15 */
|
||||
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
if ((ar->state != ATH10K_STATE_ON) ||
|
||||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
|
||||
ret = count;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
|
||||
tid, buf_size);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
|
||||
arsta->arvif->vdev_id, sta->addr, tid, buf_size);
|
||||
}
|
||||
|
||||
ret = count;
|
||||
out:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct file_operations fops_addba = {
|
||||
.write = ath10k_dbg_sta_write_addba,
|
||||
.open = simple_open,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ieee80211_sta *sta = file->private_data;
|
||||
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
|
||||
struct ath10k *ar = arsta->arvif->ar;
|
||||
u32 tid, status;
|
||||
int ret;
|
||||
char buf[64];
|
||||
|
||||
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
|
||||
|
||||
/* make sure that buf is null terminated */
|
||||
buf[sizeof(buf) - 1] = '\0';
|
||||
|
||||
ret = sscanf(buf, "%u %u", &tid, &status);
|
||||
if (ret != 2)
|
||||
return -EINVAL;
|
||||
|
||||
/* Valid TID values are 0 through 15 */
|
||||
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
if ((ar->state != ATH10K_STATE_ON) ||
|
||||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
|
||||
ret = count;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
|
||||
tid, status);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
|
||||
arsta->arvif->vdev_id, sta->addr, tid, status);
|
||||
}
|
||||
ret = count;
|
||||
out:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct file_operations fops_addba_resp = {
|
||||
.write = ath10k_dbg_sta_write_addba_resp,
|
||||
.open = simple_open,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ieee80211_sta *sta = file->private_data;
|
||||
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
|
||||
struct ath10k *ar = arsta->arvif->ar;
|
||||
u32 tid, initiator, reason;
|
||||
int ret;
|
||||
char buf[64];
|
||||
|
||||
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
|
||||
|
||||
/* make sure that buf is null terminated */
|
||||
buf[sizeof(buf) - 1] = '\0';
|
||||
|
||||
ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
|
||||
if (ret != 3)
|
||||
return -EINVAL;
|
||||
|
||||
/* Valid TID values are 0 through 15 */
|
||||
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
if ((ar->state != ATH10K_STATE_ON) ||
|
||||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
|
||||
ret = count;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
|
||||
tid, initiator, reason);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
|
||||
arsta->arvif->vdev_id, sta->addr, tid, initiator,
|
||||
reason);
|
||||
}
|
||||
ret = count;
|
||||
out:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct file_operations fops_delba = {
|
||||
.write = ath10k_dbg_sta_write_delba,
|
||||
.open = simple_open,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta, struct dentry *dir)
|
||||
{
|
||||
debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
|
||||
&fops_aggr_mode);
|
||||
debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
|
||||
debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
|
||||
debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
|
||||
}
|
@@ -703,11 +703,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
 	/* wait for response */
 	status = wait_for_completion_timeout(&htc->ctl_resp,
 					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
-	if (status <= 0) {
-		if (status == 0)
-			status = -ETIMEDOUT;
+	if (status == 0) {
 		ath10k_err(ar, "Service connect timeout: %d\n", status);
-		return status;
+		return -ETIMEDOUT;
 	}
 
 	/* we controlled the buffer creation, it's aligned */
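wait_for_completion_timeout() returns 0 on timeout and the number of jiffies left otherwise; it never returns a negative value, so the old "status <= 0" test silently reused 0 as an error code. The corrected pattern, as a minimal standalone sketch:

/* Sketch: map a wait_for_completion_timeout() result to an errno. */
static int wait_ctl_resp_sketch(struct completion *ctl_resp,
				unsigned long timeout)
{
	unsigned long time_left = wait_for_completion_timeout(ctl_resp,
							      timeout);

	if (time_left == 0)
		return -ETIMEDOUT;	/* timed out */

	return 0;			/* completed with time to spare */
}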
@@ -53,7 +53,6 @@ int ath10k_htt_init(struct ath10k *ar)
 	struct ath10k_htt *htt = &ar->htt;
 
 	htt->ar = ar;
-	htt->max_throughput_mbps = 800;
 
 	/*
 	 * Prefetch enough data to satisfy target
@@ -102,7 +101,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
 
 	status = wait_for_completion_timeout(&htt->target_version_received,
 					     HTT_TARGET_VERSION_TIMEOUT_HZ);
-	if (status <= 0) {
+	if (status == 0) {
 		ath10k_warn(ar, "htt version request timed out\n");
 		return -ETIMEDOUT;
 	}
@ -21,6 +21,7 @@
|
||||
#include <linux/bug.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/hashtable.h>
|
||||
#include <net/mac80211.h>
|
||||
|
||||
#include "htc.h"
|
||||
@ -286,7 +287,19 @@ enum htt_t2h_msg_type {
|
||||
HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
|
||||
HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
|
||||
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
|
||||
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
|
||||
HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
|
||||
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
|
||||
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
|
||||
/* 0x13 reservd */
|
||||
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
|
||||
|
||||
/* FIXME: Do not depend on this event id. Numbering of this event id is
|
||||
* broken across different firmware revisions and HTT version fails to
|
||||
* indicate this.
|
||||
*/
|
||||
HTT_T2H_MSG_TYPE_TEST,
|
||||
|
||||
/* keep this last */
|
||||
HTT_T2H_NUM_MSGS
|
||||
};
|
||||
@ -655,6 +668,53 @@ struct htt_rx_fragment_indication {
|
||||
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
|
||||
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
|
||||
|
||||
struct htt_rx_pn_ind {
|
||||
__le16 peer_id;
|
||||
u8 tid;
|
||||
u8 seqno_start;
|
||||
u8 seqno_end;
|
||||
u8 pn_ie_count;
|
||||
u8 reserved;
|
||||
u8 pn_ies[0];
|
||||
} __packed;
|
||||
|
||||
struct htt_rx_offload_msdu {
|
||||
__le16 msdu_len;
|
||||
__le16 peer_id;
|
||||
u8 vdev_id;
|
||||
u8 tid;
|
||||
u8 fw_desc;
|
||||
u8 payload[0];
|
||||
} __packed;
|
||||
|
||||
struct htt_rx_offload_ind {
|
||||
u8 reserved;
|
||||
__le16 msdu_count;
|
||||
} __packed;
|
||||
|
||||
struct htt_rx_in_ord_msdu_desc {
|
||||
__le32 msdu_paddr;
|
||||
__le16 msdu_len;
|
||||
u8 fw_desc;
|
||||
u8 reserved;
|
||||
} __packed;
|
||||
|
||||
struct htt_rx_in_ord_ind {
|
||||
u8 info;
|
||||
__le16 peer_id;
|
||||
u8 vdev_id;
|
||||
u8 reserved;
|
||||
__le16 msdu_count;
|
||||
struct htt_rx_in_ord_msdu_desc msdu_descs[0];
|
||||
} __packed;
|
||||
|
||||
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
|
||||
#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
|
||||
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
|
||||
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
|
||||
#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
|
||||
#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
|
||||
|
||||
/*
|
||||
* target -> host test message definition
|
||||
*
|
||||
@ -1150,6 +1210,9 @@ struct htt_resp {
|
||||
struct htt_rx_test rx_test;
|
||||
struct htt_pktlog_msg pktlog_msg;
|
||||
struct htt_stats_conf stats_conf;
|
||||
struct htt_rx_pn_ind rx_pn_ind;
|
||||
struct htt_rx_offload_ind rx_offload_ind;
|
||||
struct htt_rx_in_ord_ind rx_in_ord_ind;
|
||||
};
|
||||
} __packed;
|
||||
|
||||
@ -1182,7 +1245,6 @@ struct ath10k_htt {
|
||||
struct ath10k *ar;
|
||||
enum ath10k_htc_ep_id eid;
|
||||
|
||||
int max_throughput_mbps;
|
||||
u8 target_version_major;
|
||||
u8 target_version_minor;
|
||||
struct completion target_version_received;
|
||||
@ -1198,6 +1260,20 @@ struct ath10k_htt {
|
||||
* filled.
|
||||
*/
|
||||
struct sk_buff **netbufs_ring;
|
||||
|
||||
/* This is used only with firmware supporting IN_ORD_IND.
|
||||
*
|
||||
* With Full Rx Reorder the HTT Rx Ring is more of a temporary
|
||||
* buffer ring from which buffer addresses are copied by the
|
||||
* firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
|
||||
* pointing to specific (re-ordered) buffers.
|
||||
*
|
||||
* FIXME: With kernel generic hashing functions there's a lot
|
||||
* of hash collisions for sk_buffs.
|
||||
*/
|
||||
bool in_ord_rx;
|
||||
DECLARE_HASHTABLE(skb_table, 4);
|
||||
|
||||
/*
|
||||
* Ring of buffer addresses -
|
||||
* This ring holds the "physical" device address of the
|
||||
@ -1252,12 +1328,11 @@ struct ath10k_htt {
|
||||
|
||||
unsigned int prefetch_len;
|
||||
|
||||
/* Protects access to %pending_tx, %used_msdu_ids */
|
||||
/* Protects access to pending_tx, num_pending_tx */
|
||||
spinlock_t tx_lock;
|
||||
int max_num_pending_tx;
|
||||
int num_pending_tx;
|
||||
struct sk_buff **pending_tx;
|
||||
unsigned long *used_msdu_ids; /* bitmap */
|
||||
struct idr pending_tx;
|
||||
wait_queue_head_t empty_tx_wq;
|
||||
struct dma_pool *tx_pool;
|
||||
|
||||
@ -1271,6 +1346,7 @@ struct ath10k_htt {
|
||||
struct tasklet_struct txrx_compl_task;
|
||||
struct sk_buff_head tx_compl_q;
|
||||
struct sk_buff_head rx_compl_q;
|
||||
struct sk_buff_head rx_in_ord_compl_q;
|
||||
|
||||
/* rx_status template */
|
||||
struct ieee80211_rx_status rx_status;
|
||||
@ -1334,6 +1410,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
|
||||
void ath10k_htt_tx_free(struct ath10k_htt *htt);
|
||||
|
||||
int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
|
||||
int ath10k_htt_rx_ring_refill(struct ath10k *ar);
|
||||
void ath10k_htt_rx_free(struct ath10k_htt *htt);
|
||||
|
||||
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
|
||||
@ -1346,7 +1423,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
|
||||
u8 max_subfrms_amsdu);
|
||||
|
||||
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
|
||||
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
|
||||
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
|
||||
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
|
||||
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
|
||||
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
|
||||
|
@@ -25,8 +25,8 @@
 
 #include <linux/log2.h>
 
-#define HTT_RX_RING_SIZE 1024
-#define HTT_RX_RING_FILL_LEVEL 1000
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
 
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
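Tying the fill level to the ring size keeps the software fill target strictly below half of the hardware ring, which the in-order RX replenish path relies on. For illustration only (the real value comes from HTT_RX_RING_SIZE_MAX, which is not shown in this diff):

/* Assumed example: if HTT_RX_RING_SIZE_MAX were 2048, then
 *	HTT_RX_RING_FILL_LEVEL = (2048 / 2) - 1 = 1023
 * which satisfies the BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >=
 * HTT_RX_RING_SIZE / 2) check added in __ath10k_htt_rx_ring_fill_n().
 */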
@ -34,31 +34,70 @@
|
||||
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
|
||||
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
|
||||
{
|
||||
struct ath10k_skb_rxcb *rxcb;
|
||||
|
||||
hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
|
||||
if (rxcb->paddr == paddr)
|
||||
return ATH10K_RXCB_SKB(rxcb);
|
||||
|
||||
WARN_ON_ONCE(1);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct ath10k_skb_cb *cb;
|
||||
struct ath10k_skb_rxcb *rxcb;
|
||||
struct hlist_node *n;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
|
||||
skb = htt->rx_ring.netbufs_ring[i];
|
||||
cb = ATH10K_SKB_CB(skb);
|
||||
dma_unmap_single(htt->ar->dev, cb->paddr,
|
||||
skb->len + skb_tailroom(skb),
|
||||
DMA_FROM_DEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
if (htt->rx_ring.in_ord_rx) {
|
||||
hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
|
||||
skb = ATH10K_RXCB_SKB(rxcb);
|
||||
dma_unmap_single(htt->ar->dev, rxcb->paddr,
|
||||
skb->len + skb_tailroom(skb),
|
||||
DMA_FROM_DEVICE);
|
||||
hash_del(&rxcb->hlist);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < htt->rx_ring.size; i++) {
|
||||
skb = htt->rx_ring.netbufs_ring[i];
|
||||
if (!skb)
|
||||
continue;
|
||||
|
||||
rxcb = ATH10K_SKB_RXCB(skb);
|
||||
dma_unmap_single(htt->ar->dev, rxcb->paddr,
|
||||
skb->len + skb_tailroom(skb),
|
||||
DMA_FROM_DEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
}
|
||||
|
||||
htt->rx_ring.fill_cnt = 0;
|
||||
hash_init(htt->rx_ring.skb_table);
|
||||
memset(htt->rx_ring.netbufs_ring, 0,
|
||||
htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
|
||||
}
|
||||
|
||||
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
|
||||
{
|
||||
struct htt_rx_desc *rx_desc;
|
||||
struct ath10k_skb_rxcb *rxcb;
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t paddr;
|
||||
int ret = 0, idx;
|
||||
|
||||
/* The Full Rx Reorder firmware has no way of telling the host
|
||||
* implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
|
||||
* To keep things simple make sure ring is always half empty. This
|
||||
* guarantees there'll be no replenishment overruns possible.
|
||||
*/
|
||||
BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
|
||||
|
||||
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
|
||||
while (num > 0) {
|
||||
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
|
||||
@ -86,17 +125,29 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ATH10K_SKB_CB(skb)->paddr = paddr;
|
||||
rxcb = ATH10K_SKB_RXCB(skb);
|
||||
rxcb->paddr = paddr;
|
||||
htt->rx_ring.netbufs_ring[idx] = skb;
|
||||
htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
|
||||
htt->rx_ring.fill_cnt++;
|
||||
|
||||
if (htt->rx_ring.in_ord_rx) {
|
||||
hash_add(htt->rx_ring.skb_table,
|
||||
&ATH10K_SKB_RXCB(skb)->hlist,
|
||||
(u32)paddr);
|
||||
}
|
||||
|
||||
num--;
|
||||
idx++;
|
||||
idx &= htt->rx_ring.size_mask;
|
||||
}
|
||||
|
||||
fail:
|
||||
/*
|
||||
* Make sure the rx buffer is updated before available buffer
|
||||
* index to avoid any potential rx ring corruption.
|
||||
*/
|
||||
mb();
|
||||
*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
|
||||
return ret;
|
||||
}
|
||||
@ -153,22 +204,20 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
|
||||
ath10k_htt_rx_msdu_buff_replenish(htt);
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
|
||||
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
int i;
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
int ret;
|
||||
|
||||
for (i = 0; i < htt->rx_ring.size; i++) {
|
||||
skb = htt->rx_ring.netbufs_ring[i];
|
||||
if (!skb)
|
||||
continue;
|
||||
spin_lock_bh(&htt->rx_ring.lock);
|
||||
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
|
||||
htt->rx_ring.fill_cnt));
|
||||
spin_unlock_bh(&htt->rx_ring.lock);
|
||||
|
||||
dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
|
||||
skb->len + skb_tailroom(skb),
|
||||
DMA_FROM_DEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
htt->rx_ring.netbufs_ring[i] = NULL;
|
||||
}
|
||||
if (ret)
|
||||
ath10k_htt_rx_ring_free(htt);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void ath10k_htt_rx_free(struct ath10k_htt *htt)
|
||||
@ -179,8 +228,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
|
||||
|
||||
skb_queue_purge(&htt->tx_compl_q);
|
||||
skb_queue_purge(&htt->rx_compl_q);
|
||||
skb_queue_purge(&htt->rx_in_ord_compl_q);
|
||||
|
||||
ath10k_htt_rx_ring_clean_up(htt);
|
||||
ath10k_htt_rx_ring_free(htt);
|
||||
|
||||
dma_free_coherent(htt->ar->dev,
|
||||
(htt->rx_ring.size *
|
||||
@ -212,6 +262,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
|
||||
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
|
||||
msdu = htt->rx_ring.netbufs_ring[idx];
|
||||
htt->rx_ring.netbufs_ring[idx] = NULL;
|
||||
htt->rx_ring.paddrs_ring[idx] = 0;
|
||||
|
||||
idx++;
|
||||
idx &= htt->rx_ring.size_mask;
|
||||
@ -219,7 +270,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
|
||||
htt->rx_ring.fill_cnt--;
|
||||
|
||||
dma_unmap_single(htt->ar->dev,
|
||||
ATH10K_SKB_CB(msdu)->paddr,
|
||||
ATH10K_SKB_RXCB(msdu)->paddr,
|
||||
msdu->len + skb_tailroom(msdu),
|
||||
DMA_FROM_DEVICE);
|
||||
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
|
||||
@ -379,6 +430,82 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
|
||||
ath10k_htt_rx_msdu_buff_replenish(htt);
|
||||
}
|
||||
|
||||
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
|
||||
u32 paddr)
|
||||
{
|
||||
struct ath10k *ar = htt->ar;
|
||||
struct ath10k_skb_rxcb *rxcb;
|
||||
struct sk_buff *msdu;
|
||||
|
||||
lockdep_assert_held(&htt->rx_ring.lock);
|
||||
|
||||
msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
|
||||
if (!msdu)
|
||||
return NULL;
|
||||
|
||||
rxcb = ATH10K_SKB_RXCB(msdu);
|
||||
hash_del(&rxcb->hlist);
|
||||
htt->rx_ring.fill_cnt--;
|
||||
|
||||
dma_unmap_single(htt->ar->dev, rxcb->paddr,
|
||||
msdu->len + skb_tailroom(msdu),
|
||||
DMA_FROM_DEVICE);
|
||||
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
|
||||
msdu->data, msdu->len + skb_tailroom(msdu));
|
||||
|
||||
return msdu;
|
||||
}
|
||||
|
||||
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
|
||||
struct htt_rx_in_ord_ind *ev,
|
||||
struct sk_buff_head *list)
|
||||
{
|
||||
struct ath10k *ar = htt->ar;
|
||||
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
|
||||
struct htt_rx_desc *rxd;
|
||||
struct sk_buff *msdu;
|
||||
int msdu_count;
|
||||
bool is_offload;
|
||||
u32 paddr;
|
||||
|
||||
lockdep_assert_held(&htt->rx_ring.lock);
|
||||
|
||||
msdu_count = __le16_to_cpu(ev->msdu_count);
|
||||
is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
|
||||
|
||||
while (msdu_count--) {
|
||||
paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
|
||||
|
||||
msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
|
||||
if (!msdu) {
|
||||
__skb_queue_purge(list);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
__skb_queue_tail(list, msdu);
|
||||
|
||||
if (!is_offload) {
|
||||
rxd = (void *)msdu->data;
|
||||
|
||||
trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
|
||||
|
||||
skb_put(msdu, sizeof(*rxd));
|
||||
skb_pull(msdu, sizeof(*rxd));
|
||||
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
|
||||
|
||||
if (!(__le32_to_cpu(rxd->attention.flags) &
|
||||
RX_ATTENTION_FLAGS_MSDU_DONE)) {
|
||||
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
msdu_desc++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
|
||||
{
|
||||
struct ath10k *ar = htt->ar;
|
||||
@ -424,7 +551,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
|
||||
|
||||
htt->rx_ring.alloc_idx.vaddr = vaddr;
|
||||
htt->rx_ring.alloc_idx.paddr = paddr;
|
||||
htt->rx_ring.sw_rd_idx.msdu_payld = 0;
|
||||
htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
|
||||
*htt->rx_ring.alloc_idx.vaddr = 0;
|
||||
|
||||
/* Initialize the Rx refill retry timer */
|
||||
@ -433,14 +560,15 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
|
||||
spin_lock_init(&htt->rx_ring.lock);
|
||||
|
||||
htt->rx_ring.fill_cnt = 0;
|
||||
if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
|
||||
goto err_fill_ring;
|
||||
htt->rx_ring.sw_rd_idx.msdu_payld = 0;
|
||||
hash_init(htt->rx_ring.skb_table);
|
||||
|
||||
tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
|
||||
(unsigned long)htt);
|
||||
|
||||
skb_queue_head_init(&htt->tx_compl_q);
|
||||
skb_queue_head_init(&htt->rx_compl_q);
|
||||
skb_queue_head_init(&htt->rx_in_ord_compl_q);
|
||||
|
||||
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
|
||||
(unsigned long)htt);
|
||||
@ -449,12 +577,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
|
||||
htt->rx_ring.size, htt->rx_ring.fill_level);
|
||||
return 0;
|
||||
|
||||
err_fill_ring:
|
||||
ath10k_htt_rx_ring_free(htt);
|
||||
dma_free_coherent(htt->ar->dev,
|
||||
sizeof(*htt->rx_ring.alloc_idx.vaddr),
|
||||
htt->rx_ring.alloc_idx.vaddr,
|
||||
htt->rx_ring.alloc_idx.paddr);
|
||||
err_dma_idx:
|
||||
dma_free_coherent(htt->ar->dev,
|
||||
(htt->rx_ring.size *
|
||||
@ -691,7 +813,7 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
|
||||
*
|
||||
* FIXME: Can we get/compute 64bit TSF?
|
||||
*/
|
||||
status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp);
|
||||
status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
|
||||
status->flag |= RX_FLAG_MACTIME_END;
|
||||
}
|
||||
|
||||
@ -1578,6 +1700,194 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
}
|
||||
|
||||
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
|
||||
struct sk_buff_head *amsdu)
|
||||
{
|
||||
struct sk_buff *msdu;
|
||||
struct htt_rx_desc *rxd;
|
||||
|
||||
if (skb_queue_empty(list))
|
||||
return -ENOBUFS;
|
||||
|
||||
if (WARN_ON(!skb_queue_empty(amsdu)))
|
||||
return -EINVAL;
|
||||
|
||||
while ((msdu = __skb_dequeue(list))) {
|
||||
__skb_queue_tail(amsdu, msdu);
|
||||
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
if (rxd->msdu_end.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
|
||||
break;
|
||||
}
|
||||
|
||||
msdu = skb_peek_tail(amsdu);
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
if (!(rxd->msdu_end.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
|
||||
skb_queue_splice_init(amsdu, list);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
|
||||
if (!ieee80211_has_protected(hdr->frame_control))
|
||||
return;
|
||||
|
||||
/* Offloaded frames are already decrypted but firmware insists they are
|
||||
* protected in the 802.11 header. Strip the flag. Otherwise mac80211
|
||||
* will drop the frame.
|
||||
*/
|
||||
|
||||
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
|
||||
status->flag |= RX_FLAG_DECRYPTED |
|
||||
RX_FLAG_IV_STRIPPED |
|
||||
RX_FLAG_MMIC_STRIPPED;
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
|
||||
struct sk_buff_head *list)
|
||||
{
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
struct ieee80211_rx_status *status = &htt->rx_status;
|
||||
struct htt_rx_offload_msdu *rx;
|
||||
struct sk_buff *msdu;
|
||||
size_t offset;
|
||||
|
||||
while ((msdu = __skb_dequeue(list))) {
|
||||
/* Offloaded frames don't have Rx descriptor. Instead they have
|
||||
* a short meta information header.
|
||||
*/
|
||||
|
||||
rx = (void *)msdu->data;
|
||||
|
||||
skb_put(msdu, sizeof(*rx));
|
||||
skb_pull(msdu, sizeof(*rx));
|
||||
|
||||
if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
|
||||
ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
|
||||
dev_kfree_skb_any(msdu);
|
||||
continue;
|
||||
}
|
||||
|
||||
skb_put(msdu, __le16_to_cpu(rx->msdu_len));
|
||||
|
||||
/* Offloaded rx header length isn't multiple of 2 nor 4 so the
|
||||
* actual payload is unaligned. Align the frame. Otherwise
|
||||
* mac80211 complains. This shouldn't reduce performance much
|
||||
* because these offloaded frames are rare.
|
||||
*/
|
||||
offset = 4 - ((unsigned long)msdu->data & 3);
|
||||
skb_put(msdu, offset);
|
||||
memmove(msdu->data + offset, msdu->data, msdu->len);
|
||||
skb_pull(msdu, offset);
|
||||
|
||||
/* FIXME: The frame is NWifi. Re-construct QoS Control
|
||||
* if possible later.
|
||||
*/
|
||||
|
||||
memset(status, 0, sizeof(*status));
|
||||
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
|
||||
|
||||
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
|
||||
ath10k_htt_rx_h_channel(ar, status);
|
||||
ath10k_process_rx(ar, status, msdu);
|
||||
}
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
struct htt_resp *resp = (void *)skb->data;
|
||||
struct ieee80211_rx_status *status = &htt->rx_status;
|
||||
struct sk_buff_head list;
|
||||
struct sk_buff_head amsdu;
|
||||
u16 peer_id;
|
||||
u16 msdu_count;
|
||||
u8 vdev_id;
|
||||
u8 tid;
|
||||
bool offload;
|
||||
bool frag;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&htt->rx_ring.lock);
|
||||
|
||||
if (htt->rx_confused)
|
||||
return;
|
||||
|
||||
skb_pull(skb, sizeof(resp->hdr));
|
||||
skb_pull(skb, sizeof(resp->rx_in_ord_ind));
|
||||
|
||||
peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
|
||||
msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
|
||||
vdev_id = resp->rx_in_ord_ind.vdev_id;
|
||||
tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
|
||||
offload = !!(resp->rx_in_ord_ind.info &
|
||||
HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
|
||||
frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTT,
|
||||
"htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
|
||||
vdev_id, peer_id, tid, offload, frag, msdu_count);
|
||||
|
||||
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
|
||||
ath10k_warn(ar, "dropping invalid in order rx indication\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
|
||||
* extracted and processed.
|
||||
*/
|
||||
__skb_queue_head_init(&list);
|
||||
ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
|
||||
if (ret < 0) {
|
||||
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
|
||||
htt->rx_confused = true;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Offloaded frames are very different and need to be handled
|
||||
* separately.
|
||||
*/
|
||||
if (offload)
|
||||
ath10k_htt_rx_h_rx_offload(ar, &list);
|
||||
|
||||
while (!skb_queue_empty(&list)) {
|
||||
__skb_queue_head_init(&amsdu);
|
||||
ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
|
||||
switch (ret) {
|
||||
case 0:
|
||||
/* Note: The in-order indication may report interleaved
|
||||
* frames from different PPDUs meaning reported rx rate
|
||||
* to mac80211 isn't accurate/reliable. It's still
|
||||
* better to report something than nothing though. This
|
||||
* should still give an idea about rx rate to the user.
|
||||
*/
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
|
||||
ath10k_htt_rx_h_filter(ar, &amsdu, status);
|
||||
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
|
||||
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
|
||||
break;
|
||||
case -EAGAIN:
|
||||
/* fall through */
|
||||
default:
|
||||
/* Should not happen. */
|
||||
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
|
||||
htt->rx_confused = true;
|
||||
__skb_queue_purge(&list);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
tasklet_schedule(&htt->rx_replenish_task);
|
||||
}
|
||||
|
||||
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
@ -1700,6 +2010,20 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
*/
|
||||
break;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
|
||||
spin_lock_bh(&htt->rx_ring.lock);
|
||||
__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
|
||||
spin_unlock_bh(&htt->rx_ring.lock);
|
||||
tasklet_schedule(&htt->txrx_compl_task);
|
||||
return;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
|
||||
/* FIXME: This WMI-TLV event is overlapping with 10.2
|
||||
* CHAN_CHANGE - both being 0xF. Neither is being used in
|
||||
* practice so no immediate action is necessary. Nevertheless
|
||||
* HTT may need an abstraction layer like WMI has one day.
|
||||
*/
|
||||
break;
|
||||
default:
|
||||
ath10k_warn(ar, "htt event (%d) not handled\n",
|
||||
resp->hdr.msg_type);
|
||||
@ -1715,6 +2039,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
|
||||
{
|
||||
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
|
||||
struct ath10k *ar = htt->ar;
|
||||
struct htt_resp *resp;
|
||||
struct sk_buff *skb;
|
||||
|
||||
@ -1731,5 +2056,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
|
||||
ath10k_htt_rx_handler(htt, &resp->rx_ind);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
|
||||
while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
|
||||
ath10k_htt_rx_in_ord_ind(ar, skb);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
spin_unlock_bh(&htt->rx_ring.lock);
|
||||
}
|
||||
|
@@ -56,21 +56,18 @@ exit:
 	return ret;
 }
 
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 {
 	struct ath10k *ar = htt->ar;
-	int msdu_id;
+	int ret;
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
-				      htt->max_num_pending_tx);
-	if (msdu_id == htt->max_num_pending_tx)
-		return -ENOBUFS;
+	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
 
-	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
-	__set_bit(msdu_id, htt->used_msdu_ids);
-	return msdu_id;
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
+
+	return ret;
 }
 
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
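The per-MSDU id bitmap and pending_tx pointer array are replaced by a single IDR: idr_alloc() picks the next free id in [0, 0x10000) and stores the skb under it, so lookup and release become idr_find()/idr_remove(). A small standalone sketch of that allocate/find/remove cycle (generic IDR usage, not driver code):

/* Generic IDR round trip mirroring the pattern adopted above. */
#include <linux/bug.h>
#include <linux/idr.h>

static int msdu_idr_sketch(void *skb_like)
{
	struct idr pending;
	int id;

	idr_init(&pending);

	id = idr_alloc(&pending, skb_like, 0, 0x10000, GFP_ATOMIC);
	if (id < 0) {
		idr_destroy(&pending);
		return id;		/* e.g. -ENOSPC when exhausted */
	}

	WARN_ON(idr_find(&pending, id) != skb_like);

	idr_remove(&pending, id);
	idr_destroy(&pending);
	return 0;
}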
@ -79,74 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
|
||||
|
||||
lockdep_assert_held(&htt->tx_lock);
|
||||
|
||||
if (!test_bit(msdu_id, htt->used_msdu_ids))
|
||||
ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
|
||||
msdu_id);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
|
||||
__clear_bit(msdu_id, htt->used_msdu_ids);
|
||||
|
||||
idr_remove(&htt->pending_tx, msdu_id);
|
||||
}
|
||||
|
||||
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
|
||||
{
|
||||
struct ath10k *ar = htt->ar;
|
||||
|
||||
spin_lock_init(&htt->tx_lock);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
|
||||
htt->max_num_pending_tx);
|
||||
|
||||
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
|
||||
htt->max_num_pending_tx, GFP_KERNEL);
|
||||
if (!htt->pending_tx)
|
||||
return -ENOMEM;
|
||||
|
||||
htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
|
||||
BITS_TO_LONGS(htt->max_num_pending_tx),
|
||||
GFP_KERNEL);
|
||||
if (!htt->used_msdu_ids) {
|
||||
kfree(htt->pending_tx);
|
||||
return -ENOMEM;
|
||||
}
|
||||
spin_lock_init(&htt->tx_lock);
|
||||
idr_init(&htt->pending_tx);
|
||||
|
||||
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
|
||||
sizeof(struct ath10k_htt_txbuf), 4, 0);
|
||||
if (!htt->tx_pool) {
|
||||
kfree(htt->used_msdu_ids);
|
||||
kfree(htt->pending_tx);
|
||||
idr_destroy(&htt->pending_tx);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
|
||||
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
|
||||
{
|
||||
struct ath10k *ar = htt->ar;
|
||||
struct ath10k *ar = ctx;
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
struct htt_tx_done tx_done = {0};
|
||||
int msdu_id;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
|
||||
|
||||
tx_done.discard = 1;
|
||||
tx_done.msdu_id = msdu_id;
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
|
||||
if (!test_bit(msdu_id, htt->used_msdu_ids))
|
||||
continue;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
|
||||
msdu_id);
|
||||
|
||||
tx_done.discard = 1;
|
||||
tx_done.msdu_id = msdu_id;
|
||||
|
||||
ath10k_txrx_tx_unref(htt, &tx_done);
|
||||
}
|
||||
ath10k_txrx_tx_unref(htt, &tx_done);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ath10k_htt_tx_free(struct ath10k_htt *htt)
|
||||
{
|
||||
ath10k_htt_tx_free_pending(htt);
|
||||
kfree(htt->pending_tx);
|
||||
kfree(htt->used_msdu_ids);
|
||||
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
|
||||
idr_destroy(&htt->pending_tx);
|
||||
dma_pool_destroy(htt->tx_pool);
|
||||
}
|
||||
|
||||
@ -378,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
len += sizeof(cmd->mgmt_tx);
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
res = ath10k_htt_tx_alloc_msdu_id(htt);
|
||||
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
|
||||
if (res < 0) {
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
goto err_tx_dec;
|
||||
}
|
||||
msdu_id = res;
|
||||
htt->pending_tx[msdu_id] = msdu;
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
|
||||
txdesc = ath10k_htc_alloc_skb(ar, len);
|
||||
@ -423,7 +398,6 @@ err_free_txdesc:
|
||||
dev_kfree_skb_any(txdesc);
|
||||
err_free_msdu_id:
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
htt->pending_tx[msdu_id] = NULL;
|
||||
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
err_tx_dec:
|
||||
@ -455,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
goto err;
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
res = ath10k_htt_tx_alloc_msdu_id(htt);
|
||||
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
|
||||
if (res < 0) {
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
goto err_tx_dec;
|
||||
}
|
||||
msdu_id = res;
|
||||
htt->pending_tx[msdu_id] = msdu;
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
|
||||
prefetch_len = min(htt->prefetch_len, msdu->len);
|
||||
@ -475,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
|
||||
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
|
||||
&paddr);
|
||||
if (!skb_cb->htt.txbuf)
|
||||
if (!skb_cb->htt.txbuf) {
|
||||
res = -ENOMEM;
|
||||
goto err_free_msdu_id;
|
||||
}
|
||||
skb_cb->htt.txbuf_paddr = paddr;
|
||||
|
||||
if ((ieee80211_is_action(hdr->frame_control) ||
|
||||
ieee80211_is_deauth(hdr->frame_control) ||
|
||||
ieee80211_is_disassoc(hdr->frame_control)) &&
|
||||
ieee80211_has_protected(hdr->frame_control))
|
||||
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
|
||||
|
||||
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
|
||||
DMA_TO_DEVICE);
|
||||
res = dma_mapping_error(dev, skb_cb->paddr);
|
||||
@ -534,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
|
||||
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
|
||||
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
|
||||
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
|
||||
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
|
||||
if (msdu->ip_summed == CHECKSUM_PARTIAL) {
|
||||
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
|
||||
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
|
||||
}
|
||||
|
||||
/* Prevent firmware from sending up tx inspection requests. There's
|
||||
* nothing ath10k can do with frames requested for inspection so force
|
||||
@ -593,7 +576,6 @@ err_free_txbuf:
|
||||
skb_cb->htt.txbuf_paddr);
|
||||
err_free_msdu_id:
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
htt->pending_tx[msdu_id] = NULL;
|
||||
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
err_tx_dec:
|
||||
|
drivers/net/wireless/ath/ath10k/hw.c (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include "hw.h"
|
||||
|
||||
const struct ath10k_hw_regs qca988x_regs = {
|
||||
.rtc_state_cold_reset_mask = 0x00000400,
|
||||
.rtc_soc_base_address = 0x00004000,
|
||||
.rtc_wmac_base_address = 0x00005000,
|
||||
.soc_core_base_address = 0x00009000,
|
||||
.ce_wrapper_base_address = 0x00057000,
|
||||
.ce0_base_address = 0x00057400,
|
||||
.ce1_base_address = 0x00057800,
|
||||
.ce2_base_address = 0x00057c00,
|
||||
.ce3_base_address = 0x00058000,
|
||||
.ce4_base_address = 0x00058400,
|
||||
.ce5_base_address = 0x00058800,
|
||||
.ce6_base_address = 0x00058c00,
|
||||
.ce7_base_address = 0x00059000,
|
||||
.soc_reset_control_si0_rst_mask = 0x00000001,
|
||||
.soc_reset_control_ce_rst_mask = 0x00040000,
|
||||
.soc_chip_id_address = 0x00ec,
|
||||
.scratch_3_address = 0x0030,
|
||||
};
|
||||
|
||||
const struct ath10k_hw_regs qca6174_regs = {
|
||||
.rtc_state_cold_reset_mask = 0x00002000,
|
||||
.rtc_soc_base_address = 0x00000800,
|
||||
.rtc_wmac_base_address = 0x00001000,
|
||||
.soc_core_base_address = 0x0003a000,
|
||||
.ce_wrapper_base_address = 0x00034000,
|
||||
.ce0_base_address = 0x00034400,
|
||||
.ce1_base_address = 0x00034800,
|
||||
.ce2_base_address = 0x00034c00,
|
||||
.ce3_base_address = 0x00035000,
|
||||
.ce4_base_address = 0x00035400,
|
||||
.ce5_base_address = 0x00035800,
|
||||
.ce6_base_address = 0x00035c00,
|
||||
.ce7_base_address = 0x00036000,
|
||||
.soc_reset_control_si0_rst_mask = 0x00000000,
|
||||
.soc_reset_control_ce_rst_mask = 0x00000001,
|
||||
.soc_chip_id_address = 0x000f0,
|
||||
.scratch_3_address = 0x0028,
|
||||
};
|
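The two tables above only become useful once something points ar->regs at the entry matching the detected chip. That selection is not part of this hunk; the sketch below is a minimal illustration of the idea, where only ath10k_hw_regs, the two tables and enum ath10k_hw_rev come from this patch and the function name is hypothetical.

static int ath10k_hw_select_regs(struct ath10k *ar)    /* illustrative name */
{
        switch (ar->hw_rev) {
        case ATH10K_HW_QCA988X:
                ar->regs = &qca988x_regs;
                break;
        case ATH10K_HW_QCA6174:
                ar->regs = &qca6174_regs;
                break;
        default:
                ath10k_err(ar, "unsupported hw revision %d\n", ar->hw_rev);
                return -ENOTSUPP;
        }

        return 0;
}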
@ -34,6 +34,44 @@
|
||||
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
|
||||
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
|
||||
|
||||
/* QCA6174 target BMI version signatures */
|
||||
#define QCA6174_HW_1_0_VERSION 0x05000000
|
||||
#define QCA6174_HW_1_1_VERSION 0x05000001
|
||||
#define QCA6174_HW_1_3_VERSION 0x05000003
|
||||
#define QCA6174_HW_2_1_VERSION 0x05010000
|
||||
#define QCA6174_HW_3_0_VERSION 0x05020000
|
||||
#define QCA6174_HW_3_2_VERSION 0x05030000
|
||||
|
||||
enum qca6174_pci_rev {
|
||||
QCA6174_PCI_REV_1_1 = 0x11,
|
||||
QCA6174_PCI_REV_1_3 = 0x13,
|
||||
QCA6174_PCI_REV_2_0 = 0x20,
|
||||
QCA6174_PCI_REV_3_0 = 0x30,
|
||||
};
|
||||
|
||||
enum qca6174_chip_id_rev {
|
||||
QCA6174_HW_1_0_CHIP_ID_REV = 0,
|
||||
QCA6174_HW_1_1_CHIP_ID_REV = 1,
|
||||
QCA6174_HW_1_3_CHIP_ID_REV = 2,
|
||||
QCA6174_HW_2_1_CHIP_ID_REV = 4,
|
||||
QCA6174_HW_2_2_CHIP_ID_REV = 5,
|
||||
QCA6174_HW_3_0_CHIP_ID_REV = 8,
|
||||
QCA6174_HW_3_1_CHIP_ID_REV = 9,
|
||||
QCA6174_HW_3_2_CHIP_ID_REV = 10,
|
||||
};
|
||||
|
||||
#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
|
||||
#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
|
||||
#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
|
||||
#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
|
||||
#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
|
||||
|
||||
#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
|
||||
#define QCA6174_HW_3_0_FW_FILE "firmware.bin"
|
||||
#define QCA6174_HW_3_0_OTP_FILE "otp.bin"
|
||||
#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
|
||||
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
|
||||
|
||||
#define ATH10K_FW_API2_FILE "firmware-2.bin"
|
||||
#define ATH10K_FW_API3_FILE "firmware-3.bin"
|
||||
|
||||
@ -81,6 +119,37 @@ enum ath10k_fw_wmi_op_version {
|
||||
ATH10K_FW_WMI_OP_VERSION_MAX,
|
||||
};
|
||||
|
||||
enum ath10k_hw_rev {
|
||||
ATH10K_HW_QCA988X,
|
||||
ATH10K_HW_QCA6174,
|
||||
};
|
||||
|
||||
struct ath10k_hw_regs {
|
||||
u32 rtc_state_cold_reset_mask;
|
||||
u32 rtc_soc_base_address;
|
||||
u32 rtc_wmac_base_address;
|
||||
u32 soc_core_base_address;
|
||||
u32 ce_wrapper_base_address;
|
||||
u32 ce0_base_address;
|
||||
u32 ce1_base_address;
|
||||
u32 ce2_base_address;
|
||||
u32 ce3_base_address;
|
||||
u32 ce4_base_address;
|
||||
u32 ce5_base_address;
|
||||
u32 ce6_base_address;
|
||||
u32 ce7_base_address;
|
||||
u32 soc_reset_control_si0_rst_mask;
|
||||
u32 soc_reset_control_ce_rst_mask;
|
||||
u32 soc_chip_id_address;
|
||||
u32 scratch_3_address;
|
||||
};
|
||||
|
||||
extern const struct ath10k_hw_regs qca988x_regs;
|
||||
extern const struct ath10k_hw_regs qca6174_regs;
|
||||
|
||||
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
|
||||
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
|
||||
|
||||
/* Known pecularities:
|
||||
* - current FW doesn't support raw rx mode (last tested v599)
|
||||
* - current FW dumps upon raw tx mode (last tested v599)
|
||||
@ -183,6 +252,9 @@ struct ath10k_pktlog_hdr {
|
||||
#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
|
||||
#define TARGET_10X_MAX_FRAG_ENTRIES 0
|
||||
|
||||
/* 10.2 parameters */
|
||||
#define TARGET_10_2_DMA_BURST_SIZE 1
|
||||
|
||||
/* Target specific defines for WMI-TLV firmware */
|
||||
#define TARGET_TLV_NUM_VDEVS 3
|
||||
#define TARGET_TLV_NUM_STATIONS 32
|
||||
@ -222,7 +294,7 @@ struct ath10k_pktlog_hdr {
|
||||
/* as of IP3.7.1 */
|
||||
#define RTC_STATE_V_ON 3
|
||||
|
||||
#define RTC_STATE_COLD_RESET_MASK 0x00000400
|
||||
#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
|
||||
#define RTC_STATE_V_LSB 0
|
||||
#define RTC_STATE_V_MASK 0x00000007
|
||||
#define RTC_STATE_ADDRESS 0x0000
|
||||
@ -231,12 +303,12 @@ struct ath10k_pktlog_hdr {
|
||||
#define PCIE_SOC_WAKE_RESET 0x00000000
|
||||
#define SOC_GLOBAL_RESET_ADDRESS 0x0008
|
||||
|
||||
#define RTC_SOC_BASE_ADDRESS 0x00004000
|
||||
#define RTC_WMAC_BASE_ADDRESS 0x00005000
|
||||
#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
|
||||
#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
|
||||
#define MAC_COEX_BASE_ADDRESS 0x00006000
|
||||
#define BT_COEX_BASE_ADDRESS 0x00007000
|
||||
#define SOC_PCIE_BASE_ADDRESS 0x00008000
|
||||
#define SOC_CORE_BASE_ADDRESS 0x00009000
|
||||
#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
|
||||
#define WLAN_UART_BASE_ADDRESS 0x0000c000
|
||||
#define WLAN_SI_BASE_ADDRESS 0x00010000
|
||||
#define WLAN_GPIO_BASE_ADDRESS 0x00014000
|
||||
@ -245,23 +317,23 @@ struct ath10k_pktlog_hdr {
|
||||
#define EFUSE_BASE_ADDRESS 0x00030000
|
||||
#define FPGA_REG_BASE_ADDRESS 0x00039000
|
||||
#define WLAN_UART2_BASE_ADDRESS 0x00054c00
|
||||
#define CE_WRAPPER_BASE_ADDRESS 0x00057000
|
||||
#define CE0_BASE_ADDRESS 0x00057400
|
||||
#define CE1_BASE_ADDRESS 0x00057800
|
||||
#define CE2_BASE_ADDRESS 0x00057c00
|
||||
#define CE3_BASE_ADDRESS 0x00058000
|
||||
#define CE4_BASE_ADDRESS 0x00058400
|
||||
#define CE5_BASE_ADDRESS 0x00058800
|
||||
#define CE6_BASE_ADDRESS 0x00058c00
|
||||
#define CE7_BASE_ADDRESS 0x00059000
|
||||
#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
|
||||
#define CE0_BASE_ADDRESS ar->regs->ce0_base_address
|
||||
#define CE1_BASE_ADDRESS ar->regs->ce1_base_address
|
||||
#define CE2_BASE_ADDRESS ar->regs->ce2_base_address
|
||||
#define CE3_BASE_ADDRESS ar->regs->ce3_base_address
|
||||
#define CE4_BASE_ADDRESS ar->regs->ce4_base_address
|
||||
#define CE5_BASE_ADDRESS ar->regs->ce5_base_address
|
||||
#define CE6_BASE_ADDRESS ar->regs->ce6_base_address
|
||||
#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
|
||||
#define DBI_BASE_ADDRESS 0x00060000
|
||||
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
|
||||
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
|
||||
|
||||
#define SOC_RESET_CONTROL_ADDRESS 0x00000000
|
||||
#define SOC_RESET_CONTROL_OFFSET 0x00000000
|
||||
#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
|
||||
#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
|
||||
#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
|
||||
#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
|
||||
#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
|
||||
#define SOC_CPU_CLOCK_OFFSET 0x00000020
|
||||
#define SOC_CPU_CLOCK_STANDARD_LSB 0
|
||||
@ -275,7 +347,7 @@ struct ath10k_pktlog_hdr {
|
||||
#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
|
||||
#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
|
||||
|
||||
#define SOC_CHIP_ID_ADDRESS 0x000000ec
|
||||
#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
|
||||
#define SOC_CHIP_ID_REV_LSB 8
|
||||
#define SOC_CHIP_ID_REV_MASK 0x00000f00
|
||||
|
||||
@ -331,7 +403,7 @@ struct ath10k_pktlog_hdr {
|
||||
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
|
||||
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
|
||||
#define PCIE_INTR_CLR_ADDRESS 0x0014
|
||||
#define SCRATCH_3_ADDRESS 0x0030
|
||||
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
|
||||
#define CPU_INTR_ADDRESS 0x0010
|
||||
|
||||
/* Firmware indications to the Host via SCRATCH_3 register. */
|
||||
|
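Because the register macros above keep their old names and merely resolve through ar->regs at run time, existing call sites compile unchanged. For instance, the chip-id read that appears later in the pci.c part of this diff:

/* SOC_CHIP_ID_ADDRESS now expands to ar->regs->soc_chip_id_address,
 * i.e. 0x00ec on QCA988X and 0x000f0 on QCA6174.
 */
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);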
@ -37,7 +37,7 @@
|
||||
static int ath10k_send_key(struct ath10k_vif *arvif,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum set_key_cmd cmd,
|
||||
const u8 *macaddr)
|
||||
const u8 *macaddr, bool def_idx)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct wmi_vdev_install_key_arg arg = {
|
||||
@ -58,10 +58,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
|
||||
switch (key->cipher) {
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
arg.key_cipher = WMI_CIPHER_AES_CCM;
|
||||
if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
|
||||
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
|
||||
else
|
||||
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
|
||||
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
arg.key_cipher = WMI_CIPHER_TKIP;
|
||||
@ -75,6 +72,9 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
|
||||
* Otherwise pairwise key must be set */
|
||||
if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
|
||||
arg.key_flags = WMI_KEY_PAIRWISE;
|
||||
|
||||
if (def_idx)
|
||||
arg.key_flags |= WMI_KEY_TX_USAGE;
|
||||
break;
|
||||
default:
|
||||
ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
|
||||
@ -92,7 +92,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
|
||||
static int ath10k_install_key(struct ath10k_vif *arvif,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum set_key_cmd cmd,
|
||||
const u8 *macaddr)
|
||||
const u8 *macaddr, bool def_idx)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
int ret;
|
||||
@ -101,7 +101,7 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
|
||||
|
||||
reinit_completion(&ar->install_key_done);
|
||||
|
||||
ret = ath10k_send_key(arvif, key, cmd, macaddr);
|
||||
ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -119,6 +119,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
|
||||
struct ath10k_peer *peer;
|
||||
int ret;
|
||||
int i;
|
||||
bool def_idx;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
@ -132,9 +133,14 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
|
||||
for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
|
||||
if (arvif->wep_keys[i] == NULL)
|
||||
continue;
|
||||
/* set TX_USAGE flag for default key id */
|
||||
if (arvif->def_wep_key_idx == i)
|
||||
def_idx = true;
|
||||
else
|
||||
def_idx = false;
|
||||
|
||||
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
|
||||
addr);
|
||||
addr, def_idx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -168,8 +174,9 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
|
||||
if (peer->keys[i] == NULL)
|
||||
continue;
|
||||
|
||||
/* key flags are not required to delete the key */
|
||||
ret = ath10k_install_key(arvif, peer->keys[i],
|
||||
DISABLE_KEY, addr);
|
||||
DISABLE_KEY, addr, false);
|
||||
if (ret && first_errno == 0)
|
||||
first_errno = ret;
|
||||
|
||||
@ -243,8 +250,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
|
||||
|
||||
if (i == ARRAY_SIZE(peer->keys))
|
||||
break;
|
||||
|
||||
ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
|
||||
/* key flags are not required to delete the key */
|
||||
ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
|
||||
if (ret && first_errno == 0)
|
||||
first_errno = ret;
|
||||
|
||||
@ -524,10 +531,14 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
|
||||
dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
|
||||
arvif->beacon->len, DMA_TO_DEVICE);
|
||||
|
||||
if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
|
||||
arvif->beacon_state != ATH10K_BEACON_SENT))
|
||||
return;
|
||||
|
||||
dev_kfree_skb_any(arvif->beacon);
|
||||
|
||||
arvif->beacon = NULL;
|
||||
arvif->beacon_sent = false;
|
||||
arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
|
||||
}
|
||||
|
||||
static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
|
||||
@ -967,6 +978,143 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
|
||||
struct sk_buff *bcn)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct ieee80211_mgmt *mgmt;
|
||||
const u8 *p2p_ie;
|
||||
int ret;
|
||||
|
||||
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
|
||||
return 0;
|
||||
|
||||
if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
|
||||
return 0;
|
||||
|
||||
mgmt = (void *)bcn->data;
|
||||
p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
|
||||
mgmt->u.beacon.variable,
|
||||
bcn->len - (mgmt->u.beacon.variable -
|
||||
bcn->data));
|
||||
if (!p2p_ie)
|
||||
return -ENOENT;
|
||||
|
||||
ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
|
||||
u8 oui_type, size_t ie_offset)
|
||||
{
|
||||
size_t len;
|
||||
const u8 *next;
|
||||
const u8 *end;
|
||||
u8 *ie;
|
||||
|
||||
if (WARN_ON(skb->len < ie_offset))
|
||||
return -EINVAL;
|
||||
|
||||
ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
|
||||
skb->data + ie_offset,
|
||||
skb->len - ie_offset);
|
||||
if (!ie)
|
||||
return -ENOENT;
|
||||
|
||||
len = ie[1] + 2;
|
||||
end = skb->data + skb->len;
|
||||
next = ie + len;
|
||||
|
||||
if (WARN_ON(next > end))
|
||||
return -EINVAL;
|
||||
|
||||
memmove(ie, next, end - next);
|
||||
skb_trim(skb, skb->len - len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
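A small worked example of the bookkeeping in ath10k_mac_remove_vendor_ie() above; the byte values are illustrative only.

/* Beacon tail: ... | DD 08 50 6F 9A 09 xx xx xx xx | <later IEs> | end
 *
 *   ie    points at the 0xDD (vendor specific) byte
 *   ie[1] = 8, so len = ie[1] + 2 = 10 bytes to drop
 *   next  = ie + 10, the first byte after the P2P IE
 *
 * memmove(ie, next, end - next) slides the later IEs 10 bytes to the left,
 * and skb_trim(skb, skb->len - 10) releases the now-unused tail.
 */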
||||
static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct ieee80211_hw *hw = ar->hw;
|
||||
struct ieee80211_vif *vif = arvif->vif;
|
||||
struct ieee80211_mutable_offsets offs = {};
|
||||
struct sk_buff *bcn;
|
||||
int ret;
|
||||
|
||||
if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
|
||||
return 0;
|
||||
|
||||
bcn = ieee80211_beacon_get_template(hw, vif, &offs);
|
||||
if (!bcn) {
|
||||
ath10k_warn(ar, "failed to get beacon template from mac80211\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
|
||||
kfree_skb(bcn);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* P2P IE is inserted by firmware automatically (as configured above)
|
||||
* so remove it from the base beacon template to avoid duplicate P2P
|
||||
* IEs in beacon frames.
|
||||
*/
|
||||
ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
|
||||
offsetof(struct ieee80211_mgmt,
|
||||
u.beacon.variable));
|
||||
|
||||
ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
|
||||
0, NULL, 0);
|
||||
kfree_skb(bcn);
|
||||
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to submit beacon template command: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct ieee80211_hw *hw = ar->hw;
|
||||
struct ieee80211_vif *vif = arvif->vif;
|
||||
struct sk_buff *prb;
|
||||
int ret;
|
||||
|
||||
if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
|
||||
return 0;
|
||||
|
||||
prb = ieee80211_proberesp_get(hw, vif);
|
||||
if (!prb) {
|
||||
ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
|
||||
kfree_skb(prb);
|
||||
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
|
||||
struct ieee80211_bss_conf *info)
|
||||
{
|
||||
@ -1152,6 +1300,38 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct wmi_sta_keepalive_arg arg = {};
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&arvif->ar->conf_mutex);
|
||||
|
||||
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
|
||||
return 0;
|
||||
|
||||
if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
|
||||
return 0;
|
||||
|
||||
/* Some firmware revisions have a bug and ignore the `enabled` field.
|
||||
* Instead use the interval to disable the keepalive.
|
||||
*/
|
||||
arg.vdev_id = arvif->vdev_id;
|
||||
arg.enabled = 1;
|
||||
arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
|
||||
arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
|
||||
|
||||
ret = ath10k_wmi_sta_keepalive(ar, &arg);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**********************/
|
||||
/* Station management */
|
||||
/**********************/
|
||||
@ -1420,6 +1600,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
|
||||
return;
|
||||
|
||||
arg->peer_flags |= WMI_PEER_VHT;
|
||||
|
||||
if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
|
||||
arg->peer_flags |= WMI_PEER_VHT_2G;
|
||||
|
||||
arg->peer_vht_caps = vht_cap->cap;
|
||||
|
||||
ampdu_factor = (vht_cap->cap &
|
||||
@ -1498,7 +1682,12 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
|
||||
|
||||
switch (ar->hw->conf.chandef.chan->band) {
|
||||
case IEEE80211_BAND_2GHZ:
|
||||
if (sta->ht_cap.ht_supported) {
|
||||
if (sta->vht_cap.vht_supported) {
|
||||
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
|
||||
phymode = MODE_11AC_VHT40;
|
||||
else
|
||||
phymode = MODE_11AC_VHT20;
|
||||
} else if (sta->ht_cap.ht_supported) {
|
||||
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
|
||||
phymode = MODE_11NG_HT40;
|
||||
else
|
||||
@ -1680,7 +1869,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
|
||||
ath10k_warn(ar, "faield to down vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
|
||||
arvif->def_wep_key_idx = 0;
|
||||
arvif->def_wep_key_idx = -1;
|
||||
|
||||
arvif->is_up = false;
|
||||
}
|
||||
|
||||
@ -1739,11 +1929,14 @@ static int ath10k_station_assoc(struct ath10k *ar,
|
||||
}
|
||||
}
|
||||
|
||||
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
/* Plumb cached keys only for static WEP */
|
||||
if (arvif->def_wep_key_idx != -1) {
|
||||
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2008,75 +2201,13 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
|
||||
* used only for CQM purposes (e.g. hostapd station keepalive ping) so
|
||||
* it is safe to downgrade to NullFunc.
|
||||
*/
|
||||
hdr = (void *)skb->data;
|
||||
if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
|
||||
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
|
||||
cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
|
||||
}
|
||||
}
|
||||
|
||||
static void ath10k_tx_wep_key_work(struct work_struct *work)
|
||||
{
|
||||
struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
|
||||
wep_key_work);
|
||||
struct ath10k *ar = arvif->ar;
|
||||
int ret, keyidx = arvif->def_wep_key_newidx;
|
||||
|
||||
mutex_lock(&arvif->ar->conf_mutex);
|
||||
|
||||
if (arvif->ar->state != ATH10K_STATE_ON)
|
||||
goto unlock;
|
||||
|
||||
if (arvif->def_wep_key_idx == keyidx)
|
||||
goto unlock;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
|
||||
arvif->vdev_id, keyidx);
|
||||
|
||||
ret = ath10k_wmi_vdev_set_param(arvif->ar,
|
||||
arvif->vdev_id,
|
||||
arvif->ar->wmi.vdev_param->def_keyid,
|
||||
keyidx);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
|
||||
arvif->vdev_id,
|
||||
ret);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
arvif->def_wep_key_idx = keyidx;
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&arvif->ar->conf_mutex);
|
||||
}
|
||||
|
||||
static void ath10k_tx_h_update_wep_key(struct ieee80211_vif *vif,
|
||||
struct ieee80211_key_conf *key,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
|
||||
if (!ieee80211_has_protected(hdr->frame_control))
|
||||
return;
|
||||
|
||||
if (!key)
|
||||
return;
|
||||
|
||||
if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
|
||||
key->cipher != WLAN_CIPHER_SUITE_WEP104)
|
||||
return;
|
||||
|
||||
if (key->keyidx == arvif->def_wep_key_idx)
|
||||
return;
|
||||
|
||||
/* FIXME: Most likely a few frames will be TXed with an old key. Simply
|
||||
* queueing frames until key index is updated is not an option because
|
||||
* sk_buff may need more processing to be done, e.g. offchannel */
|
||||
arvif->def_wep_key_newidx = key->keyidx;
|
||||
ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
|
||||
}
|
||||
|
||||
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
|
||||
struct ieee80211_vif *vif,
|
||||
struct sk_buff *skb)
|
||||
@ -2228,7 +2359,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
|
||||
3 * HZ);
|
||||
if (ret <= 0)
|
||||
if (ret == 0)
|
||||
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
|
||||
skb);
|
||||
|
||||
@ -2290,6 +2421,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
|
||||
case ATH10K_SCAN_RUNNING:
|
||||
if (ar->scan.is_roc)
|
||||
ieee80211_remain_on_channel_expired(ar->hw);
|
||||
/* fall through */
|
||||
case ATH10K_SCAN_ABORTING:
|
||||
if (!ar->scan.is_roc)
|
||||
ieee80211_scan_completed(ar->hw,
|
||||
@ -2436,7 +2568,6 @@ static void ath10k_tx(struct ieee80211_hw *hw,
|
||||
struct ath10k *ar = hw->priv;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct ieee80211_key_conf *key = info->control.hw_key;
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
|
||||
/* We should disable CCK RATE due to P2P */
|
||||
@ -2450,7 +2581,6 @@ static void ath10k_tx(struct ieee80211_hw *hw,
|
||||
/* it makes no sense to process injected frames like that */
|
||||
if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
|
||||
ath10k_tx_h_nwifi(hw, skb);
|
||||
ath10k_tx_h_update_wep_key(vif, key, skb);
|
||||
ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
|
||||
ath10k_tx_h_seq_no(vif, skb);
|
||||
}
|
||||
@ -2957,7 +3087,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
||||
arvif->ar = ar;
|
||||
arvif->vif = vif;
|
||||
|
||||
INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
|
||||
INIT_LIST_HEAD(&arvif->list);
|
||||
|
||||
if (ar->free_vdev_map == 0) {
|
||||
@ -3046,15 +3175,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
||||
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
|
||||
list_add(&arvif->list, &ar->arvifs);
|
||||
|
||||
vdev_param = ar->wmi.vdev_param->def_keyid;
|
||||
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
|
||||
arvif->def_wep_key_idx);
|
||||
/* It makes no sense to have firmware do keepalives. mac80211 already
|
||||
* takes care of this with idle connection polling.
|
||||
*/
|
||||
ret = ath10k_mac_vif_disable_keepalive(arvif);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set vdev %i default key id: %d\n",
|
||||
ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_vdev_delete;
|
||||
}
|
||||
|
||||
arvif->def_wep_key_idx = -1;
|
||||
|
||||
vdev_param = ar->wmi.vdev_param->tx_encap_type;
|
||||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
|
||||
ATH10K_HW_TXRX_NATIVE_WIFI);
|
||||
@ -3173,8 +3305,6 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
|
||||
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
||||
int ret;
|
||||
|
||||
cancel_work_sync(&arvif->wep_key_work);
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
@ -3285,9 +3415,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
||||
if (ret)
|
||||
ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
|
||||
ret = ath10k_mac_setup_bcn_tmpl(arvif);
|
||||
if (ret)
|
||||
ath10k_warn(ar, "failed to update beacon template: %d\n",
|
||||
ret);
|
||||
}
|
||||
|
||||
if (changed & BSS_CHANGED_BEACON_INFO) {
|
||||
if (changed & BSS_CHANGED_AP_PROBE_RESP) {
|
||||
ret = ath10k_mac_setup_prb_tmpl(arvif);
|
||||
if (ret)
|
||||
ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
}
|
||||
|
||||
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
|
||||
arvif->dtim_period = info->dtim_period;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC,
|
||||
@ -3534,6 +3676,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
const u8 *peer_addr;
|
||||
bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
|
||||
key->cipher == WLAN_CIPHER_SUITE_WEP104;
|
||||
bool def_idx = false;
|
||||
int ret = 0;
|
||||
|
||||
if (key->keyidx > WMI_MAX_KEY_INDEX)
|
||||
@ -3579,7 +3722,14 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
ath10k_clear_vdev_key(arvif, key);
|
||||
}
|
||||
|
||||
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
|
||||
/* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
|
||||
* static WEP, do not set this flag for the keys whose key id
|
||||
* is greater than default key id.
|
||||
*/
|
||||
if (arvif->def_wep_key_idx == -1)
|
||||
def_idx = true;
|
||||
|
||||
ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
|
||||
arvif->vdev_id, peer_addr, ret);
|
||||
@ -3604,6 +3754,39 @@ exit:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
int keyidx)
|
||||
{
|
||||
struct ath10k *ar = hw->priv;
|
||||
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&arvif->ar->conf_mutex);
|
||||
|
||||
if (arvif->ar->state != ATH10K_STATE_ON)
|
||||
goto unlock;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
|
||||
arvif->vdev_id, keyidx);
|
||||
|
||||
ret = ath10k_wmi_vdev_set_param(arvif->ar,
|
||||
arvif->vdev_id,
|
||||
arvif->ar->wmi.vdev_param->def_keyid,
|
||||
keyidx);
|
||||
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
|
||||
arvif->vdev_id,
|
||||
ret);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
arvif->def_wep_key_idx = keyidx;
|
||||
unlock:
|
||||
mutex_unlock(&arvif->ar->conf_mutex);
|
||||
}
|
||||
|
||||
static void ath10k_sta_rc_update_wk(struct work_struct *wk)
|
||||
{
|
||||
struct ath10k *ar;
|
||||
@ -3839,6 +4022,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
|
||||
u16 ac, bool enable)
|
||||
{
|
||||
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
||||
struct wmi_sta_uapsd_auto_trig_arg arg = {};
|
||||
u32 prio = 0, acc = 0;
|
||||
u32 value = 0;
|
||||
int ret = 0;
|
||||
|
||||
@ -3851,18 +4036,26 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
|
||||
case IEEE80211_AC_VO:
|
||||
value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
|
||||
WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
|
||||
prio = 7;
|
||||
acc = 3;
|
||||
break;
|
||||
case IEEE80211_AC_VI:
|
||||
value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
|
||||
WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
|
||||
prio = 5;
|
||||
acc = 2;
|
||||
break;
|
||||
case IEEE80211_AC_BE:
|
||||
value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
|
||||
WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
|
||||
prio = 2;
|
||||
acc = 1;
|
||||
break;
|
||||
case IEEE80211_AC_BK:
|
||||
value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
|
||||
WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
|
||||
prio = 0;
|
||||
acc = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -3904,6 +4097,29 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
|
||||
test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
|
||||
/* Only userspace can make an educated decision when to send
|
||||
* trigger frame. The following effectively disables u-UAPSD
|
||||
* autotrigger in firmware (which is enabled by default
|
||||
* provided the autotrigger service is available).
|
||||
*/
|
||||
|
||||
arg.wmm_ac = acc;
|
||||
arg.user_priority = prio;
|
||||
arg.service_interval = 0;
|
||||
arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
|
||||
arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
|
||||
|
||||
ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
|
||||
arvif->bssid, &arg, 1);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
exit:
|
||||
return ret;
|
||||
}
|
||||
@ -3913,6 +4129,7 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
|
||||
const struct ieee80211_tx_queue_params *params)
|
||||
{
|
||||
struct ath10k *ar = hw->priv;
|
||||
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
||||
struct wmi_wmm_params_arg *p = NULL;
|
||||
int ret;
|
||||
|
||||
@ -3920,16 +4137,16 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
|
||||
|
||||
switch (ac) {
|
||||
case IEEE80211_AC_VO:
|
||||
p = &ar->wmm_params.ac_vo;
|
||||
p = &arvif->wmm_params.ac_vo;
|
||||
break;
|
||||
case IEEE80211_AC_VI:
|
||||
p = &ar->wmm_params.ac_vi;
|
||||
p = &arvif->wmm_params.ac_vi;
|
||||
break;
|
||||
case IEEE80211_AC_BE:
|
||||
p = &ar->wmm_params.ac_be;
|
||||
p = &arvif->wmm_params.ac_be;
|
||||
break;
|
||||
case IEEE80211_AC_BK:
|
||||
p = &ar->wmm_params.ac_bk;
|
||||
p = &arvif->wmm_params.ac_bk;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -3949,11 +4166,23 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
|
||||
*/
|
||||
p->txop = params->txop * 32;
|
||||
|
||||
/* FIXME: FW accepts wmm params per hw, not per vif */
|
||||
ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
|
||||
goto exit;
|
||||
if (ar->wmi.ops->gen_vdev_wmm_conf) {
|
||||
ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
|
||||
&arvif->wmm_params);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto exit;
|
||||
}
|
||||
} else {
|
||||
/* This won't work well with multi-interface cases but it's
|
||||
* better than nothing.
|
||||
*/
|
||||
ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
|
||||
@ -4723,6 +4952,7 @@ static const struct ieee80211_ops ath10k_ops = {
|
||||
.hw_scan = ath10k_hw_scan,
|
||||
.cancel_hw_scan = ath10k_cancel_hw_scan,
|
||||
.set_key = ath10k_set_key,
|
||||
.set_default_unicast_key = ath10k_set_default_unicast_key,
|
||||
.sta_state = ath10k_sta_state,
|
||||
.conf_tx = ath10k_conf_tx,
|
||||
.remain_on_channel = ath10k_remain_on_channel,
|
||||
@ -4748,6 +4978,9 @@ static const struct ieee80211_ops ath10k_ops = {
|
||||
.suspend = ath10k_suspend,
|
||||
.resume = ath10k_resume,
|
||||
#endif
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
.sta_add_debugfs = ath10k_sta_add_debugfs,
|
||||
#endif
|
||||
};
|
||||
|
||||
#define RATETAB_ENT(_rate, _rateid, _flags) { \
|
||||
@ -5064,7 +5297,8 @@ int ath10k_mac_register(struct ath10k *ar)
|
||||
band->bitrates = ath10k_g_rates;
|
||||
band->ht_cap = ht_cap;
|
||||
|
||||
/* vht is not supported in 2.4 GHz */
|
||||
/* Enable the VHT support at 2.4 GHz */
|
||||
band->vht_cap = vht_cap;
|
||||
|
||||
ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
|
||||
}
|
||||
@ -5128,6 +5362,19 @@ int ath10k_mac_register(struct ath10k *ar)
|
||||
|
||||
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
|
||||
|
||||
if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
|
||||
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
|
||||
|
||||
/* Firmware delivers WPS/P2P Probe Requests frames to driver so
|
||||
* that userspace (e.g. wpa_supplicant/hostapd) can generate
|
||||
* correct Probe Responses. This is more of a hack advert..
|
||||
*/
|
||||
ar->hw->wiphy->probe_resp_offload |=
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
|
||||
}
|
||||
|
||||
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
|
||||
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
|
||||
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
|
||||
|
@ -58,9 +58,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
|
||||
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
|
||||
|
||||
#define QCA988X_2_0_DEVICE_ID (0x003c)
|
||||
#define QCA6174_2_1_DEVICE_ID (0x003e)
|
||||
|
||||
static const struct pci_device_id ath10k_pci_id_table[] = {
|
||||
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
|
||||
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -70,6 +72,11 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
|
||||
* because of that.
|
||||
*/
|
||||
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
|
||||
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
|
||||
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
|
||||
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
|
||||
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
|
||||
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
|
||||
};
|
||||
|
||||
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
|
||||
@ -403,7 +410,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
ATH10K_SKB_CB(skb)->paddr = paddr;
|
||||
ATH10K_SKB_RXCB(skb)->paddr = paddr;
|
||||
|
||||
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
|
||||
if (ret) {
|
||||
@ -872,7 +879,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
|
||||
&flags) == 0) {
|
||||
skb = transfer_context;
|
||||
max_nbytes = skb->len + skb_tailroom(skb);
|
||||
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
|
||||
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
|
||||
max_nbytes, DMA_FROM_DEVICE);
|
||||
|
||||
if (unlikely(max_nbytes < nbytes)) {
|
||||
@ -1238,7 +1245,7 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
|
||||
|
||||
ce_ring->per_transfer_context[i] = NULL;
|
||||
|
||||
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
|
||||
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
|
||||
skb->len + skb_tailroom(skb),
|
||||
DMA_FROM_DEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
@ -1506,6 +1513,35 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_pci_get_num_banks(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
|
||||
switch (ar_pci->pdev->device) {
|
||||
case QCA988X_2_0_DEVICE_ID:
|
||||
return 1;
|
||||
case QCA6174_2_1_DEVICE_ID:
|
||||
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
|
||||
case QCA6174_HW_1_0_CHIP_ID_REV:
|
||||
case QCA6174_HW_1_1_CHIP_ID_REV:
|
||||
return 3;
|
||||
case QCA6174_HW_1_3_CHIP_ID_REV:
|
||||
return 2;
|
||||
case QCA6174_HW_2_1_CHIP_ID_REV:
|
||||
case QCA6174_HW_2_2_CHIP_ID_REV:
|
||||
return 6;
|
||||
case QCA6174_HW_3_0_CHIP_ID_REV:
|
||||
case QCA6174_HW_3_1_CHIP_ID_REV:
|
||||
case QCA6174_HW_3_2_CHIP_ID_REV:
|
||||
return 9;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
ath10k_warn(ar, "unknown number of banks, assuming 1\n");
|
||||
return 1;
|
||||
}
|
||||
|
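The bank count returned above is consumed in ath10k_pci_init_config() just below, where it replaces the previously hard-coded single IRAM bank. A quick illustration, assuming a QCA6174 hw3.0 part (chip-id rev 8):

/* ath10k_pci_get_num_banks() returns 9, so the target is asked to switch
 * nine banks to IRAM instead of one:
 *
 *   ealloc_value |= (9 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
 *                   HI_EARLY_ALLOC_IRAM_BANKS_MASK;
 */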
||||
static int ath10k_pci_init_config(struct ath10k *ar)
|
||||
{
|
||||
u32 interconnect_targ_addr;
|
||||
@ -1616,7 +1652,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
|
||||
/* first bank is switched to IRAM */
|
||||
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
|
||||
HI_EARLY_ALLOC_MAGIC_MASK);
|
||||
ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
|
||||
ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
|
||||
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
|
||||
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
|
||||
|
||||
ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
|
||||
@ -1812,12 +1849,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_pci_chip_reset(struct ath10k *ar)
|
||||
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
|
||||
{
|
||||
int i, ret;
|
||||
u32 val;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n");
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
|
||||
|
||||
/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
|
||||
* It is thus preferred to use warm reset which is safer but may not be
|
||||
@ -1881,11 +1918,53 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n");
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
|
||||
|
||||
/* FIXME: QCA6174 requires cold + warm reset to work. */
|
||||
|
||||
ret = ath10k_pci_cold_reset(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_pci_wait_for_target_init(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_pci_warm_reset(ar);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to warm reset: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_pci_chip_reset(struct ath10k *ar)
|
||||
{
|
||||
if (QCA_REV_988X(ar))
|
||||
return ath10k_pci_qca988x_chip_reset(ar);
|
||||
else if (QCA_REV_6174(ar))
|
||||
return ath10k_pci_qca6174_chip_reset(ar);
|
||||
else
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
static int ath10k_pci_hif_power_up(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
@ -1910,6 +1989,12 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
|
||||
*/
|
||||
ret = ath10k_pci_chip_reset(ar);
|
||||
if (ret) {
|
||||
if (ath10k_pci_has_fw_crashed(ar)) {
|
||||
ath10k_warn(ar, "firmware crashed during chip reset\n");
|
||||
ath10k_pci_fw_crashed_clear(ar);
|
||||
ath10k_pci_fw_crashed_dump(ar);
|
||||
}
|
||||
|
||||
ath10k_err(ar, "failed to reset chip: %d\n", ret);
|
||||
goto err_sleep;
|
||||
}
|
||||
@ -2041,6 +2126,7 @@ static void ath10k_msi_err_tasklet(unsigned long data)
|
||||
return;
|
||||
}
|
||||
|
||||
ath10k_pci_irq_disable(ar);
|
||||
ath10k_pci_fw_crashed_clear(ar);
|
||||
ath10k_pci_fw_crashed_dump(ar);
|
||||
}
|
||||
@ -2110,6 +2196,7 @@ static void ath10k_pci_tasklet(unsigned long data)
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
|
||||
if (ath10k_pci_has_fw_crashed(ar)) {
|
||||
ath10k_pci_irq_disable(ar);
|
||||
ath10k_pci_fw_crashed_clear(ar);
|
||||
ath10k_pci_fw_crashed_dump(ar);
|
||||
return;
|
||||
@ -2352,8 +2439,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
|
||||
|
||||
if (val & FW_IND_EVENT_PENDING) {
|
||||
ath10k_warn(ar, "device has crashed during init\n");
|
||||
ath10k_pci_fw_crashed_clear(ar);
|
||||
ath10k_pci_fw_crashed_dump(ar);
|
||||
return -ECOMM;
|
||||
}
|
||||
|
||||
@ -2507,11 +2592,23 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
||||
int ret = 0;
|
||||
struct ath10k *ar;
|
||||
struct ath10k_pci *ar_pci;
|
||||
enum ath10k_hw_rev hw_rev;
|
||||
u32 chip_id;
|
||||
|
||||
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
|
||||
ATH10K_BUS_PCI,
|
||||
&ath10k_pci_hif_ops);
|
||||
switch (pci_dev->device) {
|
||||
case QCA988X_2_0_DEVICE_ID:
|
||||
hw_rev = ATH10K_HW_QCA988X;
|
||||
break;
|
||||
case QCA6174_2_1_DEVICE_ID:
|
||||
hw_rev = ATH10K_HW_QCA6174;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
|
||||
hw_rev, &ath10k_pci_hif_ops);
|
||||
if (!ar) {
|
||||
dev_err(&pdev->dev, "failed to allocate core\n");
|
||||
return -ENOMEM;
|
||||
@ -2540,18 +2637,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
||||
goto err_release;
|
||||
}
|
||||
|
||||
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
|
||||
if (chip_id == 0xffffffff) {
|
||||
ath10k_err(ar, "failed to get chip id\n");
|
||||
goto err_sleep;
|
||||
}
|
||||
|
||||
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
|
||||
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
|
||||
pdev->device, chip_id);
|
||||
goto err_sleep;
|
||||
}
|
||||
|
||||
ret = ath10k_pci_alloc_pipes(ar);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
|
||||
@ -2578,6 +2663,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
||||
goto err_deinit_irq;
|
||||
}
|
||||
|
||||
ret = ath10k_pci_chip_reset(ar);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to reset chip: %d\n", ret);
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
|
||||
if (chip_id == 0xffffffff) {
|
||||
ath10k_err(ar, "failed to get chip id\n");
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
|
||||
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
|
||||
pdev->device, chip_id);
|
||||
goto err_sleep;
|
||||
}
|
||||
|
||||
ath10k_pci_sleep(ar);
|
||||
|
||||
ret = ath10k_core_register(ar, chip_id);
|
||||
|
@ -194,7 +194,7 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
|
||||
|
||||
#define ATH10K_PCI_RX_POST_RETRY_MS 50
|
||||
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
|
||||
#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
|
||||
#define PCIE_WAKE_TIMEOUT 10000 /* 10ms */
|
||||
|
||||
#define BAR_NUM 0
|
||||
|
||||
|
@ -850,7 +850,7 @@ struct rx_ppdu_start {
|
||||
|
||||
#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
|
||||
|
||||
struct rx_ppdu_end {
|
||||
struct rx_ppdu_end_common {
|
||||
__le32 evm_p0;
|
||||
__le32 evm_p1;
|
||||
__le32 evm_p2;
|
||||
@ -873,10 +873,33 @@ struct rx_ppdu_end {
|
||||
u8 phy_err_code;
|
||||
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
|
||||
__le32 info0; /* %RX_PPDU_END_INFO0_ */
|
||||
} __packed;
|
||||
|
||||
struct rx_ppdu_end_qca988x {
|
||||
__le16 bb_length;
|
||||
__le16 info1; /* %RX_PPDU_END_INFO1_ */
|
||||
} __packed;
|
||||
|
||||
#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
|
||||
#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
|
||||
#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
|
||||
#define RX_PPDU_END_RTT_UNUSED_LSB 24
|
||||
#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
|
||||
|
||||
struct rx_ppdu_end_qca6174 {
|
||||
__le32 rtt; /* %RX_PPDU_END_RTT_ */
|
||||
__le16 bb_length;
|
||||
__le16 info1; /* %RX_PPDU_END_INFO1_ */
|
||||
} __packed;
|
||||
|
||||
struct rx_ppdu_end {
|
||||
struct rx_ppdu_end_common common;
|
||||
union {
|
||||
struct rx_ppdu_end_qca988x qca988x;
|
||||
struct rx_ppdu_end_qca6174 qca6174;
|
||||
} __packed;
|
||||
} __packed;
|
||||
|
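With this split, code that only needs the shared fields keeps using the common part of rx_ppdu_end, while chip-specific consumers pick the union member for the detected hardware. An illustrative access pattern, not taken from this diff (ppdu_end is a hypothetical struct rx_ppdu_end pointer):

__le16 bb_len;

if (QCA_REV_6174(ar))
        bb_len = ppdu_end->qca6174.bb_length;
else
        bb_len = ppdu_end->qca988x.bb_length;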
||||
/*
|
||||
* evm_p0
|
||||
* EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
|
||||
|
@ -18,6 +18,8 @@
|
||||
#ifndef __TARGADDRS_H__
|
||||
#define __TARGADDRS_H__
|
||||
|
||||
#include "hw.h"
|
||||
|
||||
/*
|
||||
* xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
|
||||
* host_interest structure. It must match the address of the _host_interest
|
||||
@ -445,4 +447,7 @@ Fw Mode/SubMode Mask
|
||||
#define QCA988X_BOARD_DATA_SZ 7168
|
||||
#define QCA988X_BOARD_EXT_DATA_SZ 0
|
||||
|
||||
#define QCA6174_BOARD_DATA_SZ 8192
|
||||
#define QCA6174_BOARD_EXT_DATA_SZ 0
|
||||
|
||||
#endif /* __TARGADDRS_H__ */
|
||||
|
@ -98,7 +98,7 @@ static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
|
||||
}
|
||||
period = max(ATH10K_QUIET_PERIOD_MIN,
|
||||
(ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
|
||||
duration = period * (duty_cycle / 100);
|
||||
duration = (period * duty_cycle) / 100;
|
||||
enabled = duration ? 1 : 0;
|
||||
|
||||
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
|
||||
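The reordered expression matters because of C integer division; with the old form any duty cycle below 100 truncated to zero:

/* Example with period = 100 and duty_cycle = 30:
 *   old: duration = period * (duty_cycle / 100) = 100 * 0    = 0
 *   new: duration = (period * duty_cycle) / 100 = 3000 / 100 = 30
 */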
@ -160,7 +160,8 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
|
||||
temperature = ar->thermal.temperature;
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
|
||||
ret = snprintf(buf, PAGE_SIZE, "%d", temperature);
|
||||
/* display in millidegree celcius */
|
||||
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
|
||||
out:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret;
|
||||
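The multiplication and trailing newline follow the hwmon sysfs convention of reporting temperatures in millidegrees Celsius, one value per line:

/* A firmware-reported temperature of 45 (degrees C) is now presented as
 * "45000\n" instead of the bare string "45".
 */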
@ -215,7 +216,7 @@ int ath10k_thermal_register(struct ath10k *ar)
|
||||
|
||||
/* Avoid linking error on devm_hwmon_device_register_with_groups, I
|
||||
* guess linux/hwmon.h is missing proper stubs. */
|
||||
if (!config_enabled(HWMON))
|
||||
if (!config_enabled(CONFIG_HWMON))
|
||||
return 0;
|
||||
|
||||
hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
|
||||
|
@ -453,6 +453,74 @@ TRACE_EVENT(ath10k_htt_rx_desc,
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath10k_wmi_diag_container,
|
||||
TP_PROTO(struct ath10k *ar,
|
||||
u8 type,
|
||||
u32 timestamp,
|
||||
u32 code,
|
||||
u16 len,
|
||||
const void *data),
|
||||
|
||||
TP_ARGS(ar, type, timestamp, code, len, data),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__string(device, dev_name(ar->dev))
|
||||
__string(driver, dev_driver_string(ar->dev))
|
||||
__field(u8, type)
|
||||
__field(u32, timestamp)
|
||||
__field(u32, code)
|
||||
__field(u16, len)
|
||||
__dynamic_array(u8, data, len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(device, dev_name(ar->dev));
|
||||
__assign_str(driver, dev_driver_string(ar->dev));
|
||||
__entry->type = type;
|
||||
__entry->timestamp = timestamp;
|
||||
__entry->code = code;
|
||||
__entry->len = len;
|
||||
memcpy(__get_dynamic_array(data), data, len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"%s %s diag container type %hhu timestamp %u code %u len %d",
|
||||
__get_str(driver),
|
||||
__get_str(device),
|
||||
__entry->type,
|
||||
__entry->timestamp,
|
||||
__entry->code,
|
||||
__entry->len
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath10k_wmi_diag,
|
||||
TP_PROTO(struct ath10k *ar, const void *data, size_t len),
|
||||
|
||||
TP_ARGS(ar, data, len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__string(device, dev_name(ar->dev))
|
||||
__string(driver, dev_driver_string(ar->dev))
|
||||
__field(u16, len)
|
||||
__dynamic_array(u8, data, len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(device, dev_name(ar->dev));
|
||||
__assign_str(driver, dev_driver_string(ar->dev));
|
||||
__entry->len = len;
|
||||
memcpy(__get_dynamic_array(data), data, len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"%s %s tlv diag len %d",
|
||||
__get_str(driver),
|
||||
__get_str(device),
|
||||
__entry->len
|
||||
)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
|
||||
|
||||
/* we don't want to use include/trace/events */
|
||||
|
@ -64,7 +64,13 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
|
||||
return;
|
||||
}
|
||||
|
||||
msdu = htt->pending_tx[tx_done->msdu_id];
|
||||
msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
|
||||
if (!msdu) {
|
||||
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
|
||||
tx_done->msdu_id);
|
||||
return;
|
||||
}
|
||||
|
||||
skb_cb = ATH10K_SKB_CB(msdu);
|
||||
|
||||
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
|
||||
@ -95,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
|
||||
/* we do not own the msdu anymore */
|
||||
|
||||
exit:
|
||||
htt->pending_tx[tx_done->msdu_id] = NULL;
|
||||
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
|
||||
__ath10k_htt_tx_dec_pending(htt);
|
||||
if (htt->num_pending_tx == 0)
|
||||
|
@ -78,6 +78,8 @@ struct wmi_ops {
|
||||
const struct wmi_vdev_spectral_conf_arg *arg);
|
||||
struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
|
||||
u32 trigger, u32 enable);
|
||||
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
|
||||
const struct wmi_wmm_params_all_arg *arg);
|
||||
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN]);
|
||||
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
|
||||
@ -102,16 +104,20 @@ struct wmi_ops {
|
||||
u32 value);
|
||||
struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
|
||||
const struct wmi_scan_chan_list_arg *arg);
|
||||
struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif);
|
||||
struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
|
||||
const void *bcn, size_t bcn_len,
|
||||
u32 bcn_paddr, bool dtim_zero,
|
||||
bool deliver_cab);
|
||||
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
|
||||
const struct wmi_pdev_set_wmm_params_arg *arg);
|
||||
const struct wmi_wmm_params_all_arg *arg);
|
||||
struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
|
||||
enum wmi_stats_id stats_id);
|
||||
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
|
||||
enum wmi_force_fw_hang_type type,
|
||||
u32 delay_ms);
|
||||
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
|
||||
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable);
|
||||
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
|
||||
u32 log_level);
|
||||
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
|
||||
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
|
||||
struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
|
||||
@ -119,6 +125,30 @@ struct wmi_ops {
|
||||
u32 next_offset,
|
||||
u32 enabled);
|
||||
struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
|
||||
struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 *mac);
|
||||
struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 *mac, u32 tid, u32 buf_size);
|
||||
struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 *mac, u32 tid,
|
||||
u32 status);
|
||||
struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 *mac, u32 tid, u32 initiator,
|
||||
u32 reason);
|
||||
struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
|
||||
u32 tim_ie_offset, struct sk_buff *bcn,
|
||||
u32 prb_caps, u32 prb_erp,
|
||||
void *prb_ies, size_t prb_ies_len);
|
||||
struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
|
||||
struct sk_buff *bcn);
|
||||
struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 *p2p_ie);
|
||||
struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN],
|
||||
const struct wmi_sta_uapsd_auto_trig_arg *args,
|
||||
u32 num_ac);
|
||||
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
|
||||
const struct wmi_sta_keepalive_arg *arg);
|
||||
};
|
||||
|
||||
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
|
||||
@ -557,6 +587,42 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN],
|
||||
const struct wmi_sta_uapsd_auto_trig_arg *args,
|
||||
u32 num_ac)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 cmd_id;
|
||||
|
||||
if (!ar->wmi.ops->gen_vdev_sta_uapsd)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
|
||||
num_ac);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
|
||||
const struct wmi_wmm_params_all_arg *arg)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 cmd_id;
|
||||
|
||||
skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
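Unlike the neighbouring wrappers, ath10k_wmi_vdev_wmm_conf() above does not test the op pointer itself; the caller shown in the mac.c hunk earlier checks ar->wmi.ops->gen_vdev_wmm_conf before invoking it. A more defensive variant would mirror the other helpers (sketch, not part of the patch):

        if (!ar->wmi.ops->gen_vdev_wmm_conf)
                return -EOPNOTSUPP;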
||||
static inline int
|
||||
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN])
|
||||
@ -706,16 +772,19 @@ ath10k_wmi_peer_assoc(struct ath10k *ar,
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
|
||||
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
|
||||
const void *bcn, size_t bcn_len,
|
||||
u32 bcn_paddr, bool dtim_zero,
|
||||
bool deliver_cab)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct sk_buff *skb;
|
||||
int ret;
|
||||
|
||||
if (!ar->wmi.ops->gen_beacon_dma)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_beacon_dma(arvif);
|
||||
skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
|
||||
dtim_zero, deliver_cab);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
@ -731,7 +800,7 @@ ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
|
||||
const struct wmi_pdev_set_wmm_params_arg *arg)
|
||||
const struct wmi_wmm_params_all_arg *arg)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
@ -778,14 +847,14 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
|
||||
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_dbglog_cfg)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable);
|
||||
skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
@ -857,4 +926,139 @@ ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
|
||||
ar->wmi.cmd->pdev_get_temperature_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_addba_clear_resp)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb,
|
||||
ar->wmi.cmd->addba_clear_resp_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
||||
u32 tid, u32 buf_size)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_addba_send)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb,
|
||||
ar->wmi.cmd->addba_send_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
||||
u32 tid, u32 status)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_addba_set_resp)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb,
|
||||
ar->wmi.cmd->addba_set_resp_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
||||
u32 tid, u32 initiator, u32 reason)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_delba_send)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
|
||||
reason);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb,
|
||||
ar->wmi.cmd->delba_send_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
|
||||
struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
|
||||
void *prb_ies, size_t prb_ies_len)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_bcn_tmpl)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
|
||||
prb_caps, prb_erp, prb_ies,
|
||||
prb_ies_len);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_prb_tmpl)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ath10k_wmi_sta_keepalive(struct ath10k *ar,
|
||||
const struct wmi_sta_keepalive_arg *arg)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 cmd_id;
|
||||
|
||||
if (!ar->wmi.ops->gen_sta_keepalive)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
|
||||
if (IS_ERR(skb))
|
||||
return PTR_ERR(skb);
|
||||
|
||||
cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
|
||||
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
|
||||
}
|
||||
|
||||
#endif

@ -58,6 +58,10 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
		= { .min_len = sizeof(struct wlan_host_mem_req) },
	[WMI_TLV_TAG_STRUCT_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
};

static int
@ -156,6 +160,142 @@ static u16 ath10k_wmi_tlv_len(const void *ptr)
	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
}

/**************/
/* TLV events */
/**************/
static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_bcn_tx_status_ev *ev;
	u32 vdev_id, tx_status;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	tx_status = __le32_to_cpu(ev->tx_status);
	vdev_id = __le32_to_cpu(ev->vdev_id);

	switch (tx_status) {
	case WMI_TLV_BCN_TX_STATUS_OK:
		break;
	case WMI_TLV_BCN_TX_STATUS_XRETRY:
	case WMI_TLV_BCN_TX_STATUS_DROP:
	case WMI_TLV_BCN_TX_STATUS_FILTERED:
		/* FIXME: It's probably worth telling mac80211 to stop the
		 * interface as it is crippled.
		 */
		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
			    vdev_id, tx_status);
		break;
	}

	kfree(tb);
	return 0;
}

static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
					  struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_diag_data_ev *ev;
	const struct wmi_tlv_diag_item *item;
	const void *data;
	int ret, num_items, len;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	num_items = __le32_to_cpu(ev->num_items);
	len = ath10k_wmi_tlv_len(data);

	while (num_items--) {
		if (len == 0)
			break;
		if (len < sizeof(*item)) {
			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
			break;
		}

		item = data;

		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
			break;
		}

		trace_ath10k_wmi_diag_container(ar,
						item->type,
						__le32_to_cpu(item->timestamp),
						__le32_to_cpu(item->code),
						__le16_to_cpu(item->len),
						item->payload);

		len -= sizeof(*item);
		len -= roundup(__le16_to_cpu(item->len), 4);

		data += sizeof(*item);
		data += roundup(__le16_to_cpu(item->len), 4);
	}

	if (num_items != -1 || len != 0)
		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
			    num_items, len);

	kfree(tb);
	return 0;
}
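/* Illustrative sketch, not part of the patch: the loop above strides over
 * variable-length diag items, where each item is a fixed header followed by
 * a payload padded to a 4-byte boundary. A hypothetical helper computing
 * that stride (using the wmi_tlv_diag_item struct from wmi-tlv.h) could
 * look like this.
 */
static inline size_t ath10k_wmi_tlv_diag_item_stride(const struct wmi_tlv_diag_item *item)
{
	/* header plus payload length rounded up to the next multiple of 4 */
	return sizeof(*item) + roundup(__le16_to_cpu(item->len), 4);
}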

static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
				     struct sk_buff *skb)
{
	const void **tb;
	const void *data;
	int ret, len;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!data) {
		kfree(tb);
		return -EPROTO;
	}
	len = ath10k_wmi_tlv_len(data);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
	trace_ath10k_wmi_diag(ar, data, len);

	kfree(tb);
	return 0;
}

/***********/
/* TLV ops */
/***********/
@ -268,6 +408,15 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
|
||||
case WMI_TLV_READY_EVENTID:
|
||||
ath10k_wmi_event_ready(ar, skb);
|
||||
break;
|
||||
case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
|
||||
ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
|
||||
break;
|
||||
case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
|
||||
ath10k_wmi_tlv_event_diag_data(ar, skb);
|
||||
break;
|
||||
case WMI_TLV_DIAG_EVENTID:
|
||||
ath10k_wmi_tlv_event_diag(ar, skb);
|
||||
break;
|
||||
default:
|
||||
ath10k_warn(ar, "Unknown eventid: %d\n", id);
|
||||
break;
|
||||
@ -903,8 +1052,15 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
|
||||
|
||||
cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
|
||||
cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
|
||||
cfg->num_offload_peers = __cpu_to_le32(0);
|
||||
cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
|
||||
|
||||
if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
|
||||
cfg->num_offload_peers = __cpu_to_le32(3);
|
||||
cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
|
||||
} else {
|
||||
cfg->num_offload_peers = __cpu_to_le32(0);
|
||||
cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
|
||||
}
|
||||
|
||||
cfg->num_peer_keys = __cpu_to_le32(2);
|
||||
cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
|
||||
cfg->ast_skid_limit = __cpu_to_le32(0x10);
|
||||
@ -1356,6 +1512,173 @@ ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
|
||||
return skb;
|
||||
}

static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
					 const struct wmi_sta_uapsd_auto_trig_arg *arg)
{
	struct wmi_sta_uapsd_auto_trig_param *ac;
	struct wmi_tlv *tlv;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
	tlv->len = __cpu_to_le16(sizeof(*ac));
	ac = (void *)tlv->value;

	ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
	ac->user_priority = __cpu_to_le32(arg->user_priority);
	ac->service_interval = __cpu_to_le32(arg->service_interval);
	ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
	ac->delay_interval = __cpu_to_le32(arg->delay_interval);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
		   ac->wmm_ac, ac->user_priority, ac->service_interval,
		   ac->suspend_interval, ac->delay_interval);

	return ptr + sizeof(*tlv) + sizeof(*ac);
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
				     const u8 peer_addr[ETH_ALEN],
				     const struct wmi_sta_uapsd_auto_trig_arg *args,
				     u32 num_ac)
{
	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
	struct wmi_sta_uapsd_auto_trig_param *ac;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	size_t ac_tlv_len;
	void *ptr;
	int i;

	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + ac_tlv_len;
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->num_ac = __cpu_to_le32(num_ac);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(ac_tlv_len);
	ac = (void *)tlv->value;

	ptr += sizeof(*tlv);
	for (i = 0; i < num_ac; i++)
		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
	return skb;
}
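/* Illustrative sketch, not part of the patch: every gen_* helper above emits
 * the same TLV framing, a struct wmi_tlv header (16-bit tag and length, both
 * little-endian) immediately followed by the value bytes. A hypothetical
 * writer for just the header would advance the cursor like this.
 */
static void *ath10k_wmi_tlv_put_hdr(void *ptr, u16 tag, u16 len)
{
	struct wmi_tlv *tlv = ptr;

	tlv->tag = __cpu_to_le16(tag);
	tlv->len = __cpu_to_le16(len);

	/* value bytes (len of them, padded as needed) start right here */
	return ptr + sizeof(*tlv);
}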
|
||||
|
||||
static void *ath10k_wmi_tlv_put_wmm(void *ptr,
|
||||
const struct wmi_wmm_params_arg *arg)
|
||||
{
|
||||
struct wmi_wmm_params *wmm;
|
||||
struct wmi_tlv *tlv;
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
|
||||
tlv->len = __cpu_to_le16(sizeof(*wmm));
|
||||
wmm = (void *)tlv->value;
|
||||
ath10k_wmi_set_wmm_param(wmm, arg);
|
||||
|
||||
return ptr + sizeof(*tlv) + sizeof(*wmm);
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
|
||||
const struct wmi_wmm_params_all_arg *arg)
|
||||
{
|
||||
struct wmi_tlv_vdev_set_wmm_cmd *cmd;
|
||||
struct wmi_wmm_params *wmm;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
size_t len;
|
||||
void *ptr;
|
||||
|
||||
len = (sizeof(*tlv) + sizeof(*cmd)) +
|
||||
(4 * (sizeof(*tlv) + sizeof(*wmm)));
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);
|
||||
|
||||
ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
|
||||
ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
|
||||
ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
|
||||
ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
|
||||
const struct wmi_sta_keepalive_arg *arg)
|
||||
{
|
||||
struct wmi_tlv_sta_keepalive_cmd *cmd;
|
||||
struct wmi_sta_keepalive_arp_resp *arp;
|
||||
struct sk_buff *skb;
|
||||
struct wmi_tlv *tlv;
|
||||
void *ptr;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd) +
|
||||
sizeof(*tlv) + sizeof(*arp);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
|
||||
cmd->enabled = __cpu_to_le32(arg->enabled);
|
||||
cmd->method = __cpu_to_le32(arg->method);
|
||||
cmd->interval = __cpu_to_le32(arg->interval);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
|
||||
tlv->len = __cpu_to_le16(sizeof(*arp));
|
||||
arp = (void *)tlv->value;
|
||||
|
||||
arp->src_ip4_addr = arg->src_ip4_addr;
|
||||
arp->dest_ip4_addr = arg->dest_ip4_addr;
|
||||
ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
|
||||
|
||||
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
|
||||
arg->vdev_id, arg->enabled, arg->method, arg->interval);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 peer_addr[ETH_ALEN])
|
||||
@ -1678,13 +2001,15 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k_vif *arvif)
|
||||
ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
|
||||
const void *bcn, size_t bcn_len,
|
||||
u32 bcn_paddr, bool dtim_zero,
|
||||
bool deliver_cab)
|
||||
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct wmi_bcn_tx_ref_cmd *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
struct sk_buff *beacon = arvif->beacon;
|
||||
struct ieee80211_hdr *hdr;
|
||||
u16 fc;
|
||||
|
||||
@ -1692,48 +2017,33 @@ ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k_vif *arvif)
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
hdr = (struct ieee80211_hdr *)beacon->data;
|
||||
hdr = (struct ieee80211_hdr *)bcn;
|
||||
fc = le16_to_cpu(hdr->frame_control);
|
||||
|
||||
tlv = (void *)skb->data;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
|
||||
cmd->data_len = __cpu_to_le32(beacon->len);
|
||||
cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->data_len = __cpu_to_le32(bcn_len);
|
||||
cmd->data_ptr = __cpu_to_le32(bcn_paddr);
|
||||
cmd->msdu_id = 0;
|
||||
cmd->frame_control = __cpu_to_le32(fc);
|
||||
cmd->flags = 0;
|
||||
|
||||
if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
|
||||
if (dtim_zero)
|
||||
cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
|
||||
|
||||
if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
|
||||
if (deliver_cab)
|
||||
cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
|
||||
return skb;
|
||||
}
|
||||
|
||||
static void *ath10k_wmi_tlv_put_wmm(void *ptr,
|
||||
const struct wmi_wmm_params_arg *arg)
|
||||
{
|
||||
struct wmi_wmm_params *wmm;
|
||||
struct wmi_tlv *tlv;
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
|
||||
tlv->len = __cpu_to_le16(sizeof(*wmm));
|
||||
wmm = (void *)tlv->value;
|
||||
ath10k_wmi_pdev_set_wmm_param(wmm, arg);
|
||||
|
||||
return ptr + sizeof(*tlv) + sizeof(*wmm);
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
|
||||
const struct wmi_pdev_set_wmm_params_arg *arg)
|
||||
const struct wmi_wmm_params_all_arg *arg)
|
||||
{
|
||||
struct wmi_tlv_pdev_set_wmm_cmd *cmd;
|
||||
struct wmi_wmm_params *wmm;
|
||||
@ -1816,8 +2126,8 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable)
|
||||
{
|
||||
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
|
||||
u32 log_level) {
|
||||
struct wmi_tlv_dbglog_cmd *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
@ -1922,6 +2232,159 @@ ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
|
||||
u32 tim_ie_offset, struct sk_buff *bcn,
|
||||
u32 prb_caps, u32 prb_erp, void *prb_ies,
|
||||
size_t prb_ies_len)
|
||||
{
|
||||
struct wmi_tlv_bcn_tmpl_cmd *cmd;
|
||||
struct wmi_tlv_bcn_prb_info *info;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
void *ptr;
|
||||
size_t len;
|
||||
|
||||
if (WARN_ON(prb_ies_len > 0 && !prb_ies))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd) +
|
||||
sizeof(*tlv) + sizeof(*info) + prb_ies_len +
|
||||
sizeof(*tlv) + roundup(bcn->len, 4);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
|
||||
cmd->buf_len = __cpu_to_le32(bcn->len);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);

	/* FIXME: prb_ies_len should probably be aligned to a 4 byte boundary,
	 * but then it would be impossible to pass the original ie len.
	 * This chunk is not used yet, so if setting the probe resp template
	 * yields problems with beaconing or crashes firmware, look here.
	 */
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
|
||||
tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
|
||||
info = (void *)tlv->value;
|
||||
info->caps = __cpu_to_le32(prb_caps);
|
||||
info->erp = __cpu_to_le32(prb_erp);
|
||||
memcpy(info->ies, prb_ies, prb_ies_len);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*info);
|
||||
ptr += prb_ies_len;
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
|
||||
tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
|
||||
memcpy(tlv->value, bcn->data, bcn->len);
|
||||
|
||||
/* FIXME: Adjust TSF? */
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
|
||||
vdev_id);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
|
||||
struct sk_buff *prb)
|
||||
{
|
||||
struct wmi_tlv_prb_tmpl_cmd *cmd;
|
||||
struct wmi_tlv_bcn_prb_info *info;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
void *ptr;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd) +
|
||||
sizeof(*tlv) + sizeof(*info) +
|
||||
sizeof(*tlv) + roundup(prb->len, 4);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->buf_len = __cpu_to_le32(prb->len);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
|
||||
tlv->len = __cpu_to_le16(sizeof(*info));
|
||||
info = (void *)tlv->value;
|
||||
info->caps = 0;
|
||||
info->erp = 0;
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*info);
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
|
||||
tlv->len = __cpu_to_le16(roundup(prb->len, 4));
|
||||
memcpy(tlv->value, prb->data, prb->len);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
|
||||
vdev_id);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 *p2p_ie)
|
||||
{
|
||||
struct wmi_tlv_p2p_go_bcn_ie *cmd;
|
||||
struct wmi_tlv *tlv;
|
||||
struct sk_buff *skb;
|
||||
void *ptr;
|
||||
size_t len;
|
||||
|
||||
len = sizeof(*tlv) + sizeof(*cmd) +
|
||||
sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
|
||||
skb = ath10k_wmi_alloc_skb(ar, len);
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ptr = (void *)skb->data;
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
|
||||
tlv->len = __cpu_to_le16(sizeof(*cmd));
|
||||
cmd = (void *)tlv->value;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += sizeof(*cmd);
|
||||
|
||||
tlv = ptr;
|
||||
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
|
||||
tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
|
||||
memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
|
||||
|
||||
ptr += sizeof(*tlv);
|
||||
ptr += roundup(p2p_ie[1] + 2, 4);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
|
||||
vdev_id);
|
||||
return skb;
|
||||
}
|
||||
|
||||
/****************/
|
||||
/* TLV mappings */
|
||||
/****************/
|
||||
@ -2045,6 +2508,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
|
||||
.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
|
||||
.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
|
||||
.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
|
||||
.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
|
||||
};
|
||||
|
||||
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
|
||||
@ -2188,6 +2652,7 @@ static const struct wmi_ops wmi_tlv_ops = {
|
||||
.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
|
||||
.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
|
||||
.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
|
||||
.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
|
||||
.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
|
||||
.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
|
||||
.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
|
||||
@ -2207,6 +2672,15 @@ static const struct wmi_ops wmi_tlv_ops = {
|
||||
.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
|
||||
/* .gen_pdev_set_quiet_mode not implemented */
|
||||
/* .gen_pdev_get_temperature not implemented */
|
||||
/* .gen_addba_clear_resp not implemented */
|
||||
/* .gen_addba_send not implemented */
|
||||
/* .gen_addba_set_resp not implemented */
|
||||
/* .gen_delba_send not implemented */
|
||||
.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
|
||||
.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
|
||||
.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
|
||||
.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
|
||||
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
|
||||
};
|
||||
|
||||
/************/
|
||||
|
@ -1302,6 +1302,10 @@ struct wmi_tlv_pdev_set_wmm_cmd {
|
||||
__le32 dg_type; /* no idea.. */
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_vdev_set_wmm_cmd {
|
||||
__le32 vdev_id;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_phyerr_ev {
|
||||
__le32 num_phyerrs;
|
||||
__le32 tsf_l32;
|
||||
@ -1375,6 +1379,66 @@ struct wmi_tlv_pktlog_disable {
|
||||
__le32 reserved;
|
||||
} __packed;
|
||||
|
||||
enum wmi_tlv_bcn_tx_status {
|
||||
WMI_TLV_BCN_TX_STATUS_OK,
|
||||
WMI_TLV_BCN_TX_STATUS_XRETRY,
|
||||
WMI_TLV_BCN_TX_STATUS_DROP,
|
||||
WMI_TLV_BCN_TX_STATUS_FILTERED,
|
||||
};
|
||||
|
||||
struct wmi_tlv_bcn_tx_status_ev {
|
||||
__le32 vdev_id;
|
||||
__le32 tx_status;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_bcn_prb_info {
|
||||
__le32 caps;
|
||||
__le32 erp;
|
||||
u8 ies[0];
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_bcn_tmpl_cmd {
|
||||
__le32 vdev_id;
|
||||
__le32 tim_ie_offset;
|
||||
__le32 buf_len;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_prb_tmpl_cmd {
|
||||
__le32 vdev_id;
|
||||
__le32 buf_len;
|
||||
} __packed;
|
||||
|
||||
struct wmi_tlv_p2p_go_bcn_ie {
|
||||
__le32 vdev_id;
|
||||
__le32 ie_len;
|
||||
} __packed;

enum wmi_tlv_diag_item_type {
	WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
	WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
	WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
};

struct wmi_tlv_diag_item {
	u8 type;
	u8 reserved;
	__le16 len;
	__le32 timestamp;
	__le32 code;
	u8 payload[0];
} __packed;

struct wmi_tlv_diag_data_ev {
	__le32 num_items;
} __packed;
|
||||
|
||||
struct wmi_tlv_sta_keepalive_cmd {
|
||||
__le32 vdev_id;
|
||||
__le32 enabled;
|
||||
__le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
|
||||
__le32 interval; /* in seconds */
|
||||
} __packed;
|
||||
|
||||
void ath10k_wmi_tlv_attach(struct ath10k *ar);
|
||||
|
||||
#endif

@ -956,23 +956,45 @@ err_pull:

static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_skb_cb *cb;
	struct sk_buff *bcn;
	int ret;

	lockdep_assert_held(&arvif->ar->data_lock);
	spin_lock_bh(&ar->data_lock);

	if (arvif->beacon == NULL)
		return;
	bcn = arvif->beacon;

	if (arvif->beacon_sent)
		return;
	if (!bcn)
		goto unlock;

	ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
	if (ret)
		return;
	cb = ATH10K_SKB_CB(bcn);

	/* We need to retain the arvif->beacon reference for DMA unmapping and
	 * freeing the skbuff later. */
	arvif->beacon_sent = true;
	switch (arvif->beacon_state) {
	case ATH10K_BEACON_SENDING:
	case ATH10K_BEACON_SENT:
		break;
	case ATH10K_BEACON_SCHEDULED:
		arvif->beacon_state = ATH10K_BEACON_SENDING;
		spin_unlock_bh(&ar->data_lock);

		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
							arvif->vdev_id,
							bcn->data, bcn->len,
							cb->paddr,
							cb->bcn.dtim_zero,
							cb->bcn.deliver_cab);

		spin_lock_bh(&ar->data_lock);

		if (ret == 0)
			arvif->beacon_state = ATH10K_BEACON_SENT;
		else
			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
	}

unlock:
	spin_unlock_bh(&ar->data_lock);
}
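/* Illustrative sketch, not part of the patch: the switch above assumes a
 * three-state beacon lifecycle roughly like the enum below (the real
 * definition lives elsewhere in the driver, presumably core.h; the names
 * are taken from the cases used here).
 */
enum ath10k_beacon_state {
	ATH10K_BEACON_SCHEDULED = 0,	/* queued by SWBA, not yet handed to firmware */
	ATH10K_BEACON_SENDING,		/* WMI send in flight with data_lock dropped */
	ATH10K_BEACON_SENT,		/* firmware owns the buffer until the next SWBA */
};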
|
||||
|
||||
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
|
||||
@ -985,12 +1007,10 @@ static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
|
||||
|
||||
static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
|
||||
{
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
ieee80211_iterate_active_interfaces_atomic(ar->hw,
|
||||
IEEE80211_IFACE_ITER_NORMAL,
|
||||
ath10k_wmi_tx_beacons_iter,
|
||||
NULL);
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
}
|
||||
|
||||
static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
|
||||
@ -1680,12 +1700,9 @@ int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
|
||||
struct ath10k_fw_stats_pdev *dst)
|
||||
void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
|
||||
struct ath10k_fw_stats_pdev *dst)
|
||||
{
|
||||
const struct wal_dbg_tx_stats *tx = &src->wal.tx;
|
||||
const struct wal_dbg_rx_stats *rx = &src->wal.rx;
|
||||
|
||||
dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
|
||||
dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
|
||||
dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
|
||||
@ -1693,44 +1710,63 @@ void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
|
||||
dst->cycle_count = __le32_to_cpu(src->cycle_count);
|
||||
dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
|
||||
dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
|
||||
}
|
||||
|
||||
dst->comp_queued = __le32_to_cpu(tx->comp_queued);
|
||||
dst->comp_delivered = __le32_to_cpu(tx->comp_delivered);
|
||||
dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued);
|
||||
dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued);
|
||||
dst->wmm_drop = __le32_to_cpu(tx->wmm_drop);
|
||||
dst->local_enqued = __le32_to_cpu(tx->local_enqued);
|
||||
dst->local_freed = __le32_to_cpu(tx->local_freed);
|
||||
dst->hw_queued = __le32_to_cpu(tx->hw_queued);
|
||||
dst->hw_reaped = __le32_to_cpu(tx->hw_reaped);
|
||||
dst->underrun = __le32_to_cpu(tx->underrun);
|
||||
dst->tx_abort = __le32_to_cpu(tx->tx_abort);
|
||||
dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed);
|
||||
dst->tx_ko = __le32_to_cpu(tx->tx_ko);
|
||||
dst->data_rc = __le32_to_cpu(tx->data_rc);
|
||||
dst->self_triggers = __le32_to_cpu(tx->self_triggers);
|
||||
dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure);
|
||||
dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err);
|
||||
dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry);
|
||||
dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout);
|
||||
dst->pdev_resets = __le32_to_cpu(tx->pdev_resets);
|
||||
dst->phy_underrun = __le32_to_cpu(tx->phy_underrun);
|
||||
dst->txop_ovf = __le32_to_cpu(tx->txop_ovf);
|
||||
void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
|
||||
struct ath10k_fw_stats_pdev *dst)
|
||||
{
|
||||
dst->comp_queued = __le32_to_cpu(src->comp_queued);
|
||||
dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
|
||||
dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
|
||||
dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
|
||||
dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
|
||||
dst->local_enqued = __le32_to_cpu(src->local_enqued);
|
||||
dst->local_freed = __le32_to_cpu(src->local_freed);
|
||||
dst->hw_queued = __le32_to_cpu(src->hw_queued);
|
||||
dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
|
||||
dst->underrun = __le32_to_cpu(src->underrun);
|
||||
dst->tx_abort = __le32_to_cpu(src->tx_abort);
|
||||
dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
|
||||
dst->tx_ko = __le32_to_cpu(src->tx_ko);
|
||||
dst->data_rc = __le32_to_cpu(src->data_rc);
|
||||
dst->self_triggers = __le32_to_cpu(src->self_triggers);
|
||||
dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
|
||||
dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
|
||||
dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
|
||||
dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
|
||||
dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
|
||||
dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
|
||||
dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
|
||||
}
|
||||
|
||||
dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change);
|
||||
dst->status_rcvd = __le32_to_cpu(rx->status_rcvd);
|
||||
dst->r0_frags = __le32_to_cpu(rx->r0_frags);
|
||||
dst->r1_frags = __le32_to_cpu(rx->r1_frags);
|
||||
dst->r2_frags = __le32_to_cpu(rx->r2_frags);
|
||||
dst->r3_frags = __le32_to_cpu(rx->r3_frags);
|
||||
dst->htt_msdus = __le32_to_cpu(rx->htt_msdus);
|
||||
dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus);
|
||||
dst->loc_msdus = __le32_to_cpu(rx->loc_msdus);
|
||||
dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus);
|
||||
dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu);
|
||||
dst->phy_errs = __le32_to_cpu(rx->phy_errs);
|
||||
dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop);
|
||||
dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs);
|
||||
void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
|
||||
struct ath10k_fw_stats_pdev *dst)
|
||||
{
|
||||
dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
|
||||
dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
|
||||
dst->r0_frags = __le32_to_cpu(src->r0_frags);
|
||||
dst->r1_frags = __le32_to_cpu(src->r1_frags);
|
||||
dst->r2_frags = __le32_to_cpu(src->r2_frags);
|
||||
dst->r3_frags = __le32_to_cpu(src->r3_frags);
|
||||
dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
|
||||
dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
|
||||
dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
|
||||
dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
|
||||
dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
|
||||
dst->phy_errs = __le32_to_cpu(src->phy_errs);
|
||||
dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
|
||||
dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
|
||||
}
|
||||
|
||||
void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
|
||||
struct ath10k_fw_stats_pdev *dst)
|
||||
{
|
||||
dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
|
||||
dst->rts_bad = __le32_to_cpu(src->rts_bad);
|
||||
dst->rts_good = __le32_to_cpu(src->rts_good);
|
||||
dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
|
||||
dst->no_beacons = __le32_to_cpu(src->no_beacons);
|
||||
dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
|
||||
}
|
||||
|
||||
void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
|
||||
@ -1768,7 +1804,10 @@ static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
|
||||
if (!dst)
|
||||
continue;
|
||||
|
||||
ath10k_wmi_pull_pdev_stats(src, dst);
|
||||
ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
|
||||
ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
|
||||
ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
|
||||
|
||||
list_add_tail(&dst->list, &stats->pdevs);
|
||||
}
|
||||
|
||||
@ -1820,14 +1859,10 @@ static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
|
||||
if (!dst)
|
||||
continue;
|
||||
|
||||
ath10k_wmi_pull_pdev_stats(&src->old, dst);
|
||||
|
||||
dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
|
||||
dst->rts_bad = __le32_to_cpu(src->rts_bad);
|
||||
dst->rts_good = __le32_to_cpu(src->rts_good);
|
||||
dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
|
||||
dst->no_beacons = __le32_to_cpu(src->no_beacons);
|
||||
dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
|
||||
ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
|
||||
ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
|
||||
ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
|
||||
ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
|
||||
|
||||
list_add_tail(&dst->list, &stats->pdevs);
|
||||
}
|
||||
@ -1856,6 +1891,164 @@ static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
|
||||
struct sk_buff *skb,
|
||||
struct ath10k_fw_stats *stats)
|
||||
{
|
||||
const struct wmi_10_2_stats_event *ev = (void *)skb->data;
|
||||
u32 num_pdev_stats;
|
||||
u32 num_pdev_ext_stats;
|
||||
u32 num_vdev_stats;
|
||||
u32 num_peer_stats;
|
||||
int i;
|
||||
|
||||
if (!skb_pull(skb, sizeof(*ev)))
|
||||
return -EPROTO;
|
||||
|
||||
num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
|
||||
num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
|
||||
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
|
||||
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
|
||||
|
||||
for (i = 0; i < num_pdev_stats; i++) {
|
||||
const struct wmi_10_2_pdev_stats *src;
|
||||
struct ath10k_fw_stats_pdev *dst;
|
||||
|
||||
src = (void *)skb->data;
|
||||
if (!skb_pull(skb, sizeof(*src)))
|
||||
return -EPROTO;
|
||||
|
||||
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
|
||||
if (!dst)
|
||||
continue;
|
||||
|
||||
ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
|
||||
ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
|
||||
ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
|
||||
ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
|
||||
/* FIXME: expose 10.2 specific values */
|
||||
|
||||
list_add_tail(&dst->list, &stats->pdevs);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_pdev_ext_stats; i++) {
|
||||
const struct wmi_10_2_pdev_ext_stats *src;
|
||||
|
||||
src = (void *)skb->data;
|
||||
if (!skb_pull(skb, sizeof(*src)))
|
||||
return -EPROTO;
|
||||
|
||||
/* FIXME: expose values to userspace
|
||||
*
|
||||
* Note: Even though this loop seems to do nothing it is
|
||||
* required to parse following sub-structures properly.
|
||||
*/
|
||||
}
|
||||
|
||||
/* fw doesn't implement vdev stats */
|
||||
|
||||
for (i = 0; i < num_peer_stats; i++) {
|
||||
const struct wmi_10_2_peer_stats *src;
|
||||
struct ath10k_fw_stats_peer *dst;
|
||||
|
||||
src = (void *)skb->data;
|
||||
if (!skb_pull(skb, sizeof(*src)))
|
||||
return -EPROTO;
|
||||
|
||||
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
|
||||
if (!dst)
|
||||
continue;
|
||||
|
||||
ath10k_wmi_pull_peer_stats(&src->old, dst);
|
||||
|
||||
dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
|
||||
/* FIXME: expose 10.2 specific values */
|
||||
|
||||
list_add_tail(&dst->list, &stats->peers);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
|
||||
struct sk_buff *skb,
|
||||
struct ath10k_fw_stats *stats)
|
||||
{
|
||||
const struct wmi_10_2_stats_event *ev = (void *)skb->data;
|
||||
u32 num_pdev_stats;
|
||||
u32 num_pdev_ext_stats;
|
||||
u32 num_vdev_stats;
|
||||
u32 num_peer_stats;
|
||||
int i;
|
||||
|
||||
if (!skb_pull(skb, sizeof(*ev)))
|
||||
return -EPROTO;
|
||||
|
||||
num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
|
||||
num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
|
||||
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
|
||||
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
|
||||
|
||||
for (i = 0; i < num_pdev_stats; i++) {
|
||||
const struct wmi_10_2_pdev_stats *src;
|
||||
struct ath10k_fw_stats_pdev *dst;
|
||||
|
||||
src = (void *)skb->data;
|
||||
if (!skb_pull(skb, sizeof(*src)))
|
||||
return -EPROTO;
|
||||
|
||||
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
|
||||
if (!dst)
|
||||
continue;
|
||||
|
||||
ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
|
||||
ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
|
||||
ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
|
||||
ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
|
||||
/* FIXME: expose 10.2 specific values */
|
||||
|
||||
list_add_tail(&dst->list, &stats->pdevs);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_pdev_ext_stats; i++) {
|
||||
const struct wmi_10_2_pdev_ext_stats *src;
|
||||
|
||||
src = (void *)skb->data;
|
||||
if (!skb_pull(skb, sizeof(*src)))
|
||||
return -EPROTO;
|
||||
|
||||
/* FIXME: expose values to userspace
|
||||
*
|
||||
* Note: Even though this loop seems to do nothing it is
|
||||
* required to parse following sub-structures properly.
|
||||
*/
|
||||
}
|
||||
|
||||
/* fw doesn't implement vdev stats */
|
||||
|
||||
for (i = 0; i < num_peer_stats; i++) {
|
||||
const struct wmi_10_2_4_peer_stats *src;
|
||||
struct ath10k_fw_stats_peer *dst;
|
||||
|
||||
src = (void *)skb->data;
|
||||
if (!skb_pull(skb, sizeof(*src)))
|
||||
return -EPROTO;
|
||||
|
||||
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
|
||||
if (!dst)
|
||||
continue;
|
||||
|
||||
ath10k_wmi_pull_peer_stats(&src->common.old, dst);
|
||||
|
||||
dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
|
||||
/* FIXME: expose 10.2 specific values */
|
||||
|
||||
list_add_tail(&dst->list, &stats->peers);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");

@ -2279,9 +2472,19 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
		spin_lock_bh(&ar->data_lock);

		if (arvif->beacon) {
			if (!arvif->beacon_sent)
				ath10k_warn(ar, "SWBA overrun on vdev %d\n",
			switch (arvif->beacon_state) {
			case ATH10K_BEACON_SENT:
				break;
			case ATH10K_BEACON_SCHEDULED:
				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
					    arvif->vdev_id);
				break;
			case ATH10K_BEACON_SENDING:
				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
					    arvif->vdev_id);
				dev_kfree_skb(bcn);
				goto skip;
			}

			ath10k_mac_vif_beacon_free(arvif);
		}
@ -2309,15 +2512,16 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
		}

		arvif->beacon = bcn;
		arvif->beacon_sent = false;
		arvif->beacon_state = ATH10K_BEACON_SCHEDULED;

		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);

		ath10k_wmi_tx_beacon_nowait(arvif);
skip:
		spin_unlock_bh(&ar->data_lock);
	}

	ath10k_wmi_tx_beacons_nowait(ar);
}
|
||||
|
||||
void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
|
||||
@ -3710,7 +3914,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
|
||||
struct wmi_init_cmd_10_2 *cmd;
|
||||
struct sk_buff *buf;
|
||||
struct wmi_resource_config_10x config = {};
|
||||
u32 len, val;
|
||||
u32 len, val, features;
|
||||
|
||||
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
|
||||
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
|
||||
@ -3744,7 +3948,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
|
||||
config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
|
||||
config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
|
||||
config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
|
||||
config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
|
||||
config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
|
||||
config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
|
||||
|
||||
val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
|
||||
@ -3764,6 +3968,9 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
|
||||
|
||||
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
|
||||
|
||||
features = WMI_10_2_RX_BATCH_MODE;
|
||||
cmd->resource_config.feature_mask = __cpu_to_le32(features);
|
||||
|
||||
memcpy(&cmd->resource_config.common, &config, sizeof(config));
|
||||
ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
|
||||
|
||||
@ -4680,12 +4887,12 @@ ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
|
||||
|
||||
/* This function assumes the beacon is already DMA mapped */
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_op_gen_beacon_dma(struct ath10k_vif *arvif)
|
||||
ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
|
||||
size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
|
||||
bool deliver_cab)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
struct wmi_bcn_tx_ref_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
struct sk_buff *beacon = arvif->beacon;
|
||||
struct ieee80211_hdr *hdr;
|
||||
u16 fc;
|
||||
|
||||
@ -4693,29 +4900,29 @@ ath10k_wmi_op_gen_beacon_dma(struct ath10k_vif *arvif)
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
hdr = (struct ieee80211_hdr *)beacon->data;
|
||||
hdr = (struct ieee80211_hdr *)bcn;
|
||||
fc = le16_to_cpu(hdr->frame_control);
|
||||
|
||||
cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
|
||||
cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
|
||||
cmd->data_len = __cpu_to_le32(beacon->len);
|
||||
cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
cmd->data_len = __cpu_to_le32(bcn_len);
|
||||
cmd->data_ptr = __cpu_to_le32(bcn_paddr);
|
||||
cmd->msdu_id = 0;
|
||||
cmd->frame_control = __cpu_to_le32(fc);
|
||||
cmd->flags = 0;
|
||||
cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
|
||||
|
||||
if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
|
||||
if (dtim_zero)
|
||||
cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
|
||||
|
||||
if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
|
||||
if (deliver_cab)
|
||||
cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
|
||||
const struct wmi_wmm_params_arg *arg)
|
||||
void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
|
||||
const struct wmi_wmm_params_arg *arg)
|
||||
{
|
||||
params->cwmin = __cpu_to_le32(arg->cwmin);
|
||||
params->cwmax = __cpu_to_le32(arg->cwmax);
|
||||
@ -4727,7 +4934,7 @@ void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
|
||||
const struct wmi_pdev_set_wmm_params_arg *arg)
|
||||
const struct wmi_wmm_params_all_arg *arg)
|
||||
{
|
||||
struct wmi_pdev_set_wmm_params *cmd;
|
||||
struct sk_buff *skb;
|
||||
@ -4737,10 +4944,10 @@ ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
|
||||
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
|
||||
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
|
||||
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
|
||||
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
|
||||
ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
|
||||
ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
|
||||
ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
|
||||
ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
|
||||
return skb;
|
||||
@ -4784,7 +4991,8 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable)
|
||||
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
|
||||
u32 log_level)
|
||||
{
|
||||
struct wmi_dbglog_cfg_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
@ -4797,7 +5005,7 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable)
|
||||
cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
|
||||
|
||||
if (module_enable) {
|
||||
cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
|
||||
cfg = SM(log_level,
|
||||
ATH10K_DBGLOG_CFG_LOG_LVL);
|
||||
} else {
|
||||
/* set back defaults, all modules with WARN level */
|
||||
@ -4877,6 +5085,109 @@ ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
|
||||
const u8 *mac)
|
||||
{
|
||||
struct wmi_addba_clear_resp_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!mac)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
ether_addr_copy(cmd->peer_macaddr.addr, mac);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
|
||||
vdev_id, mac);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
||||
u32 tid, u32 buf_size)
|
||||
{
|
||||
struct wmi_addba_send_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!mac)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cmd = (struct wmi_addba_send_cmd *)skb->data;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
ether_addr_copy(cmd->peer_macaddr.addr, mac);
|
||||
cmd->tid = __cpu_to_le32(tid);
|
||||
cmd->buffersize = __cpu_to_le32(buf_size);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
|
||||
vdev_id, mac, tid, buf_size);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
||||
u32 tid, u32 status)
|
||||
{
|
||||
struct wmi_addba_setresponse_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!mac)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
ether_addr_copy(cmd->peer_macaddr.addr, mac);
|
||||
cmd->tid = __cpu_to_le32(tid);
|
||||
cmd->statuscode = __cpu_to_le32(status);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
|
||||
vdev_id, mac, tid, status);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
||||
u32 tid, u32 initiator, u32 reason)
|
||||
{
|
||||
struct wmi_delba_send_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!mac)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cmd = (struct wmi_delba_send_cmd *)skb->data;
|
||||
cmd->vdev_id = __cpu_to_le32(vdev_id);
|
||||
ether_addr_copy(cmd->peer_macaddr.addr, mac);
|
||||
cmd->tid = __cpu_to_le32(tid);
|
||||
cmd->initiator = __cpu_to_le32(initiator);
|
||||
cmd->reasoncode = __cpu_to_le32(reason);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
|
||||
vdev_id, mac, tid, initiator, reason);
|
||||
return skb;
|
||||
}
|
||||
|
||||
static const struct wmi_ops wmi_ops = {
|
||||
.rx = ath10k_wmi_op_rx,
|
||||
.map_svc = wmi_main_svc_map,
|
||||
@ -4909,6 +5220,7 @@ static const struct wmi_ops wmi_ops = {
|
||||
.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
|
||||
.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
|
||||
.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
|
||||
/* .gen_vdev_wmm_conf not implemented */
|
||||
.gen_peer_create = ath10k_wmi_op_gen_peer_create,
|
||||
.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
|
||||
.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
|
||||
@ -4928,6 +5240,13 @@ static const struct wmi_ops wmi_ops = {
|
||||
.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
|
||||
.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
|
||||
/* .gen_pdev_get_temperature not implemented */
|
||||
.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
|
||||
.gen_addba_send = ath10k_wmi_op_gen_addba_send,
|
||||
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
|
||||
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
|
||||
/* .gen_bcn_tmpl not implemented */
|
||||
/* .gen_prb_tmpl not implemented */
|
||||
/* .gen_p2p_go_bcn_ie not implemented */
|
||||
};
|
||||
|
||||
static const struct wmi_ops wmi_10_1_ops = {
|
||||
@ -4965,6 +5284,7 @@ static const struct wmi_ops wmi_10_1_ops = {
|
||||
.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
|
||||
.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
|
||||
.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
|
||||
/* .gen_vdev_wmm_conf not implemented */
|
||||
.gen_peer_create = ath10k_wmi_op_gen_peer_create,
|
||||
.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
|
||||
.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
|
||||
@ -4982,10 +5302,18 @@ static const struct wmi_ops wmi_10_1_ops = {
|
||||
.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
|
||||
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
};

static const struct wmi_ops wmi_10_2_ops = {
	.rx = ath10k_wmi_10_2_op_rx,
	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_2_op_gen_init,
	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
	/* .gen_pdev_get_temperature not implemented */
@ -4993,7 +5321,6 @@ static const struct wmi_ops wmi_10_2_ops = {
	/* shared with 10.1 */
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,

@ -5020,6 +5347,7 @@ static const struct wmi_ops wmi_10_2_ops = {
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
@ -5037,10 +5365,15 @@ static const struct wmi_ops wmi_10_2_ops = {
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
};

static const struct wmi_ops wmi_10_2_4_ops = {
	.rx = ath10k_wmi_10_2_op_rx,
	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_2_op_gen_init,
	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
@ -5048,7 +5381,6 @@ static const struct wmi_ops wmi_10_2_4_ops = {
	/* shared with 10.1 */
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,

@ -5092,6 +5424,13 @@ static const struct wmi_ops wmi_10_2_4_ops = {
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
};
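For orientation, the per-firmware tables above are not called directly; callers go through thin inline wrappers that pick the op for the running firmware branch and send the resulting buffer. A minimal sketch of such a wrapper is shown below; the wrapper name, the ar->wmi.cmd map member and the exact ath10k_wmi_cmd_send() signature are assumptions for illustration, not part of this commit.

/* Sketch only: dispatching a WMI op through the per-firmware ops table.
 * Wrapper name and cmd-map field are illustrative assumptions. */
static inline int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}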

int ath10k_wmi_attach(struct ath10k *ar)

@ -551,6 +551,7 @@ struct wmi_cmd_map {
	u32 gpio_config_cmdid;
	u32 gpio_output_cmdid;
	u32 pdev_get_temperature_cmdid;
	u32 vdev_set_wmm_params_cmdid;
};

/*
@ -2939,14 +2940,14 @@ struct wmi_wmm_params_arg {
	u32 no_ack;
};

struct wmi_pdev_set_wmm_params_arg {
struct wmi_wmm_params_all_arg {
	struct wmi_wmm_params_arg ac_be;
	struct wmi_wmm_params_arg ac_bk;
	struct wmi_wmm_params_arg ac_vi;
	struct wmi_wmm_params_arg ac_vo;
};

struct wal_dbg_tx_stats {
struct wmi_pdev_stats_tx {
	/* Num HTT cookies queued to dispatch list */
	__le32 comp_queued;

@ -3016,7 +3017,7 @@ struct wal_dbg_tx_stats {
	__le32 txop_ovf;
} __packed;

struct wal_dbg_rx_stats {
struct wmi_pdev_stats_rx {
	/* Cnts any change in ring routing mid-ppdu */
	__le32 mid_ppdu_route_change;

@ -3050,17 +3051,11 @@ struct wal_dbg_rx_stats {
	__le32 mpdu_errs;
} __packed;

struct wal_dbg_peer_stats {
struct wmi_pdev_stats_peer {
	/* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
	__le32 dummy;
} __packed;

struct wal_dbg_stats {
	struct wal_dbg_tx_stats tx;
	struct wal_dbg_rx_stats rx;
	struct wal_dbg_peer_stats peer;
} __packed;

enum wmi_stats_id {
	WMI_REQUEST_PEER_STAT = 0x01,
	WMI_REQUEST_AP_STAT = 0x02
@ -3127,23 +3122,38 @@ struct wmi_stats_event {
	u8 data[0];
} __packed;

struct wmi_10_2_stats_event {
	__le32 stats_id; /* %WMI_REQUEST_ */
	__le32 num_pdev_stats;
	__le32 num_pdev_ext_stats;
	__le32 num_vdev_stats;
	__le32 num_peer_stats;
	__le32 num_bcnflt_stats;
	u8 data[0];
} __packed;
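The 10.2 stats event above only carries record counts in its fixed header; the typed records follow back to back in data[]. A rough sketch of how a pull_fw_stats implementation might walk that payload is given here; buffer-length checks are reduced to skb_pull(), and the list bookkeeping on ath10k_fw_stats (the pdevs list and the dst->list member) is an assumption for illustration.

/* Sketch only: walking a wmi_10_2_stats_event payload.
 * Error handling and list fields are assumed/simplified. */
static int ath10k_wmi_10_2_pull_fw_stats_sketch(struct ath10k *ar,
						struct sk_buff *skb,
						struct ath10k_fw_stats *stats)
{
	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
	u32 num_pdev_stats;
	u32 i;

	if (!skb_pull(skb, sizeof(*ev)))
		return -EPROTO;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);

	for (i = 0; i < num_pdev_stats; i++) {
		/* 10.2 pdev record layout, defined further down in this change */
		const struct wmi_10_2_pdev_stats *src = (void *)skb->data;
		struct ath10k_fw_stats_pdev *dst;

		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	/* vdev, peer and ext records would be consumed the same way */
	return 0;
}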

/*
 * PDEV statistics
 * TODO: add all PDEV stats here
 */
struct wmi_pdev_stats {
	__le32 chan_nf;        /* Channel noise floor        */
	__le32 tx_frame_count; /* TX frame count             */
	__le32 rx_frame_count; /* RX frame count             */
	__le32 rx_clear_count; /* rx clear count             */
	__le32 cycle_count;    /* cycle count                */
	__le32 phy_err_count;  /* Phy error count            */
	__le32 chan_tx_pwr;    /* channel tx power           */
	struct wal_dbg_stats wal; /* WAL dbg stats           */
struct wmi_pdev_stats_base {
	__le32 chan_nf;
	__le32 tx_frame_count;
	__le32 rx_frame_count;
	__le32 rx_clear_count;
	__le32 cycle_count;
	__le32 phy_err_count;
	__le32 chan_tx_pwr;
} __packed;

struct wmi_10x_pdev_stats {
	struct wmi_pdev_stats old;
struct wmi_pdev_stats {
	struct wmi_pdev_stats_base base;
	struct wmi_pdev_stats_tx tx;
	struct wmi_pdev_stats_rx rx;
	struct wmi_pdev_stats_peer peer;
} __packed;

struct wmi_pdev_stats_extra {
	__le32 ack_rx_bad;
	__le32 rts_bad;
	__le32 rts_good;
@ -3152,6 +3162,30 @@ struct wmi_10x_pdev_stats {
	__le32 mib_int_count;
} __packed;

struct wmi_10x_pdev_stats {
	struct wmi_pdev_stats_base base;
	struct wmi_pdev_stats_tx tx;
	struct wmi_pdev_stats_rx rx;
	struct wmi_pdev_stats_peer peer;
	struct wmi_pdev_stats_extra extra;
} __packed;

struct wmi_pdev_stats_mem {
	__le32 dram_free;
	__le32 iram_free;
} __packed;

struct wmi_10_2_pdev_stats {
	struct wmi_pdev_stats_base base;
	struct wmi_pdev_stats_tx tx;
	__le32 mc_drop;
	struct wmi_pdev_stats_rx rx;
	__le32 pdev_rx_timeout;
	struct wmi_pdev_stats_mem mem;
	struct wmi_pdev_stats_peer peer;
	struct wmi_pdev_stats_extra extra;
} __packed;
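With the monolithic wmi_pdev_stats split into base/tx/rx/peer blocks (plus the 10.x extra and 10.2 mem blocks), each firmware branch can describe exactly the blob it sends while the conversion into the host-side ath10k_fw_stats_pdev stays shared. A sketch of the base converter declared near the end of this change follows; the destination member names on ath10k_fw_stats_pdev are assumptions here.

/* Sketch only: copying the common base block into the host stats struct.
 * ath10k_fw_stats_pdev member names are assumed for illustration. */
void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
				     struct ath10k_fw_stats_pdev *dst)
{
	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
	dst->cycle_count = __le32_to_cpu(src->cycle_count);
	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
}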

/*
 * VDEV statistics
 * TODO: add all VDEV stats here
@ -3175,6 +3209,32 @@ struct wmi_10x_peer_stats {
	__le32 peer_rx_rate;
} __packed;

struct wmi_10_2_peer_stats {
	struct wmi_peer_stats old;
	__le32 peer_rx_rate;
	__le32 current_per;
	__le32 retries;
	__le32 tx_rate_count;
	__le32 max_4ms_frame_len;
	__le32 total_sub_frames;
	__le32 tx_bytes;
	__le32 num_pkt_loss_overflow[4];
	__le32 num_pkt_loss_excess_retry[4];
} __packed;

struct wmi_10_2_4_peer_stats {
	struct wmi_10_2_peer_stats common;
	__le32 unknown_value; /* FIXME: what is this word? */
} __packed;

struct wmi_10_2_pdev_ext_stats {
	__le32 rx_rssi_comb;
	__le32 rx_rssi[4];
	__le32 rx_mcs[10];
	__le32 tx_mcs[10];
	__le32 ack_rssi;
} __packed;

struct wmi_vdev_create_cmd {
	__le32 vdev_id;
	__le32 vdev_type;
@ -4060,6 +4120,30 @@ enum wmi_sta_ps_param_uapsd {
	WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
};

#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX

struct wmi_sta_uapsd_auto_trig_param {
	__le32 wmm_ac;
	__le32 user_priority;
	__le32 service_interval;
	__le32 suspend_interval;
	__le32 delay_interval;
};

struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
	__le32 vdev_id;
	struct wmi_mac_addr peer_macaddr;
	__le32 num_ac;
};

struct wmi_sta_uapsd_auto_trig_arg {
	u32 wmm_ac;
	u32 user_priority;
	u32 service_interval;
	u32 suspend_interval;
	u32 delay_interval;
};

enum wmi_sta_powersave_param {
	/*
	 * Controls how frames are retrievd from AP while STA is sleeping
@ -4430,7 +4514,7 @@ struct wmi_peer_set_q_empty_callback_cmd {
#define WMI_PEER_SPATIAL_MUX 0x00200000
#define WMI_PEER_VHT     0x02000000
#define WMI_PEER_80MHZ   0x04000000
#define WMI_PEER_PMF     0x08000000
#define WMI_PEER_VHT_2G  0x08000000

/*
 * Peer rate capabilities.
@ -4581,6 +4665,11 @@ enum wmi_sta_keepalive_method {
	WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
};

#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0

/* Firmware crashes if keepalive interval exceeds this limit */
#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff

/* note: ip4 addresses are in network byte order, i.e. big endian */
struct wmi_sta_keepalive_arp_resp {
	__be32 src_ip4_addr;
@ -4596,6 +4685,16 @@ struct wmi_sta_keepalive_cmd {
	struct wmi_sta_keepalive_arp_resp arp_resp;
} __packed;

struct wmi_sta_keepalive_arg {
	u32 vdev_id;
	u32 enabled;
	u32 method;
	u32 interval;
	__be32 src_ip4_addr;
	__be32 dest_ip4_addr;
	const u8 dest_mac_addr[ETH_ALEN];
};
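The host-order wmi_sta_keepalive_arg above is what mac80211-facing code fills in; the per-firmware gen op then packs it into the wire-format wmi_sta_keepalive_cmd. A minimal usage sketch follows, assuming a dispatch wrapper named ath10k_wmi_sta_keepalive() exists on top of the ops table and that the keepalive-method enum also has a null-frame value alongside the ARP-response value shown above.

/* Sketch only: enabling a NULL-frame keepalive every 30 seconds.
 * The wrapper name and WMI_STA_KEEPALIVE_METHOD_NULL_FRAME are assumptions. */
static int ath10k_sta_keepalive_enable_sketch(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_sta_keepalive_arg arg = {
		.vdev_id = vdev_id,
		.enabled = 1,
		.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
		.interval = 30, /* seconds; must stay below
				 * WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS */
	};

	return ath10k_wmi_sta_keepalive(ar, &arg);
}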

enum wmi_force_fw_hang_type {
	WMI_FORCE_FW_HANG_ASSERT = 1,
	WMI_FORCE_FW_HANG_NO_DETECT,
@ -4772,16 +4871,22 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
			       u32 cmd_id);
void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);

void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
				struct ath10k_fw_stats_pdev *dst);
void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
				     struct ath10k_fw_stats_pdev *dst);
void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
				   struct ath10k_fw_stats_pdev *dst);
void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
				   struct ath10k_fw_stats_pdev *dst);
void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
				      struct ath10k_fw_stats_pdev *dst);
void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
				struct ath10k_fw_stats_peer *dst);
void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
				    struct wmi_host_mem_chunks *chunks);
void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
				      const struct wmi_start_scan_arg *arg);
void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
				   const struct wmi_wmm_params_arg *arg);
void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
			      const struct wmi_wmm_params_arg *arg);
void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
				const struct wmi_channel_arg *arg);
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);